index
int64 | repo_id
string | file_path
string | content
string |
|---|---|---|---|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/Word2VecModelAggregateMethod.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
/**
 * Method used to aggregate a sequence of word vectors into one vector
 * (see the {@code aggregate_method} parameter of the Word2Vec transform).
 */
public enum Word2VecModelAggregateMethod {
    /** Combine the word vectors of a sequence by averaging them. */
    AVERAGE,
    /** No aggregation; word vectors are kept as-is. */
    NONE,
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/Word2VecModelOutputV3.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
import com.google.gson.Gson;
import com.google.gson.annotations.*;
/** Output schema of a trained Word2Vec model. */
public class Word2VecModelOutputV3 extends ModelOutputSchemaV3 {

    /** Number of epochs executed. */
    public int epochs;

    /* Inherited from ModelOutputSchemaV3 (see base class for details):
     * column names/types and domains, cross-validation models/predictions/
     * fold assignments, model category and summary, scoring history, CV
     * scoring history, reproducibility table, training/validation/CV metrics
     * and CV metrics summary, job status, start/end/run time, default
     * prediction threshold, and per-field help map. */

    /**
     * Public constructor. Initializes this schema's own field and the
     * inherited status/timing fields to their defaults.
     */
    public Word2VecModelOutputV3() {
        this.epochs = 0;
        this.defaultThreshold = 0.0;
        this.runTime = 0L;
        this.endTime = 0L;
        this.startTime = 0L;
        this.status = "";
    }

    /** Return the contents of this object as a JSON String. */
    @Override
    public String toString() {
        return new Gson().toJson(this);
    }
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/Word2VecModelV3.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
import com.google.gson.Gson;
import com.google.gson.annotations.*;
/** Schema for a Word2Vec model (parameters plus output). */
public class Word2VecModelV3 extends ModelSchemaV3<Word2VecParametersV3, Word2VecModelOutputV3> {

    /* Inherited from ModelSchemaV3 (see base class for details): build
     * parameters, build output, compatible frames, checksum, model key,
     * algo name and pretty algo name, response/treatment column names,
     * training frame key, completion timestamp, and POJO/MOJO export
     * indicators. */

    /**
     * Public constructor. Initializes the inherited scalar fields to
     * their defaults.
     */
    public Word2VecModelV3() {
        this.haveMojo = false;
        this.havePojo = false;
        this.timestamp = 0L;
        this.treatmentColumnName = "";
        this.responseColumnName = "";
        this.algoFullName = "";
        this.algo = "";
        this.checksum = 0L;
    }

    /** Return the contents of this object as a JSON String. */
    @Override
    public String toString() {
        return new Gson().toJson(this);
    }
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/Word2VecNormModel.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
/**
 * Normalization model for Word2Vec training (the {@code norm_model}
 * parameter, documented as "Use Hierarchical Softmax").
 */
public enum Word2VecNormModel {
    /** Hierarchical Softmax. */
    HSM,
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/Word2VecParametersV3.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
import com.google.gson.Gson;
import com.google.gson.annotations.*;
/** Builder parameters for the Word2Vec algorithm. */
public class Word2VecParametersV3 extends ModelParametersSchemaV3 {

    /** Size (dimensionality) of the trained word vectors. */
    @SerializedName("vec_size")
    public int vecSize;

    /** Maximum skip length between words. */
    @SerializedName("window_size")
    public int windowSize;

    /**
     * Occurrence threshold for words: words that appear with higher
     * frequency in the training data will be randomly down-sampled.
     * Useful range is (0, 1e-5).
     */
    @SerializedName("sent_sample_rate")
    public float sentSampleRate;

    /** Use Hierarchical Softmax. */
    @SerializedName("norm_model")
    public Word2VecNormModel normModel;

    /** Number of training iterations to run. */
    public int epochs;

    /** Words appearing fewer than this many times are discarded. */
    @SerializedName("min_word_freq")
    public int minWordFreq;

    /** Starting learning rate. */
    @SerializedName("init_learning_rate")
    public float initLearningRate;

    /** The word model to use (SkipGram or CBOW). */
    @SerializedName("word_model")
    public Word2VecWordModel wordModel;

    /** Id of a data frame containing a pre-trained (external) word2vec model. */
    @SerializedName("pre_trained")
    public FrameKeyV3 preTrained;

    /* Inherited from ModelParametersSchemaV3 (see base class for details):
     * model/training/validation frame ids; cross-validation settings
     * (nfolds, keep-CV flags, parallelization, fold column/assignment);
     * distribution family with tweedie/quantile/huber tuning; column roles
     * (response, weights, offset); categorical encoding and level limits;
     * ignored/constant columns; scoring and checkpointing options; early
     * stopping (rounds, metric, tolerance); max runtime; gains/lift bins;
     * custom metric/distribution functions; checkpoint export dir; and
     * multinomial AUC type. */

    /**
     * Public constructor. Sets the Word2Vec-specific defaults, then the
     * inherited defaults shared by all model parameter schemas.
     */
    public Word2VecParametersV3() {
        // Word2Vec-specific defaults.
        this.vecSize = 100;
        this.windowSize = 5;
        this.sentSampleRate = 0.001f;
        this.normModel = Word2VecNormModel.HSM;
        this.epochs = 5;
        this.minWordFreq = 5;
        this.initLearningRate = 0.025f;
        this.wordModel = Word2VecWordModel.SkipGram;

        // Inherited defaults.
        this.nfolds = 0;
        this.keepCrossValidationModels = true;
        this.keepCrossValidationPredictions = false;
        this.keepCrossValidationFoldAssignment = false;
        this.parallelizeCrossValidation = true;
        this.distribution = GenmodelutilsDistributionFamily.AUTO;
        this.tweediePower = 1.5;
        this.quantileAlpha = 0.5;
        this.huberAlpha = 0.9;
        this.foldAssignment = ModelParametersFoldAssignmentScheme.AUTO;
        this.categoricalEncoding = ModelParametersCategoricalEncodingScheme.AUTO;
        this.maxCategoricalLevels = 10;
        this.ignoreConstCols = true;
        this.scoreEachIteration = false;
        this.stoppingRounds = 0;
        this.maxRuntimeSecs = 0.0;
        this.stoppingMetric = ScoreKeeperStoppingMetric.AUTO;
        this.stoppingTolerance = 0.001;
        this.gainsliftBins = -1;
        this.customMetricFunc = "";
        this.customDistributionFunc = "";
        this.exportCheckpointsDir = "";
        this.aucType = MultinomialAucType.AUTO;
    }

    /** Return the contents of this object as a JSON String. */
    @Override
    public String toString() {
        return new Gson().toJson(this);
    }
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/Word2VecSynonymsV3.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
import com.google.gson.Gson;
import com.google.gson.annotations.*;
/** Request/response schema for the Word2Vec synonyms endpoint. */
public class Word2VecSynonymsV3 extends SchemaV3 {

    /** Source word2vec Model. */
    public ModelKeyV3 model;

    /** Target word to find synonyms for. */
    public String word;

    /** Number of synonyms. */
    public int count;

    /** Synonymous words. */
    public String[] synonyms;

    /** Similarity scores. */
    public double[] scores;

    /** Public constructor. Initializes scalar fields to their defaults. */
    public Word2VecSynonymsV3() {
        this.count = 0;
        this.word = "";
    }

    /** Return the contents of this object as a JSON String. */
    @Override
    public String toString() {
        return new Gson().toJson(this);
    }
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/Word2VecTransformV3.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
import com.google.gson.Gson;
import com.google.gson.annotations.*;
/** Request/response schema for the Word2Vec transform endpoint. */
public class Word2VecTransformV3 extends SchemaV3 {

    /** Source word2vec Model. */
    public ModelKeyV3 model;

    /** Words Frame. */
    @SerializedName("words_frame")
    public FrameKeyV3 wordsFrame;

    /** Method of aggregating word-vector sequences into a single vector. */
    @SerializedName("aggregate_method")
    public Word2VecModelAggregateMethod aggregateMethod;

    /** Word Vectors Frame. */
    @SerializedName("vectors_frame")
    public FrameKeyV3 vectorsFrame;

    /** Public constructor; all fields start at their Java defaults. */
    public Word2VecTransformV3() {
    }

    /** Return the contents of this object as a JSON String. */
    @Override
    public String toString() {
        return new Gson().toJson(this);
    }
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/Word2VecV3.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
import com.google.gson.Gson;
import com.google.gson.annotations.*;
/** Model-builder schema for the Word2Vec algorithm. */
public class Word2VecV3 extends ModelBuilderSchema<Word2VecParametersV3> {

    /* Inherited from ModelBuilderSchema (see base class for details):
     * builder parameters, algo name and pretty algo name, buildable model
     * categories, supervised indicator, builder visibility, job key,
     * parameter validation messages and error count, HTTP status for the
     * build, and the _exclude_fields filter string. */

    /**
     * Public constructor. Initializes the inherited scalar fields to
     * their defaults.
     */
    public Word2VecV3() {
        this._excludeFields = "";
        this.__httpStatus = 0;
        this.errorCount = 0;
        this.supervised = false;
        this.algoFullName = "";
        this.algo = "";
    }

    /** Return the contents of this object as a JSON String. */
    @Override
    public String toString() {
        return new Gson().toJson(this);
    }
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/Word2VecWordModel.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
/**
 * Word model used for Word2Vec training (the {@code word_model}
 * parameter: "The word model to use (SkipGram or CBOW)").
 */
public enum Word2VecWordModel {
    /** Continuous bag-of-words. */
    CBOW,
    /** Skip-gram. */
    SkipGram,
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/XGBoostExecReqV3.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
import com.google.gson.Gson;
import com.google.gson.annotations.*;
/** Request envelope for the XGBoost exec endpoint. */
public class XGBoostExecReqV3 {

    /** Identifier. */
    public KeyV3 key;

    /** Arbitrary request data stored as Base64 encoded binary. */
    public String data;

    /** Public constructor. Initializes {@link #data} to the empty string. */
    public XGBoostExecReqV3() {
        this.data = "";
    }

    /** Return the contents of this object as a JSON String. */
    @Override
    public String toString() {
        return new Gson().toJson(this);
    }
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/XGBoostExecRespV3.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
import com.google.gson.Gson;
import com.google.gson.annotations.*;
/** Response envelope for the XGBoost exec endpoint. */
public class XGBoostExecRespV3 {

    /** Identifier. */
    public KeyV3 key;

    /** Arbitrary response data stored as Base64 encoded binary. */
    public String data;

    /** Public constructor. Initializes {@link #data} to the empty string. */
    public XGBoostExecRespV3() {
        this.data = "";
    }

    /** Return the contents of this object as a JSON String. */
    @Override
    public String toString() {
        return new Gson().toJson(this);
    }
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/XGBoostModelOutputV3.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
import com.google.gson.Gson;
import com.google.gson.annotations.*;
/** Output schema of a trained XGBoost model. */
public class XGBoostModelOutputV3 extends ModelOutputSchemaV3 {

    /** Variable Importances. */
    @SerializedName("variable_importances")
    public TwoDimTableV3 variableImportances;

    /** Variable Importances - Cover. */
    @SerializedName("variable_importances_cover")
    public TwoDimTableV3 variableImportancesCover;

    /** Variable Importances - Frequency. */
    @SerializedName("variable_importances_frequency")
    public TwoDimTableV3 variableImportancesFrequency;

    /** XGBoost Native Parameters. */
    @SerializedName("native_parameters")
    public TwoDimTableV3 nativeParameters;

    /** Sparse. */
    public boolean sparse;

    /* Inherited from ModelOutputSchemaV3 (see base class for details):
     * column names/types and domains, cross-validation models/predictions/
     * fold assignments, model category and summary, scoring history, CV
     * scoring history, reproducibility table, training/validation/CV metrics
     * and CV metrics summary, job status, start/end/run time, default
     * prediction threshold, and per-field help map. */

    /**
     * Public constructor. Initializes this schema's own flag and the
     * inherited status/timing fields to their defaults.
     */
    public XGBoostModelOutputV3() {
        this.sparse = false;
        this.defaultThreshold = 0.0;
        this.runTime = 0L;
        this.endTime = 0L;
        this.startTime = 0L;
        this.status = "";
    }

    /** Return the contents of this object as a JSON String. */
    @Override
    public String toString() {
        return new Gson().toJson(this);
    }
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/XGBoostModelV3.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
import com.google.gson.Gson;
import com.google.gson.annotations.*;
/** Schema for an XGBoost model (parameters plus output). */
public class XGBoostModelV3 extends ModelSchemaV3<XGBoostParametersV3, XGBoostModelOutputV3> {

    /* Inherited from ModelSchemaV3 (see base class for details): build
     * parameters, build output, compatible frames, checksum, model key,
     * algo name and pretty algo name, response/treatment column names,
     * training frame key, completion timestamp, and POJO/MOJO export
     * indicators. */

    /**
     * Public constructor. Initializes the inherited scalar fields to
     * their defaults.
     */
    public XGBoostModelV3() {
        this.haveMojo = false;
        this.havePojo = false;
        this.timestamp = 0L;
        this.treatmentColumnName = "";
        this.responseColumnName = "";
        this.algoFullName = "";
        this.algo = "";
        this.checksum = 0L;
    }

    /** Return the contents of this object as a JSON String. */
    @Override
    public String toString() {
        return new Gson().toJson(this);
    }
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/XGBoostParametersV3.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
import com.google.gson.Gson;
import com.google.gson.annotations.*;
public class XGBoostParametersV3 extends ModelParametersSchemaV3 {
/**
* (same as n_estimators) Number of trees.
*/
public int ntrees;
/**
* Maximum tree depth (0 for unlimited).
*/
@SerializedName("max_depth")
public int maxDepth;
/**
* (same as min_child_weight) Fewest allowed (weighted) observations in a leaf.
*/
@SerializedName("min_rows")
public double minRows;
/**
* (same as min_rows) Fewest allowed (weighted) observations in a leaf.
*/
@SerializedName("min_child_weight")
public double minChildWeight;
/**
* (same as eta) Learning rate (from 0.0 to 1.0)
*/
@SerializedName("learn_rate")
public double learnRate;
/**
* (same as learn_rate) Learning rate (from 0.0 to 1.0)
*/
public double eta;
/**
* (same as subsample) Row sample rate per tree (from 0.0 to 1.0)
*/
@SerializedName("sample_rate")
public double sampleRate;
/**
* (same as sample_rate) Row sample rate per tree (from 0.0 to 1.0)
*/
public double subsample;
/**
* (same as colsample_bylevel) Column sample rate (from 0.0 to 1.0)
*/
@SerializedName("col_sample_rate")
public double colSampleRate;
/**
* (same as col_sample_rate) Column sample rate (from 0.0 to 1.0)
*/
@SerializedName("colsample_bylevel")
public double colsampleBylevel;
/**
* (same as colsample_bytree) Column sample rate per tree (from 0.0 to 1.0)
*/
@SerializedName("col_sample_rate_per_tree")
public double colSampleRatePerTree;
/**
* (same as col_sample_rate_per_tree) Column sample rate per tree (from 0.0 to 1.0)
*/
@SerializedName("colsample_bytree")
public double colsampleBytree;
/**
* Column sample rate per tree node (from 0.0 to 1.0)
*/
@SerializedName("colsample_bynode")
public double colsampleBynode;
/**
* A mapping representing monotonic constraints. Use +1 to enforce an increasing constraint and -1 to specify a
* decreasing constraint.
*/
@SerializedName("monotone_constraints")
public KeyValueV3[] monotoneConstraints;
/**
* (same as max_delta_step) Maximum absolute value of a leaf node prediction
*/
@SerializedName("max_abs_leafnode_pred")
public float maxAbsLeafnodePred;
/**
* (same as max_abs_leafnode_pred) Maximum absolute value of a leaf node prediction
*/
@SerializedName("max_delta_step")
public float maxDeltaStep;
/**
* Score the model after every so many trees. Disabled if set to 0.
*/
@SerializedName("score_tree_interval")
public int scoreTreeInterval;
/**
* Seed for pseudo random number generator (if applicable)
*/
public long seed;
/**
* (same as gamma) Minimum relative improvement in squared error reduction for a split to happen
*/
@SerializedName("min_split_improvement")
public float minSplitImprovement;
/**
* (same as min_split_improvement) Minimum relative improvement in squared error reduction for a split to happen
*/
public float gamma;
/**
* Number of parallel threads that can be used to run XGBoost. Cannot exceed H2O cluster limits (-nthreads
* parameter). Defaults to maximum available
*/
public int nthread;
/**
* Run on one node only; no network overhead but fewer cpus used. Suitable for small datasets.
*/
@SerializedName("build_tree_one_node")
public boolean buildTreeOneNode;
/**
* Directory where to save matrices passed to XGBoost library. Useful for debugging.
*/
@SerializedName("save_matrix_directory")
public String saveMatrixDirectory;
/**
* Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class probabilities. Calibration can
* provide more accurate estimates of class probabilities.
*/
@SerializedName("calibrate_model")
public boolean calibrateModel;
/**
* Data for model calibration
*/
@SerializedName("calibration_frame")
public FrameKeyV3 calibrationFrame;
/**
* Calibration method to use
*/
@SerializedName("calibration_method")
public TreeCalibrationHelperCalibrationMethod calibrationMethod;
/**
* For tree_method=hist only: maximum number of bins
*/
@SerializedName("max_bins")
public int maxBins;
/**
* For tree_method=hist only: maximum number of leaves
*/
@SerializedName("max_leaves")
public int maxLeaves;
/**
* Tree method
*/
@SerializedName("tree_method")
public TreexgboostXGBoostModelXGBoostParametersTreeMethod treeMethod;
/**
* Grow policy - depthwise is standard GBM, lossguide is LightGBM
*/
@SerializedName("grow_policy")
public TreexgboostXGBoostModelXGBoostParametersGrowPolicy growPolicy;
/**
* Booster type
*/
public TreexgboostXGBoostModelXGBoostParametersBooster booster;
/**
* L2 regularization
*/
@SerializedName("reg_lambda")
public float regLambda;
/**
* L1 regularization
*/
@SerializedName("reg_alpha")
public float regAlpha;
/**
* Enable quiet mode
*/
@SerializedName("quiet_mode")
public boolean quietMode;
/**
* For booster=dart only: sample_type
*/
@SerializedName("sample_type")
public TreexgboostXGBoostModelXGBoostParametersDartSampleType sampleType;
/**
* For booster=dart only: normalize_type
*/
@SerializedName("normalize_type")
public TreexgboostXGBoostModelXGBoostParametersDartNormalizeType normalizeType;
/**
* For booster=dart only: rate_drop (0..1)
*/
@SerializedName("rate_drop")
public float rateDrop;
/**
* For booster=dart only: one_drop
*/
@SerializedName("one_drop")
public boolean oneDrop;
/**
* For booster=dart only: skip_drop (0..1)
*/
@SerializedName("skip_drop")
public float skipDrop;
/**
* Type of DMatrix. For sparse, NAs and 0 are treated equally.
*/
@SerializedName("dmatrix_type")
public TreexgboostXGBoostModelXGBoostParametersDMatrixType dmatrixType;
/**
* Backend. By default (auto), a GPU is used if available.
*/
public TreexgboostXGBoostModelXGBoostParametersBackend backend;
/**
* Which GPU(s) to use.
*/
@SerializedName("gpu_id")
public int[] gpuId;
/**
* A set of allowed column interactions.
*/
@SerializedName("interaction_constraints")
public String[][] interactionConstraints;
/**
* Controls the effect of observations with positive labels in relation to the observations with negative labels on
* gradient calculation. Useful for imbalanced problems.
*/
@SerializedName("scale_pos_weight")
public float scalePosWeight;
/**
* Specification of evaluation metric that will be passed to the native XGBoost backend.
*/
@SerializedName("eval_metric")
public String evalMetric;
/**
* If enabled, score only the evaluation metric. This can make model training faster if scoring is frequent (eg.
* each iteration).
*/
@SerializedName("score_eval_metric_only")
public boolean scoreEvalMetricOnly;
/*------------------------------------------------------------------------------------------------------------------
// INHERITED
//------------------------------------------------------------------------------------------------------------------
// Destination id for this model; auto-generated if not specified.
public ModelKeyV3 modelId;
// Id of the training data frame.
public FrameKeyV3 trainingFrame;
// Id of the validation data frame.
public FrameKeyV3 validationFrame;
// Number of folds for K-fold cross-validation (0 to disable or >= 2).
public int nfolds;
// Whether to keep the cross-validation models.
public boolean keepCrossValidationModels;
// Whether to keep the predictions of the cross-validation models.
public boolean keepCrossValidationPredictions;
// Whether to keep the cross-validation fold assignment.
public boolean keepCrossValidationFoldAssignment;
// Allow parallel training of cross-validation models
public boolean parallelizeCrossValidation;
// Distribution function
public GenmodelutilsDistributionFamily distribution;
// Tweedie power for Tweedie regression, must be between 1 and 2.
public double tweediePower;
// Desired quantile for Quantile regression, must be between 0 and 1.
public double quantileAlpha;
// Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be between 0 and 1).
public double huberAlpha;
// Response variable column.
public ColSpecifierV3 responseColumn;
// Column with observation weights. Giving some observation a weight of zero is equivalent to excluding it from the
// dataset; giving an observation a relative weight of 2 is equivalent to repeating that row twice. Negative weights
// are not allowed. Note: Weights are per-row observation weights and do not increase the size of the data frame.
// This is typically the number of times a row is repeated, but non-integer values are supported as well. During
// training, rows with higher weights matter more, due to the larger loss function pre-factor. If you set weight = 0
// for a row, the returned prediction frame at that row is zero and this is incorrect. To get an accurate
// prediction, remove all rows with weight == 0.
public ColSpecifierV3 weightsColumn;
// Offset column. This will be added to the combination of columns before applying the link function.
public ColSpecifierV3 offsetColumn;
// Column with cross-validation fold index assignment per observation.
public ColSpecifierV3 foldColumn;
// Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified' option will stratify
// the folds based on the response variable, for classification problems.
public ModelParametersFoldAssignmentScheme foldAssignment;
// Encoding scheme for categorical features
public ModelParametersCategoricalEncodingScheme categoricalEncoding;
// For every categorical feature, only use this many most frequent categorical levels for model training. Only used
// for categorical_encoding == EnumLimited.
public int maxCategoricalLevels;
// Names of columns to ignore for training.
public String[] ignoredColumns;
// Ignore constant columns.
public boolean ignoreConstCols;
// Whether to score during each iteration of model training.
public boolean scoreEachIteration;
// Model checkpoint to resume training with.
public ModelKeyV3 checkpoint;
// Early stopping based on convergence of stopping_metric. Stop if simple moving average of length k of the
// stopping_metric does not improve for k:=stopping_rounds scoring events (0 to disable)
public int stoppingRounds;
// Maximum allowed runtime in seconds for model training. Use 0 to disable.
public double maxRuntimeSecs;
// Metric to use for early stopping (AUTO: logloss for classification, deviance for regression and anomaly_score for
// Isolation Forest). Note that custom and custom_increasing can only be used in GBM and DRF with the Python client.
public ScoreKeeperStoppingMetric stoppingMetric;
// Relative tolerance for metric-based stopping criterion (stop if relative improvement is not at least this much)
public double stoppingTolerance;
// Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic binning.
public int gainsliftBins;
// Reference to custom evaluation function, format: `language:keyName=funcName`
public String customMetricFunc;
// Reference to custom distribution, format: `language:keyName=funcName`
public String customDistributionFunc;
// Automatically export generated models to this directory.
public String exportCheckpointsDir;
// Set default multinomial AUC type.
public MultinomialAucType aucType;
*/
/**
 * Public constructor. Initializes every own and inherited parameter to the
 * server-side default value used by the H2O XGBoost builder.
 */
public XGBoostParametersV3() {
    // --- tree construction ---
    ntrees = 50;
    maxDepth = 6;
    minRows = 1.0;
    minChildWeight = 1.0;
    maxBins = 256;
    maxLeaves = 0;
    treeMethod = TreexgboostXGBoostModelXGBoostParametersTreeMethod.auto;
    growPolicy = TreexgboostXGBoostModelXGBoostParametersGrowPolicy.depthwise;
    booster = TreexgboostXGBoostModelXGBoostParametersBooster.gbtree;
    minSplitImprovement = 0.0f;
    gamma = 0.0f;
    maxAbsLeafnodePred = 0.0f;
    maxDeltaStep = 0.0f;
    // --- learning rate and sampling (H2O alias / native-name pairs share defaults) ---
    learnRate = 0.3;
    eta = 0.3;
    sampleRate = 1.0;
    subsample = 1.0;
    colSampleRate = 1.0;
    colsampleBylevel = 1.0;
    colSampleRatePerTree = 1.0;
    colsampleBytree = 1.0;
    colsampleBynode = 1.0;
    // --- regularization ---
    regLambda = 1.0f;
    regAlpha = 0.0f;
    // --- DART-specific ---
    sampleType = TreexgboostXGBoostModelXGBoostParametersDartSampleType.uniform;
    normalizeType = TreexgboostXGBoostModelXGBoostParametersDartNormalizeType.tree;
    rateDrop = 0.0f;
    oneDrop = false;
    skipDrop = 0.0f;
    // --- runtime / backend ---
    scoreTreeInterval = 0;
    seed = -1L;
    nthread = -1;
    buildTreeOneNode = false;
    saveMatrixDirectory = "";
    quietMode = true;
    dmatrixType = TreexgboostXGBoostModelXGBoostParametersDMatrixType.auto;
    backend = TreexgboostXGBoostModelXGBoostParametersBackend.auto;
    scalePosWeight = 1.0f;
    evalMetric = "";
    scoreEvalMetricOnly = false;
    // --- calibration ---
    calibrateModel = false;
    calibrationMethod = TreeCalibrationHelperCalibrationMethod.AUTO;
    // --- inherited model-builder parameters ---
    nfolds = 0;
    keepCrossValidationModels = true;
    keepCrossValidationPredictions = false;
    keepCrossValidationFoldAssignment = false;
    parallelizeCrossValidation = true;
    distribution = GenmodelutilsDistributionFamily.AUTO;
    tweediePower = 1.5;
    quantileAlpha = 0.5;
    huberAlpha = 0.9;
    foldAssignment = ModelParametersFoldAssignmentScheme.AUTO;
    categoricalEncoding = ModelParametersCategoricalEncodingScheme.AUTO;
    maxCategoricalLevels = 10;
    ignoreConstCols = true;
    scoreEachIteration = false;
    stoppingRounds = 0;
    maxRuntimeSecs = 0.0;
    stoppingMetric = ScoreKeeperStoppingMetric.AUTO;
    stoppingTolerance = 0.001;
    gainsliftBins = -1;
    customMetricFunc = "";
    customDistributionFunc = "";
    exportCheckpointsDir = "";
    aucType = MultinomialAucType.AUTO;
}
/**
 * Serializes this schema object to its JSON representation.
 */
@Override
public String toString() {
    final Gson gson = new Gson();
    return gson.toJson(this);
}
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/pojos/XGBoostV3.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.pojos;
import com.google.gson.Gson;
import com.google.gson.annotations.*;
/**
 * Schema for the XGBoost model builder endpoint; carries {@link XGBoostParametersV3}
 * plus the builder metadata inherited from {@code ModelBuilderSchema}.
 */
public class XGBoostV3 extends ModelBuilderSchema<XGBoostParametersV3> {

    /*------------------------------------------------------------------------------------------------------------------
    // INHERITED
    //------------------------------------------------------------------------------------------------------------------
    // Model builder parameters.
    public XGBoostParametersV3 parameters;
    // The algo name for this ModelBuilder.
    public String algo;
    // The pretty algo name for this ModelBuilder (e.g., Generalized Linear Model, rather than GLM).
    public String algoFullName;
    // Model categories this ModelBuilder can build.
    public ModelCategory[] canBuild;
    // Indicator whether the model is supervised or not.
    public boolean supervised;
    // Should the builder always be visible, be marked as beta, or only visible if the user starts up with the
    // experimental flag?
    public ModelBuilderBuilderVisibility visibility;
    // Job Key
    public JobV3 job;
    // Parameter validation messages
    public ValidationMessageV3[] messages;
    // Count of parameter validation errors
    public int errorCount;
    // HTTP status to return for this build.
    public int __httpStatus;
    // Comma-separated list of JSON field paths to exclude from the result, used like:
    // "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
    public String _excludeFields;
    */

    /**
     * Public constructor; gives the inherited string fields empty defaults and
     * zeroes the counters so the schema serializes cleanly before population.
     */
    public XGBoostV3() {
        supervised = false;
        errorCount = 0;
        __httpStatus = 0;
        algo = "";
        algoFullName = "";
        _excludeFields = "";
    }

    /**
     * Serializes this schema object to its JSON representation.
     */
    @Override
    public String toString() {
        final Gson gson = new Gson();
        return gson.toJson(this);
    }
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/About.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the H2O REST endpoint {@code /3/About}. */
public interface About {
/**
 * Return information about this H2O cluster.
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/3/About")
Call<AboutV3> get(@Query("_exclude_fields") String _exclude_fields);
// Convenience overload omitting all optional query parameters.
@GET("/3/About")
Call<AboutV3> get();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Assembly.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the H2O REST endpoints under {@code /99/Assembly}. */
public interface Assembly {
/**
 * Generate a MOJO 2 pipeline artifact from the Assembly
 * @param assembly_id The key of the Assembly object to retrieve from the DKV.
 * @param file_name The name of the file (and generated class in case of pojo)
 * @param steps A list of steps describing the assembly line.
 * @param frame Input Frame for the assembly.
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/99/Assembly.fetch_mojo_pipeline/{assembly_id}/{file_name}")
Call<StreamingSchema> fetchMojoPipeline(
  @Path("assembly_id") String assembly_id,
  @Path("file_name") String file_name,
  @Query("steps") String[] steps,
  @Query("frame") String frame,
  @Query("_exclude_fields") String _exclude_fields
);
// Convenience overload: path parameters only, optional query parameters omitted.
@GET("/99/Assembly.fetch_mojo_pipeline/{assembly_id}/{file_name}")
Call<StreamingSchema> fetchMojoPipeline(
  @Path("assembly_id") String assembly_id,
  @Path("file_name") String file_name
);
/**
 * Generate a Java POJO from the Assembly
 * @param assembly_id The key of the Assembly object to retrieve from the DKV.
 * @param file_name The name of the file (and generated class in case of pojo)
 * @param steps A list of steps describing the assembly line.
 * @param frame Input Frame for the assembly.
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/99/Assembly.java/{assembly_id}/{file_name}")
Call<AssemblyV99> toJava(
  @Path("assembly_id") String assembly_id,
  @Path("file_name") String file_name,
  @Query("steps") String[] steps,
  @Query("frame") String frame,
  @Query("_exclude_fields") String _exclude_fields
);
// Convenience overload: path parameters only, optional query parameters omitted.
@GET("/99/Assembly.java/{assembly_id}/{file_name}")
Call<AssemblyV99> toJava(
  @Path("assembly_id") String assembly_id,
  @Path("file_name") String file_name
);
/**
 * Fit an assembly to an input frame
 * @param steps A list of steps describing the assembly line.
 * @param frame Input Frame for the assembly.
 * @param file_name The name of the file (and generated class in case of pojo)
 * @param assembly_id The key of the Assembly object to retrieve from the DKV.
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@FormUrlEncoded
@POST("/99/Assembly")
Call<AssemblyV99> fit(
  @Field("steps") String[] steps,
  @Field("frame") String frame,
  @Field("file_name") String file_name,
  @Field("assembly_id") String assembly_id,
  @Field("_exclude_fields") String _exclude_fields
);
// Convenience overload with no form fields; server-side defaults apply.
@FormUrlEncoded
@POST("/99/Assembly")
Call<AssemblyV99> fit();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Capabilities.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the H2O capability-listing endpoints under {@code /3/Capabilities}. */
public interface Capabilities {
/**
 * List of all registered capabilities
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/3/Capabilities")
Call<CapabilitiesV3> listAll(@Query("_exclude_fields") String _exclude_fields);
// Convenience overload omitting all optional query parameters.
@GET("/3/Capabilities")
Call<CapabilitiesV3> listAll();
/**
 * List registered core capabilities
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/3/Capabilities/Core")
Call<CapabilitiesV3> listCore(@Query("_exclude_fields") String _exclude_fields);
// Convenience overload omitting all optional query parameters.
@GET("/3/Capabilities/Core")
Call<CapabilitiesV3> listCore();
/**
 * List of all registered Rest API capabilities
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/3/Capabilities/API")
Call<CapabilitiesV3> listRest(@Query("_exclude_fields") String _exclude_fields);
// Convenience overload omitting all optional query parameters.
@GET("/3/Capabilities/API")
Call<CapabilitiesV3> listRest();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Cloud.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the H2O cloud-status endpoints under {@code /3/Cloud}. */
public interface Cloud {
/**
 * Determine the status of the nodes in the H2O cloud.
 * @param skip_ticks skip_ticks
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/3/Cloud")
Call<CloudV3> status(
  @Query("skip_ticks") boolean skip_ticks,
  @Query("_exclude_fields") String _exclude_fields
);
// Convenience overload omitting all optional query parameters.
@GET("/3/Cloud")
Call<CloudV3> status();
/**
 * Determine the status of the nodes in the H2O cloud (HEAD variant: headers only, no body).
 * @param skip_ticks skip_ticks
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
// FIX: these parameters were annotated @Field, which is only legal together with
// @FormUrlEncoded — Retrofit throws IllegalArgumentException when building this
// service method — and a HEAD request cannot carry a form body in any case.
// @Query matches the GET variant above; the Java signature is unchanged.
@HEAD("/3/Cloud")
Call<CloudV3> head(
  @Query("skip_ticks") boolean skip_ticks,
  @Query("_exclude_fields") String _exclude_fields
);
// Convenience overload omitting all optional query parameters.
@HEAD("/3/Cloud")
Call<CloudV3> head();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/CloudLock.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the H2O REST endpoint {@code /3/CloudLock}. */
public interface CloudLock {
/**
 * Lock the cloud.
 * @param reason reason
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@FormUrlEncoded
@POST("/3/CloudLock")
Call<CloudLockV3> lock(
  @Field("reason") String reason,
  @Field("_exclude_fields") String _exclude_fields
);
// Convenience overload with no form fields; server-side defaults apply.
@FormUrlEncoded
@POST("/3/CloudLock")
Call<CloudLockV3> lock();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/ComputeGram.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the H2O REST endpoint {@code /3/ComputeGram}. */
public interface ComputeGram {
/**
 * Get weighted gram matrix
 * @param X source data
 * @param W weight vector
 * @param use_all_factor_levels use all factor levels when doing 1-hot encoding
 * @param standardize standardize data
 * @param skip_missing skip missing values
 */
@GET("/3/ComputeGram")
Call<GramV3> computeGram(
  @Query("X") String X,
  @Query("W") String W,
  @Query("use_all_factor_levels") boolean use_all_factor_levels,
  @Query("standardize") boolean standardize,
  @Query("skip_missing") boolean skip_missing
);
// Convenience overload: only the required source-data key; other options use server defaults.
@GET("/3/ComputeGram")
Call<GramV3> computeGram(@Query("X") String X);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/CreateFrame.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the synthetic-frame generator endpoint {@code /3/CreateFrame}. */
public interface CreateFrame {
/**
 * Create a synthetic H2O Frame with random data. You can specify the number of rows/columns, as well as column types:
 * integer, real, boolean, time, string, categorical. The frame may also have a dedicated "response" column, and some
 * of the entries in the dataset may be created as missing.
 * @param dest destination key
 * @param rows Number of rows
 * @param cols Number of data columns (in addition to the first response column)
 * @param seed Random number seed that determines the random values
 * @param seed_for_column_types Random number seed for setting the column types
 * @param randomize Whether frame should be randomized
 * @param value Constant value (for randomize=false)
 * @param real_range Range for real variables (-range ... range)
 * @param categorical_fraction Fraction of categorical columns (for randomize=true)
 * @param factors Factor levels for categorical variables
 * @param integer_fraction Fraction of integer columns (for randomize=true)
 * @param integer_range Range for integer variables (-range ... range)
 * @param binary_fraction Fraction of binary columns (for randomize=true)
 * @param binary_ones_fraction Fraction of 1's in binary columns
 * @param time_fraction Fraction of date/time columns (for randomize=true)
 * @param string_fraction Fraction of string columns (for randomize=true)
 * @param missing_fraction Fraction of missing values
 * @param has_response Whether an additional response column should be generated
 * @param response_factors Number of factor levels of the first column (1=real, 2=binomial, N=multinomial or
 * ordinal)
 * @param positive_response For real-valued response variable: Whether the response should be positive only.
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 * @return a Job handle; frame creation runs asynchronously on the server.
 */
@FormUrlEncoded
@POST("/3/CreateFrame")
Call<JobV3> run(
  @Field("dest") String dest,
  @Field("rows") long rows,
  @Field("cols") int cols,
  @Field("seed") long seed,
  @Field("seed_for_column_types") long seed_for_column_types,
  @Field("randomize") boolean randomize,
  @Field("value") long value,
  @Field("real_range") long real_range,
  @Field("categorical_fraction") double categorical_fraction,
  @Field("factors") int factors,
  @Field("integer_fraction") double integer_fraction,
  @Field("integer_range") long integer_range,
  @Field("binary_fraction") double binary_fraction,
  @Field("binary_ones_fraction") double binary_ones_fraction,
  @Field("time_fraction") double time_fraction,
  @Field("string_fraction") double string_fraction,
  @Field("missing_fraction") double missing_fraction,
  @Field("has_response") boolean has_response,
  @Field("response_factors") int response_factors,
  @Field("positive_response") boolean positive_response,
  @Field("_exclude_fields") String _exclude_fields
);
// Convenience overload with no form fields; server-side defaults apply to every option.
@FormUrlEncoded
@POST("/3/CreateFrame")
Call<JobV3> run();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/DCTTransformer.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the discrete-cosine-transform endpoint {@code /99/DCTTransformer}. */
public interface DCTTransformer {
/**
 * Row-by-row discrete cosine transforms in 1D, 2D and 3D.
 * @param dataset Dataset
 * @param destination_frame Destination Frame ID
 * @param dimensions Dimensions of the input array: Height, Width, Depth (Nx1x1 for 1D, NxMx1 for 2D)
 * @param inverse Whether to do the inverse transform
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 * @return a Job handle; the transform runs asynchronously on the server.
 */
@FormUrlEncoded
@POST("/99/DCTTransformer")
Call<JobV3> run(
  @Field("dataset") String dataset,
  @Field("destination_frame") String destination_frame,
  @Field("dimensions") int[] dimensions,
  @Field("inverse") boolean inverse,
  @Field("_exclude_fields") String _exclude_fields
);
// Convenience overload with only the required fields (dataset and dimensions).
@FormUrlEncoded
@POST("/99/DCTTransformer")
Call<JobV3> run(
  @Field("dataset") String dataset,
  @Field("dimensions") int[] dimensions
);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/DKV.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the H2O distributed K/V store endpoints under {@code /3/DKV}. */
public interface DKV {
/**
 * Remove an arbitrary key from the H2O distributed K/V store.
 * @param key Object to be removed.
 * @param cascade If true, removal operation will cascade down the object tree.
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
// FIX: the optional parameters were annotated @Field, but @Field requires
// @FormUrlEncoded and Retrofit's @DELETE does not permit a request body, so
// building this service method fails with IllegalArgumentException at runtime.
// They are sent as query parameters instead; the Java signature is unchanged.
@DELETE("/3/DKV/{key}")
Call<RemoveV3> remove(
  @Path("key") String key,
  @Query("cascade") boolean cascade,
  @Query("_exclude_fields") String _exclude_fields
);
// Convenience overload omitting all optional parameters.
@DELETE("/3/DKV/{key}")
Call<RemoveV3> remove(@Path("key") String key);
/**
 * Remove all keys from the H2O distributed K/V store.
 * @param retained_keys Keys of the models to retain
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
// FIX: same @Field-on-@DELETE defect as above; sent as query parameters.
@DELETE("/3/DKV")
Call<RemoveAllV3> removeAll(
  @Query("retained_keys") String[] retained_keys,
  @Query("_exclude_fields") String _exclude_fields
);
// Convenience overload omitting all optional parameters.
@DELETE("/3/DKV")
Call<RemoveAllV3> removeAll();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/DataInfoFrame.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the test-only endpoint {@code /3/DataInfoFrame}. */
public interface DataInfoFrame {
/**
 * Test only
 * @param frame input frame
 * @param interactions interactions
 * @param use_all use all factor levels
 * @param standardize standardize
 * @param interactions_only interactions only returned
 */
@FormUrlEncoded
@POST("/3/DataInfoFrame")
Call<DataInfoFrameV3> getDataInfoFrame(
  @Field("frame") String frame,
  @Field("interactions") String[] interactions,
  @Field("use_all") boolean use_all,
  @Field("standardize") boolean standardize,
  @Field("interactions_only") boolean interactions_only
);
// Convenience overload with no form fields; server-side defaults apply.
@FormUrlEncoded
@POST("/3/DataInfoFrame")
Call<DataInfoFrameV3> getDataInfoFrame();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/DecryptionSetup.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the H2O REST endpoint {@code /3/DecryptionSetup}. */
public interface DecryptionSetup {
/**
 * Install a decryption tool for parsing of encrypted data.
 * @param decrypt_tool_id Target key for the Decryption Tool
 * @param decrypt_impl Implementation of the Decryption Tool
 * @param keystore_id Location of Java Keystore
 * @param keystore_type Keystore type
 * @param key_alias Key alias
 * @param password Key password
 * @param cipher_spec Specification of the cipher (and padding)
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
// NOTE(review): the key password travels as a plain form field — make sure this
// endpoint is only used over TLS.
@FormUrlEncoded
@POST("/3/DecryptionSetup")
Call<DecryptionSetupV3> setupDecryption(
  @Field("decrypt_tool_id") String decrypt_tool_id,
  @Field("decrypt_impl") String decrypt_impl,
  @Field("keystore_id") String keystore_id,
  @Field("keystore_type") String keystore_type,
  @Field("key_alias") String key_alias,
  @Field("password") String password,
  @Field("cipher_spec") String cipher_spec,
  @Field("_exclude_fields") String _exclude_fields
);
// Convenience overload with no form fields; server-side defaults apply.
@FormUrlEncoded
@POST("/3/DecryptionSetup")
Call<DecryptionSetupV3> setupDecryption();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/DownloadDataset.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the CSV-download endpoints under {@code /3/DownloadDataset}. */
public interface DownloadDataset {
/**
 * Download dataset as a CSV.
 * @param frame_id Frame to download
 * @param hex_string Emit double values in a machine readable lossless format with Double.toHexString().
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/3/DownloadDataset")
Call<DownloadDataV3> fetch(
  @Query("frame_id") String frame_id,
  @Query("hex_string") boolean hex_string,
  @Query("_exclude_fields") String _exclude_fields
);
// Convenience overload: only the required frame id.
@GET("/3/DownloadDataset")
Call<DownloadDataV3> fetch(@Query("frame_id") String frame_id);
/**
 * Download dataset as a CSV (streaming variant, {@code /3/DownloadDataset.bin}).
 * @param frame_id Frame to download
 * @param hex_string Emit double values in a machine readable lossless format with Double.toHexString().
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/3/DownloadDataset.bin")
Call<DownloadDataV3> fetchStreaming(
  @Query("frame_id") String frame_id,
  @Query("hex_string") boolean hex_string,
  @Query("_exclude_fields") String _exclude_fields
);
// Convenience overload: only the required frame id.
@GET("/3/DownloadDataset.bin")
Call<DownloadDataV3> fetchStreaming(@Query("frame_id") String frame_id);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Endpoints.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the v4 endpoint listing at {@code /4/endpoints}. */
public interface Endpoints {
/**
 * Returns the list of all REST API (v4) endpoints.
 * @param __schema Url describing the schema of the current object.
 */
@GET("/4/endpoints")
Call<EndpointsListV4> listRoutes4(@Query("__schema") String __schema);
// Convenience overload omitting all optional query parameters.
@GET("/4/endpoints")
Call<EndpointsListV4> listRoutes4();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/FeatureInteraction.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the H2O REST endpoint {@code /3/FeatureInteraction}. */
public interface FeatureInteraction {
/**
 * Fetch feature interaction data
 * @param model_id Model id of interest
 * @param max_interaction_depth Maximum interaction depth
 * @param max_tree_depth Maximum tree depth
 * @param max_deepening Maximum deepening
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@FormUrlEncoded
@POST("/3/FeatureInteraction")
Call<FeatureInteractionV3> makeFeatureInteraction(
  @Field("model_id") String model_id,
  @Field("max_interaction_depth") int max_interaction_depth,
  @Field("max_tree_depth") int max_tree_depth,
  @Field("max_deepening") int max_deepening,
  @Field("_exclude_fields") String _exclude_fields
);
// NOTE(review): this generated overload omits model_id yet keeps the depth limits —
// presumably the server then uses a default model; verify before relying on it.
@FormUrlEncoded
@POST("/3/FeatureInteraction")
Call<FeatureInteractionV3> makeFeatureInteraction(
  @Field("max_interaction_depth") int max_interaction_depth,
  @Field("max_tree_depth") int max_tree_depth,
  @Field("max_deepening") int max_deepening
);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Find.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the value-search endpoint {@code /3/Find}. */
public interface Find {
/**
 * Find a value within a Frame.
 * @param key Frame to search
 * @param column Column, or null for all
 * @param row Starting row for search
 * @param match Value to search for; leave blank for a search for missing values
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 * "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
// NOTE(review): the "key" query parameter is a FrameV3 object — Retrofit will
// serialize it via toString(); confirm that matches what the server expects.
@GET("/3/Find")
Call<FindV3> find(
  @Query("key") FrameV3 key,
  @Query("column") String column,
  @Query("row") long row,
  @Query("match") String match,
  @Query("_exclude_fields") String _exclude_fields
);
// Convenience overload with only the required frame key and starting row.
@GET("/3/Find")
Call<FindV3> find(
  @Query("key") FrameV3 key,
  @Query("row") long row
);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/FrameChunks.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Retrofit proxy for the chunk-inspection endpoint {@code /3/FrameChunks}. */
public interface FrameChunks {
/**
 * Return information about chunks for a given frame.
 * @param frame_id ID of a given frame
 */
@GET("/3/FrameChunks/{frame_id}")
Call<FrameChunksV3> fetch(@Path("frame_id") String frame_id);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Frames.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface Frames {

    /**
     * Export a Frame to the given path with optional overwrite.
     *
     * @param frame_id name of the Frame of interest
     * @param column name of a single column of interest
     * @param row_offset row offset of the data slice to return
     * @param row_count number of rows to return
     * @param column_offset column offset of the slice to return
     * @param full_column_count number of full columns to return; columns between
     *        full_column_count and column_count are returned without data
     * @param column_count number of columns to return
     * @param find_compatible_models find and return compatible models?
     * @param path file output path
     * @param force overwrite an existing file
     * @param num_parts number of part files to use (1 = single file, -1 = automatic)
     * @param parallel use parallel export to a single file (doesn't apply when
     *        num_parts != 1; creates temporary files in the destination directory)
     * @param format output file format; defaults to 'csv'
     * @param compression compression method (default none; gzip, bzip2, zstd and snappy
     *        available depending on runtime environment)
     * @param write_checksum write a checksum next to the data files on export, if
     *        supported by the export format
     * @param tz_adjust_from_local adjust the timezone from local to UTC (parquet only)
     * @param separator field separator (default ',')
     * @param header use header (default true)
     * @param quote_header quote column names in the header line (default true)
     * @param _exclude_fields comma-separated list of JSON field paths to exclude from the
     *        result, used like: "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
     */
    @FormUrlEncoded
    @POST("/3/Frames/{frame_id}/export")
    Call<FramesV3> export(
        @Path("frame_id") String frame_id,
        @Field("column") String column,
        @Field("row_offset") long row_offset,
        @Field("row_count") int row_count,
        @Field("column_offset") int column_offset,
        @Field("full_column_count") int full_column_count,
        @Field("column_count") int column_count,
        @Field("find_compatible_models") boolean find_compatible_models,
        @Field("path") String path,
        @Field("force") boolean force,
        @Field("num_parts") int num_parts,
        @Field("parallel") boolean parallel,
        @Field("format") UtilExportFileFormat format,
        @Field("compression") String compression,
        @Field("write_checksum") boolean write_checksum,
        @Field("tz_adjust_from_local") boolean tz_adjust_from_local,
        @Field("separator") byte separator,
        @Field("header") boolean header,
        @Field("quote_header") boolean quote_header,
        @Field("_exclude_fields") String _exclude_fields
    );

    /** Convenience overload of export taking only the required field (frame_id). */
    @FormUrlEncoded
    @POST("/3/Frames/{frame_id}/export")
    Call<FramesV3> export(@Path("frame_id") String frame_id);

    /**
     * Save frame data to the given path.
     *
     * @param frame_id name of the Frame of interest
     * @param dir destination directory (hdfs, s3, local)
     * @param force overwrite the destination file if it exists; otherwise throw
     * @param _exclude_fields comma-separated list of JSON field paths to exclude from the
     *        result, used like: "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
     */
    @FormUrlEncoded
    @POST("/3/Frames/{frame_id}/save")
    Call<FrameSaveV3> save(
        @Path("frame_id") String frame_id,
        @Field("dir") String dir,
        @Field("force") boolean force,
        @Field("_exclude_fields") String _exclude_fields
    );

    /** Convenience overload of save taking only the required field (frame_id). */
    @FormUrlEncoded
    @POST("/3/Frames/{frame_id}/save")
    Call<FrameSaveV3> save(@Path("frame_id") String frame_id);

    /**
     * Load a frame from data on the given path.
     *
     * @param frame_id import the frame under this key into DKV
     * @param dir source directory (hdfs, s3, local) containing the serialized frame
     * @param force override an existing frame if it exists; otherwise throw
     * @param _exclude_fields comma-separated list of JSON field paths to exclude from the
     *        result, used like: "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
     */
    @FormUrlEncoded
    @POST("/3/Frames/load")
    Call<FrameLoadV3> load(
        @Field("frame_id") String frame_id,
        @Field("dir") String dir,
        @Field("force") boolean force,
        @Field("_exclude_fields") String _exclude_fields
    );

    /** Convenience overload of load taking no fields. */
    @FormUrlEncoded
    @POST("/3/Frames/load")
    Call<FrameLoadV3> load();

    /**
     * Return the summary metrics for a column, e.g. min, max, mean, sigma, percentiles, etc.
     * Parameters have the same meaning as in
     * {@link #export(String, String, long, int, int, int, int, boolean, String, boolean, int, boolean, UtilExportFileFormat, String, boolean, boolean, byte, boolean, boolean, String)}.
     */
    @GET("/3/Frames/{frame_id}/columns/{column}/summary")
    Call<FramesV3> columnSummary(
        @Path("frame_id") String frame_id,
        @Path("column") String column,
        @Query("row_offset") long row_offset,
        @Query("row_count") int row_count,
        @Query("column_offset") int column_offset,
        @Query("full_column_count") int full_column_count,
        @Query("column_count") int column_count,
        @Query("find_compatible_models") boolean find_compatible_models,
        @Query("path") String path,
        @Query("force") boolean force,
        @Query("num_parts") int num_parts,
        @Query("parallel") boolean parallel,
        @Query("format") UtilExportFileFormat format,
        @Query("compression") String compression,
        @Query("write_checksum") boolean write_checksum,
        @Query("tz_adjust_from_local") boolean tz_adjust_from_local,
        @Query("separator") byte separator,
        @Query("header") boolean header,
        @Query("quote_header") boolean quote_header,
        @Query("_exclude_fields") String _exclude_fields
    );

    /** Convenience overload of columnSummary taking only the required fields. */
    @GET("/3/Frames/{frame_id}/columns/{column}/summary")
    Call<FramesV3> columnSummary(
        @Path("frame_id") String frame_id,
        @Path("column") String column
    );

    /**
     * Return the domains for the specified categorical column ("null" if the column is not
     * a categorical). Parameters have the same meaning as in
     * {@link #export(String, String, long, int, int, int, int, boolean, String, boolean, int, boolean, UtilExportFileFormat, String, boolean, boolean, byte, boolean, boolean, String)}.
     */
    @GET("/3/Frames/{frame_id}/columns/{column}/domain")
    Call<FramesV3> columnDomain(
        @Path("frame_id") String frame_id,
        @Path("column") String column,
        @Query("row_offset") long row_offset,
        @Query("row_count") int row_count,
        @Query("column_offset") int column_offset,
        @Query("full_column_count") int full_column_count,
        @Query("column_count") int column_count,
        @Query("find_compatible_models") boolean find_compatible_models,
        @Query("path") String path,
        @Query("force") boolean force,
        @Query("num_parts") int num_parts,
        @Query("parallel") boolean parallel,
        @Query("format") UtilExportFileFormat format,
        @Query("compression") String compression,
        @Query("write_checksum") boolean write_checksum,
        @Query("tz_adjust_from_local") boolean tz_adjust_from_local,
        @Query("separator") byte separator,
        @Query("header") boolean header,
        @Query("quote_header") boolean quote_header,
        @Query("_exclude_fields") String _exclude_fields
    );

    /** Convenience overload of columnDomain taking only the required fields. */
    @GET("/3/Frames/{frame_id}/columns/{column}/domain")
    Call<FramesV3> columnDomain(
        @Path("frame_id") String frame_id,
        @Path("column") String column
    );

    /**
     * Return the specified column from a Frame. Parameters have the same meaning as in
     * {@link #export(String, String, long, int, int, int, int, boolean, String, boolean, int, boolean, UtilExportFileFormat, String, boolean, boolean, byte, boolean, boolean, String)}.
     */
    @GET("/3/Frames/{frame_id}/columns/{column}")
    Call<FramesV3> column(
        @Path("frame_id") String frame_id,
        @Path("column") String column,
        @Query("row_offset") long row_offset,
        @Query("row_count") int row_count,
        @Query("column_offset") int column_offset,
        @Query("full_column_count") int full_column_count,
        @Query("column_count") int column_count,
        @Query("find_compatible_models") boolean find_compatible_models,
        @Query("path") String path,
        @Query("force") boolean force,
        @Query("num_parts") int num_parts,
        @Query("parallel") boolean parallel,
        @Query("format") UtilExportFileFormat format,
        @Query("compression") String compression,
        @Query("write_checksum") boolean write_checksum,
        @Query("tz_adjust_from_local") boolean tz_adjust_from_local,
        @Query("separator") byte separator,
        @Query("header") boolean header,
        @Query("quote_header") boolean quote_header,
        @Query("_exclude_fields") String _exclude_fields
    );

    /** Convenience overload of column taking only the required fields. */
    @GET("/3/Frames/{frame_id}/columns/{column}")
    Call<FramesV3> column(
        @Path("frame_id") String frame_id,
        @Path("column") String column
    );

    /**
     * Return all the columns from a Frame. Parameters have the same meaning as in
     * {@link #export(String, String, long, int, int, int, int, boolean, String, boolean, int, boolean, UtilExportFileFormat, String, boolean, boolean, byte, boolean, boolean, String)}.
     */
    @GET("/3/Frames/{frame_id}/columns")
    Call<FramesV3> columns(
        @Path("frame_id") String frame_id,
        @Query("column") String column,
        @Query("row_offset") long row_offset,
        @Query("row_count") int row_count,
        @Query("column_offset") int column_offset,
        @Query("full_column_count") int full_column_count,
        @Query("column_count") int column_count,
        @Query("find_compatible_models") boolean find_compatible_models,
        @Query("path") String path,
        @Query("force") boolean force,
        @Query("num_parts") int num_parts,
        @Query("parallel") boolean parallel,
        @Query("format") UtilExportFileFormat format,
        @Query("compression") String compression,
        @Query("write_checksum") boolean write_checksum,
        @Query("tz_adjust_from_local") boolean tz_adjust_from_local,
        @Query("separator") byte separator,
        @Query("header") boolean header,
        @Query("quote_header") boolean quote_header,
        @Query("_exclude_fields") String _exclude_fields
    );

    /** Convenience overload of columns taking only the required field (frame_id). */
    @GET("/3/Frames/{frame_id}/columns")
    Call<FramesV3> columns(@Path("frame_id") String frame_id);

    /**
     * Return a Frame, including the histograms, after forcing computation of rollups.
     * Parameters have the same meaning as in
     * {@link #export(String, String, long, int, int, int, int, boolean, String, boolean, int, boolean, UtilExportFileFormat, String, boolean, boolean, byte, boolean, boolean, String)}.
     */
    @GET("/3/Frames/{frame_id}/summary")
    Call<FramesV3> summary(
        @Path("frame_id") String frame_id,
        @Query("column") String column,
        @Query("row_offset") long row_offset,
        @Query("row_count") int row_count,
        @Query("column_offset") int column_offset,
        @Query("full_column_count") int full_column_count,
        @Query("column_count") int column_count,
        @Query("find_compatible_models") boolean find_compatible_models,
        @Query("path") String path,
        @Query("force") boolean force,
        @Query("num_parts") int num_parts,
        @Query("parallel") boolean parallel,
        @Query("format") UtilExportFileFormat format,
        @Query("compression") String compression,
        @Query("write_checksum") boolean write_checksum,
        @Query("tz_adjust_from_local") boolean tz_adjust_from_local,
        @Query("separator") byte separator,
        @Query("header") boolean header,
        @Query("quote_header") boolean quote_header,
        @Query("_exclude_fields") String _exclude_fields
    );

    /** Convenience overload of summary taking only the required field (frame_id). */
    @GET("/3/Frames/{frame_id}/summary")
    Call<FramesV3> summary(@Path("frame_id") String frame_id);

    /**
     * Return basic info about a Frame to fill the client Rapid expression cache.
     * Parameters have the same meaning as in
     * {@link #export(String, String, long, int, int, int, int, boolean, String, boolean, int, boolean, UtilExportFileFormat, String, boolean, boolean, byte, boolean, boolean, String)}.
     */
    @GET("/3/Frames/{frame_id}/light")
    Call<FramesV3> fetchLight(
        @Path("frame_id") String frame_id,
        @Query("column") String column,
        @Query("row_offset") long row_offset,
        @Query("row_count") int row_count,
        @Query("column_offset") int column_offset,
        @Query("full_column_count") int full_column_count,
        @Query("column_count") int column_count,
        @Query("find_compatible_models") boolean find_compatible_models,
        @Query("path") String path,
        @Query("force") boolean force,
        @Query("num_parts") int num_parts,
        @Query("parallel") boolean parallel,
        @Query("format") UtilExportFileFormat format,
        @Query("compression") String compression,
        @Query("write_checksum") boolean write_checksum,
        @Query("tz_adjust_from_local") boolean tz_adjust_from_local,
        @Query("separator") byte separator,
        @Query("header") boolean header,
        @Query("quote_header") boolean quote_header,
        @Query("_exclude_fields") String _exclude_fields
    );

    /** Convenience overload of fetchLight taking only the required field (frame_id). */
    @GET("/3/Frames/{frame_id}/light")
    Call<FramesV3> fetchLight(@Path("frame_id") String frame_id);

    /**
     * Return the specified Frame. Parameters have the same meaning as in
     * {@link #export(String, String, long, int, int, int, int, boolean, String, boolean, int, boolean, UtilExportFileFormat, String, boolean, boolean, byte, boolean, boolean, String)}.
     */
    @GET("/3/Frames/{frame_id}")
    Call<FramesV3> fetch(
        @Path("frame_id") String frame_id,
        @Query("column") String column,
        @Query("row_offset") long row_offset,
        @Query("row_count") int row_count,
        @Query("column_offset") int column_offset,
        @Query("full_column_count") int full_column_count,
        @Query("column_count") int column_count,
        @Query("find_compatible_models") boolean find_compatible_models,
        @Query("path") String path,
        @Query("force") boolean force,
        @Query("num_parts") int num_parts,
        @Query("parallel") boolean parallel,
        @Query("format") UtilExportFileFormat format,
        @Query("compression") String compression,
        @Query("write_checksum") boolean write_checksum,
        @Query("tz_adjust_from_local") boolean tz_adjust_from_local,
        @Query("separator") byte separator,
        @Query("header") boolean header,
        @Query("quote_header") boolean quote_header,
        @Query("_exclude_fields") String _exclude_fields
    );

    /** Convenience overload of fetch taking only the required field (frame_id). */
    @GET("/3/Frames/{frame_id}")
    Call<FramesV3> fetch(@Path("frame_id") String frame_id);

    /**
     * Return all Frames in the H2O distributed K/V store.
     *
     * @param frame_id name of the Frame of interest
     * @param _exclude_fields comma-separated list of JSON field paths to exclude from the
     *        result, used like: "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
     */
    @GET("/3/Frames")
    Call<FramesListV3> list(
        @Query("frame_id") String frame_id,
        @Query("_exclude_fields") String _exclude_fields
    );

    /** Convenience overload of list taking no fields. */
    @GET("/3/Frames")
    Call<FramesListV3> list();

    /**
     * Delete the specified Frame from the H2O distributed K/V store.
     * Parameters have the same meaning as in
     * {@link #export(String, String, long, int, int, int, int, boolean, String, boolean, int, boolean, UtilExportFileFormat, String, boolean, boolean, byte, boolean, boolean, String)}.
     *
     * NOTE(fix): the generated code used @Field parameters here, but Retrofit rejects
     * @Field without @FormUrlEncoded, and @FormUrlEncoded is itself illegal on @DELETE
     * (no request body) — the method would throw IllegalArgumentException on first use.
     * The parameters are therefore sent as @Query instead; the method signature is
     * unchanged for callers.
     */
    @DELETE("/3/Frames/{frame_id}")
    Call<FramesV3> delete(
        @Path("frame_id") String frame_id,
        @Query("column") String column,
        @Query("row_offset") long row_offset,
        @Query("row_count") int row_count,
        @Query("column_offset") int column_offset,
        @Query("full_column_count") int full_column_count,
        @Query("column_count") int column_count,
        @Query("find_compatible_models") boolean find_compatible_models,
        @Query("path") String path,
        @Query("force") boolean force,
        @Query("num_parts") int num_parts,
        @Query("parallel") boolean parallel,
        @Query("format") UtilExportFileFormat format,
        @Query("compression") String compression,
        @Query("write_checksum") boolean write_checksum,
        @Query("tz_adjust_from_local") boolean tz_adjust_from_local,
        @Query("separator") byte separator,
        @Query("header") boolean header,
        @Query("quote_header") boolean quote_header,
        @Query("_exclude_fields") String _exclude_fields
    );

    /** Convenience overload of delete taking only the required field (frame_id). */
    @DELETE("/3/Frames/{frame_id}")
    Call<FramesV3> delete(@Path("frame_id") String frame_id);

    /**
     * Delete all Frames from the H2O distributed K/V store.
     * Parameters have the same meaning as in
     * {@link #export(String, String, long, int, int, int, int, boolean, String, boolean, int, boolean, UtilExportFileFormat, String, boolean, boolean, byte, boolean, boolean, String)}.
     *
     * NOTE(fix): @Field changed to @Query for the same reason as in delete — @Field on a
     * bodyless @DELETE method is rejected by Retrofit at service-method creation time.
     */
    @DELETE("/3/Frames")
    Call<FramesV3> deleteAll(
        @Query("frame_id") String frame_id,
        @Query("column") String column,
        @Query("row_offset") long row_offset,
        @Query("row_count") int row_count,
        @Query("column_offset") int column_offset,
        @Query("full_column_count") int full_column_count,
        @Query("column_count") int column_count,
        @Query("find_compatible_models") boolean find_compatible_models,
        @Query("path") String path,
        @Query("force") boolean force,
        @Query("num_parts") int num_parts,
        @Query("parallel") boolean parallel,
        @Query("format") UtilExportFileFormat format,
        @Query("compression") String compression,
        @Query("write_checksum") boolean write_checksum,
        @Query("tz_adjust_from_local") boolean tz_adjust_from_local,
        @Query("separator") byte separator,
        @Query("header") boolean header,
        @Query("quote_header") boolean quote_header,
        @Query("_exclude_fields") String _exclude_fields
    );

    /** Convenience overload of deleteAll taking no fields. */
    @DELETE("/3/Frames")
    Call<FramesV3> deleteAll();

    /**
     * Create a frame with random (uniformly distributed) data. You can specify how many
     * columns of each type to make, and the desired range for each column type.
     *
     * @param dest id for the frame to be created
     * @param seed random number seed that determines the random values
     * @param nrows number of rows
     * @param ncols_real number of real-valued columns, uniformly distributed between
     *        real_lb and real_ub
     * @param ncols_int number of integer columns
     * @param ncols_enum number of enum (categorical) columns
     * @param ncols_bool number of boolean (binary) columns
     * @param ncols_str number of string columns
     * @param ncols_time number of time columns
     * @param real_lb lower bound for the range of the real-valued columns
     * @param real_ub upper bound for the range of the real-valued columns
     * @param int_lb lower bound for the range of integer columns
     * @param int_ub upper bound for the range of integer columns
     * @param enum_nlevels number of levels (categories) for the enum columns
     * @param bool_p fraction of ones in each boolean (binary) column
     * @param time_lb lower bound for the range of time columns (ms since the epoch)
     * @param time_ub upper bound for the range of time columns (ms since the epoch)
     * @param str_length length of generated strings in string columns
     * @param missing_fraction fraction of missing values
     * @param response_type type of the response column to add
     * @param response_lb lower bound for the response variable (real/int/time types)
     * @param response_ub upper bound for the response variable (real/int/time types)
     * @param response_p frequency of 1s for the bool (binary) response column
     * @param response_nlevels number of categorical levels for the enum response column
     * @param _fields filter on the set of output fields: _fields="foo,bar,baz" includes
     *        only those fields; _fields="-goo,gee" includes all fields except goo and gee;
     *        nested fields can be selected like _fields="foo(oof),bar(-rab)"
     */
    @FormUrlEncoded
    @POST("/4/Frames/$simple")
    Call<JobV4> createSimpleFrame(
        @Field("dest") String dest,
        @Field("seed") long seed,
        @Field("nrows") int nrows,
        @Field("ncols_real") int ncols_real,
        @Field("ncols_int") int ncols_int,
        @Field("ncols_enum") int ncols_enum,
        @Field("ncols_bool") int ncols_bool,
        @Field("ncols_str") int ncols_str,
        @Field("ncols_time") int ncols_time,
        @Field("real_lb") double real_lb,
        @Field("real_ub") double real_ub,
        @Field("int_lb") int int_lb,
        @Field("int_ub") int int_ub,
        @Field("enum_nlevels") int enum_nlevels,
        @Field("bool_p") double bool_p,
        @Field("time_lb") long time_lb,
        @Field("time_ub") long time_ub,
        @Field("str_length") int str_length,
        @Field("missing_fraction") double missing_fraction,
        @Field("response_type") SimpleRecipeResponseType response_type,
        @Field("response_lb") double response_lb,
        @Field("response_ub") double response_ub,
        @Field("response_p") double response_p,
        @Field("response_nlevels") int response_nlevels,
        @Field("_fields") String _fields
    );

    /** Convenience overload of createSimpleFrame taking no fields. */
    @FormUrlEncoded
    @POST("/4/Frames/$simple")
    Call<JobV4> createSimpleFrame();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/FriedmansPopescusH.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface FriedmansPopescusH {
    /**
     * Fetch Friedman Popescus H.
     * @param model_id Model id of interest
     * @param frame Frame the model has been fitted to
     * @param variables Variables of interest
     * @param h Value of H statistic
     * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
     *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
     */
    @FormUrlEncoded
    @POST("/3/FriedmansPopescusH")
    Call<FriedmanPopescusHV3> makeFriedmansPopescusH(
        @Field("model_id") String model_id,
        @Field("frame") FrameV3 frame,
        @Field("variables") String[] variables,
        @Field("h") double h,
        @Field("_exclude_fields") String _exclude_fields
    );
    /**
     * Overload of {@link #makeFriedmansPopescusH} sending only {@code frame} and
     * {@code variables}; all other fields are omitted from the form body.
     * NOTE(review): this generated overload omits {@code model_id} even though it
     * is described above as the model of interest — confirm against the
     * /3/FriedmansPopescusH REST schema whether a call without it is meaningful.
     */
    @FormUrlEncoded
    @POST("/3/FriedmansPopescusH")
    Call<FriedmanPopescusHV3> makeFriedmansPopescusH(
        @Field("frame") FrameV3 frame,
        @Field("variables") String[] variables
    );
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/GarbageCollect.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface GarbageCollect {
    /**
     * Explicitly call System.gc().
     *
     * <p>Fix: the generated declaration carried {@code @FormUrlEncoded}, but
     * Retrofit rejects form-encoded methods that declare no {@code @Field}
     * parameter ("Form-encoded method must contain at least one @Field"), so
     * {@code gc()} threw {@code IllegalArgumentException} on first invocation.
     * A plain {@code @POST} with an empty request body is sent instead.
     */
    @POST("/3/GarbageCollect")
    Call<GarbageCollectV3> gc();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/GetGLMRegPath.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface GetGLMRegPath {
    /**
     * Get full regularization path
     * @param model source model
     * @param lambdas Computed lambda values
     * @param alphas alpha values used in building submodels
     * @param explained_deviance_train explained deviance on the training set
     * @param explained_deviance_valid explained deviance on the validation set
     * @param coefficients coefficients for all lambdas
     * @param coefficients_std standardized coefficients for all lambdas
     * @param coefficient_names coefficient names
     * @param z_values z-values
     * @param p_values p-values
     * @param std_errs standard error
     */
    // NOTE(review): for array-typed @Query parameters Retrofit emits one query
    // parameter per element; for the double[][] parameters each element is itself
    // a double[], which the default converter renders via Object.toString() —
    // confirm these 2-D arguments serialize meaningfully, or pass null and rely
    // on the server to populate them in the response.
    @GET("/3/GetGLMRegPath")
    Call<GLMRegularizationPathV3> extractRegularizationPath(
        @Query("model") String model,
        @Query("lambdas") double[] lambdas,
        @Query("alphas") double[] alphas,
        @Query("explained_deviance_train") double[] explained_deviance_train,
        @Query("explained_deviance_valid") double[] explained_deviance_valid,
        @Query("coefficients") double[][] coefficients,
        @Query("coefficients_std") double[][] coefficients_std,
        @Query("coefficient_names") String[] coefficient_names,
        @Query("z_values") double[][] z_values,
        @Query("p_values") double[][] p_values,
        @Query("std_errs") double[][] std_errs
    );
    /**
     * Overload of {@link #extractRegularizationPath} sending only the source
     * {@code model} id; all other query parameters are omitted.
     */
    @GET("/3/GetGLMRegPath")
    Call<GLMRegularizationPathV3> extractRegularizationPath(@Query("model") String model);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Grid.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface Grid {
/**
* Run grid search for XGBoost model.
* @param ntrees (same as n_estimators) Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows (same as min_child_weight) Fewest allowed (weighted) observations in a leaf.
* @param min_child_weight (same as min_rows) Fewest allowed (weighted) observations in a leaf.
* @param learn_rate (same as eta) Learning rate (from 0.0 to 1.0)
* @param eta (same as learn_rate) Learning rate (from 0.0 to 1.0)
* @param sample_rate (same as subsample) Row sample rate per tree (from 0.0 to 1.0)
* @param subsample (same as sample_rate) Row sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate (same as colsample_bylevel) Column sample rate (from 0.0 to 1.0)
* @param colsample_bylevel (same as col_sample_rate) Column sample rate (from 0.0 to 1.0)
* @param col_sample_rate_per_tree (same as colsample_bytree) Column sample rate per tree (from 0.0 to 1.0)
* @param colsample_bytree (same as col_sample_rate_per_tree) Column sample rate per tree (from 0.0 to 1.0)
* @param colsample_bynode Column sample rate per tree node (from 0.0 to 1.0)
* @param monotone_constraints A mapping representing monotonic constraints. Use +1 to enforce an increasing
* constraint and -1 to specify a decreasing constraint.
* @param max_abs_leafnode_pred (same as max_delta_step) Maximum absolute value of a leaf node prediction
* @param max_delta_step (same as max_abs_leafnode_pred) Maximum absolute value of a leaf node prediction
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param seed Seed for pseudo random number generator (if applicable)
* @param min_split_improvement (same as gamma) Minimum relative improvement in squared error reduction for a split
* to happen
* @param gamma (same as min_split_improvement) Minimum relative improvement in squared error reduction for a split
* to happen
* @param nthread Number of parallel threads that can be used to run XGBoost. Cannot exceed H2O cluster limits
* (-nthreads parameter). Defaults to maximum available
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param save_matrix_directory Directory where to save matrices passed to XGBoost library. Useful for debugging.
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param max_bins For tree_method=hist only: maximum number of bins
* @param max_leaves For tree_method=hist only: maximum number of leaves
* @param tree_method Tree method
* @param grow_policy Grow policy - depthwise is standard GBM, lossguide is LightGBM
* @param booster Booster type
* @param reg_lambda L2 regularization
* @param reg_alpha L1 regularization
* @param quiet_mode Enable quiet mode
* @param sample_type For booster=dart only: sample_type
* @param normalize_type For booster=dart only: normalize_type
* @param rate_drop For booster=dart only: rate_drop (0..1)
* @param one_drop For booster=dart only: one_drop
* @param skip_drop For booster=dart only: skip_drop (0..1)
* @param dmatrix_type Type of DMatrix. For sparse, NAs and 0 are treated equally.
* @param backend Backend. By default (auto), a GPU is used if available.
* @param gpu_id Which GPU(s) to use.
* @param interaction_constraints A set of allowed column interactions.
* @param scale_pos_weight Controls the effect of observations with positive labels in relation to the observations
* with negative labels on gradient calculation. Useful for imbalanced problems.
* @param eval_metric Specification of evaluation metric that will be passed to the native XGBoost backend.
* @param score_eval_metric_only If enabled, score only the evaluation metric. This can make model training faster
* if scoring is frequent (eg. each iteration).
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
    // Launches an XGBoost grid search via form-encoded POST to /99/Grid/xgboost.
    // Every hyperparameter is sent as an individual form field (see the javadoc
    // above for per-parameter descriptions); returns a Call wrapping XGBoostV3.
    @FormUrlEncoded
    @POST("/99/Grid/xgboost")
    Call<XGBoostV3> trainXgboost(
        @Field("ntrees") int ntrees,
        @Field("max_depth") int max_depth,
        @Field("min_rows") double min_rows,
        @Field("min_child_weight") double min_child_weight,
        @Field("learn_rate") double learn_rate,
        @Field("eta") double eta,
        @Field("sample_rate") double sample_rate,
        @Field("subsample") double subsample,
        @Field("col_sample_rate") double col_sample_rate,
        @Field("colsample_bylevel") double colsample_bylevel,
        @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
        @Field("colsample_bytree") double colsample_bytree,
        @Field("colsample_bynode") double colsample_bynode,
        @Field("monotone_constraints") KeyValueV3[] monotone_constraints,
        @Field("max_abs_leafnode_pred") float max_abs_leafnode_pred,
        @Field("max_delta_step") float max_delta_step,
        @Field("score_tree_interval") int score_tree_interval,
        @Field("seed") long seed,
        @Field("min_split_improvement") float min_split_improvement,
        @Field("gamma") float gamma,
        @Field("nthread") int nthread,
        @Field("build_tree_one_node") boolean build_tree_one_node,
        @Field("save_matrix_directory") String save_matrix_directory,
        @Field("calibrate_model") boolean calibrate_model,
        @Field("calibration_frame") String calibration_frame,
        @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
        @Field("max_bins") int max_bins,
        @Field("max_leaves") int max_leaves,
        @Field("tree_method") TreexgboostXGBoostModelXGBoostParametersTreeMethod tree_method,
        @Field("grow_policy") TreexgboostXGBoostModelXGBoostParametersGrowPolicy grow_policy,
        @Field("booster") TreexgboostXGBoostModelXGBoostParametersBooster booster,
        @Field("reg_lambda") float reg_lambda,
        @Field("reg_alpha") float reg_alpha,
        @Field("quiet_mode") boolean quiet_mode,
        @Field("sample_type") TreexgboostXGBoostModelXGBoostParametersDartSampleType sample_type,
        @Field("normalize_type") TreexgboostXGBoostModelXGBoostParametersDartNormalizeType normalize_type,
        @Field("rate_drop") float rate_drop,
        @Field("one_drop") boolean one_drop,
        @Field("skip_drop") float skip_drop,
        @Field("dmatrix_type") TreexgboostXGBoostModelXGBoostParametersDMatrixType dmatrix_type,
        @Field("backend") TreexgboostXGBoostModelXGBoostParametersBackend backend,
        @Field("gpu_id") int[] gpu_id,
        @Field("interaction_constraints") String[][] interaction_constraints,
        @Field("scale_pos_weight") float scale_pos_weight,
        @Field("eval_metric") String eval_metric,
        @Field("score_eval_metric_only") boolean score_eval_metric_only,
        @Field("model_id") String model_id,
        @Field("training_frame") String training_frame,
        @Field("validation_frame") String validation_frame,
        @Field("nfolds") int nfolds,
        @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
        @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
        @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
        @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
        @Field("distribution") GenmodelutilsDistributionFamily distribution,
        @Field("tweedie_power") double tweedie_power,
        @Field("quantile_alpha") double quantile_alpha,
        @Field("huber_alpha") double huber_alpha,
        @Field("response_column") String response_column,
        @Field("weights_column") String weights_column,
        @Field("offset_column") String offset_column,
        @Field("fold_column") String fold_column,
        @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
        @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
        @Field("max_categorical_levels") int max_categorical_levels,
        @Field("ignored_columns") String[] ignored_columns,
        @Field("ignore_const_cols") boolean ignore_const_cols,
        @Field("score_each_iteration") boolean score_each_iteration,
        @Field("checkpoint") String checkpoint,
        @Field("stopping_rounds") int stopping_rounds,
        @Field("max_runtime_secs") double max_runtime_secs,
        @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
        @Field("stopping_tolerance") double stopping_tolerance,
        @Field("gainslift_bins") int gainslift_bins,
        @Field("custom_metric_func") String custom_metric_func,
        @Field("custom_distribution_func") String custom_distribution_func,
        @Field("export_checkpoints_dir") String export_checkpoints_dir,
        @Field("auc_type") MultinomialAucType auc_type
    );
@FormUrlEncoded
@POST("/99/Grid/xgboost")
Call<XGBoostV3> trainXgboost();
/**
* Resume grid search for XGBoost model.
* @param ntrees (same as n_estimators) Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows (same as min_child_weight) Fewest allowed (weighted) observations in a leaf.
* @param min_child_weight (same as min_rows) Fewest allowed (weighted) observations in a leaf.
* @param learn_rate (same as eta) Learning rate (from 0.0 to 1.0)
* @param eta (same as learn_rate) Learning rate (from 0.0 to 1.0)
* @param sample_rate (same as subsample) Row sample rate per tree (from 0.0 to 1.0)
* @param subsample (same as sample_rate) Row sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate (same as colsample_bylevel) Column sample rate (from 0.0 to 1.0)
* @param colsample_bylevel (same as col_sample_rate) Column sample rate (from 0.0 to 1.0)
* @param col_sample_rate_per_tree (same as colsample_bytree) Column sample rate per tree (from 0.0 to 1.0)
* @param colsample_bytree (same as col_sample_rate_per_tree) Column sample rate per tree (from 0.0 to 1.0)
* @param colsample_bynode Column sample rate per tree node (from 0.0 to 1.0)
* @param monotone_constraints A mapping representing monotonic constraints. Use +1 to enforce an increasing
* constraint and -1 to specify a decreasing constraint.
* @param max_abs_leafnode_pred (same as max_delta_step) Maximum absolute value of a leaf node prediction
* @param max_delta_step (same as max_abs_leafnode_pred) Maximum absolute value of a leaf node prediction
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param seed Seed for pseudo random number generator (if applicable)
* @param min_split_improvement (same as gamma) Minimum relative improvement in squared error reduction for a split
* to happen
* @param gamma (same as min_split_improvement) Minimum relative improvement in squared error reduction for a split
* to happen
* @param nthread Number of parallel threads that can be used to run XGBoost. Cannot exceed H2O cluster limits
* (-nthreads parameter). Defaults to maximum available
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param save_matrix_directory Directory where to save matrices passed to XGBoost library. Useful for debugging.
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param max_bins For tree_method=hist only: maximum number of bins
* @param max_leaves For tree_method=hist only: maximum number of leaves
* @param tree_method Tree method
* @param grow_policy Grow policy - depthwise is standard GBM, lossguide is LightGBM
* @param booster Booster type
* @param reg_lambda L2 regularization
* @param reg_alpha L1 regularization
* @param quiet_mode Enable quiet mode
* @param sample_type For booster=dart only: sample_type
* @param normalize_type For booster=dart only: normalize_type
* @param rate_drop For booster=dart only: rate_drop (0..1)
* @param one_drop For booster=dart only: one_drop
* @param skip_drop For booster=dart only: skip_drop (0..1)
* @param dmatrix_type Type of DMatrix. For sparse, NAs and 0 are treated equally.
* @param backend Backend. By default (auto), a GPU is used if available.
* @param gpu_id Which GPU(s) to use.
* @param interaction_constraints A set of allowed column interactions.
* @param scale_pos_weight Controls the effect of observations with positive labels in relation to the observations
* with negative labels on gradient calculation. Useful for imbalanced problems.
* @param eval_metric Specification of evaluation metric that will be passed to the native XGBoost backend.
* @param score_eval_metric_only If enabled, score only the evaluation metric. This can make model training faster
* if scoring is frequent (eg. each iteration).
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
    // Resumes a previously started XGBoost grid search via form-encoded POST to
    // /99/Grid/xgboost/resume. Takes the identical field set as trainXgboost (see
    // the javadoc above for per-parameter descriptions); returns a Call wrapping
    // XGBoostV3.
    @FormUrlEncoded
    @POST("/99/Grid/xgboost/resume")
    Call<XGBoostV3> resumeXgboost(
        @Field("ntrees") int ntrees,
        @Field("max_depth") int max_depth,
        @Field("min_rows") double min_rows,
        @Field("min_child_weight") double min_child_weight,
        @Field("learn_rate") double learn_rate,
        @Field("eta") double eta,
        @Field("sample_rate") double sample_rate,
        @Field("subsample") double subsample,
        @Field("col_sample_rate") double col_sample_rate,
        @Field("colsample_bylevel") double colsample_bylevel,
        @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
        @Field("colsample_bytree") double colsample_bytree,
        @Field("colsample_bynode") double colsample_bynode,
        @Field("monotone_constraints") KeyValueV3[] monotone_constraints,
        @Field("max_abs_leafnode_pred") float max_abs_leafnode_pred,
        @Field("max_delta_step") float max_delta_step,
        @Field("score_tree_interval") int score_tree_interval,
        @Field("seed") long seed,
        @Field("min_split_improvement") float min_split_improvement,
        @Field("gamma") float gamma,
        @Field("nthread") int nthread,
        @Field("build_tree_one_node") boolean build_tree_one_node,
        @Field("save_matrix_directory") String save_matrix_directory,
        @Field("calibrate_model") boolean calibrate_model,
        @Field("calibration_frame") String calibration_frame,
        @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
        @Field("max_bins") int max_bins,
        @Field("max_leaves") int max_leaves,
        @Field("tree_method") TreexgboostXGBoostModelXGBoostParametersTreeMethod tree_method,
        @Field("grow_policy") TreexgboostXGBoostModelXGBoostParametersGrowPolicy grow_policy,
        @Field("booster") TreexgboostXGBoostModelXGBoostParametersBooster booster,
        @Field("reg_lambda") float reg_lambda,
        @Field("reg_alpha") float reg_alpha,
        @Field("quiet_mode") boolean quiet_mode,
        @Field("sample_type") TreexgboostXGBoostModelXGBoostParametersDartSampleType sample_type,
        @Field("normalize_type") TreexgboostXGBoostModelXGBoostParametersDartNormalizeType normalize_type,
        @Field("rate_drop") float rate_drop,
        @Field("one_drop") boolean one_drop,
        @Field("skip_drop") float skip_drop,
        @Field("dmatrix_type") TreexgboostXGBoostModelXGBoostParametersDMatrixType dmatrix_type,
        @Field("backend") TreexgboostXGBoostModelXGBoostParametersBackend backend,
        @Field("gpu_id") int[] gpu_id,
        @Field("interaction_constraints") String[][] interaction_constraints,
        @Field("scale_pos_weight") float scale_pos_weight,
        @Field("eval_metric") String eval_metric,
        @Field("score_eval_metric_only") boolean score_eval_metric_only,
        @Field("model_id") String model_id,
        @Field("training_frame") String training_frame,
        @Field("validation_frame") String validation_frame,
        @Field("nfolds") int nfolds,
        @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
        @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
        @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
        @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
        @Field("distribution") GenmodelutilsDistributionFamily distribution,
        @Field("tweedie_power") double tweedie_power,
        @Field("quantile_alpha") double quantile_alpha,
        @Field("huber_alpha") double huber_alpha,
        @Field("response_column") String response_column,
        @Field("weights_column") String weights_column,
        @Field("offset_column") String offset_column,
        @Field("fold_column") String fold_column,
        @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
        @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
        @Field("max_categorical_levels") int max_categorical_levels,
        @Field("ignored_columns") String[] ignored_columns,
        @Field("ignore_const_cols") boolean ignore_const_cols,
        @Field("score_each_iteration") boolean score_each_iteration,
        @Field("checkpoint") String checkpoint,
        @Field("stopping_rounds") int stopping_rounds,
        @Field("max_runtime_secs") double max_runtime_secs,
        @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
        @Field("stopping_tolerance") double stopping_tolerance,
        @Field("gainslift_bins") int gainslift_bins,
        @Field("custom_metric_func") String custom_metric_func,
        @Field("custom_distribution_func") String custom_distribution_func,
        @Field("export_checkpoints_dir") String export_checkpoints_dir,
        @Field("auc_type") MultinomialAucType auc_type
    );
  /**
   * Resume grid search for XGBoost model.
   * No form fields are sent by this overload, so backend default values apply for all parameters.
   */
  @FormUrlEncoded
  @POST("/99/Grid/xgboost/resume")
  Call<XGBoostV3> resumeXgboost();
  /**
   * Run grid search for Infogram model.
   * @param seed Seed for pseudo random number generator (if applicable).
   * @param standardize Standardize numeric columns to have zero mean and unit variance.
   * @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                    of the training/validation frame, use in conjunction with missing_values_handling = PlugValues).
   * @param max_iterations Maximum number of iterations.
   * @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *              and the mean of response does not reflect reality.
   * @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   * @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                               specified, sampling factors will be automatically computed to obtain class balance
   *                               during training. Requires balance_classes.
   * @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                               less than 1.0). Requires balance_classes.
   * @param algorithm Type of machine learning algorithm used to build the infogram. Options include 'AUTO' (gbm),
   *                  'deeplearning' (Deep Learning with default parameters), 'drf' (Random Forest with default
   *                  parameters), 'gbm' (GBM with default parameters), 'glm' (GLM with default parameters), or
   *                  'xgboost' (if available, XGBoost with default parameters).
   * @param algorithm_params Customized parameters for the machine learning algorithm specified in the algorithm
   *                         parameter.
   * @param protected_columns Columns that contain features that are sensitive and need to be protected (legally, or
   *                          otherwise), if applicable. These features (e.g. race, gender, etc) should not drive the
   *                          prediction of the response.
   * @param total_information_threshold A number between 0 and 1 representing a threshold for total information,
   *                                    defaulting to 0.1. For a specific feature, if the total information is higher
   *                                    than this threshold, and the corresponding net information is also higher than
   *                                    the threshold ``net_information_threshold``, that feature will be considered
   *                                    admissible. The total information is the x-axis of the Core Infogram. Default
   *                                    is -1 which gets set to 0.1.
   * @param net_information_threshold A number between 0 and 1 representing a threshold for net information,
   *                                  defaulting to 0.1. For a specific feature, if the net information is higher
   *                                  than this threshold, and the corresponding total information is also higher than
   *                                  the total_information_threshold, that feature will be considered admissible. The
   *                                  net information is the y-axis of the Core Infogram. Default is -1 which gets set
   *                                  to 0.1.
   * @param relevance_index_threshold A number between 0 and 1 representing a threshold for the relevance index,
   *                                  defaulting to 0.1. This is only used when ``protected_columns`` is set by the
   *                                  user. For a specific feature, if the relevance index value is higher than this
   *                                  threshold, and the corresponding safety index is also higher than the
   *                                  ``safety_index_threshold``, that feature will be considered admissible. The
   *                                  relevance index is the x-axis of the Fair Infogram. Default is -1 which gets set
   *                                  to 0.1.
   * @param safety_index_threshold A number between 0 and 1 representing a threshold for the safety index, defaulting
   *                               to 0.1. This is only used when protected_columns is set by the user. For a
   *                               specific feature, if the safety index value is higher than this threshold, and the
   *                               corresponding relevance index is also higher than the relevance_index_threshold,
   *                               that feature will be considered admissible. The safety index is the y-axis of the
   *                               Fair Infogram. Default is -1 which gets set to 0.1.
   * @param data_fraction The fraction of training frame to use to build the infogram model. Defaults to 1.0, and any
   *                      value greater than 0 and less than or equal to 1.0 is acceptable.
   * @param top_n_features An integer specifying the number of columns to evaluate in the infogram. The columns are
   *                       ranked by variable importance, and the top N are evaluated. Defaults to 50.
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/infogram")
  Call<InfogramV3> trainInfogram(
    @Field("seed") long seed,
    @Field("standardize") boolean standardize,
    @Field("plug_values") String plug_values,
    @Field("max_iterations") int max_iterations,
    @Field("prior") double prior,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("algorithm") InfogramAlgorithm algorithm,
    @Field("algorithm_params") String algorithm_params,
    @Field("protected_columns") String[] protected_columns,
    @Field("total_information_threshold") double total_information_threshold,
    @Field("net_information_threshold") double net_information_threshold,
    @Field("relevance_index_threshold") double relevance_index_threshold,
    @Field("safety_index_threshold") double safety_index_threshold,
    @Field("data_fraction") double data_fraction,
    @Field("top_n_features") int top_n_features,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Run grid search for Infogram model.
   * No form fields are sent by this overload, so backend default values apply for all parameters.
   */
  @FormUrlEncoded
  @POST("/99/Grid/infogram")
  Call<InfogramV3> trainInfogram();
  /**
   * Resume grid search for Infogram model.
   * @param seed Seed for pseudo random number generator (if applicable).
   * @param standardize Standardize numeric columns to have zero mean and unit variance.
   * @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                    of the training/validation frame, use in conjunction with missing_values_handling = PlugValues).
   * @param max_iterations Maximum number of iterations.
   * @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *              and the mean of response does not reflect reality.
   * @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   * @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                               specified, sampling factors will be automatically computed to obtain class balance
   *                               during training. Requires balance_classes.
   * @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                               less than 1.0). Requires balance_classes.
   * @param algorithm Type of machine learning algorithm used to build the infogram. Options include 'AUTO' (gbm),
   *                  'deeplearning' (Deep Learning with default parameters), 'drf' (Random Forest with default
   *                  parameters), 'gbm' (GBM with default parameters), 'glm' (GLM with default parameters), or
   *                  'xgboost' (if available, XGBoost with default parameters).
   * @param algorithm_params Customized parameters for the machine learning algorithm specified in the algorithm
   *                         parameter.
   * @param protected_columns Columns that contain features that are sensitive and need to be protected (legally, or
   *                          otherwise), if applicable. These features (e.g. race, gender, etc) should not drive the
   *                          prediction of the response.
   * @param total_information_threshold A number between 0 and 1 representing a threshold for total information,
   *                                    defaulting to 0.1. For a specific feature, if the total information is higher
   *                                    than this threshold, and the corresponding net information is also higher than
   *                                    the threshold ``net_information_threshold``, that feature will be considered
   *                                    admissible. The total information is the x-axis of the Core Infogram. Default
   *                                    is -1 which gets set to 0.1.
   * @param net_information_threshold A number between 0 and 1 representing a threshold for net information,
   *                                  defaulting to 0.1. For a specific feature, if the net information is higher
   *                                  than this threshold, and the corresponding total information is also higher than
   *                                  the total_information_threshold, that feature will be considered admissible. The
   *                                  net information is the y-axis of the Core Infogram. Default is -1 which gets set
   *                                  to 0.1.
   * @param relevance_index_threshold A number between 0 and 1 representing a threshold for the relevance index,
   *                                  defaulting to 0.1. This is only used when ``protected_columns`` is set by the
   *                                  user. For a specific feature, if the relevance index value is higher than this
   *                                  threshold, and the corresponding safety index is also higher than the
   *                                  ``safety_index_threshold``, that feature will be considered admissible. The
   *                                  relevance index is the x-axis of the Fair Infogram. Default is -1 which gets set
   *                                  to 0.1.
   * @param safety_index_threshold A number between 0 and 1 representing a threshold for the safety index, defaulting
   *                               to 0.1. This is only used when protected_columns is set by the user. For a
   *                               specific feature, if the safety index value is higher than this threshold, and the
   *                               corresponding relevance index is also higher than the relevance_index_threshold,
   *                               that feature will be considered admissible. The safety index is the y-axis of the
   *                               Fair Infogram. Default is -1 which gets set to 0.1.
   * @param data_fraction The fraction of training frame to use to build the infogram model. Defaults to 1.0, and any
   *                      value greater than 0 and less than or equal to 1.0 is acceptable.
   * @param top_n_features An integer specifying the number of columns to evaluate in the infogram. The columns are
   *                       ranked by variable importance, and the top N are evaluated. Defaults to 50.
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/infogram/resume")
  Call<InfogramV3> resumeInfogram(
    @Field("seed") long seed,
    @Field("standardize") boolean standardize,
    @Field("plug_values") String plug_values,
    @Field("max_iterations") int max_iterations,
    @Field("prior") double prior,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("algorithm") InfogramAlgorithm algorithm,
    @Field("algorithm_params") String algorithm_params,
    @Field("protected_columns") String[] protected_columns,
    @Field("total_information_threshold") double total_information_threshold,
    @Field("net_information_threshold") double net_information_threshold,
    @Field("relevance_index_threshold") double relevance_index_threshold,
    @Field("safety_index_threshold") double safety_index_threshold,
    @Field("data_fraction") double data_fraction,
    @Field("top_n_features") int top_n_features,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Resume grid search for Infogram model.
   * No form fields are sent by this overload, so backend default values apply for all parameters.
   */
  @FormUrlEncoded
  @POST("/99/Grid/infogram/resume")
  Call<InfogramV3> resumeInfogram();
  /**
   * Run grid search for TargetEncoder model.
   * @param columns_to_encode List of categorical columns or groups of categorical columns to encode. When groups of
   *                          columns are specified, each group is encoded as a single column (interactions are
   *                          created internally).
   * @param keep_original_categorical_columns If true, the original non-encoded categorical features will remain in
   *                                          the result frame.
   * @param blending If true, enables blending of posterior probabilities (computed for a given categorical value)
   *                 with prior probabilities (computed on the entire set). This allows to mitigate the effect of
   *                 categorical values with small cardinality. The blending effect can be tuned using the
   *                 `inflection_point` and `smoothing` parameters.
   * @param inflection_point Inflection point of the sigmoid used to blend probabilities (see `blending` parameter).
   *                         For a given categorical value, if it appears less than `inflection_point` in a data
   *                         sample, then the influence of the posterior probability will be smaller than the prior.
   * @param smoothing Smoothing factor corresponds to the inverse of the slope at the inflection point on the sigmoid
   *                  used to blend probabilities (see `blending` parameter). If smoothing tends towards 0, then the
   *                  sigmoid used for blending turns into a Heaviside step function.
   * @param data_leakage_handling Data leakage handling strategy used to generate the encoding. Supported options are:
   *                              1) "none" (default) - no holdout, using the entire training frame.
   *                              2) "leave_one_out" - current row's response value is subtracted from the per-level
   *                              frequencies pre-calculated on the entire training frame.
   *                              3) "k_fold" - encodings for a fold are generated based on out-of-fold data.
   * @param noise The amount of noise to add to the encoded column. Use 0 to disable noise, and -1 (=AUTO) to let the
   *              algorithm determine a reasonable amount of noise.
   * @param seed Seed used to generate the noise. By default, the seed is chosen randomly.
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/targetencoder")
  Call<TargetEncoderV3> trainTargetencoder(
    @Field("columns_to_encode") String[][] columns_to_encode,
    @Field("keep_original_categorical_columns") boolean keep_original_categorical_columns,
    @Field("blending") boolean blending,
    @Field("inflection_point") double inflection_point,
    @Field("smoothing") double smoothing,
    @Field("data_leakage_handling") H2otargetencodingTargetEncoderModelDataLeakageHandlingStrategy data_leakage_handling,
    @Field("noise") double noise,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Run grid search for TargetEncoder model.
   * No form fields are sent by this overload, so backend default values apply for all parameters.
   */
  @FormUrlEncoded
  @POST("/99/Grid/targetencoder")
  Call<TargetEncoderV3> trainTargetencoder();
/**
 * Resume grid search for TargetEncoder model.
 * @param columns_to_encode List of categorical columns or groups of categorical columns to encode. When groups of
 *                          columns are specified, each group is encoded as a single column (interactions are
 *                          created internally).
 * @param keep_original_categorical_columns If true, the original non-encoded categorical features will remain in
 *                                          the result frame.
 * @param blending If true, enables blending of posterior probabilities (computed for a given categorical value)
 *                 with prior probabilities (computed on the entire set). This allows to mitigate the effect of
 *                 categorical values with small cardinality. The blending effect can be tuned using the
 *                 `inflection_point` and `smoothing` parameters.
 * @param inflection_point Inflection point of the sigmoid used to blend probabilities (see `blending` parameter).
 *                         For a given categorical value, if it appears less that `inflection_point` in a data
 *                         sample, then the influence of the posterior probability will be smaller than the prior.
 * @param smoothing Smoothing factor corresponds to the inverse of the slope at the inflection point on the sigmoid
 *                  used to blend probabilities (see `blending` parameter). If smoothing tends towards 0, then the
 *                  sigmoid used for blending turns into a Heaviside step function.
 * @param data_leakage_handling Data leakage handling strategy used to generate the encoding. Supported options are:
 *                              1) "none" (default) - no holdout, using the entire training frame.
 *                              2) "leave_one_out" - current row's response value is subtracted from the per-level
 *                              frequencies pre-calculated on the entire training frame.
 *                              3) "k_fold" - encodings for a fold are generated based on out-of-fold data.
 * @param noise The amount of noise to add to the encoded column. Use 0 to disable noise, and -1 (=AUTO) to let the
 *              algorithm determine a reasonable amount of noise.
 * @param seed Seed used to generate the noise. By default, the seed is chosen randomly.
 * @param model_id Destination id for this model; auto-generated if not specified.
 * @param training_frame Id of the training data frame.
 * @param validation_frame Id of the validation data frame.
 * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
 * @param keep_cross_validation_models Whether to keep the cross-validation models.
 * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
 * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
 * @param parallelize_cross_validation Allow parallel training of cross-validation models
 * @param distribution Distribution function
 * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
 * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
 * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
 *                    between 0 and 1).
 * @param response_column Response variable column.
 * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
 *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
 *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
 *                       observation weights and do not increase the size of the data frame. This is typically the
 *                       number of times a row is repeated, but non-integer values are supported as well. During
 *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
 *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
 *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
 * @param offset_column Offset column. This will be added to the combination of columns before applying the link
 *                      function.
 * @param fold_column Column with cross-validation fold index assignment per observation.
 * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
 *                        option will stratify the folds based on the response variable, for classification
 *                        problems.
 * @param categorical_encoding Encoding scheme for categorical features
 * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
 *                               for model training. Only used for categorical_encoding == EnumLimited.
 * @param ignored_columns Names of columns to ignore for training.
 * @param ignore_const_cols Ignore constant columns.
 * @param score_each_iteration Whether to score during each iteration of model training.
 * @param checkpoint Model checkpoint to resume training with.
 * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
 *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
 *                        to disable)
 * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
 * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
 *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
 *                        can only be used in GBM and DRF with the Python client.
 * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
 *                           not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
 *                       binning.
 * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
 * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
 * @param export_checkpoints_dir Automatically export generated models to this directory.
 * @param auc_type Set default multinomial AUC type.
 * @return a {@link Call} which, when executed, submits the resume request and yields its schema.
 */
@FormUrlEncoded
@POST("/99/Grid/targetencoder/resume")
Call<TargetEncoderV3> resumeTargetencoder(
@Field("columns_to_encode") String[][] columns_to_encode,
@Field("keep_original_categorical_columns") boolean keep_original_categorical_columns,
@Field("blending") boolean blending,
@Field("inflection_point") double inflection_point,
@Field("smoothing") double smoothing,
@Field("data_leakage_handling") H2otargetencodingTargetEncoderModelDataLeakageHandlingStrategy data_leakage_handling,
@Field("noise") double noise,
@Field("seed") long seed,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
/**
 * Resume grid search for TargetEncoder model, relying on the server-side defaults for every
 * hyper-parameter (zero-argument overload of the parameterized {@code resumeTargetencoder} above).
 * <p>
 * NOTE(review): a {@code @FormUrlEncoded} method with zero {@code @Field} parameters is rejected
 * by Retrofit 2 at invocation time — confirm this overload is actually callable.
 *
 * @return a {@link Call} which, when executed, submits the resume request and yields its schema.
 */
@FormUrlEncoded
@POST("/99/Grid/targetencoder/resume")
Call<TargetEncoderV3> resumeTargetencoder();
/**
 * Run grid search for DeepLearning model.
 * @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
 * @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
 *                               specified, sampling factors will be automatically computed to obtain class balance
 *                               during training. Requires balance_classes.
 * @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
 *                               less than 1.0). Requires balance_classes.
 * @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
 *                                  the Logs.
 * @param activation Activation function.
 * @param hidden Hidden layer sizes (e.g. [100, 100]).
 * @param epochs How many times the dataset should be iterated (streamed), can be fractional.
 * @param train_samples_per_iteration Number of training samples (globally) per MapReduce iteration. Special values
 *                                    are 0: one epoch, -1: all available data (e.g., replicated training data), -2:
 *                                    automatic.
 * @param target_ratio_comm_to_comp Target ratio of communication overhead to computation. Only for multi-node
 *                                  operation and train_samples_per_iteration = -2 (auto-tuning).
 * @param seed Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded.
 * @param adaptive_rate Adaptive learning rate.
 * @param rho Adaptive learning rate time decay factor (similarity to prior updates).
 * @param epsilon Adaptive learning rate smoothing factor (to avoid divisions by zero and allow progress).
 * @param rate Learning rate (higher => less stable, lower => slower convergence).
 * @param rate_annealing Learning rate annealing: rate / (1 + rate_annealing * samples).
 * @param rate_decay Learning rate decay factor between layers (N-th layer: rate * rate_decay ^ (n - 1).
 * @param momentum_start Initial momentum at the beginning of training (try 0.5).
 * @param momentum_ramp Number of training samples for which momentum increases.
 * @param momentum_stable Final momentum after the ramp is over (try 0.99).
 * @param nesterov_accelerated_gradient Use Nesterov accelerated gradient (recommended).
 * @param input_dropout_ratio Input layer dropout ratio (can improve generalization, try 0.1 or 0.2).
 * @param hidden_dropout_ratios Hidden layer dropout ratios (can improve generalization), specify one value per
 *                              hidden layer, defaults to 0.5.
 * @param l1 L1 regularization (can add stability and improve generalization, causes many weights to become 0).
 * @param l2 L2 regularization (can add stability and improve generalization, causes many weights to be small.
 * @param max_w2 Constraint for squared sum of incoming weights per unit (e.g. for Rectifier).
 * @param initial_weight_distribution Initial weight distribution.
 * @param initial_weight_scale Uniform: -value...value, Normal: stddev.
 * @param initial_weights A list of H2OFrame ids to initialize the weight matrices of this model with.
 * @param initial_biases A list of H2OFrame ids to initialize the bias vectors of this model with.
 * @param loss Loss function.
 * @param score_interval Shortest time interval (in seconds) between model scoring.
 * @param score_training_samples Number of training set samples for scoring (0 for all).
 * @param score_validation_samples Number of validation set samples for scoring (0 for all).
 * @param score_duty_cycle Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring).
 * @param classification_stop Stopping criterion for classification error fraction on training data (-1 to disable).
 * @param regression_stop Stopping criterion for regression error (MSE) on training data (-1 to disable).
 * @param quiet_mode Enable quiet mode for less output to standard output.
 * @param score_validation_sampling Method used to sample validation dataset for scoring.
 * @param overwrite_with_best_model If enabled, override the final model with the best model found during training.
 * @param autoencoder Auto-Encoder.
 * @param use_all_factor_levels Use all factor levels of categorical variables. Otherwise, the first factor level is
 *                              omitted (without loss of accuracy). Useful for variable importances and auto-enabled
 *                              for autoencoder.
 * @param standardize If enabled, automatically standardize the data. If disabled, the user must provide properly
 *                    scaled input data.
 * @param diagnostics Enable diagnostics for hidden layers.
 * @param variable_importances Compute variable importances for input features (Gedeon method) - can be slow for
 *                             large networks.
 * @param fast_mode Enable fast mode (minor approximation in back-propagation).
 * @param force_load_balance Force extra load balancing to increase training speed for small datasets (to keep all
 *                           cores busy).
 * @param replicate_training_data Replicate the entire training dataset onto every node for faster training on small
 *                                datasets.
 * @param single_node_mode Run on a single node for fine-tuning of model parameters.
 * @param shuffle_training_data Enable shuffling of training data (recommended if training data is replicated and
 *                              train_samples_per_iteration is close to #nodes x #rows, of if using
 *                              balance_classes).
 * @param missing_values_handling Handling of missing values. Either MeanImputation or Skip.
 * @param sparse Sparse data handling (more efficient for data with lots of 0 values).
 * @param col_major #DEPRECATED Use a column major weight matrix for input layer. Can speed up forward propagation,
 *                  but might slow down backpropagation.
 * @param average_activation Average activation for sparse auto-encoder. #Experimental
 * @param sparsity_beta Sparsity regularization. #Experimental
 * @param max_categorical_features Max. number of categorical features, enforced via hashing. #Experimental
 * @param reproducible Force reproducibility on small data (will be slow - only uses 1 thread).
 * @param export_weights_and_biases Whether to export Neural Network weights and biases to H2O Frames.
 * @param mini_batch_size Mini-batch size (smaller leads to better fit, larger can speed up and generalize better).
 * @param elastic_averaging Elastic averaging between compute nodes can improve distributed model convergence.
 *                          #Experimental
 * @param elastic_averaging_moving_rate Elastic averaging moving rate (only if elastic averaging is enabled).
 * @param elastic_averaging_regularization Elastic averaging regularization strength (only if elastic averaging is
 *                                         enabled).
 * @param pretrained_autoencoder Pretrained autoencoder model to initialize this model with.
 * @param model_id Destination id for this model; auto-generated if not specified.
 * @param training_frame Id of the training data frame.
 * @param validation_frame Id of the validation data frame.
 * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
 * @param keep_cross_validation_models Whether to keep the cross-validation models.
 * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
 * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
 * @param parallelize_cross_validation Allow parallel training of cross-validation models
 * @param distribution Distribution function
 * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
 * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
 * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
 *                    between 0 and 1).
 * @param response_column Response variable column.
 * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
 *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
 *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
 *                       observation weights and do not increase the size of the data frame. This is typically the
 *                       number of times a row is repeated, but non-integer values are supported as well. During
 *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
 *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
 *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
 * @param offset_column Offset column. This will be added to the combination of columns before applying the link
 *                      function.
 * @param fold_column Column with cross-validation fold index assignment per observation.
 * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
 *                        option will stratify the folds based on the response variable, for classification
 *                        problems.
 * @param categorical_encoding Encoding scheme for categorical features
 * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
 *                               for model training. Only used for categorical_encoding == EnumLimited.
 * @param ignored_columns Names of columns to ignore for training.
 * @param ignore_const_cols Ignore constant columns.
 * @param score_each_iteration Whether to score during each iteration of model training.
 * @param checkpoint Model checkpoint to resume training with.
 * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
 *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
 *                        to disable)
 * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
 * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
 *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
 *                        can only be used in GBM and DRF with the Python client.
 * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
 *                           not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
 *                       binning.
 * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
 * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
 * @param export_checkpoints_dir Automatically export generated models to this directory.
 * @param auc_type Set default multinomial AUC type.
 * @return a {@link Call} which, when executed, submits the grid-search request and yields its schema.
 */
@FormUrlEncoded
@POST("/99/Grid/deeplearning")
Call<DeepLearningV3> trainDeeplearning(
@Field("balance_classes") boolean balance_classes,
@Field("class_sampling_factors") float[] class_sampling_factors,
@Field("max_after_balance_size") float max_after_balance_size,
@Field("max_confusion_matrix_size") int max_confusion_matrix_size,
@Field("activation") DeepLearningActivation activation,
@Field("hidden") int[] hidden,
@Field("epochs") double epochs,
@Field("train_samples_per_iteration") long train_samples_per_iteration,
@Field("target_ratio_comm_to_comp") double target_ratio_comm_to_comp,
@Field("seed") long seed,
@Field("adaptive_rate") boolean adaptive_rate,
@Field("rho") double rho,
@Field("epsilon") double epsilon,
@Field("rate") double rate,
@Field("rate_annealing") double rate_annealing,
@Field("rate_decay") double rate_decay,
@Field("momentum_start") double momentum_start,
@Field("momentum_ramp") double momentum_ramp,
@Field("momentum_stable") double momentum_stable,
@Field("nesterov_accelerated_gradient") boolean nesterov_accelerated_gradient,
@Field("input_dropout_ratio") double input_dropout_ratio,
@Field("hidden_dropout_ratios") double[] hidden_dropout_ratios,
@Field("l1") double l1,
@Field("l2") double l2,
@Field("max_w2") float max_w2,
@Field("initial_weight_distribution") DeepLearningInitialWeightDistribution initial_weight_distribution,
@Field("initial_weight_scale") double initial_weight_scale,
@Field("initial_weights") String[] initial_weights,
@Field("initial_biases") String[] initial_biases,
@Field("loss") DeepLearningLoss loss,
@Field("score_interval") double score_interval,
@Field("score_training_samples") long score_training_samples,
@Field("score_validation_samples") long score_validation_samples,
@Field("score_duty_cycle") double score_duty_cycle,
@Field("classification_stop") double classification_stop,
@Field("regression_stop") double regression_stop,
@Field("quiet_mode") boolean quiet_mode,
@Field("score_validation_sampling") DeepLearningClassSamplingMethod score_validation_sampling,
@Field("overwrite_with_best_model") boolean overwrite_with_best_model,
@Field("autoencoder") boolean autoencoder,
@Field("use_all_factor_levels") boolean use_all_factor_levels,
@Field("standardize") boolean standardize,
@Field("diagnostics") boolean diagnostics,
@Field("variable_importances") boolean variable_importances,
@Field("fast_mode") boolean fast_mode,
@Field("force_load_balance") boolean force_load_balance,
@Field("replicate_training_data") boolean replicate_training_data,
@Field("single_node_mode") boolean single_node_mode,
@Field("shuffle_training_data") boolean shuffle_training_data,
@Field("missing_values_handling") DeepLearningMissingValuesHandling missing_values_handling,
@Field("sparse") boolean sparse,
@Field("col_major") boolean col_major,
@Field("average_activation") double average_activation,
@Field("sparsity_beta") double sparsity_beta,
@Field("max_categorical_features") int max_categorical_features,
@Field("reproducible") boolean reproducible,
@Field("export_weights_and_biases") boolean export_weights_and_biases,
@Field("mini_batch_size") int mini_batch_size,
@Field("elastic_averaging") boolean elastic_averaging,
@Field("elastic_averaging_moving_rate") double elastic_averaging_moving_rate,
@Field("elastic_averaging_regularization") double elastic_averaging_regularization,
@Field("pretrained_autoencoder") String pretrained_autoencoder,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
/**
 * Run grid search for DeepLearning model, relying on the server-side defaults for every
 * hyper-parameter (zero-argument overload of the parameterized {@code trainDeeplearning} above).
 * <p>
 * NOTE(review): a {@code @FormUrlEncoded} method with zero {@code @Field} parameters is rejected
 * by Retrofit 2 at invocation time — confirm this overload is actually callable.
 *
 * @return a {@link Call} which, when executed, submits the grid-search request and yields its schema.
 */
@FormUrlEncoded
@POST("/99/Grid/deeplearning")
Call<DeepLearningV3> trainDeeplearning();
/**
* Resume grid search for DeepLearning model.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs.
* @param activation Activation function.
* @param hidden Hidden layer sizes (e.g. [100, 100]).
* @param epochs How many times the dataset should be iterated (streamed), can be fractional.
* @param train_samples_per_iteration Number of training samples (globally) per MapReduce iteration. Special values
* are 0: one epoch, -1: all available data (e.g., replicated training data), -2:
* automatic.
* @param target_ratio_comm_to_comp Target ratio of communication overhead to computation. Only for multi-node
* operation and train_samples_per_iteration = -2 (auto-tuning).
* @param seed Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded.
* @param adaptive_rate Adaptive learning rate.
* @param rho Adaptive learning rate time decay factor (similarity to prior updates).
* @param epsilon Adaptive learning rate smoothing factor (to avoid divisions by zero and allow progress).
* @param rate Learning rate (higher => less stable, lower => slower convergence).
* @param rate_annealing Learning rate annealing: rate / (1 + rate_annealing * samples).
* @param rate_decay Learning rate decay factor between layers (N-th layer: rate * rate_decay ^ (n - 1).
* @param momentum_start Initial momentum at the beginning of training (try 0.5).
* @param momentum_ramp Number of training samples for which momentum increases.
* @param momentum_stable Final momentum after the ramp is over (try 0.99).
* @param nesterov_accelerated_gradient Use Nesterov accelerated gradient (recommended).
* @param input_dropout_ratio Input layer dropout ratio (can improve generalization, try 0.1 or 0.2).
* @param hidden_dropout_ratios Hidden layer dropout ratios (can improve generalization), specify one value per
* hidden layer, defaults to 0.5.
* @param l1 L1 regularization (can add stability and improve generalization, causes many weights to become 0).
* @param l2 L2 regularization (can add stability and improve generalization, causes many weights to be small.
* @param max_w2 Constraint for squared sum of incoming weights per unit (e.g. for Rectifier).
* @param initial_weight_distribution Initial weight distribution.
* @param initial_weight_scale Uniform: -value...value, Normal: stddev.
* @param initial_weights A list of H2OFrame ids to initialize the weight matrices of this model with.
* @param initial_biases A list of H2OFrame ids to initialize the bias vectors of this model with.
* @param loss Loss function.
* @param score_interval Shortest time interval (in seconds) between model scoring.
* @param score_training_samples Number of training set samples for scoring (0 for all).
* @param score_validation_samples Number of validation set samples for scoring (0 for all).
* @param score_duty_cycle Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring).
* @param classification_stop Stopping criterion for classification error fraction on training data (-1 to disable).
* @param regression_stop Stopping criterion for regression error (MSE) on training data (-1 to disable).
* @param quiet_mode Enable quiet mode for less output to standard output.
* @param score_validation_sampling Method used to sample validation dataset for scoring.
* @param overwrite_with_best_model If enabled, override the final model with the best model found during training.
* @param autoencoder Auto-Encoder.
* @param use_all_factor_levels Use all factor levels of categorical variables. Otherwise, the first factor level is
* omitted (without loss of accuracy). Useful for variable importances and auto-enabled
* for autoencoder.
* @param standardize If enabled, automatically standardize the data. If disabled, the user must provide properly
* scaled input data.
* @param diagnostics Enable diagnostics for hidden layers.
* @param variable_importances Compute variable importances for input features (Gedeon method) - can be slow for
* large networks.
* @param fast_mode Enable fast mode (minor approximation in back-propagation).
* @param force_load_balance Force extra load balancing to increase training speed for small datasets (to keep all
* cores busy).
* @param replicate_training_data Replicate the entire training dataset onto every node for faster training on small
* datasets.
* @param single_node_mode Run on a single node for fine-tuning of model parameters.
* @param shuffle_training_data Enable shuffling of training data (recommended if training data is replicated and
* train_samples_per_iteration is close to #nodes x #rows, of if using
* balance_classes).
* @param missing_values_handling Handling of missing values. Either MeanImputation or Skip.
* @param sparse Sparse data handling (more efficient for data with lots of 0 values).
* @param col_major #DEPRECATED Use a column major weight matrix for input layer. Can speed up forward propagation,
* but might slow down backpropagation.
* @param average_activation Average activation for sparse auto-encoder. #Experimental
* @param sparsity_beta Sparsity regularization. #Experimental
* @param max_categorical_features Max. number of categorical features, enforced via hashing. #Experimental
* @param reproducible Force reproducibility on small data (will be slow - only uses 1 thread).
* @param export_weights_and_biases Whether to export Neural Network weights and biases to H2O Frames.
* @param mini_batch_size Mini-batch size (smaller leads to better fit, larger can speed up and generalize better).
* @param elastic_averaging Elastic averaging between compute nodes can improve distributed model convergence.
* #Experimental
* @param elastic_averaging_moving_rate Elastic averaging moving rate (only if elastic averaging is enabled).
* @param elastic_averaging_regularization Elastic averaging regularization strength (only if elastic averaging is
* enabled).
* @param pretrained_autoencoder Pretrained autoencoder model to initialize this model with.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Auto-generated Retrofit binding: resumes a checkpointed Deep Learning grid search
  // (POST /99/Grid/deeplearning/resume). Every @Field below is form-encoded 1:1 onto the
  // H2O REST parameter of the same name; per-parameter semantics are in the Javadoc above.
  // NOTE: this file is produced by gen_java.py — change the generator, not this file.
  @FormUrlEncoded
  @POST("/99/Grid/deeplearning/resume")
  Call<DeepLearningV3> resumeDeeplearning(
    // -- Deep Learning algorithm-specific parameters --
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("activation") DeepLearningActivation activation,
    @Field("hidden") int[] hidden,
    @Field("epochs") double epochs,
    @Field("train_samples_per_iteration") long train_samples_per_iteration,
    @Field("target_ratio_comm_to_comp") double target_ratio_comm_to_comp,
    @Field("seed") long seed,
    @Field("adaptive_rate") boolean adaptive_rate,
    @Field("rho") double rho,
    @Field("epsilon") double epsilon,
    @Field("rate") double rate,
    @Field("rate_annealing") double rate_annealing,
    @Field("rate_decay") double rate_decay,
    @Field("momentum_start") double momentum_start,
    @Field("momentum_ramp") double momentum_ramp,
    @Field("momentum_stable") double momentum_stable,
    @Field("nesterov_accelerated_gradient") boolean nesterov_accelerated_gradient,
    @Field("input_dropout_ratio") double input_dropout_ratio,
    @Field("hidden_dropout_ratios") double[] hidden_dropout_ratios,
    @Field("l1") double l1,
    @Field("l2") double l2,
    @Field("max_w2") float max_w2,
    @Field("initial_weight_distribution") DeepLearningInitialWeightDistribution initial_weight_distribution,
    @Field("initial_weight_scale") double initial_weight_scale,
    @Field("initial_weights") String[] initial_weights,
    @Field("initial_biases") String[] initial_biases,
    @Field("loss") DeepLearningLoss loss,
    @Field("score_interval") double score_interval,
    @Field("score_training_samples") long score_training_samples,
    @Field("score_validation_samples") long score_validation_samples,
    @Field("score_duty_cycle") double score_duty_cycle,
    @Field("classification_stop") double classification_stop,
    @Field("regression_stop") double regression_stop,
    @Field("quiet_mode") boolean quiet_mode,
    @Field("score_validation_sampling") DeepLearningClassSamplingMethod score_validation_sampling,
    @Field("overwrite_with_best_model") boolean overwrite_with_best_model,
    @Field("autoencoder") boolean autoencoder,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("standardize") boolean standardize,
    @Field("diagnostics") boolean diagnostics,
    @Field("variable_importances") boolean variable_importances,
    @Field("fast_mode") boolean fast_mode,
    @Field("force_load_balance") boolean force_load_balance,
    @Field("replicate_training_data") boolean replicate_training_data,
    @Field("single_node_mode") boolean single_node_mode,
    @Field("shuffle_training_data") boolean shuffle_training_data,
    @Field("missing_values_handling") DeepLearningMissingValuesHandling missing_values_handling,
    @Field("sparse") boolean sparse,
    @Field("col_major") boolean col_major,
    @Field("average_activation") double average_activation,
    @Field("sparsity_beta") double sparsity_beta,
    @Field("max_categorical_features") int max_categorical_features,
    @Field("reproducible") boolean reproducible,
    @Field("export_weights_and_biases") boolean export_weights_and_biases,
    @Field("mini_batch_size") int mini_batch_size,
    @Field("elastic_averaging") boolean elastic_averaging,
    @Field("elastic_averaging_moving_rate") double elastic_averaging_moving_rate,
    @Field("elastic_averaging_regularization") double elastic_averaging_regularization,
    @Field("pretrained_autoencoder") String pretrained_autoencoder,
    // -- common model-build parameters (shared by all H2O algorithms) --
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  // Convenience no-argument overload of the resume endpoint: sends no form fields, so
  // the server is left to apply its own defaults for every parameter.
  @FormUrlEncoded
  @POST("/99/Grid/deeplearning/resume")
  Call<DeepLearningV3> resumeDeeplearning();
/**
* Run grid search for GLM model.
* @param seed Seed for pseudo random number generator (if applicable).
* @param family Family. Use binomial for classification with logistic regression, others are for regression
* problems.
* @param tweedie_variance_power Tweedie variance power
* @param dispersion_learning_rate Dispersion learning rate is only valid for tweedie family dispersion parameter
* estimation using ml. It must be > 0. This controls how much the dispersion
* parameter estimate is to be changed when the calculated loglikelihood actually
* decreases with the new dispersion. In this case, instead of setting new
* dispersion = dispersion + change, we set new dispersion = dispersion +
* dispersion_learning_rate * change. Defaults to 0.5.
* @param tweedie_link_power Tweedie link power.
* @param theta Theta
   * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
   *               with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *               datasets with many columns.
* @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
* alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
* specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
* 0.5 otherwise.
* @param lambda Regularization strength
* @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min.
* @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided).
   * @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
   *                 set to True, the value of nlambdas is set to 30 (fewer lambdas are needed for ridge regression)
   *                 otherwise it is set to 100.
* @param score_iteration_interval Perform scoring for every score_iteration_interval iterations.
* @param standardize Standardize numeric columns to have zero mean and unit variance.
* @param cold_start Only applicable to multiple alpha/lambda values. If false, build the next model for next set
* of alpha/lambda values starting from the values provided by current model. If true will start
* GLM model from scratch.
* @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
* @param influence If set to dfbetas will calculate the difference in beta when a datarow is included and excluded
* in the dataset.
   * @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                    of the training/validation frame, used in conjunction with missing_values_handling = PlugValues).
* @param non_negative Restrict coefficients (not intercept) to be non-negative.
   * @param max_iterations Maximum number of iterations. Value should be >= 1. A value of 0 is only set when only the
   *                       model coefficient names and model coefficient dimensions are needed.
   * @param beta_epsilon Converge if beta changes less (using L-infinity norm) than beta epsilon. ONLY applies to IRLSM
   *                     solver.
* @param objective_epsilon Converge if objective value changes less than this. Default (of -1.0) indicates: If
* lambda_search is set to True the value of objective_epsilon is set to .0001. If the
* lambda_search is set to False and lambda is equal to zero, the value of
* objective_epsilon is set to .000001, for any other value of lambda the default value of
* objective_epsilon is set to .0001.
* @param gradient_epsilon Converge if objective changes less (using L-infinity norm) than this, ONLY applies to
* L-BFGS solver. Default (of -1.0) indicates: If lambda_search is set to False and lambda
* is equal to zero, the default value of gradient_epsilon is equal to .000001, otherwise
* the default value is .0001. If lambda_search is set to True, the conditional values above
* are 1E-8 and 1E-6 respectively.
* @param obj_reg Likelihood divider in objective value computation, default (of -1.0) will set it to 1/nobs.
* @param link Link function.
* @param dispersion_parameter_method Method used to estimate the dispersion parameter for Tweedie, Gamma and
* Negative Binomial only.
* @param startval double array to initialize coefficients for GLM. If standardize is true, the standardized
* coefficients should be used. Otherwise, use the regular coefficients.
* @param calc_like if true, will return likelihood function value.
* @param generate_variable_inflation_factors if true, will generate variable inflation factors for numerical
* predictors. Default to false.
* @param intercept Include constant term in the model
* @param build_null_model If set, will build a model with only the intercept. Default to false.
   * @param fix_dispersion_parameter Only used for Tweedie, Gamma and Negative Binomial GLM. If set, will use the
   *                                 dispersion parameter in init_dispersion_parameter as the standard error and use
   *                                 it to calculate the p-values. Default to false.
* @param init_dispersion_parameter Only used for Tweedie, Gamma and Negative Binomial GLM. Store the initial value
* of dispersion parameter. If fix_dispersion_parameter is set, this value will be
* used in the calculation of p-values.
* @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
* and the mean of response does not reflect reality.
* @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
* lambda that drives all coefficients to zero). Default indicates: if the number of
* observations is greater than the number of variables, then lambda_min_ratio is set to
* 0.0001; if the number of observations is less than the number of variables, then
* lambda_min_ratio is set to 0.01.
* @param beta_constraints Beta constraints
* @param linear_constraints Linear constraints: used to specify linear constraints involving more than one
* coefficients in standard form. It is only supported for solver IRLSM. It contains
* four columns: names (strings for coefficient names or constant), values, types (
* strings of 'Equal' or 'LessThanEqual'), constraint_numbers (0 for first linear
* constraint, 1 for second linear constraint, ...).
* @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
* to prevent expensive model building with many predictors. Default indicates: If the
* IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
* is set to 100000000.
* @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
* for the list.
* @param interaction_pairs A list of pairwise (first order) column interactions.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs.
* @param compute_p_values Request p-values computation, p-values work only with IRLSM solver.
* @param fix_tweedie_variance_power If true, will fix tweedie variance power value to the value set in
* tweedie_variance_power.
* @param remove_collinear_columns In case of linearly dependent columns, remove the dependent columns.
* @param dispersion_epsilon If changes in dispersion parameter estimation or loglikelihood value is smaller than
* dispersion_epsilon, will break out of the dispersion parameter estimation loop using
* maximum likelihood.
* @param tweedie_epsilon In estimating tweedie dispersion parameter using maximum likelihood, this is used to
* choose the lower and upper indices in the approximating of the infinite series summation.
* @param max_iterations_dispersion Control the maximum number of iterations in the dispersion parameter estimation
* loop using maximum likelihood.
* @param generate_scoring_history If set to true, will generate scoring history for GLM. This may significantly
* slow down the algo.
* @param init_optimal_glm If true, will initialize coefficients with values derived from GLM runs without linear
* constraints. Only available for linear constraints.
* @param separate_linear_beta If true, will keep the beta constraints and linear constraints separate. After new
* coefficients are found, first beta constraints will be applied followed by the
* application of linear constraints. Note that the beta constraints in this case will
* not be part of the objective function. If false, will combine the beta and linear
* constraints.
* @param constraint_eta0 For constrained GLM only. It affects the setting of eta_k+1=eta_0/power(ck+1, alpha).
* @param constraint_tau For constrained GLM only. It affects the setting of c_k+1=tau*c_k.
* @param constraint_alpha For constrained GLM only. It affects the setting of eta_k = eta_0/pow(c_0, alpha).
* @param constraint_beta For constrained GLM only. It affects the setting of eta_k+1 = eta_k/pow(c_k, beta).
* @param constraint_c0 For constrained GLM only. It affects the initial setting of epsilon_k = 1/c_0.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Auto-generated Retrofit binding: starts a GLM grid search (POST /99/Grid/glm).
  // Every @Field below is form-encoded 1:1 onto the H2O REST parameter of the same name;
  // per-parameter semantics are in the Javadoc above.
  // NOTE: this file is produced by gen_java.py — change the generator, not this file.
  @FormUrlEncoded
  @POST("/99/Grid/glm")
  Call<GLMV3> trainGlm(
    // -- GLM algorithm-specific parameters --
    @Field("seed") long seed,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("dispersion_learning_rate") double dispersion_learning_rate,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("solver") GLMSolver solver,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("lambda_search") boolean lambda_search,
    @Field("early_stopping") boolean early_stopping,
    @Field("nlambdas") int nlambdas,
    @Field("score_iteration_interval") int score_iteration_interval,
    @Field("standardize") boolean standardize,
    @Field("cold_start") boolean cold_start,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("influence") GLMInfluence influence,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("max_iterations") int max_iterations,
    @Field("beta_epsilon") double beta_epsilon,
    @Field("objective_epsilon") double objective_epsilon,
    @Field("gradient_epsilon") double gradient_epsilon,
    @Field("obj_reg") double obj_reg,
    @Field("link") GLMLink link,
    @Field("dispersion_parameter_method") GLMDispersionMethod dispersion_parameter_method,
    @Field("startval") double[] startval,
    @Field("calc_like") boolean calc_like,
    @Field("generate_variable_inflation_factors") boolean generate_variable_inflation_factors,
    @Field("intercept") boolean intercept,
    @Field("build_null_model") boolean build_null_model,
    @Field("fix_dispersion_parameter") boolean fix_dispersion_parameter,
    @Field("init_dispersion_parameter") double init_dispersion_parameter,
    @Field("prior") double prior,
    @Field("lambda_min_ratio") double lambda_min_ratio,
    @Field("beta_constraints") String beta_constraints,
    @Field("linear_constraints") String linear_constraints,
    @Field("max_active_predictors") int max_active_predictors,
    @Field("interactions") String[] interactions,
    @Field("interaction_pairs") StringPairV3[] interaction_pairs,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("fix_tweedie_variance_power") boolean fix_tweedie_variance_power,
    @Field("remove_collinear_columns") boolean remove_collinear_columns,
    @Field("dispersion_epsilon") double dispersion_epsilon,
    @Field("tweedie_epsilon") double tweedie_epsilon,
    @Field("max_iterations_dispersion") int max_iterations_dispersion,
    @Field("generate_scoring_history") boolean generate_scoring_history,
    @Field("init_optimal_glm") boolean init_optimal_glm,
    @Field("separate_linear_beta") boolean separate_linear_beta,
    @Field("constraint_eta0") double constraint_eta0,
    @Field("constraint_tau") double constraint_tau,
    @Field("constraint_alpha") double constraint_alpha,
    @Field("constraint_beta") double constraint_beta,
    @Field("constraint_c0") double constraint_c0,
    // -- common model-build parameters (shared by all H2O algorithms) --
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  // Convenience no-argument overload of the GLM grid-search endpoint: sends no form
  // fields, so the server is left to apply its own defaults for every parameter.
  @FormUrlEncoded
  @POST("/99/Grid/glm")
  Call<GLMV3> trainGlm();
/**
* Resume grid search for GLM model.
* @param seed Seed for pseudo random number generator (if applicable).
* @param family Family. Use binomial for classification with logistic regression, others are for regression
* problems.
* @param tweedie_variance_power Tweedie variance power
* @param dispersion_learning_rate Dispersion learning rate is only valid for tweedie family dispersion parameter
* estimation using ml. It must be > 0. This controls how much the dispersion
* parameter estimate is to be changed when the calculated loglikelihood actually
* decreases with the new dispersion. In this case, instead of setting new
* dispersion = dispersion + change, we set new dispersion = dispersion +
* dispersion_learning_rate * change. Defaults to 0.5.
* @param tweedie_link_power Tweedie link power.
* @param theta Theta
   * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
   *               with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *               datasets with many columns.
* @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
* alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
* specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
* 0.5 otherwise.
* @param lambda Regularization strength
* @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min.
* @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided).
   * @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
   *                 set to True, the value of nlambdas is set to 30 (fewer lambdas are needed for ridge regression)
   *                 otherwise it is set to 100.
* @param score_iteration_interval Perform scoring for every score_iteration_interval iterations.
* @param standardize Standardize numeric columns to have zero mean and unit variance.
* @param cold_start Only applicable to multiple alpha/lambda values. If false, build the next model for next set
* of alpha/lambda values starting from the values provided by current model. If true will start
* GLM model from scratch.
* @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
* @param influence If set to dfbetas will calculate the difference in beta when a datarow is included and excluded
* in the dataset.
   * @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                    of the training/validation frame, used in conjunction with missing_values_handling = PlugValues).
* @param non_negative Restrict coefficients (not intercept) to be non-negative.
   * @param max_iterations Maximum number of iterations. Value should be >= 1. A value of 0 is only set when only the
   *                       model coefficient names and model coefficient dimensions are needed.
   * @param beta_epsilon Converge if beta changes less (using L-infinity norm) than beta epsilon. ONLY applies to IRLSM
   *                     solver.
* @param objective_epsilon Converge if objective value changes less than this. Default (of -1.0) indicates: If
* lambda_search is set to True the value of objective_epsilon is set to .0001. If the
* lambda_search is set to False and lambda is equal to zero, the value of
* objective_epsilon is set to .000001, for any other value of lambda the default value of
* objective_epsilon is set to .0001.
* @param gradient_epsilon Converge if objective changes less (using L-infinity norm) than this, ONLY applies to
* L-BFGS solver. Default (of -1.0) indicates: If lambda_search is set to False and lambda
* is equal to zero, the default value of gradient_epsilon is equal to .000001, otherwise
* the default value is .0001. If lambda_search is set to True, the conditional values above
* are 1E-8 and 1E-6 respectively.
* @param obj_reg Likelihood divider in objective value computation, default (of -1.0) will set it to 1/nobs.
* @param link Link function.
* @param dispersion_parameter_method Method used to estimate the dispersion parameter for Tweedie, Gamma and
* Negative Binomial only.
* @param startval double array to initialize coefficients for GLM. If standardize is true, the standardized
* coefficients should be used. Otherwise, use the regular coefficients.
* @param calc_like if true, will return likelihood function value.
* @param generate_variable_inflation_factors if true, will generate variable inflation factors for numerical
* predictors. Default to false.
* @param intercept Include constant term in the model
* @param build_null_model If set, will build a model with only the intercept. Default to false.
   * @param fix_dispersion_parameter Only used for Tweedie, Gamma and Negative Binomial GLM. If set, will use the
   *                                 dispersion parameter in init_dispersion_parameter as the standard error and use
   *                                 it to calculate the p-values. Default to false.
* @param init_dispersion_parameter Only used for Tweedie, Gamma and Negative Binomial GLM. Store the initial value
* of dispersion parameter. If fix_dispersion_parameter is set, this value will be
* used in the calculation of p-values.
* @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
* and the mean of response does not reflect reality.
* @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
* lambda that drives all coefficients to zero). Default indicates: if the number of
* observations is greater than the number of variables, then lambda_min_ratio is set to
* 0.0001; if the number of observations is less than the number of variables, then
* lambda_min_ratio is set to 0.01.
* @param beta_constraints Beta constraints
* @param linear_constraints Linear constraints: used to specify linear constraints involving more than one
* coefficients in standard form. It is only supported for solver IRLSM. It contains
* four columns: names (strings for coefficient names or constant), values, types (
* strings of 'Equal' or 'LessThanEqual'), constraint_numbers (0 for first linear
* constraint, 1 for second linear constraint, ...).
* @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
* to prevent expensive model building with many predictors. Default indicates: If the
* IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
* is set to 100000000.
* @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
* for the list.
* @param interaction_pairs A list of pairwise (first order) column interactions.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs.
* @param compute_p_values Request p-values computation, p-values work only with IRLSM solver.
* @param fix_tweedie_variance_power If true, will fix tweedie variance power value to the value set in
* tweedie_variance_power.
* @param remove_collinear_columns In case of linearly dependent columns, remove the dependent columns.
* @param dispersion_epsilon If changes in dispersion parameter estimation or loglikelihood value is smaller than
* dispersion_epsilon, will break out of the dispersion parameter estimation loop using
* maximum likelihood.
* @param tweedie_epsilon In estimating tweedie dispersion parameter using maximum likelihood, this is used to
* choose the lower and upper indices in the approximating of the infinite series summation.
* @param max_iterations_dispersion Control the maximum number of iterations in the dispersion parameter estimation
* loop using maximum likelihood.
* @param generate_scoring_history If set to true, will generate scoring history for GLM. This may significantly
* slow down the algo.
* @param init_optimal_glm If true, will initialize coefficients with values derived from GLM runs without linear
* constraints. Only available for linear constraints.
* @param separate_linear_beta If true, will keep the beta constraints and linear constraints separate. After new
* coefficients are found, first beta constraints will be applied followed by the
* application of linear constraints. Note that the beta constraints in this case will
* not be part of the objective function. If false, will combine the beta and linear
* constraints.
* @param constraint_eta0 For constrained GLM only. It affects the setting of eta_k+1=eta_0/power(ck+1, alpha).
* @param constraint_tau For constrained GLM only. It affects the setting of c_k+1=tau*c_k.
* @param constraint_alpha For constrained GLM only. It affects the setting of eta_k = eta_0/pow(c_0, alpha).
* @param constraint_beta For constrained GLM only. It affects the setting of eta_k+1 = eta_k/pow(c_k, beta).
* @param constraint_c0 For constrained GLM only. It affects the initial setting of epsilon_k = 1/c_0.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/99/Grid/glm/resume")
  // Resumes a previously started GLM grid search. The full GLM + common model parameter set
  // is re-submitted as URL-encoded form fields (auto-generated Retrofit declaration; field
  // names mirror the H2O REST schema exactly and must not be renamed).
  Call<GLMV3> resumeGlm(
    @Field("seed") long seed,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("dispersion_learning_rate") double dispersion_learning_rate,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("solver") GLMSolver solver,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("lambda_search") boolean lambda_search,
    @Field("early_stopping") boolean early_stopping,
    @Field("nlambdas") int nlambdas,
    @Field("score_iteration_interval") int score_iteration_interval,
    @Field("standardize") boolean standardize,
    @Field("cold_start") boolean cold_start,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("influence") GLMInfluence influence,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("max_iterations") int max_iterations,
    @Field("beta_epsilon") double beta_epsilon,
    @Field("objective_epsilon") double objective_epsilon,
    @Field("gradient_epsilon") double gradient_epsilon,
    @Field("obj_reg") double obj_reg,
    @Field("link") GLMLink link,
    @Field("dispersion_parameter_method") GLMDispersionMethod dispersion_parameter_method,
    @Field("startval") double[] startval,
    @Field("calc_like") boolean calc_like,
    @Field("generate_variable_inflation_factors") boolean generate_variable_inflation_factors,
    @Field("intercept") boolean intercept,
    @Field("build_null_model") boolean build_null_model,
    @Field("fix_dispersion_parameter") boolean fix_dispersion_parameter,
    @Field("init_dispersion_parameter") double init_dispersion_parameter,
    @Field("prior") double prior,
    @Field("lambda_min_ratio") double lambda_min_ratio,
    @Field("beta_constraints") String beta_constraints,
    @Field("linear_constraints") String linear_constraints,
    @Field("max_active_predictors") int max_active_predictors,
    @Field("interactions") String[] interactions,
    @Field("interaction_pairs") StringPairV3[] interaction_pairs,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("fix_tweedie_variance_power") boolean fix_tweedie_variance_power,
    @Field("remove_collinear_columns") boolean remove_collinear_columns,
    @Field("dispersion_epsilon") double dispersion_epsilon,
    @Field("tweedie_epsilon") double tweedie_epsilon,
    @Field("max_iterations_dispersion") int max_iterations_dispersion,
    @Field("generate_scoring_history") boolean generate_scoring_history,
    @Field("init_optimal_glm") boolean init_optimal_glm,
    @Field("separate_linear_beta") boolean separate_linear_beta,
    @Field("constraint_eta0") double constraint_eta0,
    @Field("constraint_tau") double constraint_tau,
    @Field("constraint_alpha") double constraint_alpha,
    @Field("constraint_beta") double constraint_beta,
    @Field("constraint_c0") double constraint_c0,
    // Common Model/CV parameters shared by all grid-search endpoints in this interface.
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Convenience overload of {@link #resumeGlm} that submits no form fields.
   */
  // NOTE(review): Retrofit 2 eagerly rejects @FormUrlEncoded methods that declare zero
  // @Field parameters ("Form-encoded method must contain at least one @Field") — verify
  // this generated zero-arg overload is actually invocable before relying on it.
  @FormUrlEncoded
  @POST("/99/Grid/glm/resume")
  Call<GLMV3> resumeGlm();
/**
* Run grid search for GLRM model.
* @param transform Transformation of training data
* @param k Rank of matrix approximation
* @param loss Numeric loss function
* @param multi_loss Categorical loss function
* @param loss_by_col Loss function by column (override)
* @param loss_by_col_idx Loss function by column index (override)
* @param period Length of period (only used with periodic loss function)
* @param regularization_x Regularization function for X matrix
* @param regularization_y Regularization function for Y matrix
* @param gamma_x Regularization weight on X matrix
* @param gamma_y Regularization weight on Y matrix
* @param max_iterations Maximum number of iterations
* @param max_updates Maximum number of updates, defaults to 2*max_iterations
* @param init_step_size Initial step size
* @param min_step_size Minimum step size
* @param seed RNG seed for initialization
* @param init Initialization mode
* @param svd_method Method for computing SVD during initialization (Caution: Randomized is currently experimental
* and unstable)
* @param user_y User-specified initial Y
* @param user_x User-specified initial X
* @param loading_name [Deprecated] Use representation_name instead. Frame key to save resulting X.
* @param representation_name Frame key to save resulting X
* @param expand_user_y Expand categorical columns in user-specified initial Y
* @param impute_original Reconstruct original training data by reversing transform
* @param recover_svd Recover singular values and eigenvectors of XY
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/99/Grid/glrm")
  // Starts a GLRM grid search. GLRM-specific parameters come first, followed by the common
  // Model/CV parameter set (auto-generated Retrofit declaration; field names mirror the
  // H2O REST schema exactly and must not be renamed).
  Call<GLRMV3> trainGlrm(
    @Field("transform") DataInfoTransformType transform,
    @Field("k") int k,
    @Field("loss") GenmodelalgosglrmGlrmLoss loss,
    @Field("multi_loss") GenmodelalgosglrmGlrmLoss multi_loss,
    @Field("loss_by_col") GenmodelalgosglrmGlrmLoss[] loss_by_col,
    @Field("loss_by_col_idx") int[] loss_by_col_idx,
    @Field("period") int period,
    @Field("regularization_x") GenmodelalgosglrmGlrmRegularizer regularization_x,
    @Field("regularization_y") GenmodelalgosglrmGlrmRegularizer regularization_y,
    @Field("gamma_x") double gamma_x,
    @Field("gamma_y") double gamma_y,
    @Field("max_iterations") int max_iterations,
    @Field("max_updates") int max_updates,
    @Field("init_step_size") double init_step_size,
    @Field("min_step_size") double min_step_size,
    @Field("seed") long seed,
    @Field("init") GenmodelalgosglrmGlrmInitialization init,
    @Field("svd_method") SVDMethod svd_method,
    @Field("user_y") String user_y,
    @Field("user_x") String user_x,
    @Field("loading_name") String loading_name,
    @Field("representation_name") String representation_name,
    @Field("expand_user_y") boolean expand_user_y,
    @Field("impute_original") boolean impute_original,
    @Field("recover_svd") boolean recover_svd,
    // Common Model/CV parameters shared by all grid-search endpoints in this interface.
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Convenience overload of {@link #trainGlrm}: starts a GLRM grid search supplying only the
   * rank of the matrix approximation {@code k}; every other field is omitted from the form body.
   */
  @FormUrlEncoded
  @POST("/99/Grid/glrm")
  Call<GLRMV3> trainGlrm(@Field("k") int k);
/**
* Resume grid search for GLRM model.
* @param transform Transformation of training data
* @param k Rank of matrix approximation
* @param loss Numeric loss function
* @param multi_loss Categorical loss function
* @param loss_by_col Loss function by column (override)
* @param loss_by_col_idx Loss function by column index (override)
* @param period Length of period (only used with periodic loss function)
* @param regularization_x Regularization function for X matrix
* @param regularization_y Regularization function for Y matrix
* @param gamma_x Regularization weight on X matrix
* @param gamma_y Regularization weight on Y matrix
* @param max_iterations Maximum number of iterations
* @param max_updates Maximum number of updates, defaults to 2*max_iterations
* @param init_step_size Initial step size
* @param min_step_size Minimum step size
* @param seed RNG seed for initialization
* @param init Initialization mode
* @param svd_method Method for computing SVD during initialization (Caution: Randomized is currently experimental
* and unstable)
* @param user_y User-specified initial Y
* @param user_x User-specified initial X
* @param loading_name [Deprecated] Use representation_name instead. Frame key to save resulting X.
* @param representation_name Frame key to save resulting X
* @param expand_user_y Expand categorical columns in user-specified initial Y
* @param impute_original Reconstruct original training data by reversing transform
* @param recover_svd Recover singular values and eigenvectors of XY
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/99/Grid/glrm/resume")
  // Resumes a previously started GLRM grid search. The parameter list is identical to
  // trainGlrm and is re-submitted in full as URL-encoded form fields (auto-generated
  // Retrofit declaration; field names mirror the H2O REST schema and must not be renamed).
  Call<GLRMV3> resumeGlrm(
    @Field("transform") DataInfoTransformType transform,
    @Field("k") int k,
    @Field("loss") GenmodelalgosglrmGlrmLoss loss,
    @Field("multi_loss") GenmodelalgosglrmGlrmLoss multi_loss,
    @Field("loss_by_col") GenmodelalgosglrmGlrmLoss[] loss_by_col,
    @Field("loss_by_col_idx") int[] loss_by_col_idx,
    @Field("period") int period,
    @Field("regularization_x") GenmodelalgosglrmGlrmRegularizer regularization_x,
    @Field("regularization_y") GenmodelalgosglrmGlrmRegularizer regularization_y,
    @Field("gamma_x") double gamma_x,
    @Field("gamma_y") double gamma_y,
    @Field("max_iterations") int max_iterations,
    @Field("max_updates") int max_updates,
    @Field("init_step_size") double init_step_size,
    @Field("min_step_size") double min_step_size,
    @Field("seed") long seed,
    @Field("init") GenmodelalgosglrmGlrmInitialization init,
    @Field("svd_method") SVDMethod svd_method,
    @Field("user_y") String user_y,
    @Field("user_x") String user_x,
    @Field("loading_name") String loading_name,
    @Field("representation_name") String representation_name,
    @Field("expand_user_y") boolean expand_user_y,
    @Field("impute_original") boolean impute_original,
    @Field("recover_svd") boolean recover_svd,
    // Common Model/CV parameters shared by all grid-search endpoints in this interface.
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Convenience overload of {@link #resumeGlrm}: resumes a GLRM grid search supplying only the
   * rank of the matrix approximation {@code k}; every other field is omitted from the form body.
   */
  @FormUrlEncoded
  @POST("/99/Grid/glrm/resume")
  Call<GLRMV3> resumeGlrm(@Field("k") int k);
/**
* Run grid search for KMeans model.
* @param user_points This option allows you to specify a dataframe, where each row represents an initial cluster
* center. The user-specified points must have the same number of columns as the training
* observations. The number of rows must equal the number of clusters
* @param max_iterations Maximum training iterations (if estimate_k is enabled, then this is for each inner Lloyds
* iteration)
* @param standardize Standardize columns before computing distances
* @param seed RNG Seed
* @param init Initialization mode
* @param estimate_k Whether to estimate the number of clusters (<=k) iteratively and deterministically.
* @param cluster_size_constraints An array specifying the minimum number of points that should be in each cluster.
* The length of the constraints array has to be the same as the number of clusters.
* @param k The max. number of clusters. If estimate_k is disabled, the model will find k centroids, otherwise it
* will find up to k centroids.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Form-encoded POST to the KMeans grid-search endpoint; each @Field value is
  // the name of the corresponding H2O REST API form parameter (documented in the
  // Javadoc above). The server returns the launched grid-search job schema.
  @FormUrlEncoded
  @POST("/99/Grid/kmeans")
  Call<KMeansV3> trainKmeans(
    @Field("user_points") String user_points,
    @Field("max_iterations") int max_iterations,
    @Field("standardize") boolean standardize,
    @Field("seed") long seed,
    @Field("init") KMeansInitialization init,
    @Field("estimate_k") boolean estimate_k,
    @Field("cluster_size_constraints") int[] cluster_size_constraints,
    @Field("k") int k,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Run grid search for KMeans model without specifying any parameters.
   * No form fields are sent in the request, so parameter defaults are applied
   * by the H2O backend rather than by this client.
   */
  @FormUrlEncoded
  @POST("/99/Grid/kmeans")
  Call<KMeansV3> trainKmeans();
/**
* Resume grid search for KMeans model.
* @param user_points This option allows you to specify a dataframe, where each row represents an initial cluster
* center. The user-specified points must have the same number of columns as the training
* observations. The number of rows must equal the number of clusters
* @param max_iterations Maximum training iterations (if estimate_k is enabled, then this is for each inner Lloyds
* iteration)
* @param standardize Standardize columns before computing distances
* @param seed RNG Seed
* @param init Initialization mode
* @param estimate_k Whether to estimate the number of clusters (<=k) iteratively and deterministically.
* @param cluster_size_constraints An array specifying the minimum number of points that should be in each cluster.
* The length of the constraints array has to be the same as the number of clusters.
* @param k The max. number of clusters. If estimate_k is disabled, the model will find k centroids, otherwise it
* will find up to k centroids.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Form-encoded POST to the KMeans grid-search resume endpoint; same parameter
  // set as trainKmeans (each @Field value is the H2O REST API form-parameter
  // name; meanings are described in the Javadoc above).
  @FormUrlEncoded
  @POST("/99/Grid/kmeans/resume")
  Call<KMeansV3> resumeKmeans(
    @Field("user_points") String user_points,
    @Field("max_iterations") int max_iterations,
    @Field("standardize") boolean standardize,
    @Field("seed") long seed,
    @Field("init") KMeansInitialization init,
    @Field("estimate_k") boolean estimate_k,
    @Field("cluster_size_constraints") int[] cluster_size_constraints,
    @Field("k") int k,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Resume grid search for KMeans model without specifying any parameters.
   * No form fields are sent in the request, so parameter defaults are applied
   * by the H2O backend rather than by this client.
   */
  @FormUrlEncoded
  @POST("/99/Grid/kmeans/resume")
  Call<KMeansV3> resumeKmeans();
/**
* Run grid search for NaiveBayes model.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param laplace Laplace smoothing parameter
* @param min_sdev Min. standard deviation to use for observations with not enough data
* @param eps_sdev Cutoff below which standard deviation is replaced with min_sdev
* @param min_prob Min. probability to use for observations with not enough data
* @param eps_prob Cutoff below which probability is replaced with min_prob
* @param compute_metrics Compute metrics on training data
* @param seed Seed for pseudo random number generator (only used for cross-validation and fold_assignment="Random"
* or "AUTO")
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Form-encoded POST to the NaiveBayes grid-search endpoint; each @Field value
  // is the name of the corresponding H2O REST API form parameter (documented in
  // the Javadoc above).
  @FormUrlEncoded
  @POST("/99/Grid/naivebayes")
  Call<NaiveBayesV3> trainNaivebayes(
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("laplace") double laplace,
    @Field("min_sdev") double min_sdev,
    @Field("eps_sdev") double eps_sdev,
    @Field("min_prob") double min_prob,
    @Field("eps_prob") double eps_prob,
    @Field("compute_metrics") boolean compute_metrics,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Run grid search for NaiveBayes model without specifying any parameters.
   * No form fields are sent in the request, so parameter defaults are applied
   * by the H2O backend rather than by this client.
   */
  @FormUrlEncoded
  @POST("/99/Grid/naivebayes")
  Call<NaiveBayesV3> trainNaivebayes();
/**
* Resume grid search for NaiveBayes model.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param laplace Laplace smoothing parameter
* @param min_sdev Min. standard deviation to use for observations with not enough data
* @param eps_sdev Cutoff below which standard deviation is replaced with min_sdev
* @param min_prob Min. probability to use for observations with not enough data
* @param eps_prob Cutoff below which probability is replaced with min_prob
* @param compute_metrics Compute metrics on training data
* @param seed Seed for pseudo random number generator (only used for cross-validation and fold_assignment="Random"
* or "AUTO")
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Form-encoded POST to the NaiveBayes grid-search resume endpoint; same
  // parameter set as trainNaivebayes (each @Field value is the H2O REST API
  // form-parameter name; meanings are described in the Javadoc above).
  @FormUrlEncoded
  @POST("/99/Grid/naivebayes/resume")
  Call<NaiveBayesV3> resumeNaivebayes(
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("laplace") double laplace,
    @Field("min_sdev") double min_sdev,
    @Field("eps_sdev") double eps_sdev,
    @Field("min_prob") double min_prob,
    @Field("eps_prob") double eps_prob,
    @Field("compute_metrics") boolean compute_metrics,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Resume grid search for NaiveBayes model without specifying any parameters.
   * No form fields are sent in the request, so parameter defaults are applied
   * by the H2O backend rather than by this client.
   */
  @FormUrlEncoded
  @POST("/99/Grid/naivebayes/resume")
  Call<NaiveBayesV3> resumeNaivebayes();
/**
* Run grid search for PCA model.
* @param transform Transformation of training data
* @param pca_method Specify the algorithm to use for computing the principal components: GramSVD - uses a
* distributed computation of the Gram matrix, followed by a local SVD; Power - computes the SVD
* using the power iteration method (experimental); Randomized - uses randomized subspace
* iteration method; GLRM - fits a generalized low-rank model with L2 loss function and no
* regularization and solves for the SVD using local matrix algebra (experimental)
* @param pca_impl Specify the implementation to use for computing PCA (via SVD or EVD): MTJ_EVD_DENSEMATRIX -
* eigenvalue decompositions for dense matrix using MTJ; MTJ_EVD_SYMMMATRIX - eigenvalue
* decompositions for symmetric matrix using MTJ; MTJ_SVD_DENSEMATRIX - singular-value
* decompositions for dense matrix using MTJ; JAMA - eigenvalue decompositions for dense matrix
* using JAMA. References: JAMA - http://math.nist.gov/javanumerics/jama/; MTJ -
* https://github.com/fommil/matrix-toolkits-java/
* @param k Rank of matrix approximation
* @param max_iterations Maximum training iterations
* @param seed RNG seed for initialization
* @param use_all_factor_levels Whether first factor level is included in each categorical expansion
* @param compute_metrics Whether to compute metrics on the training data
* @param impute_missing Whether to impute missing entries with the column mean
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Form-encoded POST to the PCA grid-search endpoint; each @Field value is the
  // name of the corresponding H2O REST API form parameter (documented in the
  // Javadoc above).
  @FormUrlEncoded
  @POST("/99/Grid/pca")
  Call<PCAV3> trainPca(
    @Field("transform") DataInfoTransformType transform,
    @Field("pca_method") PCAMethod pca_method,
    @Field("pca_impl") PCAImplementation pca_impl,
    @Field("k") int k,
    @Field("max_iterations") int max_iterations,
    @Field("seed") long seed,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("compute_metrics") boolean compute_metrics,
    @Field("impute_missing") boolean impute_missing,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Convenience overload of {@link #trainPca}: sends only the required rank
   * {@code k}; all other form fields are omitted from the request.
   */
  @FormUrlEncoded
  @POST("/99/Grid/pca")
  Call<PCAV3> trainPca(@Field("k") int k);
/**
* Resume grid search for PCA model.
* @param transform Transformation of training data
* @param pca_method Specify the algorithm to use for computing the principal components: GramSVD - uses a
* distributed computation of the Gram matrix, followed by a local SVD; Power - computes the SVD
* using the power iteration method (experimental); Randomized - uses randomized subspace
* iteration method; GLRM - fits a generalized low-rank model with L2 loss function and no
* regularization and solves for the SVD using local matrix algebra (experimental)
* @param pca_impl Specify the implementation to use for computing PCA (via SVD or EVD): MTJ_EVD_DENSEMATRIX -
* eigenvalue decompositions for dense matrix using MTJ; MTJ_EVD_SYMMMATRIX - eigenvalue
* decompositions for symmetric matrix using MTJ; MTJ_SVD_DENSEMATRIX - singular-value
* decompositions for dense matrix using MTJ; JAMA - eigenvalue decompositions for dense matrix
* using JAMA. References: JAMA - http://math.nist.gov/javanumerics/jama/; MTJ -
* https://github.com/fommil/matrix-toolkits-java/
* @param k Rank of matrix approximation
* @param max_iterations Maximum training iterations
* @param seed RNG seed for initialization
* @param use_all_factor_levels Whether first factor level is included in each categorical expansion
* @param compute_metrics Whether to compute metrics on the training data
* @param impute_missing Whether to impute missing entries with the column mean
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Retrofit service method (declaration only — Retrofit synthesizes the implementation).
  // POST /99/Grid/pca/resume: resumes a previously started PCA grid search; the form
  // fields mirror those of trainPca (see the Javadoc above for field semantics).
  @FormUrlEncoded
  @POST("/99/Grid/pca/resume")
  Call<PCAV3> resumePca(
    @Field("transform") DataInfoTransformType transform,
    @Field("pca_method") PCAMethod pca_method,
    @Field("pca_impl") PCAImplementation pca_impl,
    @Field("k") int k,
    @Field("max_iterations") int max_iterations,
    @Field("seed") long seed,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("compute_metrics") boolean compute_metrics,
    @Field("impute_missing") boolean impute_missing,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Convenience overload of {@link #resumePca}: sends only the required rank
   * {@code k}; all other form fields are omitted from the request.
   */
  @FormUrlEncoded
  @POST("/99/Grid/pca/resume")
  Call<PCAV3> resumePca(@Field("k") int k);
/**
* Run grid search for SVD model.
* @param transform Transformation of training data
* @param svd_method Method for computing SVD (Caution: Randomized is currently experimental and unstable)
* @param nv Number of right singular vectors
* @param max_iterations Maximum iterations
* @param seed RNG seed for k-means++ initialization
* @param keep_u Save left singular vectors?
* @param u_name Frame key to save left singular vectors
* @param use_all_factor_levels Whether first factor level is included in each categorical expansion
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Retrofit service method (declaration only — Retrofit synthesizes the implementation).
  // POST /99/Grid/svd: starts a hyperparameter grid search for SVD models; every
  // hyperparameter is sent as a form field (see the Javadoc above for field semantics).
  @FormUrlEncoded
  @POST("/99/Grid/svd")
  Call<SVDV99> trainSvd(
    @Field("transform") DataInfoTransformType transform,
    @Field("svd_method") SVDMethod svd_method,
    @Field("nv") int nv,
    @Field("max_iterations") int max_iterations,
    @Field("seed") long seed,
    @Field("keep_u") boolean keep_u,
    @Field("u_name") String u_name,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
@FormUrlEncoded
@POST("/99/Grid/svd")
Call<SVDV99> trainSvd();
/**
* Resume grid search for SVD model.
* @param transform Transformation of training data
* @param svd_method Method for computing SVD (Caution: Randomized is currently experimental and unstable)
* @param nv Number of right singular vectors
* @param max_iterations Maximum iterations
* @param seed RNG seed for k-means++ initialization
* @param keep_u Save left singular vectors?
* @param u_name Frame key to save left singular vectors
* @param use_all_factor_levels Whether first factor level is included in each categorical expansion
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Retrofit service method (declaration only — Retrofit synthesizes the implementation).
  // POST /99/Grid/svd/resume: resumes a previously started SVD grid search; the form
  // fields mirror those of trainSvd (see the Javadoc above for field semantics).
  @FormUrlEncoded
  @POST("/99/Grid/svd/resume")
  Call<SVDV99> resumeSvd(
    @Field("transform") DataInfoTransformType transform,
    @Field("svd_method") SVDMethod svd_method,
    @Field("nv") int nv,
    @Field("max_iterations") int max_iterations,
    @Field("seed") long seed,
    @Field("keep_u") boolean keep_u,
    @Field("u_name") String u_name,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
@FormUrlEncoded
@POST("/99/Grid/svd/resume")
Call<SVDV99> resumeSvd();
/**
* Run grid search for DRF model.
* @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p}
   *               for classification and p/3 for regression (where p is the # of predictors)
* @param binomial_double_trees For binary classification: Build 2x as many trees (one per class) - can lead to
* higher accuracy.
* @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param ntrees Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
* best point
* @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
* root level, then decrease by factor of two per level
* @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
* point. Higher values can lead to more overfitting.
* @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
* stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
* trees when the R^2 metric equals or exceeds this
* @param seed Seed for pseudo random number generator (if applicable)
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
* 1.0), for each tree
* @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
* 0.0 and <= 2.0)
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
* @param histogram_type What type of histogram to use for finding optimal split points
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
* the response column is a constant value.If disabled, then model will train
* regardless of the response column being a constant value or not.
* @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
* running. In case of cluster shutdown, this checkpoint can be used to restart
* training.
* @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
* only when in_training_checkpoints_dir is defined
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Retrofit service method (declaration only — Retrofit synthesizes the implementation).
  // POST /99/Grid/drf: starts a hyperparameter grid search for DRF (random forest)
  // models; every hyperparameter is sent as a form field (see the Javadoc above).
  @FormUrlEncoded
  @POST("/99/Grid/drf")
  Call<DRFV3> trainDrf(
    @Field("mtries") int mtries,
    @Field("binomial_double_trees") boolean binomial_double_trees,
    @Field("sample_rate") double sample_rate,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Run grid search for DRF model.
   * <p>
   * No-argument variant: no form fields are sent, so the backend's default values apply to every
   * DRF hyper-parameter and grid-search option.
   * NOTE(review): Retrofit 2 rejects {@code @FormUrlEncoded} methods that declare no {@code @Field}
   * parameters at invocation time ("Form-encoded method must contain at least one @Field") --
   * confirm this overload is actually invokable with the Retrofit version in use.
   */
  @FormUrlEncoded
  @POST("/99/Grid/drf")
  Call<DRFV3> trainDrf();
  /**
   * Resume grid search for DRF model.
   * @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt(p)
   *               for classification and p/3 for regression (where p is the # of predictors).
   * @param binomial_double_trees For binary classification: Build 2x as many trees (one per class) - can lead to
   *                              higher accuracy.
   * @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
   * @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   * @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                               specified, sampling factors will be automatically computed to obtain class balance
   *                               during training. Requires balance_classes.
   * @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                               less than 1.0). Requires balance_classes.
   * @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                  the Logs
   * @param ntrees Number of trees.
   * @param max_depth Maximum tree depth (0 for unlimited).
   * @param min_rows Fewest allowed (weighted) observations in a leaf.
   * @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
   *              best point
   * @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
   *                        root level, then decrease by factor of two per level
   * @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
   *                   point. Higher values can lead to more overfitting.
   * @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
   *                    stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
   *                    trees when the R^2 metric equals or exceeds this
   * @param seed Seed for pseudo random number generator (if applicable)
   * @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
   *                            datasets.
   * @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
   *                              1.0), for each tree
   * @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
   * @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
   *                                         0.0 and <= 2.0)
   * @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   * @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
   * @param histogram_type What type of histogram to use for finding optimal split points
   * @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
   *                        probabilities. Calibration can provide more accurate estimates of class probabilities.
   * @param calibration_frame Data for model calibration
   * @param calibration_method Calibration method to use
   * @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
   *                                the response column is a constant value. If disabled, then model will train
   *                                regardless of the response column being a constant value or not.
   * @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
   *                                    running. In case of cluster shutdown, this checkpoint can be used to restart
   *                                    training.
   * @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
   *                                              only when in_training_checkpoints_dir is defined
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/drf/resume")
  Call<DRFV3> resumeDrf(
    @Field("mtries") int mtries,
    @Field("binomial_double_trees") boolean binomial_double_trees,
    @Field("sample_rate") double sample_rate,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Resume grid search for DRF model.
   * <p>
   * No-argument variant: no form fields are sent, so the backend's default values apply to every
   * DRF hyper-parameter and grid-search option.
   * NOTE(review): Retrofit 2 rejects {@code @FormUrlEncoded} methods that declare no {@code @Field}
   * parameters at invocation time ("Form-encoded method must contain at least one @Field") --
   * confirm this overload is actually invokable with the Retrofit version in use.
   */
  @FormUrlEncoded
  @POST("/99/Grid/drf/resume")
  Call<DRFV3> resumeDrf();
  /**
   * Run grid search for GBM model.
   * @param learn_rate Learning rate (from 0.0 to 1.0)
   * @param learn_rate_annealing Scale the learning rate by this factor after each tree (e.g., 0.99 or 0.999)
   * @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
   * @param col_sample_rate Column sample rate (from 0.0 to 1.0)
   * @param monotone_constraints A mapping representing monotonic constraints. Use +1 to enforce an increasing
   *                             constraint and -1 to specify a decreasing constraint.
   * @param max_abs_leafnode_pred Maximum absolute value of a leaf node prediction
   * @param pred_noise_bandwidth Bandwidth (sigma) of Gaussian multiplicative noise ~N(1,sigma) for tree node
   *                             predictions
   * @param interaction_constraints A set of allowed column interactions.
   * @param auto_rebalance Allow automatic rebalancing of training and validation datasets
   * @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   * @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                               specified, sampling factors will be automatically computed to obtain class balance
   *                               during training. Requires balance_classes.
   * @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                               less than 1.0). Requires balance_classes.
   * @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *                                  the Logs
   * @param ntrees Number of trees.
   * @param max_depth Maximum tree depth (0 for unlimited).
   * @param min_rows Fewest allowed (weighted) observations in a leaf.
   * @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
   *              best point
   * @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
   *                        root level, then decrease by factor of two per level
   * @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
   *                   point. Higher values can lead to more overfitting.
   * @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
   *                    stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
   *                    trees when the R^2 metric equals or exceeds this
   * @param seed Seed for pseudo random number generator (if applicable)
   * @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
   *                            datasets.
   * @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
   *                              1.0), for each tree
   * @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
   * @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
   *                                         0.0 and <= 2.0)
   * @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   * @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
   * @param histogram_type What type of histogram to use for finding optimal split points
   * @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
   *                        probabilities. Calibration can provide more accurate estimates of class probabilities.
   * @param calibration_frame Data for model calibration
   * @param calibration_method Calibration method to use
   * @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
   *                                the response column is a constant value. If disabled, then model will train
   *                                regardless of the response column being a constant value or not.
   * @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
   *                                    running. In case of cluster shutdown, this checkpoint can be used to restart
   *                                    training.
   * @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
   *                                              only when in_training_checkpoints_dir is defined
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/gbm")
  Call<GBMV3> trainGbm(
    @Field("learn_rate") double learn_rate,
    @Field("learn_rate_annealing") double learn_rate_annealing,
    @Field("sample_rate") double sample_rate,
    @Field("col_sample_rate") double col_sample_rate,
    @Field("monotone_constraints") KeyValueV3[] monotone_constraints,
    @Field("max_abs_leafnode_pred") double max_abs_leafnode_pred,
    @Field("pred_noise_bandwidth") double pred_noise_bandwidth,
    @Field("interaction_constraints") String[][] interaction_constraints,
    @Field("auto_rebalance") boolean auto_rebalance,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Run grid search for GBM model.
   * <p>
   * No-argument variant: no form fields are sent, so the backend's default values apply to every
   * GBM hyper-parameter and grid-search option.
   * NOTE(review): Retrofit 2 rejects {@code @FormUrlEncoded} methods that declare no {@code @Field}
   * parameters at invocation time ("Form-encoded method must contain at least one @Field") --
   * confirm this overload is actually invokable with the Retrofit version in use.
   */
  @FormUrlEncoded
  @POST("/99/Grid/gbm")
  Call<GBMV3> trainGbm();
/**
* Resume grid search for GBM model.
* @param learn_rate Learning rate (from 0.0 to 1.0)
* @param learn_rate_annealing Scale the learning rate by this factor after each tree (e.g., 0.99 or 0.999)
* @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate Column sample rate (from 0.0 to 1.0)
* @param monotone_constraints A mapping representing monotonic constraints. Use +1 to enforce an increasing
* constraint and -1 to specify a decreasing constraint.
* @param max_abs_leafnode_pred Maximum absolute value of a leaf node prediction
* @param pred_noise_bandwidth Bandwidth (sigma) of Gaussian multiplicative noise ~N(1,sigma) for tree node
* predictions
* @param interaction_constraints A set of allowed column interactions.
* @param auto_rebalance Allow automatic rebalancing of training and validation datasets
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param ntrees Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
* best point
* @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
* root level, then decrease by factor of two per level
* @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
* point. Higher values can lead to more overfitting.
* @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
* stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
* trees when the R^2 metric equals or exceeds this
* @param seed Seed for pseudo random number generator (if applicable)
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
* 1.0), for each tree
* @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
* 0.0 and <= 2.0)
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
* @param histogram_type What type of histogram to use for finding optimal split points
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
   *                                the response column is a constant value. If disabled, then model will train
* regardless of the response column being a constant value or not.
* @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
* running. In case of cluster shutdown, this checkpoint can be used to restart
* training.
* @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
* only when in_training_checkpoints_dir is defined
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/99/Grid/gbm/resume")
Call<GBMV3> resumeGbm(
@Field("learn_rate") double learn_rate,
@Field("learn_rate_annealing") double learn_rate_annealing,
@Field("sample_rate") double sample_rate,
@Field("col_sample_rate") double col_sample_rate,
@Field("monotone_constraints") KeyValueV3[] monotone_constraints,
@Field("max_abs_leafnode_pred") double max_abs_leafnode_pred,
@Field("pred_noise_bandwidth") double pred_noise_bandwidth,
@Field("interaction_constraints") String[][] interaction_constraints,
@Field("auto_rebalance") boolean auto_rebalance,
@Field("balance_classes") boolean balance_classes,
@Field("class_sampling_factors") float[] class_sampling_factors,
@Field("max_after_balance_size") float max_after_balance_size,
@Field("max_confusion_matrix_size") int max_confusion_matrix_size,
@Field("ntrees") int ntrees,
@Field("max_depth") int max_depth,
@Field("min_rows") double min_rows,
@Field("nbins") int nbins,
@Field("nbins_top_level") int nbins_top_level,
@Field("nbins_cats") int nbins_cats,
@Field("r2_stopping") double r2_stopping,
@Field("seed") long seed,
@Field("build_tree_one_node") boolean build_tree_one_node,
@Field("sample_rate_per_class") double[] sample_rate_per_class,
@Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
@Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
@Field("score_tree_interval") int score_tree_interval,
@Field("min_split_improvement") double min_split_improvement,
@Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
@Field("calibrate_model") boolean calibrate_model,
@Field("calibration_frame") String calibration_frame,
@Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
@Field("check_constant_response") boolean check_constant_response,
@Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
@Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
@FormUrlEncoded
@POST("/99/Grid/gbm/resume")
Call<GBMV3> resumeGbm();
/**
* Run grid search for IsolationForest model.
* @param sample_size Number of randomly sampled observations used to train each Isolation Forest tree. Only one of
* parameters sample_size and sample_rate should be defined. If sample_rate is defined,
* sample_size will be ignored.
* @param sample_rate Rate of randomly sampled observations used to train each Isolation Forest tree. Needs to be in
* range from 0.0 to 1.0. If set to -1, sample_rate is disabled and sample_size will be used
* instead.
 * @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to (number
 *               of predictors)/3.
* @param contamination Contamination ratio - the proportion of anomalies in the input dataset. If undefined (-1)
* the predict function will not mark observations as anomalies and only anomaly score will be
* returned. Defaults to -1 (undefined).
* @param validation_response_column (experimental) Name of the response column in the validation frame. Response
* column should be binary and indicate not anomaly/anomaly.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param ntrees Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
* best point
* @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
* root level, then decrease by factor of two per level
* @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
* point. Higher values can lead to more overfitting.
* @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
* stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
* trees when the R^2 metric equals or exceeds this
* @param seed Seed for pseudo random number generator (if applicable)
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
* 1.0), for each tree
* @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
* 0.0 and <= 2.0)
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
* @param histogram_type What type of histogram to use for finding optimal split points
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
* the response column is a constant value.If disabled, then model will train
* regardless of the response column being a constant value or not.
* @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
* running. In case of cluster shutdown, this checkpoint can be used to restart
* training.
* @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
* only when in_training_checkpoints_dir is defined
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded // hyperparameters are sent as form-encoded fields, not a JSON body
@POST("/99/Grid/isolationforest") // H2O REST endpoint: start an Isolation Forest grid search
Call<IsolationForestV3> trainIsolationforest( // auto-generated binding (gen_java.py, see file header) -- regenerate rather than hand-edit
@Field("sample_size") long sample_size, // per Javadoc: ignored when sample_rate is defined
@Field("sample_rate") double sample_rate,
@Field("mtries") int mtries,
@Field("contamination") double contamination,
@Field("validation_response_column") String validation_response_column,
@Field("balance_classes") boolean balance_classes,
@Field("class_sampling_factors") float[] class_sampling_factors,
@Field("max_after_balance_size") float max_after_balance_size,
@Field("max_confusion_matrix_size") int max_confusion_matrix_size, // deprecated per the Javadoc above
@Field("ntrees") int ntrees,
@Field("max_depth") int max_depth,
@Field("min_rows") double min_rows,
@Field("nbins") int nbins,
@Field("nbins_top_level") int nbins_top_level,
@Field("nbins_cats") int nbins_cats,
@Field("r2_stopping") double r2_stopping, // no longer supported per the Javadoc above; ignored if set
@Field("seed") long seed,
@Field("build_tree_one_node") boolean build_tree_one_node,
@Field("sample_rate_per_class") double[] sample_rate_per_class,
@Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
@Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
@Field("score_tree_interval") int score_tree_interval,
@Field("min_split_improvement") double min_split_improvement,
@Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
@Field("calibrate_model") boolean calibrate_model,
@Field("calibration_frame") String calibration_frame,
@Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
@Field("check_constant_response") boolean check_constant_response,
@Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
@Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
@FormUrlEncoded
@POST("/99/Grid/isolationforest") // same endpoint as above
Call<IsolationForestV3> trainIsolationforest(); // parameterless overload: posts the request with no form fields
/**
* Resume grid search for IsolationForest model.
* @param sample_size Number of randomly sampled observations used to train each Isolation Forest tree. Only one of
* parameters sample_size and sample_rate should be defined. If sample_rate is defined,
* sample_size will be ignored.
* @param sample_rate Rate of randomly sampled observations used to train each Isolation Forest tree. Needs to be in
* range from 0.0 to 1.0. If set to -1, sample_rate is disabled and sample_size will be used
* instead.
 * @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to (number
 *               of predictors)/3.
* @param contamination Contamination ratio - the proportion of anomalies in the input dataset. If undefined (-1)
* the predict function will not mark observations as anomalies and only anomaly score will be
* returned. Defaults to -1 (undefined).
* @param validation_response_column (experimental) Name of the response column in the validation frame. Response
* column should be binary and indicate not anomaly/anomaly.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param ntrees Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
* best point
* @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
* root level, then decrease by factor of two per level
* @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
* point. Higher values can lead to more overfitting.
* @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
* stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
* trees when the R^2 metric equals or exceeds this
* @param seed Seed for pseudo random number generator (if applicable)
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
* 1.0), for each tree
* @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
* 0.0 and <= 2.0)
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
* @param histogram_type What type of histogram to use for finding optimal split points
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
* the response column is a constant value.If disabled, then model will train
* regardless of the response column being a constant value or not.
* @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
* running. In case of cluster shutdown, this checkpoint can be used to restart
* training.
* @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
* only when in_training_checkpoints_dir is defined
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded // hyperparameters are sent as form-encoded fields, not a JSON body
@POST("/99/Grid/isolationforest/resume") // H2O REST endpoint: resume a previously started Isolation Forest grid search
Call<IsolationForestV3> resumeIsolationforest( // auto-generated binding (gen_java.py, see file header) -- regenerate rather than hand-edit
@Field("sample_size") long sample_size, // per Javadoc: ignored when sample_rate is defined
@Field("sample_rate") double sample_rate,
@Field("mtries") int mtries,
@Field("contamination") double contamination,
@Field("validation_response_column") String validation_response_column,
@Field("balance_classes") boolean balance_classes,
@Field("class_sampling_factors") float[] class_sampling_factors,
@Field("max_after_balance_size") float max_after_balance_size,
@Field("max_confusion_matrix_size") int max_confusion_matrix_size, // deprecated per the Javadoc above
@Field("ntrees") int ntrees,
@Field("max_depth") int max_depth,
@Field("min_rows") double min_rows,
@Field("nbins") int nbins,
@Field("nbins_top_level") int nbins_top_level,
@Field("nbins_cats") int nbins_cats,
@Field("r2_stopping") double r2_stopping, // no longer supported per the Javadoc above; ignored if set
@Field("seed") long seed,
@Field("build_tree_one_node") boolean build_tree_one_node,
@Field("sample_rate_per_class") double[] sample_rate_per_class,
@Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
@Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
@Field("score_tree_interval") int score_tree_interval,
@Field("min_split_improvement") double min_split_improvement,
@Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
@Field("calibrate_model") boolean calibrate_model,
@Field("calibration_frame") String calibration_frame,
@Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
@Field("check_constant_response") boolean check_constant_response,
@Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
@Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
@FormUrlEncoded
@POST("/99/Grid/isolationforest/resume") // same endpoint as above
Call<IsolationForestV3> resumeIsolationforest(); // parameterless overload: posts the request with no form fields
/**
* Run grid search for ExtendedIsolationForest model.
* @param ntrees Number of Extended Isolation Forest trees.
* @param sample_size Number of randomly sampled observations used to train each Extended Isolation Forest tree.
 * @param extension_level Maximum is N - 1 (N = numCols). Minimum is 0. Extended Isolation Forest with
 *                        extension_level = 0 behaves like Isolation Forest.
* @param seed Seed for pseudo random number generator (if applicable)
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param disable_training_metrics Disable calculating training metrics (expensive on large datasets)
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
    @FormUrlEncoded
    @POST("/99/Grid/extendedisolationforest")
    Call<ExtendedIsolationForestV3> trainExtendedisolationforest(
        @Field("ntrees") int ntrees,
        @Field("sample_size") int sample_size,
        @Field("extension_level") int extension_level,
        @Field("seed") long seed,
        @Field("score_tree_interval") int score_tree_interval,
        @Field("disable_training_metrics") boolean disable_training_metrics,
        @Field("model_id") String model_id,
        @Field("training_frame") String training_frame,
        @Field("validation_frame") String validation_frame,
        @Field("nfolds") int nfolds,
        @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
        @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
        @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
        @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
        @Field("distribution") GenmodelutilsDistributionFamily distribution,
        @Field("tweedie_power") double tweedie_power,
        @Field("quantile_alpha") double quantile_alpha,
        @Field("huber_alpha") double huber_alpha,
        @Field("response_column") String response_column,
        @Field("weights_column") String weights_column,
        @Field("offset_column") String offset_column,
        @Field("fold_column") String fold_column,
        @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
        @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
        @Field("max_categorical_levels") int max_categorical_levels,
        @Field("ignored_columns") String[] ignored_columns,
        @Field("ignore_const_cols") boolean ignore_const_cols,
        @Field("score_each_iteration") boolean score_each_iteration,
        @Field("checkpoint") String checkpoint,
        @Field("stopping_rounds") int stopping_rounds,
        @Field("max_runtime_secs") double max_runtime_secs,
        @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
        @Field("stopping_tolerance") double stopping_tolerance,
        @Field("gainslift_bins") int gainslift_bins,
        @Field("custom_metric_func") String custom_metric_func,
        @Field("custom_distribution_func") String custom_distribution_func,
        @Field("export_checkpoints_dir") String export_checkpoints_dir,
        @Field("auc_type") MultinomialAucType auc_type
    );
    /**
     * Run grid search for ExtendedIsolationForest model, letting the server apply its default value for every
     * parameter (convenience overload of {@code trainExtendedisolationforest} sending an empty form body).
     */
    @FormUrlEncoded
    @POST("/99/Grid/extendedisolationforest")
    Call<ExtendedIsolationForestV3> trainExtendedisolationforest();
/**
* Resume grid search for ExtendedIsolationForest model.
* @param ntrees Number of Extended Isolation Forest trees.
* @param sample_size Number of randomly sampled observations used to train each Extended Isolation Forest tree.
* @param extension_level Maximum is N - 1 (N = numCols). Minimum is 0. Extended Isolation Forest with
* extension_Level = 0 behaves like Isolation Forest.
* @param seed Seed for pseudo random number generator (if applicable)
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param disable_training_metrics Disable calculating training metrics (expensive on large datasets)
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
     * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
    @FormUrlEncoded
    @POST("/99/Grid/extendedisolationforest/resume")
    Call<ExtendedIsolationForestV3> resumeExtendedisolationforest(
        @Field("ntrees") int ntrees,
        @Field("sample_size") int sample_size,
        @Field("extension_level") int extension_level,
        @Field("seed") long seed,
        @Field("score_tree_interval") int score_tree_interval,
        @Field("disable_training_metrics") boolean disable_training_metrics,
        @Field("model_id") String model_id,
        @Field("training_frame") String training_frame,
        @Field("validation_frame") String validation_frame,
        @Field("nfolds") int nfolds,
        @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
        @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
        @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
        @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
        @Field("distribution") GenmodelutilsDistributionFamily distribution,
        @Field("tweedie_power") double tweedie_power,
        @Field("quantile_alpha") double quantile_alpha,
        @Field("huber_alpha") double huber_alpha,
        @Field("response_column") String response_column,
        @Field("weights_column") String weights_column,
        @Field("offset_column") String offset_column,
        @Field("fold_column") String fold_column,
        @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
        @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
        @Field("max_categorical_levels") int max_categorical_levels,
        @Field("ignored_columns") String[] ignored_columns,
        @Field("ignore_const_cols") boolean ignore_const_cols,
        @Field("score_each_iteration") boolean score_each_iteration,
        @Field("checkpoint") String checkpoint,
        @Field("stopping_rounds") int stopping_rounds,
        @Field("max_runtime_secs") double max_runtime_secs,
        @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
        @Field("stopping_tolerance") double stopping_tolerance,
        @Field("gainslift_bins") int gainslift_bins,
        @Field("custom_metric_func") String custom_metric_func,
        @Field("custom_distribution_func") String custom_distribution_func,
        @Field("export_checkpoints_dir") String export_checkpoints_dir,
        @Field("auc_type") MultinomialAucType auc_type
    );
    /**
     * Resume grid search for ExtendedIsolationForest model, letting the server apply its default value for every
     * parameter (convenience overload of {@code resumeExtendedisolationforest} sending an empty form body).
     */
    @FormUrlEncoded
    @POST("/99/Grid/extendedisolationforest/resume")
    Call<ExtendedIsolationForestV3> resumeExtendedisolationforest();
/**
* Run grid search for Aggregator model.
* @param transform Transformation of training data
* @param pca_method Method for computing PCA (Caution: GLRM is currently experimental and unstable)
* @param k Rank of matrix approximation
* @param max_iterations Maximum number of iterations for PCA
* @param target_num_exemplars Targeted number of exemplars
     * @param rel_tol_num_exemplars Relative tolerance for number of exemplars (e.g., 0.5 is +/- 50 percent)
* @param seed RNG seed for initialization
* @param use_all_factor_levels Whether first factor level is included in each categorical expansion
* @param save_mapping_frame Whether to export the mapping of the aggregated frame
* @param num_iteration_without_new_exemplar The number of iterations to run before aggregator exits if the number
* of exemplars collected didn't change
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
     * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
    @FormUrlEncoded
    @POST("/99/Grid/aggregator")
    Call<AggregatorV99> trainAggregator(
        @Field("transform") DataInfoTransformType transform,
        @Field("pca_method") PCAMethod pca_method,
        @Field("k") int k,
        @Field("max_iterations") int max_iterations,
        @Field("target_num_exemplars") int target_num_exemplars,
        @Field("rel_tol_num_exemplars") double rel_tol_num_exemplars,
        @Field("seed") long seed,
        @Field("use_all_factor_levels") boolean use_all_factor_levels,
        @Field("save_mapping_frame") boolean save_mapping_frame,
        @Field("num_iteration_without_new_exemplar") int num_iteration_without_new_exemplar,
        @Field("model_id") String model_id,
        @Field("training_frame") String training_frame,
        @Field("validation_frame") String validation_frame,
        @Field("nfolds") int nfolds,
        @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
        @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
        @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
        @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
        @Field("distribution") GenmodelutilsDistributionFamily distribution,
        @Field("tweedie_power") double tweedie_power,
        @Field("quantile_alpha") double quantile_alpha,
        @Field("huber_alpha") double huber_alpha,
        @Field("response_column") String response_column,
        @Field("weights_column") String weights_column,
        @Field("offset_column") String offset_column,
        @Field("fold_column") String fold_column,
        @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
        @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
        @Field("max_categorical_levels") int max_categorical_levels,
        @Field("ignored_columns") String[] ignored_columns,
        @Field("ignore_const_cols") boolean ignore_const_cols,
        @Field("score_each_iteration") boolean score_each_iteration,
        @Field("checkpoint") String checkpoint,
        @Field("stopping_rounds") int stopping_rounds,
        @Field("max_runtime_secs") double max_runtime_secs,
        @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
        @Field("stopping_tolerance") double stopping_tolerance,
        @Field("gainslift_bins") int gainslift_bins,
        @Field("custom_metric_func") String custom_metric_func,
        @Field("custom_distribution_func") String custom_distribution_func,
        @Field("export_checkpoints_dir") String export_checkpoints_dir,
        @Field("auc_type") MultinomialAucType auc_type
    );
    /**
     * Run grid search for Aggregator model, letting the server apply its default value for every parameter
     * (convenience overload of {@code trainAggregator} sending an empty form body).
     */
    @FormUrlEncoded
    @POST("/99/Grid/aggregator")
    Call<AggregatorV99> trainAggregator();
/**
* Resume grid search for Aggregator model.
* @param transform Transformation of training data
* @param pca_method Method for computing PCA (Caution: GLRM is currently experimental and unstable)
* @param k Rank of matrix approximation
* @param max_iterations Maximum number of iterations for PCA
* @param target_num_exemplars Targeted number of exemplars
     * @param rel_tol_num_exemplars Relative tolerance for number of exemplars (e.g., 0.5 is +/- 50 percent)
* @param seed RNG seed for initialization
* @param use_all_factor_levels Whether first factor level is included in each categorical expansion
* @param save_mapping_frame Whether to export the mapping of the aggregated frame
* @param num_iteration_without_new_exemplar The number of iterations to run before aggregator exits if the number
* of exemplars collected didn't change
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
     * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
    @FormUrlEncoded
    @POST("/99/Grid/aggregator/resume")
    Call<AggregatorV99> resumeAggregator(
        @Field("transform") DataInfoTransformType transform,
        @Field("pca_method") PCAMethod pca_method,
        @Field("k") int k,
        @Field("max_iterations") int max_iterations,
        @Field("target_num_exemplars") int target_num_exemplars,
        @Field("rel_tol_num_exemplars") double rel_tol_num_exemplars,
        @Field("seed") long seed,
        @Field("use_all_factor_levels") boolean use_all_factor_levels,
        @Field("save_mapping_frame") boolean save_mapping_frame,
        @Field("num_iteration_without_new_exemplar") int num_iteration_without_new_exemplar,
        @Field("model_id") String model_id,
        @Field("training_frame") String training_frame,
        @Field("validation_frame") String validation_frame,
        @Field("nfolds") int nfolds,
        @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
        @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
        @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
        @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
        @Field("distribution") GenmodelutilsDistributionFamily distribution,
        @Field("tweedie_power") double tweedie_power,
        @Field("quantile_alpha") double quantile_alpha,
        @Field("huber_alpha") double huber_alpha,
        @Field("response_column") String response_column,
        @Field("weights_column") String weights_column,
        @Field("offset_column") String offset_column,
        @Field("fold_column") String fold_column,
        @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
        @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
        @Field("max_categorical_levels") int max_categorical_levels,
        @Field("ignored_columns") String[] ignored_columns,
        @Field("ignore_const_cols") boolean ignore_const_cols,
        @Field("score_each_iteration") boolean score_each_iteration,
        @Field("checkpoint") String checkpoint,
        @Field("stopping_rounds") int stopping_rounds,
        @Field("max_runtime_secs") double max_runtime_secs,
        @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
        @Field("stopping_tolerance") double stopping_tolerance,
        @Field("gainslift_bins") int gainslift_bins,
        @Field("custom_metric_func") String custom_metric_func,
        @Field("custom_distribution_func") String custom_distribution_func,
        @Field("export_checkpoints_dir") String export_checkpoints_dir,
        @Field("auc_type") MultinomialAucType auc_type
    );
    /**
     * Resume grid search for Aggregator model, letting the server apply its default value for every parameter
     * (convenience overload of {@code resumeAggregator} sending an empty form body).
     */
    @FormUrlEncoded
    @POST("/99/Grid/aggregator/resume")
    Call<AggregatorV99> resumeAggregator();
/**
* Run grid search for Word2Vec model.
* @param vec_size Set size of word vectors
* @param window_size Set max skip length between words
* @param sent_sample_rate Set threshold for occurrence of words. Those that appear with higher frequency in the
* training data
* will be randomly down-sampled; useful range is (0, 1e-5)
* @param norm_model Use Hierarchical Softmax
* @param epochs Number of training iterations to run
     * @param min_word_freq This will discard words that appear less than &lt;int&gt; times
* @param init_learning_rate Set the starting learning rate
* @param word_model The word model to use (SkipGram or CBOW)
* @param pre_trained Id of a data frame that contains a pre-trained (external) word2vec model
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
     * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
    @FormUrlEncoded
    @POST("/99/Grid/word2vec")
    Call<Word2VecV3> trainWord2vec(
        @Field("vec_size") int vec_size,
        @Field("window_size") int window_size,
        @Field("sent_sample_rate") float sent_sample_rate,
        @Field("norm_model") Word2VecNormModel norm_model,
        @Field("epochs") int epochs,
        @Field("min_word_freq") int min_word_freq,
        @Field("init_learning_rate") float init_learning_rate,
        @Field("word_model") Word2VecWordModel word_model,
        @Field("pre_trained") String pre_trained,
        @Field("model_id") String model_id,
        @Field("training_frame") String training_frame,
        @Field("validation_frame") String validation_frame,
        @Field("nfolds") int nfolds,
        @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
        @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
        @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
        @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
        @Field("distribution") GenmodelutilsDistributionFamily distribution,
        @Field("tweedie_power") double tweedie_power,
        @Field("quantile_alpha") double quantile_alpha,
        @Field("huber_alpha") double huber_alpha,
        @Field("response_column") String response_column,
        @Field("weights_column") String weights_column,
        @Field("offset_column") String offset_column,
        @Field("fold_column") String fold_column,
        @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
        @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
        @Field("max_categorical_levels") int max_categorical_levels,
        @Field("ignored_columns") String[] ignored_columns,
        @Field("ignore_const_cols") boolean ignore_const_cols,
        @Field("score_each_iteration") boolean score_each_iteration,
        @Field("checkpoint") String checkpoint,
        @Field("stopping_rounds") int stopping_rounds,
        @Field("max_runtime_secs") double max_runtime_secs,
        @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
        @Field("stopping_tolerance") double stopping_tolerance,
        @Field("gainslift_bins") int gainslift_bins,
        @Field("custom_metric_func") String custom_metric_func,
        @Field("custom_distribution_func") String custom_distribution_func,
        @Field("export_checkpoints_dir") String export_checkpoints_dir,
        @Field("auc_type") MultinomialAucType auc_type
    );
    /**
     * Run grid search for Word2Vec model, letting the server apply its default value for every parameter
     * (convenience overload of {@code trainWord2vec} sending an empty form body).
     */
    @FormUrlEncoded
    @POST("/99/Grid/word2vec")
    Call<Word2VecV3> trainWord2vec();
  /**
   * Resume grid search for Word2Vec model.
   * @param vec_size Set size of word vectors
   * @param window_size Set max skip length between words
   * @param sent_sample_rate Set threshold for occurrence of words. Those that appear with higher frequency in the
   *                         training data
   *                         will be randomly down-sampled; useful range is (0, 1e-5)
   * @param norm_model Use Hierarchical Softmax
   * @param epochs Number of training iterations to run
   * @param min_word_freq This will discard words that appear less than {@code <int>} times
   * @param init_learning_rate Set the starting learning rate
   * @param word_model The word model to use (SkipGram or CBOW)
   * @param pre_trained Id of a data frame that contains a pre-trained (external) word2vec model
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/word2vec/resume")
  Call<Word2VecV3> resumeWord2vec(
    @Field("vec_size") int vec_size,
    @Field("window_size") int window_size,
    @Field("sent_sample_rate") float sent_sample_rate,
    @Field("norm_model") Word2VecNormModel norm_model,
    @Field("epochs") int epochs,
    @Field("min_word_freq") int min_word_freq,
    @Field("init_learning_rate") float init_learning_rate,
    @Field("word_model") Word2VecWordModel word_model,
    @Field("pre_trained") String pre_trained,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /** Resume grid search for Word2Vec model, using default values for all parameters. */
  @FormUrlEncoded
  @POST("/99/Grid/word2vec/resume")
  Call<Word2VecV3> resumeWord2vec();
  /**
   * Run grid search for StackedEnsemble model.
   * @param base_models List of models or grids (or their ids) to ensemble/stack together. Grids are expanded to
   *                    individual models. If not using blending frame, then models must have been cross-validated
   *                    using nfolds > 1, and folds must be identical across models.
   * @param metalearner_algorithm Type of algorithm to use as the metalearner. Options include 'AUTO' (GLM with non
   *                              negative weights; if validation_frame is present, a lambda search is performed),
   *                              'deeplearning' (Deep Learning with default parameters), 'drf' (Random Forest with
   *                              default parameters), 'gbm' (GBM with default parameters), 'glm' (GLM with default
   *                              parameters), 'naivebayes' (NaiveBayes with default parameters), or 'xgboost' (if
   *                              available, XGBoost with default parameters).
   * @param metalearner_nfolds Number of folds for K-fold cross-validation of the metalearner algorithm (0 to disable
   *                           or >= 2).
   * @param metalearner_fold_assignment Cross-validation fold assignment scheme for metalearner cross-validation.
   *                                    Defaults to AUTO (which is currently set to Random). The 'Stratified' option
   *                                    will stratify the folds based on the response variable, for classification
   *                                    problems.
   * @param metalearner_fold_column Column with cross-validation fold index assignment per observation for cross-
   *                                validation of the metalearner.
   * @param metalearner_transform Transformation used for the level one frame.
   * @param keep_levelone_frame Keep level one frame used for metalearner training.
   * @param metalearner_params Parameters for metalearner algorithm
   * @param blending_frame Frame used to compute the predictions that serve as the training frame for the metalearner
   *                       (triggers blending mode if provided)
   * @param seed Seed for random numbers; passed through to the metalearner algorithm. Defaults to -1 (time-based
   *             random number)
   * @param score_training_samples Specify the number of training set samples for scoring. The value must be >= 0. To
   *                               use all training samples, enter 0.
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/stackedensemble")
  Call<StackedEnsembleV99> trainStackedensemble(
    @Field("base_models") String[] base_models,
    @Field("metalearner_algorithm") EnsembleMetalearnerAlgorithm metalearner_algorithm,
    @Field("metalearner_nfolds") int metalearner_nfolds,
    @Field("metalearner_fold_assignment") ModelParametersFoldAssignmentScheme metalearner_fold_assignment,
    @Field("metalearner_fold_column") String metalearner_fold_column,
    @Field("metalearner_transform") EnsembleStackedEnsembleModelStackedEnsembleParametersMetalearnerTransform metalearner_transform,
    @Field("keep_levelone_frame") boolean keep_levelone_frame,
    @Field("metalearner_params") String metalearner_params,
    @Field("blending_frame") String blending_frame,
    @Field("seed") long seed,
    @Field("score_training_samples") long score_training_samples,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /** Run grid search for StackedEnsemble model, specifying only the required base models. */
  @FormUrlEncoded
  @POST("/99/Grid/stackedensemble")
  Call<StackedEnsembleV99> trainStackedensemble(@Field("base_models") String[] base_models);
  /**
   * Resume grid search for StackedEnsemble model.
   * @param base_models List of models or grids (or their ids) to ensemble/stack together. Grids are expanded to
   *                    individual models. If not using blending frame, then models must have been cross-validated
   *                    using nfolds > 1, and folds must be identical across models.
   * @param metalearner_algorithm Type of algorithm to use as the metalearner. Options include 'AUTO' (GLM with non
   *                              negative weights; if validation_frame is present, a lambda search is performed),
   *                              'deeplearning' (Deep Learning with default parameters), 'drf' (Random Forest with
   *                              default parameters), 'gbm' (GBM with default parameters), 'glm' (GLM with default
   *                              parameters), 'naivebayes' (NaiveBayes with default parameters), or 'xgboost' (if
   *                              available, XGBoost with default parameters).
   * @param metalearner_nfolds Number of folds for K-fold cross-validation of the metalearner algorithm (0 to disable
   *                           or >= 2).
   * @param metalearner_fold_assignment Cross-validation fold assignment scheme for metalearner cross-validation.
   *                                    Defaults to AUTO (which is currently set to Random). The 'Stratified' option
   *                                    will stratify the folds based on the response variable, for classification
   *                                    problems.
   * @param metalearner_fold_column Column with cross-validation fold index assignment per observation for cross-
   *                                validation of the metalearner.
   * @param metalearner_transform Transformation used for the level one frame.
   * @param keep_levelone_frame Keep level one frame used for metalearner training.
   * @param metalearner_params Parameters for metalearner algorithm
   * @param blending_frame Frame used to compute the predictions that serve as the training frame for the metalearner
   *                       (triggers blending mode if provided)
   * @param seed Seed for random numbers; passed through to the metalearner algorithm. Defaults to -1 (time-based
   *             random number)
   * @param score_training_samples Specify the number of training set samples for scoring. The value must be >= 0. To
   *                               use all training samples, enter 0.
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/stackedensemble/resume")
  Call<StackedEnsembleV99> resumeStackedensemble(
    @Field("base_models") String[] base_models,
    @Field("metalearner_algorithm") EnsembleMetalearnerAlgorithm metalearner_algorithm,
    @Field("metalearner_nfolds") int metalearner_nfolds,
    @Field("metalearner_fold_assignment") ModelParametersFoldAssignmentScheme metalearner_fold_assignment,
    @Field("metalearner_fold_column") String metalearner_fold_column,
    @Field("metalearner_transform") EnsembleStackedEnsembleModelStackedEnsembleParametersMetalearnerTransform metalearner_transform,
    @Field("keep_levelone_frame") boolean keep_levelone_frame,
    @Field("metalearner_params") String metalearner_params,
    @Field("blending_frame") String blending_frame,
    @Field("seed") long seed,
    @Field("score_training_samples") long score_training_samples,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /** Resume grid search for StackedEnsemble model, specifying only the required base models. */
  @FormUrlEncoded
  @POST("/99/Grid/stackedensemble/resume")
  Call<StackedEnsembleV99> resumeStackedensemble(@Field("base_models") String[] base_models);
  /**
   * Run grid search for CoxPH model.
   * @param start_column Start Time Column.
   * @param stop_column Stop Time Column.
   * @param stratify_by List of columns to use for stratification.
   * @param ties Method for Handling Ties.
   * @param init Coefficient starting value.
   * @param lre_min Minimum log-relative error.
   * @param max_iterations Maximum number of iterations.
   * @param interactions_only A list of columns that should only be used to create interactions but should not
   *                          themselves participate in model training.
   * @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
   *                     for the list.
   * @param interaction_pairs A list of pairwise (first order) column interactions.
   * @param use_all_factor_levels (Internal. For development only!) Indicates whether to use all factor levels.
   * @param single_node_mode Run on a single node to reduce the effect of network overhead (for smaller datasets)
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/coxph")
  Call<CoxPHV3> trainCoxph(
    @Field("start_column") String start_column,
    @Field("stop_column") String stop_column,
    @Field("stratify_by") String[] stratify_by,
    @Field("ties") CoxPHTies ties,
    @Field("init") double init,
    @Field("lre_min") double lre_min,
    @Field("max_iterations") int max_iterations,
    @Field("interactions_only") String[] interactions_only,
    @Field("interactions") String[] interactions,
    @Field("interaction_pairs") StringPairV3[] interaction_pairs,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("single_node_mode") boolean single_node_mode,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Run grid search for CoxPH model, using server-side default values for all parameters.
   *
   * @return a Retrofit {@code Call} yielding the grid-search job schema.
   */
  @FormUrlEncoded
  @POST("/99/Grid/coxph")
  Call<CoxPHV3> trainCoxph();
  /**
   * Resume grid search for CoxPH model.
   * @param start_column Start Time Column.
   * @param stop_column Stop Time Column.
   * @param stratify_by List of columns to use for stratification.
   * @param ties Method for Handling Ties.
   * @param init Coefficient starting value.
   * @param lre_min Minimum log-relative error.
   * @param max_iterations Maximum number of iterations.
   * @param interactions_only A list of columns that should only be used to create interactions but should not itself
   *                          participate in model training.
   * @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
   *                     for the list.
   * @param interaction_pairs A list of pairwise (first order) column interactions.
   * @param use_all_factor_levels (Internal. For development only!) Indicates whether to use all factor levels.
   * @param single_node_mode Run on a single node to reduce the effect of network overhead (for smaller datasets)
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   * @return a Retrofit {@code Call} yielding the resumed grid-search job schema.
   */
  @FormUrlEncoded
  @POST("/99/Grid/coxph/resume")
  Call<CoxPHV3> resumeCoxph(
      @Field("start_column") String start_column,
      @Field("stop_column") String stop_column,
      @Field("stratify_by") String[] stratify_by,
      @Field("ties") CoxPHTies ties,
      @Field("init") double init,
      @Field("lre_min") double lre_min,
      @Field("max_iterations") int max_iterations,
      @Field("interactions_only") String[] interactions_only,
      @Field("interactions") String[] interactions,
      @Field("interaction_pairs") StringPairV3[] interaction_pairs,
      @Field("use_all_factor_levels") boolean use_all_factor_levels,
      @Field("single_node_mode") boolean single_node_mode,
      @Field("model_id") String model_id,
      @Field("training_frame") String training_frame,
      @Field("validation_frame") String validation_frame,
      @Field("nfolds") int nfolds,
      @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
      @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
      @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
      @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
      @Field("distribution") GenmodelutilsDistributionFamily distribution,
      @Field("tweedie_power") double tweedie_power,
      @Field("quantile_alpha") double quantile_alpha,
      @Field("huber_alpha") double huber_alpha,
      @Field("response_column") String response_column,
      @Field("weights_column") String weights_column,
      @Field("offset_column") String offset_column,
      @Field("fold_column") String fold_column,
      @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
      @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
      @Field("max_categorical_levels") int max_categorical_levels,
      @Field("ignored_columns") String[] ignored_columns,
      @Field("ignore_const_cols") boolean ignore_const_cols,
      @Field("score_each_iteration") boolean score_each_iteration,
      @Field("checkpoint") String checkpoint,
      @Field("stopping_rounds") int stopping_rounds,
      @Field("max_runtime_secs") double max_runtime_secs,
      @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
      @Field("stopping_tolerance") double stopping_tolerance,
      @Field("gainslift_bins") int gainslift_bins,
      @Field("custom_metric_func") String custom_metric_func,
      @Field("custom_distribution_func") String custom_distribution_func,
      @Field("export_checkpoints_dir") String export_checkpoints_dir,
      @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Resume grid search for CoxPH model, using server-side default values for all parameters.
   *
   * @return a Retrofit {@code Call} yielding the resumed grid-search job schema.
   */
  @FormUrlEncoded
  @POST("/99/Grid/coxph/resume")
  Call<CoxPHV3> resumeCoxph();
  /**
   * Run grid search for Generic model.
   * @param path Path to file with self-contained model archive.
   * @param model_key Key to the self-contained model archive already uploaded to H2O.
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   * @return a Retrofit {@code Call} yielding the grid-search job schema.
   */
  @FormUrlEncoded
  @POST("/99/Grid/generic")
  Call<GenericV3> trainGeneric(
      @Field("path") String path,
      @Field("model_key") String model_key,
      @Field("model_id") String model_id,
      @Field("training_frame") String training_frame,
      @Field("validation_frame") String validation_frame,
      @Field("nfolds") int nfolds,
      @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
      @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
      @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
      @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
      @Field("distribution") GenmodelutilsDistributionFamily distribution,
      @Field("tweedie_power") double tweedie_power,
      @Field("quantile_alpha") double quantile_alpha,
      @Field("huber_alpha") double huber_alpha,
      @Field("response_column") String response_column,
      @Field("weights_column") String weights_column,
      @Field("offset_column") String offset_column,
      @Field("fold_column") String fold_column,
      @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
      @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
      @Field("max_categorical_levels") int max_categorical_levels,
      @Field("ignored_columns") String[] ignored_columns,
      @Field("ignore_const_cols") boolean ignore_const_cols,
      @Field("score_each_iteration") boolean score_each_iteration,
      @Field("checkpoint") String checkpoint,
      @Field("stopping_rounds") int stopping_rounds,
      @Field("max_runtime_secs") double max_runtime_secs,
      @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
      @Field("stopping_tolerance") double stopping_tolerance,
      @Field("gainslift_bins") int gainslift_bins,
      @Field("custom_metric_func") String custom_metric_func,
      @Field("custom_distribution_func") String custom_distribution_func,
      @Field("export_checkpoints_dir") String export_checkpoints_dir,
      @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Run grid search for Generic model, using server-side default values for all parameters.
   *
   * @return a Retrofit {@code Call} yielding the grid-search job schema.
   */
  @FormUrlEncoded
  @POST("/99/Grid/generic")
  Call<GenericV3> trainGeneric();
  /**
   * Resume grid search for Generic model.
   * @param path Path to file with self-contained model archive.
   * @param model_key Key to the self-contained model archive already uploaded to H2O.
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   * @return a Retrofit {@code Call} yielding the resumed grid-search job schema.
   */
  @FormUrlEncoded
  @POST("/99/Grid/generic/resume")
  Call<GenericV3> resumeGeneric(
      @Field("path") String path,
      @Field("model_key") String model_key,
      @Field("model_id") String model_id,
      @Field("training_frame") String training_frame,
      @Field("validation_frame") String validation_frame,
      @Field("nfolds") int nfolds,
      @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
      @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
      @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
      @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
      @Field("distribution") GenmodelutilsDistributionFamily distribution,
      @Field("tweedie_power") double tweedie_power,
      @Field("quantile_alpha") double quantile_alpha,
      @Field("huber_alpha") double huber_alpha,
      @Field("response_column") String response_column,
      @Field("weights_column") String weights_column,
      @Field("offset_column") String offset_column,
      @Field("fold_column") String fold_column,
      @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
      @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
      @Field("max_categorical_levels") int max_categorical_levels,
      @Field("ignored_columns") String[] ignored_columns,
      @Field("ignore_const_cols") boolean ignore_const_cols,
      @Field("score_each_iteration") boolean score_each_iteration,
      @Field("checkpoint") String checkpoint,
      @Field("stopping_rounds") int stopping_rounds,
      @Field("max_runtime_secs") double max_runtime_secs,
      @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
      @Field("stopping_tolerance") double stopping_tolerance,
      @Field("gainslift_bins") int gainslift_bins,
      @Field("custom_metric_func") String custom_metric_func,
      @Field("custom_distribution_func") String custom_distribution_func,
      @Field("export_checkpoints_dir") String export_checkpoints_dir,
      @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Resume grid search for Generic model, using server-side default values for all parameters.
   *
   * @return a Retrofit {@code Call} yielding the resumed grid-search job schema.
   */
  @FormUrlEncoded
  @POST("/99/Grid/generic/resume")
  Call<GenericV3> resumeGeneric();
/**
* Run grid search for GAM model.
* @param seed Seed for pseudo random number generator (if applicable)
* @param family Family. Use binomial for classification with logistic regression, others are for regression
* problems.
* @param tweedie_variance_power Tweedie variance power
* @param tweedie_link_power Tweedie link power
* @param theta Theta
   * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
   *               with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *               datasets with many columns.
* @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
* alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
* specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
* 0.5 otherwise.
* @param lambda Regularization strength
* @param startval double array to initialize coefficients for GAM.
* @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
* @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided)
* @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
* set to True, the value of nlamdas is set to 30 (fewer lambdas are needed for ridge regression)
* otherwise it is set to 100.
* @param standardize Standardize numeric columns to have zero mean and unit variance
* @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
* @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
* of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
* @param non_negative Restrict coefficients (not intercept) to be non-negative
* @param max_iterations Maximum number of iterations
   * @param beta_epsilon Converge if beta changes less (using L-infinity norm) than beta epsilon, ONLY applies to
   *                     IRLSM solver
* @param objective_epsilon Converge if objective value changes less than this. Default indicates: If lambda_search
* is set to True the value of objective_epsilon is set to .0001. If the lambda_search is
* set to False and lambda is equal to zero, the value of objective_epsilon is set to
* .000001, for any other value of lambda the default value of objective_epsilon is set to
* .0001.
* @param gradient_epsilon Converge if objective changes less (using L-infinity norm) than this, ONLY applies to
* L-BFGS solver. Default indicates: If lambda_search is set to False and lambda is equal to
* zero, the default value of gradient_epsilon is equal to .000001, otherwise the default
* value is .0001. If lambda_search is set to True, the conditional values above are 1E-8
* and 1E-6 respectively.
* @param obj_reg Likelihood divider in objective value computation, default is 1/nobs
* @param link Link function.
* @param intercept Include constant term in the model
* @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
* and the mean of response does not reflect reality.
* @param cold_start Only applicable to multiple alpha/lambda values when calling GLM from GAM. If false, build the
* next model for next set of alpha/lambda values starting from the values provided by current
* model. If true will start GLM model from scratch.
* @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
* lambda that drives all coefficients to zero). Default indicates: if the number of
* observations is greater than the number of variables, then lambda_min_ratio is set to
* 0.0001; if the number of observations is less than the number of variables, then
* lambda_min_ratio is set to 0.01.
* @param beta_constraints Beta constraints
* @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
* to prevent expensive model building with many predictors. Default indicates: If the
* IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
* is set to 100000000.
* @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
* for the list.
* @param interaction_pairs A list of pairwise (first order) column interactions.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
* @param remove_collinear_columns In case of linearly dependent columns, remove some of the dependent columns
* @param store_knot_locations If set to true, will return knot locations as double[][] array for gam column names
* found knots_for_gam. Default to false.
   * @param num_knots Number of knots for gam predictors. If specified, must specify one for each gam predictor. For
   *                  monotone I-splines, minimum = 2, for cs spline, minimum = 3. For thin plate, minimum is size of
   *                  polynomial basis + 2.
* @param spline_orders Order of I-splines or NBSplineTypeI M-splines used for gam predictors. If specified, must be
* the same size as gam_columns. For I-splines, the spline_orders will be the same as the
* polynomials used to generate the splines. For M-splines, the polynomials used to generate
* the splines will be spline_order-1. Values for bs=0 or 1 will be ignored.
* @param splines_non_negative Valid for I-spline (bs=2) only. True if the I-splines are monotonically increasing
* (and monotonically non-decreasing) and False if the I-splines are monotonically
* decreasing (and monotonically non-increasing). If specified, must be the same size
* as gam_columns. Values for other spline types will be ignored. Default to true.
* @param gam_columns Arrays of predictor column names for gam for smoothers using single or multiple predictors
* like {{'c1'},{'c2','c3'},{'c4'},...}
* @param scale Smoothing parameter for gam predictors. If specified, must be of the same length as gam_columns
* @param bs Basis function type for each gam predictors, 0 for cr, 1 for thin plate regression with knots, 2 for
* monotone I-splines, 3 for NBSplineTypeI M-splines (refer to doc here:
* https://github.com/h2oai/h2o-3/issues/6926). If specified, must be the same size as gam_columns
* @param keep_gam_cols Save keys of model matrix
* @param standardize_tp_gam_cols standardize tp (thin plate) predictor columns
* @param scale_tp_penalty_mat Scale penalty matrix for tp (thin plate) smoothers as in R
* @param knot_ids Array storing frame keys of knots. One for each gam column set specified in gam_columns
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/99/Grid/gam")
  Call<GAMV3> trainGam(
      // --- Seed and GLM family/link configuration ---
      @Field("seed") long seed,
      @Field("family") GLMFamily family,
      @Field("tweedie_variance_power") double tweedie_variance_power,
      @Field("tweedie_link_power") double tweedie_link_power,
      @Field("theta") double theta,
      @Field("solver") GLMSolver solver,
      // --- Elastic-net regularization and lambda search ---
      @Field("alpha") double[] alpha,
      @Field("lambda") double[] lambda,
      @Field("startval") double[] startval,
      @Field("lambda_search") boolean lambda_search,
      @Field("early_stopping") boolean early_stopping,
      @Field("nlambdas") int nlambdas,
      @Field("standardize") boolean standardize,
      @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
      @Field("plug_values") String plug_values,
      @Field("non_negative") boolean non_negative,
      // --- Solver convergence controls ---
      @Field("max_iterations") int max_iterations,
      @Field("beta_epsilon") double beta_epsilon,
      @Field("objective_epsilon") double objective_epsilon,
      @Field("gradient_epsilon") double gradient_epsilon,
      @Field("obj_reg") double obj_reg,
      @Field("link") GLMLink link,
      @Field("intercept") boolean intercept,
      @Field("prior") double prior,
      @Field("cold_start") boolean cold_start,
      @Field("lambda_min_ratio") double lambda_min_ratio,
      @Field("beta_constraints") String beta_constraints,
      @Field("max_active_predictors") int max_active_predictors,
      @Field("interactions") String[] interactions,
      @Field("interaction_pairs") StringPairV3[] interaction_pairs,
      // --- Class balancing (classification only) ---
      @Field("balance_classes") boolean balance_classes,
      @Field("class_sampling_factors") float[] class_sampling_factors,
      @Field("max_after_balance_size") float max_after_balance_size,
      @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
      @Field("compute_p_values") boolean compute_p_values,
      @Field("remove_collinear_columns") boolean remove_collinear_columns,
      // --- GAM spline/knot configuration ---
      @Field("store_knot_locations") boolean store_knot_locations,
      @Field("num_knots") int[] num_knots,
      @Field("spline_orders") int[] spline_orders,
      @Field("splines_non_negative") boolean[] splines_non_negative,
      @Field("gam_columns") String[][] gam_columns,
      @Field("scale") double[] scale,
      @Field("bs") int[] bs,
      @Field("keep_gam_cols") boolean keep_gam_cols,
      @Field("standardize_tp_gam_cols") boolean standardize_tp_gam_cols,
      @Field("scale_tp_penalty_mat") boolean scale_tp_penalty_mat,
      @Field("knot_ids") String[] knot_ids,
      // --- Common model-building parameters (shared across H2O algorithms) ---
      @Field("model_id") String model_id,
      @Field("training_frame") String training_frame,
      @Field("validation_frame") String validation_frame,
      @Field("nfolds") int nfolds,
      @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
      @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
      @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
      @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
      @Field("distribution") GenmodelutilsDistributionFamily distribution,
      @Field("tweedie_power") double tweedie_power,
      @Field("quantile_alpha") double quantile_alpha,
      @Field("huber_alpha") double huber_alpha,
      @Field("response_column") String response_column,
      @Field("weights_column") String weights_column,
      @Field("offset_column") String offset_column,
      @Field("fold_column") String fold_column,
      @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
      @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
      @Field("max_categorical_levels") int max_categorical_levels,
      @Field("ignored_columns") String[] ignored_columns,
      @Field("ignore_const_cols") boolean ignore_const_cols,
      @Field("score_each_iteration") boolean score_each_iteration,
      @Field("checkpoint") String checkpoint,
      // --- Early stopping and scoring ---
      @Field("stopping_rounds") int stopping_rounds,
      @Field("max_runtime_secs") double max_runtime_secs,
      @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
      @Field("stopping_tolerance") double stopping_tolerance,
      @Field("gainslift_bins") int gainslift_bins,
      @Field("custom_metric_func") String custom_metric_func,
      @Field("custom_distribution_func") String custom_distribution_func,
      @Field("export_checkpoints_dir") String export_checkpoints_dir,
      @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Convenience overload of the full trainGam call above: posts only the
   * required gam_columns field to the same endpoint and lets the server apply
   * its schema defaults for every other parameter.
   */
  @FormUrlEncoded
  @POST("/99/Grid/gam")
  Call<GAMV3> trainGam(@Field("gam_columns") String[][] gam_columns);
/**
* Resume grid search for GAM model.
* @param seed Seed for pseudo random number generator (if applicable)
* @param family Family. Use binomial for classification with logistic regression, others are for regression
* problems.
* @param tweedie_variance_power Tweedie variance power
* @param tweedie_link_power Tweedie link power
* @param theta Theta
   * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
* with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
* datasets with many columns.
* @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
* alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
* specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
* 0.5 otherwise.
* @param lambda Regularization strength
* @param startval double array to initialize coefficients for GAM.
* @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
* @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided)
* @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
   *                 set to True, the value of nlambdas is set to 30 (fewer lambdas are needed for ridge regression)
* otherwise it is set to 100.
* @param standardize Standardize numeric columns to have zero mean and unit variance
* @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
* @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
* of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
* @param non_negative Restrict coefficients (not intercept) to be non-negative
* @param max_iterations Maximum number of iterations
   * @param beta_epsilon Converge if beta changes less (using L-infinity norm) than beta epsilon, ONLY applies to
* IRLSM solver
* @param objective_epsilon Converge if objective value changes less than this. Default indicates: If lambda_search
* is set to True the value of objective_epsilon is set to .0001. If the lambda_search is
* set to False and lambda is equal to zero, the value of objective_epsilon is set to
* .000001, for any other value of lambda the default value of objective_epsilon is set to
* .0001.
* @param gradient_epsilon Converge if objective changes less (using L-infinity norm) than this, ONLY applies to
* L-BFGS solver. Default indicates: If lambda_search is set to False and lambda is equal to
* zero, the default value of gradient_epsilon is equal to .000001, otherwise the default
* value is .0001. If lambda_search is set to True, the conditional values above are 1E-8
* and 1E-6 respectively.
* @param obj_reg Likelihood divider in objective value computation, default is 1/nobs
* @param link Link function.
* @param intercept Include constant term in the model
* @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
* and the mean of response does not reflect reality.
* @param cold_start Only applicable to multiple alpha/lambda values when calling GLM from GAM. If false, build the
* next model for next set of alpha/lambda values starting from the values provided by current
* model. If true will start GLM model from scratch.
* @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
* lambda that drives all coefficients to zero). Default indicates: if the number of
* observations is greater than the number of variables, then lambda_min_ratio is set to
* 0.0001; if the number of observations is less than the number of variables, then
* lambda_min_ratio is set to 0.01.
* @param beta_constraints Beta constraints
* @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
* to prevent expensive model building with many predictors. Default indicates: If the
* IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
* is set to 100000000.
* @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
* for the list.
* @param interaction_pairs A list of pairwise (first order) column interactions.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
* @param remove_collinear_columns In case of linearly dependent columns, remove some of the dependent columns
* @param store_knot_locations If set to true, will return knot locations as double[][] array for gam column names
* found knots_for_gam. Default to false.
* @param num_knots Number of knots for gam predictors. If specified, must specify one for each gam predictor. For
   *                  monotone I-splines, minimum = 2, for cs spline, minimum = 3. For thin plate, minimum is size of
* polynomial basis + 2.
* @param spline_orders Order of I-splines or NBSplineTypeI M-splines used for gam predictors. If specified, must be
* the same size as gam_columns. For I-splines, the spline_orders will be the same as the
* polynomials used to generate the splines. For M-splines, the polynomials used to generate
* the splines will be spline_order-1. Values for bs=0 or 1 will be ignored.
* @param splines_non_negative Valid for I-spline (bs=2) only. True if the I-splines are monotonically increasing
* (and monotonically non-decreasing) and False if the I-splines are monotonically
* decreasing (and monotonically non-increasing). If specified, must be the same size
* as gam_columns. Values for other spline types will be ignored. Default to true.
* @param gam_columns Arrays of predictor column names for gam for smoothers using single or multiple predictors
* like {{'c1'},{'c2','c3'},{'c4'},...}
* @param scale Smoothing parameter for gam predictors. If specified, must be of the same length as gam_columns
* @param bs Basis function type for each gam predictors, 0 for cr, 1 for thin plate regression with knots, 2 for
* monotone I-splines, 3 for NBSplineTypeI M-splines (refer to doc here:
* https://github.com/h2oai/h2o-3/issues/6926). If specified, must be the same size as gam_columns
* @param keep_gam_cols Save keys of model matrix
* @param standardize_tp_gam_cols standardize tp (thin plate) predictor columns
* @param scale_tp_penalty_mat Scale penalty matrix for tp (thin plate) smoothers as in R
* @param knot_ids Array storing frame keys of knots. One for each gam column set specified in gam_columns
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/99/Grid/gam/resume")
  Call<GAMV3> resumeGam(
      // --- Seed and GLM family/link configuration ---
      @Field("seed") long seed,
      @Field("family") GLMFamily family,
      @Field("tweedie_variance_power") double tweedie_variance_power,
      @Field("tweedie_link_power") double tweedie_link_power,
      @Field("theta") double theta,
      @Field("solver") GLMSolver solver,
      // --- Elastic-net regularization and lambda search ---
      @Field("alpha") double[] alpha,
      @Field("lambda") double[] lambda,
      @Field("startval") double[] startval,
      @Field("lambda_search") boolean lambda_search,
      @Field("early_stopping") boolean early_stopping,
      @Field("nlambdas") int nlambdas,
      @Field("standardize") boolean standardize,
      @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
      @Field("plug_values") String plug_values,
      @Field("non_negative") boolean non_negative,
      // --- Solver convergence controls ---
      @Field("max_iterations") int max_iterations,
      @Field("beta_epsilon") double beta_epsilon,
      @Field("objective_epsilon") double objective_epsilon,
      @Field("gradient_epsilon") double gradient_epsilon,
      @Field("obj_reg") double obj_reg,
      @Field("link") GLMLink link,
      @Field("intercept") boolean intercept,
      @Field("prior") double prior,
      @Field("cold_start") boolean cold_start,
      @Field("lambda_min_ratio") double lambda_min_ratio,
      @Field("beta_constraints") String beta_constraints,
      @Field("max_active_predictors") int max_active_predictors,
      @Field("interactions") String[] interactions,
      @Field("interaction_pairs") StringPairV3[] interaction_pairs,
      // --- Class balancing (classification only) ---
      @Field("balance_classes") boolean balance_classes,
      @Field("class_sampling_factors") float[] class_sampling_factors,
      @Field("max_after_balance_size") float max_after_balance_size,
      @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
      @Field("compute_p_values") boolean compute_p_values,
      @Field("remove_collinear_columns") boolean remove_collinear_columns,
      // --- GAM spline/knot configuration ---
      @Field("store_knot_locations") boolean store_knot_locations,
      @Field("num_knots") int[] num_knots,
      @Field("spline_orders") int[] spline_orders,
      @Field("splines_non_negative") boolean[] splines_non_negative,
      @Field("gam_columns") String[][] gam_columns,
      @Field("scale") double[] scale,
      @Field("bs") int[] bs,
      @Field("keep_gam_cols") boolean keep_gam_cols,
      @Field("standardize_tp_gam_cols") boolean standardize_tp_gam_cols,
      @Field("scale_tp_penalty_mat") boolean scale_tp_penalty_mat,
      @Field("knot_ids") String[] knot_ids,
      // --- Common model-building parameters (shared across H2O algorithms) ---
      @Field("model_id") String model_id,
      @Field("training_frame") String training_frame,
      @Field("validation_frame") String validation_frame,
      @Field("nfolds") int nfolds,
      @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
      @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
      @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
      @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
      @Field("distribution") GenmodelutilsDistributionFamily distribution,
      @Field("tweedie_power") double tweedie_power,
      @Field("quantile_alpha") double quantile_alpha,
      @Field("huber_alpha") double huber_alpha,
      @Field("response_column") String response_column,
      @Field("weights_column") String weights_column,
      @Field("offset_column") String offset_column,
      @Field("fold_column") String fold_column,
      @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
      @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
      @Field("max_categorical_levels") int max_categorical_levels,
      @Field("ignored_columns") String[] ignored_columns,
      @Field("ignore_const_cols") boolean ignore_const_cols,
      @Field("score_each_iteration") boolean score_each_iteration,
      @Field("checkpoint") String checkpoint,
      // --- Early stopping and scoring ---
      @Field("stopping_rounds") int stopping_rounds,
      @Field("max_runtime_secs") double max_runtime_secs,
      @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
      @Field("stopping_tolerance") double stopping_tolerance,
      @Field("gainslift_bins") int gainslift_bins,
      @Field("custom_metric_func") String custom_metric_func,
      @Field("custom_distribution_func") String custom_distribution_func,
      @Field("export_checkpoints_dir") String export_checkpoints_dir,
      @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Convenience overload of the full resumeGam call above: posts only the
   * required gam_columns field to the resume endpoint and lets the server
   * apply its schema defaults for every other parameter.
   */
  @FormUrlEncoded
  @POST("/99/Grid/gam/resume")
  Call<GAMV3> resumeGam(@Field("gam_columns") String[][] gam_columns);
/**
* Run grid search for ANOVAGLM model.
* @param seed Seed for pseudo random number generator (if applicable)
* @param standardize Standardize numeric columns to have zero mean and unit variance
* @param family Family. Use binomial for classification with logistic regression, others are for regression
* problems.
* @param tweedie_variance_power Tweedie variance power
* @param tweedie_link_power Tweedie link power
* @param theta Theta
* @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
* alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
* specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
* 0.5 otherwise.
* @param lambda Regularization strength
* @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
   * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
* with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
* datasets with many columns.
* @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
* @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
* of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
* @param non_negative Restrict coefficients (not intercept) to be non-negative
* @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
* @param max_iterations Maximum number of iterations
* @param link Link function.
* @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
* and the mean of response does not reflect reality.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param highest_interaction_term Limit the number of interaction terms, if 2 means interaction between 2 columns
* only, 3 for three columns and so on... Default to 2.
* @param type Refer to the SS type 1, 2, 3, or 4. We are currently only supporting 3
* @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided).
* @param save_transformed_framekeys true to save the keys of transformed predictors and interaction column.
* @param nparallelism Number of models to build in parallel. Default to 4. Adjust according to your system.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/99/Grid/anovaglm")
  Call<ANOVAGLMV3> trainAnovaglm(
      // --- Seed and GLM family/link configuration ---
      @Field("seed") long seed,
      @Field("standardize") boolean standardize,
      @Field("family") GLMFamily family,
      @Field("tweedie_variance_power") double tweedie_variance_power,
      @Field("tweedie_link_power") double tweedie_link_power,
      @Field("theta") double theta,
      // --- Elastic-net regularization and solver controls ---
      @Field("alpha") double[] alpha,
      @Field("lambda") double[] lambda,
      @Field("lambda_search") boolean lambda_search,
      @Field("solver") GLMSolver solver,
      @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
      @Field("plug_values") String plug_values,
      @Field("non_negative") boolean non_negative,
      @Field("compute_p_values") boolean compute_p_values,
      @Field("max_iterations") int max_iterations,
      @Field("link") GLMLink link,
      @Field("prior") double prior,
      // --- Class balancing (classification only) ---
      @Field("balance_classes") boolean balance_classes,
      @Field("class_sampling_factors") float[] class_sampling_factors,
      @Field("max_after_balance_size") float max_after_balance_size,
      // --- ANOVAGLM-specific parameters ---
      @Field("highest_interaction_term") int highest_interaction_term,
      @Field("type") int type,
      @Field("early_stopping") boolean early_stopping,
      @Field("save_transformed_framekeys") boolean save_transformed_framekeys,
      @Field("nparallelism") int nparallelism,
      // --- Common model-building parameters (shared across H2O algorithms) ---
      @Field("model_id") String model_id,
      @Field("training_frame") String training_frame,
      @Field("validation_frame") String validation_frame,
      @Field("nfolds") int nfolds,
      @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
      @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
      @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
      @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
      @Field("distribution") GenmodelutilsDistributionFamily distribution,
      @Field("tweedie_power") double tweedie_power,
      @Field("quantile_alpha") double quantile_alpha,
      @Field("huber_alpha") double huber_alpha,
      @Field("response_column") String response_column,
      @Field("weights_column") String weights_column,
      @Field("offset_column") String offset_column,
      @Field("fold_column") String fold_column,
      @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
      @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
      @Field("max_categorical_levels") int max_categorical_levels,
      @Field("ignored_columns") String[] ignored_columns,
      @Field("ignore_const_cols") boolean ignore_const_cols,
      @Field("score_each_iteration") boolean score_each_iteration,
      @Field("checkpoint") String checkpoint,
      // --- Early stopping and scoring ---
      @Field("stopping_rounds") int stopping_rounds,
      @Field("max_runtime_secs") double max_runtime_secs,
      @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
      @Field("stopping_tolerance") double stopping_tolerance,
      @Field("gainslift_bins") int gainslift_bins,
      @Field("custom_metric_func") String custom_metric_func,
      @Field("custom_distribution_func") String custom_distribution_func,
      @Field("export_checkpoints_dir") String export_checkpoints_dir,
      @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Convenience overload of the full trainAnovaglm call above: posts an empty
   * form to the same endpoint so the server applies its schema defaults for
   * every parameter.
   */
  @FormUrlEncoded
  @POST("/99/Grid/anovaglm")
  Call<ANOVAGLMV3> trainAnovaglm();
/**
* Resume grid search for ANOVAGLM model.
* @param seed Seed for pseudo random number generator (if applicable)
* @param standardize Standardize numeric columns to have zero mean and unit variance
* @param family Family. Use binomial for classification with logistic regression, others are for regression
* problems.
* @param tweedie_variance_power Tweedie variance power
* @param tweedie_link_power Tweedie link power
* @param theta Theta
* @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
* alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
* specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
* 0.5 otherwise.
* @param lambda Regularization strength
* @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
     * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
* with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
* datasets with many columns.
* @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
* @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
* of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
* @param non_negative Restrict coefficients (not intercept) to be non-negative
* @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
* @param max_iterations Maximum number of iterations
* @param link Link function.
* @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
* and the mean of response does not reflect reality.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param highest_interaction_term Limit the number of interaction terms, if 2 means interaction between 2 columns
* only, 3 for three columns and so on... Default to 2.
* @param type Refer to the SS type 1, 2, 3, or 4. We are currently only supporting 3
* @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided).
* @param save_transformed_framekeys true to save the keys of transformed predictors and interaction column.
* @param nparallelism Number of models to build in parallel. Default to 4. Adjust according to your system.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
     * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
    @FormUrlEncoded
    @POST("/99/Grid/anovaglm/resume")
    Call<ANOVAGLMV3> resumeAnovaglm(
        // ANOVAGLM-specific parameters (see the Javadoc above for each field's meaning).
        // Generated code: @Field names and parameter order mirror the H2O REST form fields.
        @Field("seed") long seed,
        @Field("standardize") boolean standardize,
        @Field("family") GLMFamily family,
        @Field("tweedie_variance_power") double tweedie_variance_power,
        @Field("tweedie_link_power") double tweedie_link_power,
        @Field("theta") double theta,
        @Field("alpha") double[] alpha,
        @Field("lambda") double[] lambda,
        @Field("lambda_search") boolean lambda_search,
        @Field("solver") GLMSolver solver,
        @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
        @Field("plug_values") String plug_values,
        @Field("non_negative") boolean non_negative,
        @Field("compute_p_values") boolean compute_p_values,
        @Field("max_iterations") int max_iterations,
        @Field("link") GLMLink link,
        @Field("prior") double prior,
        @Field("balance_classes") boolean balance_classes,
        @Field("class_sampling_factors") float[] class_sampling_factors,
        @Field("max_after_balance_size") float max_after_balance_size,
        @Field("highest_interaction_term") int highest_interaction_term,
        @Field("type") int type,
        @Field("early_stopping") boolean early_stopping,
        @Field("save_transformed_framekeys") boolean save_transformed_framekeys,
        @Field("nparallelism") int nparallelism,
        // Common model-building parameters shared by all H2O algorithms.
        @Field("model_id") String model_id,
        @Field("training_frame") String training_frame,
        @Field("validation_frame") String validation_frame,
        @Field("nfolds") int nfolds,
        @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
        @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
        @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
        @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
        @Field("distribution") GenmodelutilsDistributionFamily distribution,
        @Field("tweedie_power") double tweedie_power,
        @Field("quantile_alpha") double quantile_alpha,
        @Field("huber_alpha") double huber_alpha,
        @Field("response_column") String response_column,
        @Field("weights_column") String weights_column,
        @Field("offset_column") String offset_column,
        @Field("fold_column") String fold_column,
        @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
        @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
        @Field("max_categorical_levels") int max_categorical_levels,
        @Field("ignored_columns") String[] ignored_columns,
        @Field("ignore_const_cols") boolean ignore_const_cols,
        @Field("score_each_iteration") boolean score_each_iteration,
        @Field("checkpoint") String checkpoint,
        @Field("stopping_rounds") int stopping_rounds,
        @Field("max_runtime_secs") double max_runtime_secs,
        @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
        @Field("stopping_tolerance") double stopping_tolerance,
        @Field("gainslift_bins") int gainslift_bins,
        @Field("custom_metric_func") String custom_metric_func,
        @Field("custom_distribution_func") String custom_distribution_func,
        @Field("export_checkpoints_dir") String export_checkpoints_dir,
        @Field("auc_type") MultinomialAucType auc_type
    );
    /** Resume grid search for ANOVAGLM model with every parameter left at the backend's default value. */
    @FormUrlEncoded
    @POST("/99/Grid/anovaglm/resume")
    Call<ANOVAGLMV3> resumeAnovaglm();
/**
* Run grid search for PSVM model.
* @param hyper_param Penalty parameter C of the error term
* @param kernel_type Type of used kernel
* @param gamma Coefficient of the kernel (currently RBF gamma for gaussian kernel, -1 means 1/#features)
     * @param rank_ratio Desired rank of the ICF matrix expressed as a ratio of number of input rows (-1 means use
* sqrt(#rows)).
* @param positive_weight Weight of positive (+1) class of observations
     * @param negative_weight Weight of negative (-1) class of observations
* @param disable_training_metrics Disable calculating training metrics (expensive on large datasets)
* @param sv_threshold Threshold for accepting a candidate observation into the set of support vectors
* @param max_iterations Maximum number of iteration of the algorithm
* @param fact_threshold Convergence threshold of the Incomplete Cholesky Factorization (ICF)
* @param feasible_threshold Convergence threshold for primal-dual residuals in the IPM iteration
* @param surrogate_gap_threshold Feasibility criterion of the surrogate duality gap (eta)
* @param mu_factor Increasing factor mu
* @param seed Seed for pseudo random number generator (if applicable)
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
     * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
    @FormUrlEncoded
    @POST("/99/Grid/psvm")
    Call<PSVMV3> trainPsvm(
        // PSVM-specific parameters (see the Javadoc above for each field's meaning).
        // Generated code: @Field names and parameter order mirror the H2O REST form fields.
        @Field("hyper_param") double hyper_param,
        @Field("kernel_type") GenmodelalgospsvmKernelType kernel_type,
        @Field("gamma") double gamma,
        @Field("rank_ratio") double rank_ratio,
        @Field("positive_weight") double positive_weight,
        @Field("negative_weight") double negative_weight,
        @Field("disable_training_metrics") boolean disable_training_metrics,
        @Field("sv_threshold") double sv_threshold,
        @Field("max_iterations") int max_iterations,
        @Field("fact_threshold") double fact_threshold,
        @Field("feasible_threshold") double feasible_threshold,
        @Field("surrogate_gap_threshold") double surrogate_gap_threshold,
        @Field("mu_factor") double mu_factor,
        @Field("seed") long seed,
        // Common model-building parameters shared by all H2O algorithms.
        @Field("model_id") String model_id,
        @Field("training_frame") String training_frame,
        @Field("validation_frame") String validation_frame,
        @Field("nfolds") int nfolds,
        @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
        @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
        @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
        @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
        @Field("distribution") GenmodelutilsDistributionFamily distribution,
        @Field("tweedie_power") double tweedie_power,
        @Field("quantile_alpha") double quantile_alpha,
        @Field("huber_alpha") double huber_alpha,
        @Field("response_column") String response_column,
        @Field("weights_column") String weights_column,
        @Field("offset_column") String offset_column,
        @Field("fold_column") String fold_column,
        @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
        @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
        @Field("max_categorical_levels") int max_categorical_levels,
        @Field("ignored_columns") String[] ignored_columns,
        @Field("ignore_const_cols") boolean ignore_const_cols,
        @Field("score_each_iteration") boolean score_each_iteration,
        @Field("checkpoint") String checkpoint,
        @Field("stopping_rounds") int stopping_rounds,
        @Field("max_runtime_secs") double max_runtime_secs,
        @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
        @Field("stopping_tolerance") double stopping_tolerance,
        @Field("gainslift_bins") int gainslift_bins,
        @Field("custom_metric_func") String custom_metric_func,
        @Field("custom_distribution_func") String custom_distribution_func,
        @Field("export_checkpoints_dir") String export_checkpoints_dir,
        @Field("auc_type") MultinomialAucType auc_type
    );
    /** Run grid search for PSVM model with every parameter left at the backend's default value. */
    @FormUrlEncoded
    @POST("/99/Grid/psvm")
    Call<PSVMV3> trainPsvm();
/**
* Resume grid search for PSVM model.
* @param hyper_param Penalty parameter C of the error term
* @param kernel_type Type of used kernel
* @param gamma Coefficient of the kernel (currently RBF gamma for gaussian kernel, -1 means 1/#features)
     * @param rank_ratio Desired rank of the ICF matrix expressed as a ratio of number of input rows (-1 means use
* sqrt(#rows)).
* @param positive_weight Weight of positive (+1) class of observations
     * @param negative_weight Weight of negative (-1) class of observations
* @param disable_training_metrics Disable calculating training metrics (expensive on large datasets)
* @param sv_threshold Threshold for accepting a candidate observation into the set of support vectors
* @param max_iterations Maximum number of iteration of the algorithm
* @param fact_threshold Convergence threshold of the Incomplete Cholesky Factorization (ICF)
* @param feasible_threshold Convergence threshold for primal-dual residuals in the IPM iteration
* @param surrogate_gap_threshold Feasibility criterion of the surrogate duality gap (eta)
* @param mu_factor Increasing factor mu
* @param seed Seed for pseudo random number generator (if applicable)
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
     * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
    @FormUrlEncoded
    @POST("/99/Grid/psvm/resume")
    Call<PSVMV3> resumePsvm(
        // PSVM-specific parameters (see the Javadoc above for each field's meaning).
        // Generated code: @Field names and parameter order mirror the H2O REST form fields.
        @Field("hyper_param") double hyper_param,
        @Field("kernel_type") GenmodelalgospsvmKernelType kernel_type,
        @Field("gamma") double gamma,
        @Field("rank_ratio") double rank_ratio,
        @Field("positive_weight") double positive_weight,
        @Field("negative_weight") double negative_weight,
        @Field("disable_training_metrics") boolean disable_training_metrics,
        @Field("sv_threshold") double sv_threshold,
        @Field("max_iterations") int max_iterations,
        @Field("fact_threshold") double fact_threshold,
        @Field("feasible_threshold") double feasible_threshold,
        @Field("surrogate_gap_threshold") double surrogate_gap_threshold,
        @Field("mu_factor") double mu_factor,
        @Field("seed") long seed,
        // Common model-building parameters shared by all H2O algorithms.
        @Field("model_id") String model_id,
        @Field("training_frame") String training_frame,
        @Field("validation_frame") String validation_frame,
        @Field("nfolds") int nfolds,
        @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
        @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
        @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
        @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
        @Field("distribution") GenmodelutilsDistributionFamily distribution,
        @Field("tweedie_power") double tweedie_power,
        @Field("quantile_alpha") double quantile_alpha,
        @Field("huber_alpha") double huber_alpha,
        @Field("response_column") String response_column,
        @Field("weights_column") String weights_column,
        @Field("offset_column") String offset_column,
        @Field("fold_column") String fold_column,
        @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
        @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
        @Field("max_categorical_levels") int max_categorical_levels,
        @Field("ignored_columns") String[] ignored_columns,
        @Field("ignore_const_cols") boolean ignore_const_cols,
        @Field("score_each_iteration") boolean score_each_iteration,
        @Field("checkpoint") String checkpoint,
        @Field("stopping_rounds") int stopping_rounds,
        @Field("max_runtime_secs") double max_runtime_secs,
        @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
        @Field("stopping_tolerance") double stopping_tolerance,
        @Field("gainslift_bins") int gainslift_bins,
        @Field("custom_metric_func") String custom_metric_func,
        @Field("custom_distribution_func") String custom_distribution_func,
        @Field("export_checkpoints_dir") String export_checkpoints_dir,
        @Field("auc_type") MultinomialAucType auc_type
    );
    /** Resume grid search for PSVM model with every parameter left at the backend's default value. */
    @FormUrlEncoded
    @POST("/99/Grid/psvm/resume")
    Call<PSVMV3> resumePsvm();
/**
* Run grid search for RuleFit model.
* @param seed Seed for pseudo random number generator (if applicable).
* @param algorithm The algorithm to use to generate rules.
* @param min_rule_length Minimum length of rules. Defaults to 3.
* @param max_rule_length Maximum length of rules. Defaults to 3.
     * @param max_num_rules The maximum number of rules to return. Defaults to -1 which means the number of rules is
* selected
* by diminishing returns in model deviance.
* @param model_type Specifies type of base learners in the ensemble.
* @param rule_generation_ntrees Specifies the number of trees to build in the tree model. Defaults to 50.
* @param remove_duplicates Whether to remove rules which are identical to an earlier rule. Defaults to true.
* @param lambda Lambda for LASSO regressor.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
     * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
    @FormUrlEncoded
    @POST("/99/Grid/rulefit")
    Call<RuleFitV3> trainRulefit(
        // RuleFit-specific parameters (see the Javadoc above for each field's meaning).
        // Generated code: @Field names and parameter order mirror the H2O REST form fields.
        @Field("seed") long seed,
        @Field("algorithm") RuleFitModelAlgorithm algorithm,
        @Field("min_rule_length") int min_rule_length,
        @Field("max_rule_length") int max_rule_length,
        @Field("max_num_rules") int max_num_rules,
        @Field("model_type") RuleFitModelModelType model_type,
        @Field("rule_generation_ntrees") int rule_generation_ntrees,
        @Field("remove_duplicates") boolean remove_duplicates,
        @Field("lambda") double[] lambda,
        // Common model-building parameters shared by all H2O algorithms.
        @Field("model_id") String model_id,
        @Field("training_frame") String training_frame,
        @Field("validation_frame") String validation_frame,
        @Field("nfolds") int nfolds,
        @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
        @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
        @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
        @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
        @Field("distribution") GenmodelutilsDistributionFamily distribution,
        @Field("tweedie_power") double tweedie_power,
        @Field("quantile_alpha") double quantile_alpha,
        @Field("huber_alpha") double huber_alpha,
        @Field("response_column") String response_column,
        @Field("weights_column") String weights_column,
        @Field("offset_column") String offset_column,
        @Field("fold_column") String fold_column,
        @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
        @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
        @Field("max_categorical_levels") int max_categorical_levels,
        @Field("ignored_columns") String[] ignored_columns,
        @Field("ignore_const_cols") boolean ignore_const_cols,
        @Field("score_each_iteration") boolean score_each_iteration,
        @Field("checkpoint") String checkpoint,
        @Field("stopping_rounds") int stopping_rounds,
        @Field("max_runtime_secs") double max_runtime_secs,
        @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
        @Field("stopping_tolerance") double stopping_tolerance,
        @Field("gainslift_bins") int gainslift_bins,
        @Field("custom_metric_func") String custom_metric_func,
        @Field("custom_distribution_func") String custom_distribution_func,
        @Field("export_checkpoints_dir") String export_checkpoints_dir,
        @Field("auc_type") MultinomialAucType auc_type
    );
@FormUrlEncoded
@POST("/99/Grid/rulefit")
Call<RuleFitV3> trainRulefit();
/**
* Resume grid search for RuleFit model.
* @param seed Seed for pseudo random number generator (if applicable).
* @param algorithm The algorithm to use to generate rules.
* @param min_rule_length Minimum length of rules. Defaults to 3.
* @param max_rule_length Maximum length of rules. Defaults to 3.
* @param max_num_rules The maximum number of rules to return. defaults to -1 which means the number of rules is
* selected
* by diminishing returns in model deviance.
* @param model_type Specifies type of base learners in the ensemble.
* @param rule_generation_ntrees Specifies the number of trees to build in the tree model. Defaults to 50.
* @param remove_duplicates Whether to remove rules which are identical to an earlier rule. Defaults to true.
* @param lambda Lambda for LASSO regressor.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/99/Grid/rulefit/resume")
Call<RuleFitV3> resumeRulefit(
@Field("seed") long seed,
@Field("algorithm") RuleFitModelAlgorithm algorithm,
@Field("min_rule_length") int min_rule_length,
@Field("max_rule_length") int max_rule_length,
@Field("max_num_rules") int max_num_rules,
@Field("model_type") RuleFitModelModelType model_type,
@Field("rule_generation_ntrees") int rule_generation_ntrees,
@Field("remove_duplicates") boolean remove_duplicates,
@Field("lambda") double[] lambda,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
@FormUrlEncoded
@POST("/99/Grid/rulefit/resume")
Call<RuleFitV3> resumeRulefit();
/**
* Run grid search for UpliftDRF model.
* @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p}
   *               for classification and p/3 for regression (where p is the # of predictors)
* @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
* @param treatment_column Define the column which will be used for computing uplift gain to select best split for a
* tree. The column has to divide the dataset into treatment (value 1) and control (value 0)
* groups.
* @param uplift_metric Divergence metric used to find best split when building an uplift tree.
* @param auuc_type Metric used to calculate Area Under Uplift Curve.
* @param auuc_nbins Number of bins to calculate Area Under Uplift Curve.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param ntrees Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
* best point
* @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
* root level, then decrease by factor of two per level
* @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
* point. Higher values can lead to more overfitting.
* @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
* stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
* trees when the R^2 metric equals or exceeds this
* @param seed Seed for pseudo random number generator (if applicable)
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
* 1.0), for each tree
* @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
* 0.0 and <= 2.0)
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
* @param histogram_type What type of histogram to use for finding optimal split points
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
* the response column is a constant value.If disabled, then model will train
* regardless of the response column being a constant value or not.
* @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
* running. In case of cluster shutdown, this checkpoint can be used to restart
* training.
* @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
* only when in_training_checkpoints_dir is defined
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/99/Grid/upliftdrf")
Call<UpliftDRFV3> trainUpliftdrf(
@Field("mtries") int mtries,
@Field("sample_rate") double sample_rate,
@Field("treatment_column") String treatment_column,
@Field("uplift_metric") TreeupliftUpliftDRFModelUpliftDRFParametersUpliftMetricType uplift_metric,
@Field("auuc_type") AUUCType auuc_type,
@Field("auuc_nbins") int auuc_nbins,
@Field("balance_classes") boolean balance_classes,
@Field("class_sampling_factors") float[] class_sampling_factors,
@Field("max_after_balance_size") float max_after_balance_size,
@Field("max_confusion_matrix_size") int max_confusion_matrix_size,
@Field("ntrees") int ntrees,
@Field("max_depth") int max_depth,
@Field("min_rows") double min_rows,
@Field("nbins") int nbins,
@Field("nbins_top_level") int nbins_top_level,
@Field("nbins_cats") int nbins_cats,
@Field("r2_stopping") double r2_stopping,
@Field("seed") long seed,
@Field("build_tree_one_node") boolean build_tree_one_node,
@Field("sample_rate_per_class") double[] sample_rate_per_class,
@Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
@Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
@Field("score_tree_interval") int score_tree_interval,
@Field("min_split_improvement") double min_split_improvement,
@Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
@Field("calibrate_model") boolean calibrate_model,
@Field("calibration_frame") String calibration_frame,
@Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
@Field("check_constant_response") boolean check_constant_response,
@Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
@Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
@FormUrlEncoded
@POST("/99/Grid/upliftdrf")
Call<UpliftDRFV3> trainUpliftdrf(@Field("treatment_column") String treatment_column);
/**
* Resume grid search for UpliftDRF model.
* @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p}
   *               for classification and p/3 for regression (where p is the # of predictors)
* @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
* @param treatment_column Define the column which will be used for computing uplift gain to select best split for a
* tree. The column has to divide the dataset into treatment (value 1) and control (value 0)
* groups.
* @param uplift_metric Divergence metric used to find best split when building an uplift tree.
* @param auuc_type Metric used to calculate Area Under Uplift Curve.
* @param auuc_nbins Number of bins to calculate Area Under Uplift Curve.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param ntrees Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
* best point
* @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
* root level, then decrease by factor of two per level
* @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
* point. Higher values can lead to more overfitting.
* @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
* stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
* trees when the R^2 metric equals or exceeds this
* @param seed Seed for pseudo random number generator (if applicable)
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
* 1.0), for each tree
* @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
* 0.0 and <= 2.0)
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
* @param histogram_type What type of histogram to use for finding optimal split points
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
* the response column is a constant value.If disabled, then model will train
* regardless of the response column being a constant value or not.
* @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
* running. In case of cluster shutdown, this checkpoint can be used to restart
* training.
* @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
* only when in_training_checkpoints_dir is defined
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/99/Grid/upliftdrf/resume")
Call<UpliftDRFV3> resumeUpliftdrf(
@Field("mtries") int mtries,
@Field("sample_rate") double sample_rate,
@Field("treatment_column") String treatment_column,
@Field("uplift_metric") TreeupliftUpliftDRFModelUpliftDRFParametersUpliftMetricType uplift_metric,
@Field("auuc_type") AUUCType auuc_type,
@Field("auuc_nbins") int auuc_nbins,
@Field("balance_classes") boolean balance_classes,
@Field("class_sampling_factors") float[] class_sampling_factors,
@Field("max_after_balance_size") float max_after_balance_size,
@Field("max_confusion_matrix_size") int max_confusion_matrix_size,
@Field("ntrees") int ntrees,
@Field("max_depth") int max_depth,
@Field("min_rows") double min_rows,
@Field("nbins") int nbins,
@Field("nbins_top_level") int nbins_top_level,
@Field("nbins_cats") int nbins_cats,
@Field("r2_stopping") double r2_stopping,
@Field("seed") long seed,
@Field("build_tree_one_node") boolean build_tree_one_node,
@Field("sample_rate_per_class") double[] sample_rate_per_class,
@Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
@Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
@Field("score_tree_interval") int score_tree_interval,
@Field("min_split_improvement") double min_split_improvement,
@Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
@Field("calibrate_model") boolean calibrate_model,
@Field("calibration_frame") String calibration_frame,
@Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
@Field("check_constant_response") boolean check_constant_response,
@Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
@Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
  /**
   * Resume grid search for UpliftDRF model.
   * @param treatment_column Treatment column name (identifies the treatment/control group assignment per row).
   */
  @FormUrlEncoded
  @POST("/99/Grid/upliftdrf/resume")
  Call<UpliftDRFV3> resumeUpliftdrf(@Field("treatment_column") String treatment_column);
  /**
   * Run grid search for ModelSelection model.
   * @param seed Seed for pseudo random number generator (if applicable)
   * @param family Family. For maxr/maxrsweep, only gaussian. For backward, ordinal and multinomial families are not
   *        supported
   * @param tweedie_variance_power Tweedie variance power
   * @param tweedie_link_power Tweedie link power
   * @param theta Theta
   * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
   *        with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *        datasets with many columns.
   * @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
   *        alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
   *        specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
   *        0.5 otherwise.
   * @param lambda Regularization strength
   * @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
   * @param multinode_mode For maxrsweep only. If enabled, will attempt to perform sweeping action using multiple
   *        nodes in the cluster. Defaults to false.
   * @param build_glm_model For maxrsweep mode only. If true, will return full blown GLM models with the desired
   *        predictor subsets. If false, only the predictor subsets, predictor coefficients are
   *        returned. This is for speeding up the model selection process. The users can choose to
   *        build the GLM models themselves by using the predictor subsets themselves. Defaults to
   *        false.
   * @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided)
   * @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
   *        set to True, the value of nlambdas is set to 30 (fewer lambdas are needed for ridge regression)
   *        otherwise it is set to 100.
   * @param score_iteration_interval Perform scoring for every score_iteration_interval iterations
   * @param standardize Standardize numeric columns to have zero mean and unit variance
   * @param cold_start Only applicable to multiple alpha/lambda values. If false, build the next model for next set
   *        of alpha/lambda values starting from the values provided by current model. If true will start
   *        GLM model from scratch.
   * @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
   * @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *        of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
   * @param non_negative Restrict coefficients (not intercept) to be non-negative
   * @param max_iterations Maximum number of iterations
   * @param beta_epsilon Converge if beta changes less (using L-infinity norm) than beta epsilon, ONLY applies to
   *        IRLSM solver
   * @param objective_epsilon Converge if objective value changes less than this. Default (of -1.0) indicates: If
   *        lambda_search is set to True the value of objective_epsilon is set to .0001. If the
   *        lambda_search is set to False and lambda is equal to zero, the value of
   *        objective_epsilon is set to .000001, for any other value of lambda the default value of
   *        objective_epsilon is set to .0001.
   * @param gradient_epsilon Converge if objective changes less (using L-infinity norm) than this, ONLY applies to
   *        L-BFGS solver. Default (of -1.0) indicates: If lambda_search is set to False and lambda
   *        is equal to zero, the default value of gradient_epsilon is equal to .000001, otherwise
   *        the default value is .0001. If lambda_search is set to True, the conditional values above
   *        are 1E-8 and 1E-6 respectively.
   * @param obj_reg Likelihood divider in objective value computation, default (of -1.0) will set it to 1/nobs
   * @param link Link function.
   * @param startval Double array to initialize coefficients for GLM.
   * @param calc_like If true, will return likelihood function value for GLM.
   * @param mode Mode: Used to choose model selection algorithms to use. Options include 'allsubsets' for all
   *        subsets, 'maxr' that uses sequential replacement and GLM to build all models, slow but works with
   *        cross-validation, validation frames for more robust results, 'maxrsweep' that uses sequential
   *        replacement and sweeping action, much faster than 'maxr', 'backward' for backward selection.
   * @param intercept Include constant term in the model
   * @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *        and the mean of response does not reflect reality.
   * @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
   *        lambda that drives all coefficients to zero). Default indicates: if the number of
   *        observations is greater than the number of variables, then lambda_min_ratio is set to
   *        0.0001; if the number of observations is less than the number of variables, then
   *        lambda_min_ratio is set to 0.01.
   * @param beta_constraints Beta constraints
   * @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
   *        to prevent expensive model building with many predictors. Default indicates: If the
   *        IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
   *        is set to 100000000.
   * @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   * @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *        specified, sampling factors will be automatically computed to obtain class balance
   *        during training. Requires balance_classes.
   * @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *        less than 1.0). Requires balance_classes.
   * @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *        the Logs
   * @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
   * @param remove_collinear_columns In case of linearly dependent columns, remove some of the dependent columns
   * @param max_predictor_number Maximum number of predictors to be considered when building GLM models. Defaults to
   *        1.
   * @param min_predictor_number For mode = 'backward' only. Minimum number of predictors to be considered when
   *        building GLM models starting with all predictors to be included. Defaults to 1.
   * @param nparallelism number of models to build in parallel. Defaults to 0.0 which is adaptive to the system
   *        capability
   * @param p_values_threshold For mode='backward' only. If specified, will stop the model building process when all
   *        coefficients' p-values drop below this threshold
   * @param influence If set to dfbetas will calculate the difference in beta when a datarow is included and excluded
   *        in the dataset.
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *        between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *        excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *        to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *        observation weights and do not increase the size of the data frame. This is typically the
   *        number of times a row is repeated, but non-integer values are supported as well. During
   *        training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *        If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *        is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *        function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *        option will stratify the folds based on the response variable, for classification
   *        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *        for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *        not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *        binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/modelselection")
  Call<ModelSelectionV3> trainModelselection(
    @Field("seed") long seed,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("solver") GLMSolver solver,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("lambda_search") boolean lambda_search,
    @Field("multinode_mode") boolean multinode_mode,
    @Field("build_glm_model") boolean build_glm_model,
    @Field("early_stopping") boolean early_stopping,
    @Field("nlambdas") int nlambdas,
    @Field("score_iteration_interval") int score_iteration_interval,
    @Field("standardize") boolean standardize,
    @Field("cold_start") boolean cold_start,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("max_iterations") int max_iterations,
    @Field("beta_epsilon") double beta_epsilon,
    @Field("objective_epsilon") double objective_epsilon,
    @Field("gradient_epsilon") double gradient_epsilon,
    @Field("obj_reg") double obj_reg,
    @Field("link") GLMLink link,
    @Field("startval") double[] startval,
    @Field("calc_like") boolean calc_like,
    @Field("mode") ModelSelectionMode mode,
    @Field("intercept") boolean intercept,
    @Field("prior") double prior,
    @Field("lambda_min_ratio") double lambda_min_ratio,
    @Field("beta_constraints") String beta_constraints,
    @Field("max_active_predictors") int max_active_predictors,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("remove_collinear_columns") boolean remove_collinear_columns,
    @Field("max_predictor_number") int max_predictor_number,
    @Field("min_predictor_number") int min_predictor_number,
    @Field("nparallelism") int nparallelism,
    @Field("p_values_threshold") double p_values_threshold,
    @Field("influence") GLMInfluence influence,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Run grid search for ModelSelection model, using default values for all parameters.
   */
  @FormUrlEncoded
  @POST("/99/Grid/modelselection")
  Call<ModelSelectionV3> trainModelselection();
  /**
   * Resume grid search for ModelSelection model.
   * @param seed Seed for pseudo random number generator (if applicable)
   * @param family Family. For maxr/maxrsweep, only gaussian. For backward, ordinal and multinomial families are not
   *        supported
   * @param tweedie_variance_power Tweedie variance power
   * @param tweedie_link_power Tweedie link power
   * @param theta Theta
   * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
   *        with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *        datasets with many columns.
   * @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
   *        alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
   *        specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
   *        0.5 otherwise.
   * @param lambda Regularization strength
   * @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
   * @param multinode_mode For maxrsweep only. If enabled, will attempt to perform sweeping action using multiple
   *        nodes in the cluster. Defaults to false.
   * @param build_glm_model For maxrsweep mode only. If true, will return full blown GLM models with the desired
   *        predictor subsets. If false, only the predictor subsets, predictor coefficients are
   *        returned. This is for speeding up the model selection process. The users can choose to
   *        build the GLM models themselves by using the predictor subsets themselves. Defaults to
   *        false.
   * @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided)
   * @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
   *        set to True, the value of nlambdas is set to 30 (fewer lambdas are needed for ridge regression)
   *        otherwise it is set to 100.
   * @param score_iteration_interval Perform scoring for every score_iteration_interval iterations
   * @param standardize Standardize numeric columns to have zero mean and unit variance
   * @param cold_start Only applicable to multiple alpha/lambda values. If false, build the next model for next set
   *        of alpha/lambda values starting from the values provided by current model. If true will start
   *        GLM model from scratch.
   * @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
   * @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *        of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
   * @param non_negative Restrict coefficients (not intercept) to be non-negative
   * @param max_iterations Maximum number of iterations
   * @param beta_epsilon Converge if beta changes less (using L-infinity norm) than beta epsilon, ONLY applies to
   *        IRLSM solver
   * @param objective_epsilon Converge if objective value changes less than this. Default (of -1.0) indicates: If
   *        lambda_search is set to True the value of objective_epsilon is set to .0001. If the
   *        lambda_search is set to False and lambda is equal to zero, the value of
   *        objective_epsilon is set to .000001, for any other value of lambda the default value of
   *        objective_epsilon is set to .0001.
   * @param gradient_epsilon Converge if objective changes less (using L-infinity norm) than this, ONLY applies to
   *        L-BFGS solver. Default (of -1.0) indicates: If lambda_search is set to False and lambda
   *        is equal to zero, the default value of gradient_epsilon is equal to .000001, otherwise
   *        the default value is .0001. If lambda_search is set to True, the conditional values above
   *        are 1E-8 and 1E-6 respectively.
   * @param obj_reg Likelihood divider in objective value computation, default (of -1.0) will set it to 1/nobs
   * @param link Link function.
   * @param startval Double array to initialize coefficients for GLM.
   * @param calc_like If true, will return likelihood function value for GLM.
   * @param mode Mode: Used to choose model selection algorithms to use. Options include 'allsubsets' for all
   *        subsets, 'maxr' that uses sequential replacement and GLM to build all models, slow but works with
   *        cross-validation, validation frames for more robust results, 'maxrsweep' that uses sequential
   *        replacement and sweeping action, much faster than 'maxr', 'backward' for backward selection.
   * @param intercept Include constant term in the model
   * @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *        and the mean of response does not reflect reality.
   * @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
   *        lambda that drives all coefficients to zero). Default indicates: if the number of
   *        observations is greater than the number of variables, then lambda_min_ratio is set to
   *        0.0001; if the number of observations is less than the number of variables, then
   *        lambda_min_ratio is set to 0.01.
   * @param beta_constraints Beta constraints
   * @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
   *        to prevent expensive model building with many predictors. Default indicates: If the
   *        IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
   *        is set to 100000000.
   * @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   * @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *        specified, sampling factors will be automatically computed to obtain class balance
   *        during training. Requires balance_classes.
   * @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *        less than 1.0). Requires balance_classes.
   * @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
   *        the Logs
   * @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
   * @param remove_collinear_columns In case of linearly dependent columns, remove some of the dependent columns
   * @param max_predictor_number Maximum number of predictors to be considered when building GLM models. Defaults to
   *        1.
   * @param min_predictor_number For mode = 'backward' only. Minimum number of predictors to be considered when
   *        building GLM models starting with all predictors to be included. Defaults to 1.
   * @param nparallelism number of models to build in parallel. Defaults to 0.0 which is adaptive to the system
   *        capability
   * @param p_values_threshold For mode='backward' only. If specified, will stop the model building process when all
   *        coefficients' p-values drop below this threshold
   * @param influence If set to dfbetas will calculate the difference in beta when a datarow is included and excluded
   *        in the dataset.
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *        between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *        excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *        to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *        observation weights and do not increase the size of the data frame. This is typically the
   *        number of times a row is repeated, but non-integer values are supported as well. During
   *        training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *        If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *        is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *        function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *        option will stratify the folds based on the response variable, for classification
   *        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *        for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *        not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *        binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/Grid/modelselection/resume")
  Call<ModelSelectionV3> resumeModelselection(
    @Field("seed") long seed,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("solver") GLMSolver solver,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("lambda_search") boolean lambda_search,
    @Field("multinode_mode") boolean multinode_mode,
    @Field("build_glm_model") boolean build_glm_model,
    @Field("early_stopping") boolean early_stopping,
    @Field("nlambdas") int nlambdas,
    @Field("score_iteration_interval") int score_iteration_interval,
    @Field("standardize") boolean standardize,
    @Field("cold_start") boolean cold_start,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("max_iterations") int max_iterations,
    @Field("beta_epsilon") double beta_epsilon,
    @Field("objective_epsilon") double objective_epsilon,
    @Field("gradient_epsilon") double gradient_epsilon,
    @Field("obj_reg") double obj_reg,
    @Field("link") GLMLink link,
    @Field("startval") double[] startval,
    @Field("calc_like") boolean calc_like,
    @Field("mode") ModelSelectionMode mode,
    @Field("intercept") boolean intercept,
    @Field("prior") double prior,
    @Field("lambda_min_ratio") double lambda_min_ratio,
    @Field("beta_constraints") String beta_constraints,
    @Field("max_active_predictors") int max_active_predictors,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("remove_collinear_columns") boolean remove_collinear_columns,
    @Field("max_predictor_number") int max_predictor_number,
    @Field("min_predictor_number") int min_predictor_number,
    @Field("nparallelism") int nparallelism,
    @Field("p_values_threshold") double p_values_threshold,
    @Field("influence") GLMInfluence influence,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Resume grid search for ModelSelection model, using default values for all parameters.
   */
  @FormUrlEncoded
  @POST("/99/Grid/modelselection/resume")
  Call<ModelSelectionV3> resumeModelselection();
/**
* Run grid search for IsotonicRegression model.
* @param out_of_bounds Method of handling values of X predictor that are outside of the bounds seen in training.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Full-parameter grid-search request for the IsotonicRegression algorithm (see Javadoc above).
  // Each @Field annotation value is the exact form-field name sent in the URL-encoded POST body.
  @FormUrlEncoded
  @POST("/99/Grid/isotonicregression")
  Call<IsotonicRegressionV3> trainIsotonicregression(
    @Field("out_of_bounds") IsotonicRegressionModelOutOfBoundsHandling out_of_bounds,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Run grid search for IsotonicRegression model without supplying any form fields.
   * NOTE(review): Retrofit 2 rejects @FormUrlEncoded methods that declare no @Field
   * parameter at call time - confirm this zero-argument overload is actually invokable.
   */
  @FormUrlEncoded
  @POST("/99/Grid/isotonicregression")
  Call<IsotonicRegressionV3> trainIsotonicregression();
/**
* Resume grid search for IsotonicRegression model.
* @param out_of_bounds Method of handling values of X predictor that are outside of the bounds seen in training.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Resumes a previously started IsotonicRegression grid search (see Javadoc above).
  // Each @Field annotation value is the exact form-field name sent in the URL-encoded POST body.
  @FormUrlEncoded
  @POST("/99/Grid/isotonicregression/resume")
  Call<IsotonicRegressionV3> resumeIsotonicregression(
    @Field("out_of_bounds") IsotonicRegressionModelOutOfBoundsHandling out_of_bounds,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Resume grid search for IsotonicRegression model without supplying any form fields.
   * NOTE(review): Retrofit 2 rejects @FormUrlEncoded methods that declare no @Field
   * parameter at call time - confirm this zero-argument overload is actually invokable.
   */
  @FormUrlEncoded
  @POST("/99/Grid/isotonicregression/resume")
  Call<IsotonicRegressionV3> resumeIsotonicregression();
/**
* Run grid search for DT model.
* @param seed Seed for random numbers (affects sampling)
* @param max_depth Max depth of tree.
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Full-parameter grid-search request for the DT (decision tree) algorithm (see Javadoc above).
  // Each @Field annotation value is the exact form-field name sent in the URL-encoded POST body.
  @FormUrlEncoded
  @POST("/99/Grid/dt")
  Call<DTV3> trainDt(
    @Field("seed") long seed,
    @Field("max_depth") int max_depth,
    @Field("min_rows") int min_rows,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Run grid search for DT model without supplying any form fields.
   * NOTE(review): Retrofit 2 rejects @FormUrlEncoded methods that declare no @Field
   * parameter at call time - confirm this zero-argument overload is actually invokable.
   */
  @FormUrlEncoded
  @POST("/99/Grid/dt")
  Call<DTV3> trainDt();
/**
* Resume grid search for DT model.
* @param seed Seed for random numbers (affects sampling)
* @param max_depth Max depth of tree.
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Resumes a previously started DT (decision tree) grid search (see Javadoc above).
  // Each @Field annotation value is the exact form-field name sent in the URL-encoded POST body.
  @FormUrlEncoded
  @POST("/99/Grid/dt/resume")
  Call<DTV3> resumeDt(
    @Field("seed") long seed,
    @Field("max_depth") int max_depth,
    @Field("min_rows") int min_rows,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Resume grid search for DT model without supplying any form fields.
   * NOTE(review): Retrofit 2 rejects @FormUrlEncoded methods that declare no @Field
   * parameter at call time - confirm this zero-argument overload is actually invokable.
   */
  @FormUrlEncoded
  @POST("/99/Grid/dt/resume")
  Call<DTV3> resumeDt();
/**
* Run grid search for HGLM model.
* @param score_iteration_interval Perform scoring for every score_iteration_interval iterations.
* @param seed Seed for pseudo random number generator (if applicable).
* @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
* @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                    of the training/validation frame, used in conjunction with missing_values_handling = PlugValues).
* @param family Family. Only gaussian is supported now.
* @param rand_family Set distribution of random effects. Only Gaussian is implemented now.
   * @param max_iterations Maximum number of iterations. Value should be >=1. A value of 0 is only set when only the
* model coefficient names and model coefficient dimensions are needed.
* @param initial_fixed_effects An array that contains initial values of the fixed effects coefficient.
* @param initial_random_effects A H2OFrame id that contains initial values of the random effects coefficient. The
   *                               row names should be the random coefficient names. If you are not sure what the
* random coefficient names are, build HGLM model with max_iterations = 0 and checkout
* the model output field random_coefficient_names. The number of rows of this frame
* should be the number of level 2 units. Again, to figure this out, build HGLM model
* with max_iterations=0 and check out the model output field group_column_names. The
   *                               number of rows should equal the length of the group_column_names.
* @param initial_t_matrix A H2OFrame id that contains initial values of the T matrix. It should be a positive
* symmetric matrix.
* @param tau_u_var_init Initial variance of random coefficient effects. If set, should provide a value > 0.0. If
* not set, will be randomly set in the model building process.
* @param tau_e_var_init Initial variance of random noise. If set, should provide a value > 0.0. If not set, will
* be randomly set in the model building process.
* @param random_columns Random columns indices for HGLM.
* @param method We only implemented EM as a method to obtain the fixed, random coefficients and the various
* variances.
   * @param em_epsilon Converge if beta/ubeta/tmat/tauEVar changes less (using L-infinity norm) than em epsilon. ONLY
* applies to EM method.
* @param random_intercept If true, will allow random component to the GLM coefficients.
* @param group_column Group column is the column that is categorical and used to generate the groups in HGLM
* @param gen_syn_data If true, add gaussian noise with variance specified in parms._tau_e_var_init.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/99/Grid/hglm")
  Call<HGLMV3> trainHglm(
    // --- HGLM-specific hyperparameters ---
    @Field("score_iteration_interval") int score_iteration_interval,
    @Field("seed") long seed,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("family") GLMFamily family,
    @Field("rand_family") GLMFamily rand_family,
    @Field("max_iterations") int max_iterations,
    @Field("initial_fixed_effects") double[] initial_fixed_effects,
    @Field("initial_random_effects") String initial_random_effects,
    @Field("initial_t_matrix") String initial_t_matrix,
    @Field("tau_u_var_init") double tau_u_var_init,
    @Field("tau_e_var_init") double tau_e_var_init,
    @Field("random_columns") String[] random_columns,
    @Field("method") HGLMMethod method,
    @Field("em_epsilon") double em_epsilon,
    @Field("random_intercept") boolean random_intercept,
    @Field("group_column") String group_column,
    @Field("gen_syn_data") boolean gen_syn_data,
    // --- Common model-builder parameters (shared by all grid-search endpoints in this interface) ---
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  // Convenience overload: submits the HGLM grid search with no form fields, so every
  // parameter falls back to its server-side default.
  @FormUrlEncoded
  @POST("/99/Grid/hglm")
  Call<HGLMV3> trainHglm();
/**
* Resume grid search for HGLM model.
* @param score_iteration_interval Perform scoring for every score_iteration_interval iterations.
* @param seed Seed for pseudo random number generator (if applicable).
* @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
* @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
* of the training/validation frame, use with conjunction missing_values_handling = PlugValues).
* @param family Family. Only gaussian is supported now.
* @param rand_family Set distribution of random effects. Only Gaussian is implemented now.
   * @param max_iterations Maximum number of iterations. Value should be >=1. A value of 0 is only set when only the
* model coefficient names and model coefficient dimensions are needed.
* @param initial_fixed_effects An array that contains initial values of the fixed effects coefficient.
* @param initial_random_effects A H2OFrame id that contains initial values of the random effects coefficient. The
   *                               row names should be the random coefficient names. If you are not sure what the
* random coefficient names are, build HGLM model with max_iterations = 0 and checkout
* the model output field random_coefficient_names. The number of rows of this frame
* should be the number of level 2 units. Again, to figure this out, build HGLM model
* with max_iterations=0 and check out the model output field group_column_names. The
   *                               number of rows should equal the length of the group_column_names.
* @param initial_t_matrix A H2OFrame id that contains initial values of the T matrix. It should be a positive
* symmetric matrix.
* @param tau_u_var_init Initial variance of random coefficient effects. If set, should provide a value > 0.0. If
* not set, will be randomly set in the model building process.
* @param tau_e_var_init Initial variance of random noise. If set, should provide a value > 0.0. If not set, will
* be randomly set in the model building process.
* @param random_columns Random columns indices for HGLM.
* @param method We only implemented EM as a method to obtain the fixed, random coefficients and the various
* variances.
   * @param em_epsilon Converge if beta/ubeta/tmat/tauEVar changes less (using L-infinity norm) than em epsilon. ONLY
* applies to EM method.
* @param random_intercept If true, will allow random component to the GLM coefficients.
* @param group_column Group column is the column that is categorical and used to generate the groups in HGLM
* @param gen_syn_data If true, add gaussian noise with variance specified in parms._tau_e_var_init.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/99/Grid/hglm/resume")
  Call<HGLMV3> resumeHglm(
    // --- HGLM-specific hyperparameters (same set as trainHglm) ---
    @Field("score_iteration_interval") int score_iteration_interval,
    @Field("seed") long seed,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("family") GLMFamily family,
    @Field("rand_family") GLMFamily rand_family,
    @Field("max_iterations") int max_iterations,
    @Field("initial_fixed_effects") double[] initial_fixed_effects,
    @Field("initial_random_effects") String initial_random_effects,
    @Field("initial_t_matrix") String initial_t_matrix,
    @Field("tau_u_var_init") double tau_u_var_init,
    @Field("tau_e_var_init") double tau_e_var_init,
    @Field("random_columns") String[] random_columns,
    @Field("method") HGLMMethod method,
    @Field("em_epsilon") double em_epsilon,
    @Field("random_intercept") boolean random_intercept,
    @Field("group_column") String group_column,
    @Field("gen_syn_data") boolean gen_syn_data,
    // --- Common model-builder parameters (shared by all grid-search endpoints in this interface) ---
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  // Convenience overload: resumes the HGLM grid search with no form fields, so every
  // parameter falls back to its server-side default.
  @FormUrlEncoded
  @POST("/99/Grid/hglm/resume")
  Call<HGLMV3> resumeHglm();
/**
* Run grid search for AdaBoost model.
* @param nlearners Number of AdaBoost weak learners.
* @param weak_learner Choose a weak learner type. Defaults to AUTO, which means DRF.
* @param learn_rate Learning rate (from 0.0 to 1.0)
* @param weak_learner_params Customized parameters for the weak_learner algorithm.
* @param seed Seed for pseudo random number generator (if applicable)
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/99/Grid/adaboost")
  Call<AdaBoostV3> trainAdaboost(
    // --- AdaBoost-specific hyperparameters ---
    @Field("nlearners") int nlearners,
    @Field("weak_learner") AdaBoostModelAlgorithm weak_learner,
    @Field("learn_rate") double learn_rate,
    @Field("weak_learner_params") String weak_learner_params,
    @Field("seed") long seed,
    // --- Common model-builder parameters (shared by all grid-search endpoints in this interface) ---
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  // Convenience overload: submits the AdaBoost grid search with no form fields, so every
  // parameter falls back to its server-side default.
  @FormUrlEncoded
  @POST("/99/Grid/adaboost")
  Call<AdaBoostV3> trainAdaboost();
/**
* Resume grid search for AdaBoost model.
* @param nlearners Number of AdaBoost weak learners.
* @param weak_learner Choose a weak learner type. Defaults to AUTO, which means DRF.
* @param learn_rate Learning rate (from 0.0 to 1.0)
* @param weak_learner_params Customized parameters for the weak_learner algorithm.
* @param seed Seed for pseudo random number generator (if applicable)
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/99/Grid/adaboost/resume")
  Call<AdaBoostV3> resumeAdaboost(
    // --- AdaBoost-specific hyperparameters (same set as trainAdaboost) ---
    @Field("nlearners") int nlearners,
    @Field("weak_learner") AdaBoostModelAlgorithm weak_learner,
    @Field("learn_rate") double learn_rate,
    @Field("weak_learner_params") String weak_learner_params,
    @Field("seed") long seed,
    // --- Common model-builder parameters (shared by all grid-search endpoints in this interface) ---
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  // Convenience overload: resumes the AdaBoost grid search with no form fields, so every
  // parameter falls back to its server-side default.
  @FormUrlEncoded
  @POST("/99/Grid/adaboost/resume")
  Call<AdaBoostV3> resumeAdaboost();
/**
* Import previously saved grid model
* @param grid_path Full path to the file containing saved Grid
* @param load_params_references If true will also load saved objects referenced by params. Will fail with an error
* if grid was saved without objects referenced by params.
*/
  @FormUrlEncoded
  @POST("/3/Grid.bin/import")
  Call<GridKeyV3> importGrid(
    @Field("grid_path") String grid_path,
    @Field("load_params_references") boolean load_params_references
  );
  // Convenience overload: omits load_params_references so the server-side default applies.
  @FormUrlEncoded
  @POST("/3/Grid.bin/import")
  Call<GridKeyV3> importGrid(@Field("grid_path") String grid_path);
/**
* Export a Grid and its models.
* @param grid_id ID of the Grid to load from the directory
* @param grid_directory Path to the directory with saved Grid search
* @param save_params_references True if objects referenced by params should also be saved.
* @param export_cross_validation_predictions Flag indicating whether the exported model artifacts should also
* include CV Holdout Frame predictions
*/
  @FormUrlEncoded
  @POST("/3/Grid.bin/{grid_id}/export")
  Call<GridKeyV3> exportGrid(
    @Path("grid_id") String grid_id,           // substituted into the URL path, not a form field
    @Field("grid_directory") String grid_directory,
    @Field("save_params_references") boolean save_params_references,
    @Field("export_cross_validation_predictions") boolean export_cross_validation_predictions
  );
  // Convenience overload: omits the two boolean flags so the server-side defaults apply.
  @FormUrlEncoded
  @POST("/3/Grid.bin/{grid_id}/export")
  Call<GridKeyV3> exportGrid(
    @Path("grid_id") String grid_id,
    @Field("grid_directory") String grid_directory
  );
@SuppressWarnings("unused")
class Helper {
    /**
     * Run grid search for XGBoost model.
     * <p>Adapter that flattens the {@code XGBoostParametersV3} schema object into the
     * positional argument list of the corresponding Retrofit endpoint. Key/column
     * wrapper fields are unwrapped to their plain string names; a null wrapper is
     * forwarded as null.
     */
    public static Call<XGBoostV3> trainXgboost(Grid z, XGBoostParametersV3 p) {
      // NOTE: the argument order below must match the interface declaration exactly;
      // this file is auto-generated, so do not reorder by hand.
      return z.trainXgboost(
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.minChildWeight,
        p.learnRate,
        p.eta,
        p.sampleRate,
        p.subsample,
        p.colSampleRate,
        p.colsampleBylevel,
        p.colSampleRatePerTree,
        p.colsampleBytree,
        p.colsampleBynode,
        p.monotoneConstraints,
        p.maxAbsLeafnodePred,
        p.maxDeltaStep,
        p.scoreTreeInterval,
        p.seed,
        p.minSplitImprovement,
        p.gamma,
        p.nthread,
        p.buildTreeOneNode,
        p.saveMatrixDirectory,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.maxBins,
        p.maxLeaves,
        p.treeMethod,
        p.growPolicy,
        p.booster,
        p.regLambda,
        p.regAlpha,
        p.quietMode,
        p.sampleType,
        p.normalizeType,
        p.rateDrop,
        p.oneDrop,
        p.skipDrop,
        p.dmatrixType,
        p.backend,
        p.gpuId,
        p.interactionConstraints,
        p.scalePosWeight,
        p.evalMetric,
        p.scoreEvalMetricOnly,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Resume grid search for XGBoost model.
     * <p>Adapter that flattens the {@code XGBoostParametersV3} schema object into the
     * positional argument list of the corresponding Retrofit endpoint. Key/column
     * wrapper fields are unwrapped to their plain string names; a null wrapper is
     * forwarded as null.
     */
    public static Call<XGBoostV3> resumeXgboost(Grid z, XGBoostParametersV3 p) {
      // NOTE: the argument order below must match the interface declaration exactly;
      // this file is auto-generated, so do not reorder by hand.
      return z.resumeXgboost(
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.minChildWeight,
        p.learnRate,
        p.eta,
        p.sampleRate,
        p.subsample,
        p.colSampleRate,
        p.colsampleBylevel,
        p.colSampleRatePerTree,
        p.colsampleBytree,
        p.colsampleBynode,
        p.monotoneConstraints,
        p.maxAbsLeafnodePred,
        p.maxDeltaStep,
        p.scoreTreeInterval,
        p.seed,
        p.minSplitImprovement,
        p.gamma,
        p.nthread,
        p.buildTreeOneNode,
        p.saveMatrixDirectory,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.maxBins,
        p.maxLeaves,
        p.treeMethod,
        p.growPolicy,
        p.booster,
        p.regLambda,
        p.regAlpha,
        p.quietMode,
        p.sampleType,
        p.normalizeType,
        p.rateDrop,
        p.oneDrop,
        p.skipDrop,
        p.dmatrixType,
        p.backend,
        p.gpuId,
        p.interactionConstraints,
        p.scalePosWeight,
        p.evalMetric,
        p.scoreEvalMetricOnly,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Run grid search for Infogram model.
     * <p>Adapter that flattens the {@code InfogramParametersV3} schema object into the
     * positional argument list of the corresponding Retrofit endpoint. Key/column
     * wrapper fields are unwrapped to their plain string names; a null wrapper is
     * forwarded as null.
     */
    public static Call<InfogramV3> trainInfogram(Grid z, InfogramParametersV3 p) {
      // NOTE: the argument order below must match the interface declaration exactly;
      // this file is auto-generated, so do not reorder by hand.
      return z.trainInfogram(
        p.seed,
        p.standardize,
        (p.plugValues == null? null : p.plugValues.name),
        p.maxIterations,
        p.prior,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.algorithm,
        p.algorithmParams,
        p.protectedColumns,
        p.totalInformationThreshold,
        p.netInformationThreshold,
        p.relevanceIndexThreshold,
        p.safetyIndexThreshold,
        p.dataFraction,
        p.topNFeatures,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Resume grid search for Infogram model.
     * <p>Adapter that flattens the {@code InfogramParametersV3} schema object into the
     * positional argument list of the corresponding Retrofit endpoint. Key/column
     * wrapper fields are unwrapped to their plain string names; a null wrapper is
     * forwarded as null.
     */
    public static Call<InfogramV3> resumeInfogram(Grid z, InfogramParametersV3 p) {
      // NOTE: the argument order below must match the interface declaration exactly;
      // this file is auto-generated, so do not reorder by hand.
      return z.resumeInfogram(
        p.seed,
        p.standardize,
        (p.plugValues == null? null : p.plugValues.name),
        p.maxIterations,
        p.prior,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.algorithm,
        p.algorithmParams,
        p.protectedColumns,
        p.totalInformationThreshold,
        p.netInformationThreshold,
        p.relevanceIndexThreshold,
        p.safetyIndexThreshold,
        p.dataFraction,
        p.topNFeatures,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
/**
 * Run grid search for TargetEncoder model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p TargetEncoder parameter schema supplying every call argument
 * @return a {@code Call} that starts the grid search when executed
 */
public static Call<TargetEncoderV3> trainTargetencoder(Grid z, TargetEncoderParametersV3 p) {
return z.trainTargetencoder(
// TargetEncoder-specific parameters
p.columnsToEncode,
p.keepOriginalCategoricalColumns,
p.blending,
p.inflectionPoint,
p.smoothing,
p.dataLeakageHandling,
p.noise,
p.seed,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Resume grid search for TargetEncoder model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p TargetEncoder parameter schema supplying every call argument
 * @return a {@code Call} that resumes the grid search when executed
 */
public static Call<TargetEncoderV3> resumeTargetencoder(Grid z, TargetEncoderParametersV3 p) {
return z.resumeTargetencoder(
// TargetEncoder-specific parameters
p.columnsToEncode,
p.keepOriginalCategoricalColumns,
p.blending,
p.inflectionPoint,
p.smoothing,
p.dataLeakageHandling,
p.noise,
p.seed,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Run grid search for DeepLearning model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names (Key arrays via
 * {@code keyArrayToStringArray}) and column specifiers as their column names
 * ({@code null} when unset). The argument order must match the generated endpoint
 * signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p DeepLearning parameter schema supplying every call argument
 * @return a {@code Call} that starts the grid search when executed
 */
public static Call<DeepLearningV3> trainDeeplearning(Grid z, DeepLearningParametersV3 p) {
return z.trainDeeplearning(
// DeepLearning-specific parameters
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.activation,
p.hidden,
p.epochs,
p.trainSamplesPerIteration,
p.targetRatioCommToComp,
p.seed,
p.adaptiveRate,
p.rho,
p.epsilon,
p.rate,
p.rateAnnealing,
p.rateDecay,
p.momentumStart,
p.momentumRamp,
p.momentumStable,
p.nesterovAcceleratedGradient,
p.inputDropoutRatio,
p.hiddenDropoutRatios,
p.l1,
p.l2,
p.maxW2,
p.initialWeightDistribution,
p.initialWeightScale,
(p.initialWeights == null? null : keyArrayToStringArray(p.initialWeights)),
(p.initialBiases == null? null : keyArrayToStringArray(p.initialBiases)),
p.loss,
p.scoreInterval,
p.scoreTrainingSamples,
p.scoreValidationSamples,
p.scoreDutyCycle,
p.classificationStop,
p.regressionStop,
p.quietMode,
p.scoreValidationSampling,
p.overwriteWithBestModel,
p.autoencoder,
p.useAllFactorLevels,
p.standardize,
p.diagnostics,
p.variableImportances,
p.fastMode,
p.forceLoadBalance,
p.replicateTrainingData,
p.singleNodeMode,
p.shuffleTrainingData,
p.missingValuesHandling,
p.sparse,
p.colMajor,
p.averageActivation,
p.sparsityBeta,
p.maxCategoricalFeatures,
p.reproducible,
p.exportWeightsAndBiases,
p.miniBatchSize,
p.elasticAveraging,
p.elasticAveragingMovingRate,
p.elasticAveragingRegularization,
(p.pretrainedAutoencoder == null? null : p.pretrainedAutoencoder.name),
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Resume grid search for DeepLearning model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names (Key arrays via
 * {@code keyArrayToStringArray}) and column specifiers as their column names
 * ({@code null} when unset). The argument order must match the generated endpoint
 * signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p DeepLearning parameter schema supplying every call argument
 * @return a {@code Call} that resumes the grid search when executed
 */
public static Call<DeepLearningV3> resumeDeeplearning(Grid z, DeepLearningParametersV3 p) {
return z.resumeDeeplearning(
// DeepLearning-specific parameters
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.activation,
p.hidden,
p.epochs,
p.trainSamplesPerIteration,
p.targetRatioCommToComp,
p.seed,
p.adaptiveRate,
p.rho,
p.epsilon,
p.rate,
p.rateAnnealing,
p.rateDecay,
p.momentumStart,
p.momentumRamp,
p.momentumStable,
p.nesterovAcceleratedGradient,
p.inputDropoutRatio,
p.hiddenDropoutRatios,
p.l1,
p.l2,
p.maxW2,
p.initialWeightDistribution,
p.initialWeightScale,
(p.initialWeights == null? null : keyArrayToStringArray(p.initialWeights)),
(p.initialBiases == null? null : keyArrayToStringArray(p.initialBiases)),
p.loss,
p.scoreInterval,
p.scoreTrainingSamples,
p.scoreValidationSamples,
p.scoreDutyCycle,
p.classificationStop,
p.regressionStop,
p.quietMode,
p.scoreValidationSampling,
p.overwriteWithBestModel,
p.autoencoder,
p.useAllFactorLevels,
p.standardize,
p.diagnostics,
p.variableImportances,
p.fastMode,
p.forceLoadBalance,
p.replicateTrainingData,
p.singleNodeMode,
p.shuffleTrainingData,
p.missingValuesHandling,
p.sparse,
p.colMajor,
p.averageActivation,
p.sparsityBeta,
p.maxCategoricalFeatures,
p.reproducible,
p.exportWeightsAndBiases,
p.miniBatchSize,
p.elasticAveraging,
p.elasticAveragingMovingRate,
p.elasticAveragingRegularization,
(p.pretrainedAutoencoder == null? null : p.pretrainedAutoencoder.name),
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Run grid search for GLM model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p GLM parameter schema supplying every call argument
 * @return a {@code Call} that starts the grid search when executed
 */
public static Call<GLMV3> trainGlm(Grid z, GLMParametersV3 p) {
return z.trainGlm(
// GLM-specific parameters
p.seed,
p.family,
p.tweedieVariancePower,
p.dispersionLearningRate,
p.tweedieLinkPower,
p.theta,
p.solver,
p.alpha,
p.lambda,
p.lambdaSearch,
p.earlyStopping,
p.nlambdas,
p.scoreIterationInterval,
p.standardize,
p.coldStart,
p.missingValuesHandling,
p.influence,
(p.plugValues == null? null : p.plugValues.name),
p.nonNegative,
p.maxIterations,
p.betaEpsilon,
p.objectiveEpsilon,
p.gradientEpsilon,
p.objReg,
p.link,
p.dispersionParameterMethod,
p.startval,
p.calcLike,
p.generateVariableInflationFactors,
p.intercept,
p.buildNullModel,
p.fixDispersionParameter,
p.initDispersionParameter,
p.prior,
p.lambdaMinRatio,
(p.betaConstraints == null? null : p.betaConstraints.name),
(p.linearConstraints == null? null : p.linearConstraints.name),
p.maxActivePredictors,
p.interactions,
p.interactionPairs,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.computePValues,
p.fixTweedieVariancePower,
p.removeCollinearColumns,
p.dispersionEpsilon,
p.tweedieEpsilon,
p.maxIterationsDispersion,
p.generateScoringHistory,
p.initOptimalGlm,
p.separateLinearBeta,
p.constraintEta0,
p.constraintTau,
p.constraintAlpha,
p.constraintBeta,
p.constraintC0,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Resume grid search for GLM model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p GLM parameter schema supplying every call argument
 * @return a {@code Call} that resumes the grid search when executed
 */
public static Call<GLMV3> resumeGlm(Grid z, GLMParametersV3 p) {
return z.resumeGlm(
// GLM-specific parameters
p.seed,
p.family,
p.tweedieVariancePower,
p.dispersionLearningRate,
p.tweedieLinkPower,
p.theta,
p.solver,
p.alpha,
p.lambda,
p.lambdaSearch,
p.earlyStopping,
p.nlambdas,
p.scoreIterationInterval,
p.standardize,
p.coldStart,
p.missingValuesHandling,
p.influence,
(p.plugValues == null? null : p.plugValues.name),
p.nonNegative,
p.maxIterations,
p.betaEpsilon,
p.objectiveEpsilon,
p.gradientEpsilon,
p.objReg,
p.link,
p.dispersionParameterMethod,
p.startval,
p.calcLike,
p.generateVariableInflationFactors,
p.intercept,
p.buildNullModel,
p.fixDispersionParameter,
p.initDispersionParameter,
p.prior,
p.lambdaMinRatio,
(p.betaConstraints == null? null : p.betaConstraints.name),
(p.linearConstraints == null? null : p.linearConstraints.name),
p.maxActivePredictors,
p.interactions,
p.interactionPairs,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.computePValues,
p.fixTweedieVariancePower,
p.removeCollinearColumns,
p.dispersionEpsilon,
p.tweedieEpsilon,
p.maxIterationsDispersion,
p.generateScoringHistory,
p.initOptimalGlm,
p.separateLinearBeta,
p.constraintEta0,
p.constraintTau,
p.constraintAlpha,
p.constraintBeta,
p.constraintC0,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Run grid search for GLRM model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p GLRM parameter schema supplying every call argument
 * @return a {@code Call} that starts the grid search when executed
 */
public static Call<GLRMV3> trainGlrm(Grid z, GLRMParametersV3 p) {
return z.trainGlrm(
// GLRM-specific parameters
p.transform,
p.k,
p.loss,
p.multiLoss,
p.lossByCol,
p.lossByColIdx,
p.period,
p.regularizationX,
p.regularizationY,
p.gammaX,
p.gammaY,
p.maxIterations,
p.maxUpdates,
p.initStepSize,
p.minStepSize,
p.seed,
p.init,
p.svdMethod,
(p.userY == null? null : p.userY.name),
(p.userX == null? null : p.userX.name),
p.loadingName,
p.representationName,
p.expandUserY,
p.imputeOriginal,
p.recoverSvd,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Resume grid search for GLRM model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p GLRM parameter schema supplying every call argument
 * @return a {@code Call} that resumes the grid search when executed
 */
public static Call<GLRMV3> resumeGlrm(Grid z, GLRMParametersV3 p) {
return z.resumeGlrm(
// GLRM-specific parameters
p.transform,
p.k,
p.loss,
p.multiLoss,
p.lossByCol,
p.lossByColIdx,
p.period,
p.regularizationX,
p.regularizationY,
p.gammaX,
p.gammaY,
p.maxIterations,
p.maxUpdates,
p.initStepSize,
p.minStepSize,
p.seed,
p.init,
p.svdMethod,
(p.userY == null? null : p.userY.name),
(p.userX == null? null : p.userX.name),
p.loadingName,
p.representationName,
p.expandUserY,
p.imputeOriginal,
p.recoverSvd,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Run grid search for KMeans model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p KMeans parameter schema supplying every call argument
 * @return a {@code Call} that starts the grid search when executed
 */
public static Call<KMeansV3> trainKmeans(Grid z, KMeansParametersV3 p) {
return z.trainKmeans(
// KMeans-specific parameters
(p.userPoints == null? null : p.userPoints.name),
p.maxIterations,
p.standardize,
p.seed,
p.init,
p.estimateK,
p.clusterSizeConstraints,
p.k,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Resume grid search for KMeans model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p KMeans parameter schema supplying every call argument
 * @return a {@code Call} that resumes the grid search when executed
 */
public static Call<KMeansV3> resumeKmeans(Grid z, KMeansParametersV3 p) {
return z.resumeKmeans(
// KMeans-specific parameters
(p.userPoints == null? null : p.userPoints.name),
p.maxIterations,
p.standardize,
p.seed,
p.init,
p.estimateK,
p.clusterSizeConstraints,
p.k,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Run grid search for NaiveBayes model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p NaiveBayes parameter schema supplying every call argument
 * @return a {@code Call} that starts the grid search when executed
 */
public static Call<NaiveBayesV3> trainNaivebayes(Grid z, NaiveBayesParametersV3 p) {
return z.trainNaivebayes(
// NaiveBayes-specific parameters
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.laplace,
p.minSdev,
p.epsSdev,
p.minProb,
p.epsProb,
p.computeMetrics,
p.seed,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Resume grid search for NaiveBayes model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p NaiveBayes parameter schema supplying every call argument
 * @return a {@code Call} that resumes the grid search when executed
 */
public static Call<NaiveBayesV3> resumeNaivebayes(Grid z, NaiveBayesParametersV3 p) {
return z.resumeNaivebayes(
// NaiveBayes-specific parameters
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.laplace,
p.minSdev,
p.epsSdev,
p.minProb,
p.epsProb,
p.computeMetrics,
p.seed,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Run grid search for PCA model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p PCA parameter schema supplying every call argument
 * @return a {@code Call} that starts the grid search when executed
 */
public static Call<PCAV3> trainPca(Grid z, PCAParametersV3 p) {
return z.trainPca(
// PCA-specific parameters
p.transform,
p.pcaMethod,
p.pcaImpl,
p.k,
p.maxIterations,
p.seed,
p.useAllFactorLevels,
p.computeMetrics,
p.imputeMissing,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Resume grid search for PCA model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p PCA parameter schema supplying every call argument
 * @return a {@code Call} that resumes the grid search when executed
 */
public static Call<PCAV3> resumePca(Grid z, PCAParametersV3 p) {
return z.resumePca(
// PCA-specific parameters
p.transform,
p.pcaMethod,
p.pcaImpl,
p.k,
p.maxIterations,
p.seed,
p.useAllFactorLevels,
p.computeMetrics,
p.imputeMissing,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Run grid search for SVD model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly. Note the V99 (experimental) schema version.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p SVD parameter schema supplying every call argument
 * @return a {@code Call} that starts the grid search when executed
 */
public static Call<SVDV99> trainSvd(Grid z, SVDParametersV99 p) {
return z.trainSvd(
// SVD-specific parameters
p.transform,
p.svdMethod,
p.nv,
p.maxIterations,
p.seed,
p.keepU,
p.uName,
p.useAllFactorLevels,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Resume grid search for SVD model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly. Note the V99 (experimental) schema version.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p SVD parameter schema supplying every call argument
 * @return a {@code Call} that resumes the grid search when executed
 */
public static Call<SVDV99> resumeSvd(Grid z, SVDParametersV99 p) {
return z.resumeSvd(
// SVD-specific parameters
p.transform,
p.svdMethod,
p.nv,
p.maxIterations,
p.seed,
p.keepU,
p.uName,
p.useAllFactorLevels,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Run grid search for DRF model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p DRF parameter schema supplying every call argument
 * @return a {@code Call} that starts the grid search when executed
 */
public static Call<DRFV3> trainDrf(Grid z, DRFParametersV3 p) {
return z.trainDrf(
// DRF-specific and shared tree-model parameters
p.mtries,
p.binomialDoubleTrees,
p.sampleRate,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.ntrees,
p.maxDepth,
p.minRows,
p.nbins,
p.nbinsTopLevel,
p.nbinsCats,
p.r2Stopping,
p.seed,
p.buildTreeOneNode,
p.sampleRatePerClass,
p.colSampleRatePerTree,
p.colSampleRateChangePerLevel,
p.scoreTreeInterval,
p.minSplitImprovement,
p.histogramType,
p.calibrateModel,
(p.calibrationFrame == null? null : p.calibrationFrame.name),
p.calibrationMethod,
p.checkConstantResponse,
p.inTrainingCheckpointsDir,
p.inTrainingCheckpointsTreeInterval,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Resume grid search for DRF model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p DRF parameter schema supplying every call argument
 * @return a {@code Call} that resumes the grid search when executed
 */
public static Call<DRFV3> resumeDrf(Grid z, DRFParametersV3 p) {
return z.resumeDrf(
// DRF-specific and shared tree-model parameters
p.mtries,
p.binomialDoubleTrees,
p.sampleRate,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.ntrees,
p.maxDepth,
p.minRows,
p.nbins,
p.nbinsTopLevel,
p.nbinsCats,
p.r2Stopping,
p.seed,
p.buildTreeOneNode,
p.sampleRatePerClass,
p.colSampleRatePerTree,
p.colSampleRateChangePerLevel,
p.scoreTreeInterval,
p.minSplitImprovement,
p.histogramType,
p.calibrateModel,
(p.calibrationFrame == null? null : p.calibrationFrame.name),
p.calibrationMethod,
p.checkConstantResponse,
p.inTrainingCheckpointsDir,
p.inTrainingCheckpointsTreeInterval,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Run grid search for GBM model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p GBM parameter schema supplying every call argument
 * @return a {@code Call} that starts the grid search when executed
 */
public static Call<GBMV3> trainGbm(Grid z, GBMParametersV3 p) {
return z.trainGbm(
// GBM-specific and shared tree-model parameters
p.learnRate,
p.learnRateAnnealing,
p.sampleRate,
p.colSampleRate,
p.monotoneConstraints,
p.maxAbsLeafnodePred,
p.predNoiseBandwidth,
p.interactionConstraints,
p.autoRebalance,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.ntrees,
p.maxDepth,
p.minRows,
p.nbins,
p.nbinsTopLevel,
p.nbinsCats,
p.r2Stopping,
p.seed,
p.buildTreeOneNode,
p.sampleRatePerClass,
p.colSampleRatePerTree,
p.colSampleRateChangePerLevel,
p.scoreTreeInterval,
p.minSplitImprovement,
p.histogramType,
p.calibrateModel,
(p.calibrationFrame == null? null : p.calibrationFrame.name),
p.calibrationMethod,
p.checkConstantResponse,
p.inTrainingCheckpointsDir,
p.inTrainingCheckpointsTreeInterval,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Resume grid search for GBM model.
 *
 * <p>Flattens the fields of {@code p} into the positional argument list of the generated
 * endpoint: Key-typed fields are passed as their String names and column specifiers as
 * their column names ({@code null} when unset). The argument order must match the
 * generated endpoint signature exactly.</p>
 *
 * @param z proxy object for the Grid-search REST endpoint
 * @param p GBM parameter schema supplying every call argument
 * @return a {@code Call} that resumes the grid search when executed
 */
public static Call<GBMV3> resumeGbm(Grid z, GBMParametersV3 p) {
return z.resumeGbm(
// GBM-specific and shared tree-model parameters
p.learnRate,
p.learnRateAnnealing,
p.sampleRate,
p.colSampleRate,
p.monotoneConstraints,
p.maxAbsLeafnodePred,
p.predNoiseBandwidth,
p.interactionConstraints,
p.autoRebalance,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.ntrees,
p.maxDepth,
p.minRows,
p.nbins,
p.nbinsTopLevel,
p.nbinsCats,
p.r2Stopping,
p.seed,
p.buildTreeOneNode,
p.sampleRatePerClass,
p.colSampleRatePerTree,
p.colSampleRateChangePerLevel,
p.scoreTreeInterval,
p.minSplitImprovement,
p.histogramType,
p.calibrateModel,
(p.calibrationFrame == null? null : p.calibrationFrame.name),
p.calibrationMethod,
p.checkConstantResponse,
p.inTrainingCheckpointsDir,
p.inTrainingCheckpointsTreeInterval,
// common model-build parameters (shared across algorithms)
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
  /**
   * Run grid search for IsolationForest model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p IsolationForest hyper-parameters, forwarded positionally (key/column references
   *          are reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<IsolationForestV3> trainIsolationforest(Grid z, IsolationForestParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.trainIsolationforest(
      p.sampleSize,
      p.sampleRate,
      p.mtries,
      p.contamination,
      (p.validationResponseColumn == null? null : p.validationResponseColumn.columnName),
      p.balanceClasses,
      p.classSamplingFactors,
      p.maxAfterBalanceSize,
      p.maxConfusionMatrixSize,
      p.ntrees,
      p.maxDepth,
      p.minRows,
      p.nbins,
      p.nbinsTopLevel,
      p.nbinsCats,
      p.r2Stopping,
      p.seed,
      p.buildTreeOneNode,
      p.sampleRatePerClass,
      p.colSampleRatePerTree,
      p.colSampleRateChangePerLevel,
      p.scoreTreeInterval,
      p.minSplitImprovement,
      p.histogramType,
      p.calibrateModel,
      (p.calibrationFrame == null? null : p.calibrationFrame.name),
      p.calibrationMethod,
      p.checkConstantResponse,
      p.inTrainingCheckpointsDir,
      p.inTrainingCheckpointsTreeInterval,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Resume grid search for IsolationForest model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p IsolationForest hyper-parameters, forwarded positionally (key/column references
   *          are reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<IsolationForestV3> resumeIsolationforest(Grid z, IsolationForestParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.resumeIsolationforest(
      p.sampleSize,
      p.sampleRate,
      p.mtries,
      p.contamination,
      (p.validationResponseColumn == null? null : p.validationResponseColumn.columnName),
      p.balanceClasses,
      p.classSamplingFactors,
      p.maxAfterBalanceSize,
      p.maxConfusionMatrixSize,
      p.ntrees,
      p.maxDepth,
      p.minRows,
      p.nbins,
      p.nbinsTopLevel,
      p.nbinsCats,
      p.r2Stopping,
      p.seed,
      p.buildTreeOneNode,
      p.sampleRatePerClass,
      p.colSampleRatePerTree,
      p.colSampleRateChangePerLevel,
      p.scoreTreeInterval,
      p.minSplitImprovement,
      p.histogramType,
      p.calibrateModel,
      (p.calibrationFrame == null? null : p.calibrationFrame.name),
      p.calibrationMethod,
      p.checkConstantResponse,
      p.inTrainingCheckpointsDir,
      p.inTrainingCheckpointsTreeInterval,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Run grid search for ExtendedIsolationForest model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p ExtendedIsolationForest hyper-parameters, forwarded positionally (key/column
   *          references are reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<ExtendedIsolationForestV3> trainExtendedisolationforest(Grid z, ExtendedIsolationForestParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.trainExtendedisolationforest(
      p.ntrees,
      p.sampleSize,
      p.extensionLevel,
      p.seed,
      p.scoreTreeInterval,
      p.disableTrainingMetrics,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Resume grid search for ExtendedIsolationForest model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p ExtendedIsolationForest hyper-parameters, forwarded positionally (key/column
   *          references are reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<ExtendedIsolationForestV3> resumeExtendedisolationforest(Grid z, ExtendedIsolationForestParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.resumeExtendedisolationforest(
      p.ntrees,
      p.sampleSize,
      p.extensionLevel,
      p.seed,
      p.scoreTreeInterval,
      p.disableTrainingMetrics,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Run grid search for Aggregator model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p Aggregator hyper-parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<AggregatorV99> trainAggregator(Grid z, AggregatorParametersV99 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.trainAggregator(
      p.transform,
      p.pcaMethod,
      p.k,
      p.maxIterations,
      p.targetNumExemplars,
      p.relTolNumExemplars,
      p.seed,
      p.useAllFactorLevels,
      p.saveMappingFrame,
      p.numIterationWithoutNewExemplar,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Resume grid search for Aggregator model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p Aggregator hyper-parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<AggregatorV99> resumeAggregator(Grid z, AggregatorParametersV99 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.resumeAggregator(
      p.transform,
      p.pcaMethod,
      p.k,
      p.maxIterations,
      p.targetNumExemplars,
      p.relTolNumExemplars,
      p.seed,
      p.useAllFactorLevels,
      p.saveMappingFrame,
      p.numIterationWithoutNewExemplar,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Run grid search for Word2Vec model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p Word2Vec hyper-parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<Word2VecV3> trainWord2vec(Grid z, Word2VecParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.trainWord2vec(
      p.vecSize,
      p.windowSize,
      p.sentSampleRate,
      p.normModel,
      p.epochs,
      p.minWordFreq,
      p.initLearningRate,
      p.wordModel,
      (p.preTrained == null? null : p.preTrained.name),
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Resume grid search for Word2Vec model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p Word2Vec hyper-parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<Word2VecV3> resumeWord2vec(Grid z, Word2VecParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.resumeWord2vec(
      p.vecSize,
      p.windowSize,
      p.sentSampleRate,
      p.normModel,
      p.epochs,
      p.minWordFreq,
      p.initLearningRate,
      p.wordModel,
      (p.preTrained == null? null : p.preTrained.name),
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Run grid search for StackedEnsemble model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p StackedEnsemble hyper-parameters, forwarded positionally; base-model keys are
   *          converted to a string array, other key/column references are reduced to their
   *          plain string names, or {@code null} when unset
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<StackedEnsembleV99> trainStackedensemble(Grid z, StackedEnsembleParametersV99 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.trainStackedensemble(
      (p.baseModels == null? null : keyArrayToStringArray(p.baseModels)),
      p.metalearnerAlgorithm,
      p.metalearnerNfolds,
      p.metalearnerFoldAssignment,
      (p.metalearnerFoldColumn == null? null : p.metalearnerFoldColumn.columnName),
      p.metalearnerTransform,
      p.keepLeveloneFrame,
      p.metalearnerParams,
      (p.blendingFrame == null? null : p.blendingFrame.name),
      p.seed,
      p.scoreTrainingSamples,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Resume grid search for StackedEnsemble model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p StackedEnsemble hyper-parameters, forwarded positionally; base-model keys are
   *          converted to a string array, other key/column references are reduced to their
   *          plain string names, or {@code null} when unset
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<StackedEnsembleV99> resumeStackedensemble(Grid z, StackedEnsembleParametersV99 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.resumeStackedensemble(
      (p.baseModels == null? null : keyArrayToStringArray(p.baseModels)),
      p.metalearnerAlgorithm,
      p.metalearnerNfolds,
      p.metalearnerFoldAssignment,
      (p.metalearnerFoldColumn == null? null : p.metalearnerFoldColumn.columnName),
      p.metalearnerTransform,
      p.keepLeveloneFrame,
      p.metalearnerParams,
      (p.blendingFrame == null? null : p.blendingFrame.name),
      p.seed,
      p.scoreTrainingSamples,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Run grid search for CoxPH model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p CoxPH hyper-parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<CoxPHV3> trainCoxph(Grid z, CoxPHParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.trainCoxph(
      (p.startColumn == null? null : p.startColumn.columnName),
      (p.stopColumn == null? null : p.stopColumn.columnName),
      p.stratifyBy,
      p.ties,
      p.init,
      p.lreMin,
      p.maxIterations,
      p.interactionsOnly,
      p.interactions,
      p.interactionPairs,
      p.useAllFactorLevels,
      p.singleNodeMode,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Resume grid search for CoxPH model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p CoxPH hyper-parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<CoxPHV3> resumeCoxph(Grid z, CoxPHParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.resumeCoxph(
      (p.startColumn == null? null : p.startColumn.columnName),
      (p.stopColumn == null? null : p.stopColumn.columnName),
      p.stratifyBy,
      p.ties,
      p.init,
      p.lreMin,
      p.maxIterations,
      p.interactionsOnly,
      p.interactions,
      p.interactionPairs,
      p.useAllFactorLevels,
      p.singleNodeMode,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Run grid search for Generic model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p Generic-model parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<GenericV3> trainGeneric(Grid z, GenericParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.trainGeneric(
      p.path,
      (p.modelKey == null? null : p.modelKey.name),
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Resume grid search for Generic model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p Generic-model parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<GenericV3> resumeGeneric(Grid z, GenericParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.resumeGeneric(
      p.path,
      (p.modelKey == null? null : p.modelKey.name),
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Run grid search for GAM model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p GAM hyper-parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<GAMV3> trainGam(Grid z, GAMParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.trainGam(
      p.seed,
      p.family,
      p.tweedieVariancePower,
      p.tweedieLinkPower,
      p.theta,
      p.solver,
      p.alpha,
      p.lambda,
      p.startval,
      p.lambdaSearch,
      p.earlyStopping,
      p.nlambdas,
      p.standardize,
      p.missingValuesHandling,
      (p.plugValues == null? null : p.plugValues.name),
      p.nonNegative,
      p.maxIterations,
      p.betaEpsilon,
      p.objectiveEpsilon,
      p.gradientEpsilon,
      p.objReg,
      p.link,
      p.intercept,
      p.prior,
      p.coldStart,
      p.lambdaMinRatio,
      (p.betaConstraints == null? null : p.betaConstraints.name),
      p.maxActivePredictors,
      p.interactions,
      p.interactionPairs,
      p.balanceClasses,
      p.classSamplingFactors,
      p.maxAfterBalanceSize,
      p.maxConfusionMatrixSize,
      p.computePValues,
      p.removeCollinearColumns,
      p.storeKnotLocations,
      p.numKnots,
      p.splineOrders,
      p.splinesNonNegative,
      p.gamColumns,
      p.scale,
      p.bs,
      p.keepGamCols,
      p.standardizeTpGamCols,
      p.scaleTpPenaltyMat,
      p.knotIds,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Resume grid search for GAM model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p GAM hyper-parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<GAMV3> resumeGam(Grid z, GAMParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.resumeGam(
      p.seed,
      p.family,
      p.tweedieVariancePower,
      p.tweedieLinkPower,
      p.theta,
      p.solver,
      p.alpha,
      p.lambda,
      p.startval,
      p.lambdaSearch,
      p.earlyStopping,
      p.nlambdas,
      p.standardize,
      p.missingValuesHandling,
      (p.plugValues == null? null : p.plugValues.name),
      p.nonNegative,
      p.maxIterations,
      p.betaEpsilon,
      p.objectiveEpsilon,
      p.gradientEpsilon,
      p.objReg,
      p.link,
      p.intercept,
      p.prior,
      p.coldStart,
      p.lambdaMinRatio,
      (p.betaConstraints == null? null : p.betaConstraints.name),
      p.maxActivePredictors,
      p.interactions,
      p.interactionPairs,
      p.balanceClasses,
      p.classSamplingFactors,
      p.maxAfterBalanceSize,
      p.maxConfusionMatrixSize,
      p.computePValues,
      p.removeCollinearColumns,
      p.storeKnotLocations,
      p.numKnots,
      p.splineOrders,
      p.splinesNonNegative,
      p.gamColumns,
      p.scale,
      p.bs,
      p.keepGamCols,
      p.standardizeTpGamCols,
      p.scaleTpPenaltyMat,
      p.knotIds,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Run grid search for ANOVAGLM model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p ANOVAGLM hyper-parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<ANOVAGLMV3> trainAnovaglm(Grid z, ANOVAGLMParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.trainAnovaglm(
      p.seed,
      p.standardize,
      p.family,
      p.tweedieVariancePower,
      p.tweedieLinkPower,
      p.theta,
      p.alpha,
      p.lambda,
      p.lambdaSearch,
      p.solver,
      p.missingValuesHandling,
      (p.plugValues == null? null : p.plugValues.name),
      p.nonNegative,
      p.computePValues,
      p.maxIterations,
      p.link,
      p.prior,
      p.balanceClasses,
      p.classSamplingFactors,
      p.maxAfterBalanceSize,
      p.highestInteractionTerm,
      p.type,
      p.earlyStopping,
      p.saveTransformedFramekeys,
      p.nparallelism,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Resume grid search for ANOVAGLM model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p ANOVAGLM hyper-parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<ANOVAGLMV3> resumeAnovaglm(Grid z, ANOVAGLMParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.resumeAnovaglm(
      p.seed,
      p.standardize,
      p.family,
      p.tweedieVariancePower,
      p.tweedieLinkPower,
      p.theta,
      p.alpha,
      p.lambda,
      p.lambdaSearch,
      p.solver,
      p.missingValuesHandling,
      (p.plugValues == null? null : p.plugValues.name),
      p.nonNegative,
      p.computePValues,
      p.maxIterations,
      p.link,
      p.prior,
      p.balanceClasses,
      p.classSamplingFactors,
      p.maxAfterBalanceSize,
      p.highestInteractionTerm,
      p.type,
      p.earlyStopping,
      p.saveTransformedFramekeys,
      p.nparallelism,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Run grid search for PSVM model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p PSVM hyper-parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<PSVMV3> trainPsvm(Grid z, PSVMParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.trainPsvm(
      p.hyperParam,
      p.kernelType,
      p.gamma,
      p.rankRatio,
      p.positiveWeight,
      p.negativeWeight,
      p.disableTrainingMetrics,
      p.svThreshold,
      p.maxIterations,
      p.factThreshold,
      p.feasibleThreshold,
      p.surrogateGapThreshold,
      p.muFactor,
      p.seed,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Resume grid search for PSVM model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p PSVM hyper-parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<PSVMV3> resumePsvm(Grid z, PSVMParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.resumePsvm(
      p.hyperParam,
      p.kernelType,
      p.gamma,
      p.rankRatio,
      p.positiveWeight,
      p.negativeWeight,
      p.disableTrainingMetrics,
      p.svThreshold,
      p.maxIterations,
      p.factThreshold,
      p.feasibleThreshold,
      p.surrogateGapThreshold,
      p.muFactor,
      p.seed,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Run grid search for RuleFit model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p RuleFit hyper-parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<RuleFitV3> trainRulefit(Grid z, RuleFitParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.trainRulefit(
      p.seed,
      p.algorithm,
      p.minRuleLength,
      p.maxRuleLength,
      p.maxNumRules,
      p.modelType,
      p.ruleGenerationNtrees,
      p.removeDuplicates,
      p.lambda,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
  /**
   * Resume grid search for RuleFit model.
   *
   * @param z service proxy exposing the generated grid-search endpoints
   * @param p RuleFit hyper-parameters, forwarded positionally (key/column references are
   *          reduced to their plain string names, or {@code null} when unset)
   * @return an unexecuted {@code Call}; the caller must execute or enqueue it
   */
  public static Call<RuleFitV3> resumeRulefit(Grid z, RuleFitParametersV3 p) {
    // Argument order is generated to match the endpoint signature — do not reorder.
    return z.resumeRulefit(
      p.seed,
      p.algorithm,
      p.minRuleLength,
      p.maxRuleLength,
      p.maxNumRules,
      p.modelType,
      p.ruleGenerationNtrees,
      p.removeDuplicates,
      p.lambda,
      (p.modelId == null? null : p.modelId.name),
      (p.trainingFrame == null? null : p.trainingFrame.name),
      (p.validationFrame == null? null : p.validationFrame.name),
      p.nfolds,
      p.keepCrossValidationModels,
      p.keepCrossValidationPredictions,
      p.keepCrossValidationFoldAssignment,
      p.parallelizeCrossValidation,
      p.distribution,
      p.tweediePower,
      p.quantileAlpha,
      p.huberAlpha,
      (p.responseColumn == null? null : p.responseColumn.columnName),
      (p.weightsColumn == null? null : p.weightsColumn.columnName),
      (p.offsetColumn == null? null : p.offsetColumn.columnName),
      (p.foldColumn == null? null : p.foldColumn.columnName),
      p.foldAssignment,
      p.categoricalEncoding,
      p.maxCategoricalLevels,
      p.ignoredColumns,
      p.ignoreConstCols,
      p.scoreEachIteration,
      (p.checkpoint == null? null : p.checkpoint.name),
      p.stoppingRounds,
      p.maxRuntimeSecs,
      p.stoppingMetric,
      p.stoppingTolerance,
      p.gainsliftBins,
      p.customMetricFunc,
      p.customDistributionFunc,
      p.exportCheckpointsDir,
      p.aucType
    );
  }
/**
 * Run grid search for UpliftDRF model.
 *
 * @param z endpoint proxy on which the generated {@code trainUpliftdrf} call is issued
 * @param p parameter schema; its fields are flattened into the positional arguments below
 * @return a {@link Call} yielding the UpliftDRF grid-search response schema
 */
public static Call<UpliftDRFV3> trainUpliftdrf(Grid z, UpliftDRFParametersV3 p) {
    // Argument order must stay in sync with the generated proxy signature.
    // Key-typed fields are unwrapped to their string names, null-safely.
    return z.trainUpliftdrf(
        p.mtries,
        p.sampleRate,
        p.treatmentColumn,
        p.upliftMetric,
        p.auucType,
        p.auucNbins,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.nbins,
        p.nbinsTopLevel,
        p.nbinsCats,
        p.r2Stopping,
        p.seed,
        p.buildTreeOneNode,
        p.sampleRatePerClass,
        p.colSampleRatePerTree,
        p.colSampleRateChangePerLevel,
        p.scoreTreeInterval,
        p.minSplitImprovement,
        p.histogramType,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.checkConstantResponse,
        p.inTrainingCheckpointsDir,
        p.inTrainingCheckpointsTreeInterval,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
    );
}
/**
 * Resume grid search for UpliftDRF model.
 *
 * @param z endpoint proxy on which the generated {@code resumeUpliftdrf} call is issued
 * @param p parameter schema; its fields are flattened into the positional arguments below
 * @return a {@link Call} yielding the UpliftDRF grid-search response schema
 */
public static Call<UpliftDRFV3> resumeUpliftdrf(Grid z, UpliftDRFParametersV3 p) {
    // Argument order must stay in sync with the generated proxy signature.
    // Key-typed fields are unwrapped to their string names, null-safely.
    return z.resumeUpliftdrf(
        p.mtries,
        p.sampleRate,
        p.treatmentColumn,
        p.upliftMetric,
        p.auucType,
        p.auucNbins,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.nbins,
        p.nbinsTopLevel,
        p.nbinsCats,
        p.r2Stopping,
        p.seed,
        p.buildTreeOneNode,
        p.sampleRatePerClass,
        p.colSampleRatePerTree,
        p.colSampleRateChangePerLevel,
        p.scoreTreeInterval,
        p.minSplitImprovement,
        p.histogramType,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.checkConstantResponse,
        p.inTrainingCheckpointsDir,
        p.inTrainingCheckpointsTreeInterval,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
    );
}
/**
 * Run grid search for ModelSelection model.
 *
 * @param z endpoint proxy on which the generated {@code trainModelselection} call is issued
 * @param p parameter schema; its fields are flattened into the positional arguments below
 * @return a {@link Call} yielding the ModelSelection grid-search response schema
 */
public static Call<ModelSelectionV3> trainModelselection(Grid z, ModelSelectionParametersV3 p) {
    // Argument order must stay in sync with the generated proxy signature.
    // Key-typed fields are unwrapped to their string names, null-safely.
    return z.trainModelselection(
        p.seed,
        p.family,
        p.tweedieVariancePower,
        p.tweedieLinkPower,
        p.theta,
        p.solver,
        p.alpha,
        p.lambda,
        p.lambdaSearch,
        p.multinodeMode,
        p.buildGlmModel,
        p.earlyStopping,
        p.nlambdas,
        p.scoreIterationInterval,
        p.standardize,
        p.coldStart,
        p.missingValuesHandling,
        (p.plugValues == null? null : p.plugValues.name),
        p.nonNegative,
        p.maxIterations,
        p.betaEpsilon,
        p.objectiveEpsilon,
        p.gradientEpsilon,
        p.objReg,
        p.link,
        p.startval,
        p.calcLike,
        p.mode,
        p.intercept,
        p.prior,
        p.lambdaMinRatio,
        (p.betaConstraints == null? null : p.betaConstraints.name),
        p.maxActivePredictors,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.computePValues,
        p.removeCollinearColumns,
        p.maxPredictorNumber,
        p.minPredictorNumber,
        p.nparallelism,
        p.pValuesThreshold,
        p.influence,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
    );
}
/**
 * Resume grid search for ModelSelection model.
 *
 * @param z endpoint proxy on which the generated {@code resumeModelselection} call is issued
 * @param p parameter schema; its fields are flattened into the positional arguments below
 * @return a {@link Call} yielding the ModelSelection grid-search response schema
 */
public static Call<ModelSelectionV3> resumeModelselection(Grid z, ModelSelectionParametersV3 p) {
    // Argument order must stay in sync with the generated proxy signature.
    // Key-typed fields are unwrapped to their string names, null-safely.
    return z.resumeModelselection(
        p.seed,
        p.family,
        p.tweedieVariancePower,
        p.tweedieLinkPower,
        p.theta,
        p.solver,
        p.alpha,
        p.lambda,
        p.lambdaSearch,
        p.multinodeMode,
        p.buildGlmModel,
        p.earlyStopping,
        p.nlambdas,
        p.scoreIterationInterval,
        p.standardize,
        p.coldStart,
        p.missingValuesHandling,
        (p.plugValues == null? null : p.plugValues.name),
        p.nonNegative,
        p.maxIterations,
        p.betaEpsilon,
        p.objectiveEpsilon,
        p.gradientEpsilon,
        p.objReg,
        p.link,
        p.startval,
        p.calcLike,
        p.mode,
        p.intercept,
        p.prior,
        p.lambdaMinRatio,
        (p.betaConstraints == null? null : p.betaConstraints.name),
        p.maxActivePredictors,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.computePValues,
        p.removeCollinearColumns,
        p.maxPredictorNumber,
        p.minPredictorNumber,
        p.nparallelism,
        p.pValuesThreshold,
        p.influence,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
    );
}
/**
 * Run grid search for IsotonicRegression model.
 *
 * @param z endpoint proxy on which the generated {@code trainIsotonicregression} call is issued
 * @param p parameter schema; its fields are flattened into the positional arguments below
 * @return a {@link Call} yielding the IsotonicRegression grid-search response schema
 */
public static Call<IsotonicRegressionV3> trainIsotonicregression(Grid z, IsotonicRegressionParametersV3 p) {
    // Argument order must stay in sync with the generated proxy signature.
    // Key-typed fields are unwrapped to their string names, null-safely.
    return z.trainIsotonicregression(
        p.outOfBounds,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
    );
}
/**
 * Resume grid search for IsotonicRegression model.
 *
 * @param z endpoint proxy on which the generated {@code resumeIsotonicregression} call is issued
 * @param p parameter schema; its fields are flattened into the positional arguments below
 * @return a {@link Call} yielding the IsotonicRegression grid-search response schema
 */
public static Call<IsotonicRegressionV3> resumeIsotonicregression(Grid z, IsotonicRegressionParametersV3 p) {
    // Argument order must stay in sync with the generated proxy signature.
    // Key-typed fields are unwrapped to their string names, null-safely.
    return z.resumeIsotonicregression(
        p.outOfBounds,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
    );
}
/**
 * Run grid search for DT (decision tree) model.
 *
 * @param z endpoint proxy on which the generated {@code trainDt} call is issued
 * @param p parameter schema; its fields are flattened into the positional arguments below
 * @return a {@link Call} yielding the DT grid-search response schema
 */
public static Call<DTV3> trainDt(Grid z, DTParametersV3 p) {
    // Argument order must stay in sync with the generated proxy signature.
    // Key-typed fields are unwrapped to their string names, null-safely.
    return z.trainDt(
        p.seed,
        p.maxDepth,
        p.minRows,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
    );
}
/**
 * Resume grid search for DT (decision tree) model.
 *
 * @param z endpoint proxy on which the generated {@code resumeDt} call is issued
 * @param p parameter schema; its fields are flattened into the positional arguments below
 * @return a {@link Call} yielding the DT grid-search response schema
 */
public static Call<DTV3> resumeDt(Grid z, DTParametersV3 p) {
    // Argument order must stay in sync with the generated proxy signature.
    // Key-typed fields are unwrapped to their string names, null-safely.
    return z.resumeDt(
        p.seed,
        p.maxDepth,
        p.minRows,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
    );
}
/**
 * Run grid search for HGLM model.
 *
 * @param z endpoint proxy on which the generated {@code trainHglm} call is issued
 * @param p parameter schema; its fields are flattened into the positional arguments below
 * @return a {@link Call} yielding the HGLM grid-search response schema
 */
public static Call<HGLMV3> trainHglm(Grid z, HGLMParametersV3 p) {
    // Argument order must stay in sync with the generated proxy signature.
    // Key-typed fields are unwrapped to their string names, null-safely.
    return z.trainHglm(
        p.scoreIterationInterval,
        p.seed,
        p.missingValuesHandling,
        (p.plugValues == null? null : p.plugValues.name),
        p.family,
        p.randFamily,
        p.maxIterations,
        p.initialFixedEffects,
        (p.initialRandomEffects == null? null : p.initialRandomEffects.name),
        (p.initialTMatrix == null? null : p.initialTMatrix.name),
        p.tauUVarInit,
        p.tauEVarInit,
        p.randomColumns,
        p.method,
        p.emEpsilon,
        p.randomIntercept,
        p.groupColumn,
        p.genSynData,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
    );
}
/**
 * Resume grid search for HGLM model.
 *
 * @param z endpoint proxy on which the generated {@code resumeHglm} call is issued
 * @param p parameter schema; its fields are flattened into the positional arguments below
 * @return a {@link Call} yielding the HGLM grid-search response schema
 */
public static Call<HGLMV3> resumeHglm(Grid z, HGLMParametersV3 p) {
    // Argument order must stay in sync with the generated proxy signature.
    // Key-typed fields are unwrapped to their string names, null-safely.
    return z.resumeHglm(
        p.scoreIterationInterval,
        p.seed,
        p.missingValuesHandling,
        (p.plugValues == null? null : p.plugValues.name),
        p.family,
        p.randFamily,
        p.maxIterations,
        p.initialFixedEffects,
        (p.initialRandomEffects == null? null : p.initialRandomEffects.name),
        (p.initialTMatrix == null? null : p.initialTMatrix.name),
        p.tauUVarInit,
        p.tauEVarInit,
        p.randomColumns,
        p.method,
        p.emEpsilon,
        p.randomIntercept,
        p.groupColumn,
        p.genSynData,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
    );
}
/**
 * Run grid search for AdaBoost model.
 *
 * @param z endpoint proxy on which the generated {@code trainAdaboost} call is issued
 * @param p parameter schema; its fields are flattened into the positional arguments below
 * @return a {@link Call} yielding the AdaBoost grid-search response schema
 */
public static Call<AdaBoostV3> trainAdaboost(Grid z, AdaBoostParametersV3 p) {
    // Argument order must stay in sync with the generated proxy signature.
    // Key-typed fields are unwrapped to their string names, null-safely.
    return z.trainAdaboost(
        p.nlearners,
        p.weakLearner,
        p.learnRate,
        p.weakLearnerParams,
        p.seed,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
    );
}
/**
 * Resume grid search for AdaBoost model.
 *
 * @param z endpoint proxy on which the generated {@code resumeAdaboost} call is issued
 * @param p parameter schema; its fields are flattened into the positional arguments below
 * @return a {@link Call} yielding the AdaBoost grid-search response schema
 */
public static Call<AdaBoostV3> resumeAdaboost(Grid z, AdaBoostParametersV3 p) {
    // Argument order must stay in sync with the generated proxy signature.
    // Key-typed fields are unwrapped to their string names, null-safely.
    return z.resumeAdaboost(
        p.nlearners,
        p.weakLearner,
        p.learnRate,
        p.weakLearnerParams,
        p.seed,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
    );
}
/**
 * Extracts the string names from an array of key schemas.
 *
 * @param keys the keys to convert; may be null
 * @return the key names, positionally matching {@code keys}, or null when {@code keys} is null
 */
public static String[] keyArrayToStringArray(KeyV3[] keys) {
    if (keys == null) return null;
    final String[] names = new String[keys.length];
    for (int idx = 0; idx < keys.length; idx++) {
        names[idx] = keys[idx].name;
    }
    return names;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Grids.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface Grids {
  /**
   * Return the specified grid search result.
   *
   * @param grid_id Grid id
   * @param sort_by Model performance metric to sort by. Examples: logloss, residual_deviance, mse, rmse, mae, rmsle,
   *                auc, r2, f1, recall, precision, accuracy, mcc, err, err_count, lift_top_group, max_per_class_error
   * @param decreasing Specify whether sort order should be decreasing.
   * @param model_ids Model IDs built by a grid search
   */
  @GET("/99/Grids/{grid_id}")
  Call<GridSchemaV99> fetch(
    @Path("grid_id") String grid_id,
    @Query("sort_by") String sort_by,
    @Query("decreasing") boolean decreasing,
    @Query("model_ids") String[] model_ids
  );

  /** Convenience overload of {@link #fetch(String, String, boolean, String[])} relying on server-side defaults. */
  @GET("/99/Grids/{grid_id}")
  Call<GridSchemaV99> fetch(@Path("grid_id") String grid_id);

  /**
   * Return all grids from H2O distributed K/V store.
   */
  @GET("/99/Grids")
  Call<GridsV99> list();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/ImportFiles.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface ImportFiles {
  /**
   * Import raw data files into a single-column H2O Frame.
   *
   * @param path path
   * @param pattern pattern
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/3/ImportFiles")
  Call<ImportFilesV3> importFiles(
    @Field("path") String path,
    @Field("pattern") String pattern,
    @Field("_exclude_fields") String _exclude_fields
  );

  /** Convenience overload of {@link #importFiles(String, String, String)} relying on server-side defaults. */
  @FormUrlEncoded
  @POST("/3/ImportFiles")
  Call<ImportFilesV3> importFiles(@Field("path") String path);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/ImportFilesMulti.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface ImportFilesMulti {
  /**
   * Import raw data files from multiple directories (or different data sources) into a single-column H2O Frame.
   *
   * @param paths paths
   * @param pattern pattern
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/3/ImportFilesMulti")
  Call<ImportFilesMultiV3> importFilesMulti(
    @Field("paths") String[] paths,
    @Field("pattern") String pattern,
    @Field("_exclude_fields") String _exclude_fields
  );

  /** Convenience overload of {@link #importFilesMulti(String[], String, String)} relying on server-side defaults. */
  @FormUrlEncoded
  @POST("/3/ImportFilesMulti")
  Call<ImportFilesMultiV3> importFilesMulti(@Field("paths") String[] paths);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/ImportHiveTable.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface ImportHiveTable {
  /**
   * Import Hive table into an H2O Frame.
   *
   * @param database database
   * @param table table
   * @param partitions partitions
   * @param allow_multi_format allow partitions stored in multiple formats (the generated doc said
   *                           "partitions" here, which looks like a copy-paste error — confirm against server docs)
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/3/ImportHiveTable")
  Call<JobV3> importHiveTable(
    @Field("database") String database,
    @Field("table") String table,
    @Field("partitions") String[][] partitions,
    @Field("allow_multi_format") boolean allow_multi_format,
    @Field("_exclude_fields") String _exclude_fields
  );

  /** Convenience overload of {@link #importHiveTable(String, String, String[][], boolean, String)}. */
  @FormUrlEncoded
  @POST("/3/ImportHiveTable")
  Call<JobV3> importHiveTable(@Field("table") String table);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/ImportSQLTable.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface ImportSQLTable {
  /**
   * Import SQL table into an H2O Frame.
   *
   * @param connection_url connection_url
   * @param table table
   * @param select_query select_query
   * @param use_temp_table use_temp_table
   * @param temp_table_name temp_table_name
   * @param username username
   * @param password password
   * @param columns columns
   * @param fetch_mode Mode for data loading. All modes may not be supported by all databases.
   * @param num_chunks_hint Desired number of chunks for the target Frame. Optional.
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/99/ImportSQLTable")
  Call<JobV3> importSQLTable(
    @Field("connection_url") String connection_url,
    @Field("table") String table,
    @Field("select_query") String select_query,
    @Field("use_temp_table") String use_temp_table,
    @Field("temp_table_name") String temp_table_name,
    @Field("username") String username,
    @Field("password") String password,
    @Field("columns") String columns,
    @Field("fetch_mode") String fetch_mode,
    @Field("num_chunks_hint") String num_chunks_hint,
    @Field("_exclude_fields") String _exclude_fields
  );

  /** Convenience overload supplying only the connection credentials; other fields use server-side defaults. */
  @FormUrlEncoded
  @POST("/99/ImportSQLTable")
  Call<JobV3> importSQLTable(
    @Field("connection_url") String connection_url,
    @Field("username") String username,
    @Field("password") String password
  );
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/InitID.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface InitID {
  /**
   * Issue a new session ID.
   *
   * @param session_key Session ID
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/InitID")
  Call<InitIDV3> startSession(
    @Query("session_key") String session_key,
    @Query("_exclude_fields") String _exclude_fields
  );

  /** Convenience overload of {@link #startSession(String, String)} relying on server-side defaults. */
  @GET("/3/InitID")
  Call<InitIDV3> startSession();

  /**
   * End a session.
   *
   * NOTE(review): this method uses {@code @Field} parameters on a {@code @DELETE} endpoint without
   * {@code @FormUrlEncoded}; Retrofit normally rejects @Field without a form-encoded body — confirm
   * this generated declaration actually works at runtime before relying on the parameterized overload.
   *
   * @param session_key Session ID
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @DELETE("/3/InitID")
  Call<InitIDV3> endSession(
    @Field("session_key") String session_key,
    @Field("_exclude_fields") String _exclude_fields
  );

  /** Convenience overload of {@link #endSession(String, String)} relying on server-side defaults. */
  @DELETE("/3/InitID")
  Call<InitIDV3> endSession();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Interaction.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface Interaction {
  /**
   * Create interactions between categorical columns.
   *
   * @param dest destination key
   * @param source_frame Input data frame
   * @param factor_columns Factor columns
   * @param pairwise Whether to create pairwise quadratic interactions between factors (otherwise create one higher-
   *                 order interaction). Only applicable if there are 3 or more factors.
   * @param max_factors Max. number of factor levels in pair-wise interaction terms (if enforced, one extra catch-all
   *                    factor will be made)
   * @param min_occurrence Min. occurrence threshold for factor levels in pair-wise interaction terms
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/3/Interaction")
  Call<JobV3> run(
    @Field("dest") String dest,
    @Field("source_frame") String source_frame,
    @Field("factor_columns") String[] factor_columns,
    @Field("pairwise") boolean pairwise,
    @Field("max_factors") int max_factors,
    @Field("min_occurrence") int min_occurrence,
    @Field("_exclude_fields") String _exclude_fields
  );

  /** Convenience overload of {@link #run(String, String, String[], boolean, int, int, String)}. */
  @FormUrlEncoded
  @POST("/3/Interaction")
  Call<JobV3> run(@Field("max_factors") int max_factors);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/JStack.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface JStack {
  /**
   * Report stack traces for all threads on all nodes.
   *
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/JStack")
  Call<JStackV3> fetch(@Query("_exclude_fields") String _exclude_fields);

  /** Convenience overload of {@link #fetch(String)} returning the full, unfiltered response. */
  @GET("/3/JStack")
  Call<JStackV3> fetch();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Jobs.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface Jobs {
  /**
   * Get a list of all the H2O Jobs (long-running actions).
   *
   * @param job_id Optional Job identifier
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/Jobs")
  Call<JobsV3> list(
    @Query("job_id") String job_id,
    @Query("_exclude_fields") String _exclude_fields
  );

  /** Convenience overload of {@link #list(String, String)} listing all jobs. */
  @GET("/3/Jobs")
  Call<JobsV3> list();

  /**
   * Get the status of the given H2O Job (long-running action).
   *
   * @param job_id Optional Job identifier
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/Jobs/{job_id}")
  Call<JobsV3> fetch(
    @Path("job_id") String job_id,
    @Query("_exclude_fields") String _exclude_fields
  );

  /** Convenience overload of {@link #fetch(String, String)} returning the full, unfiltered response. */
  @GET("/3/Jobs/{job_id}")
  Call<JobsV3> fetch(@Path("job_id") String job_id);

  /**
   * Cancel a running job.
   *
   * @param job_id Optional Job identifier
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/3/Jobs/{job_id}/cancel")
  Call<JobsV3> cancel(
    @Path("job_id") String job_id,
    @Field("_exclude_fields") String _exclude_fields
  );

  /** Convenience overload of {@link #cancel(String, String)} returning the full, unfiltered response. */
  @FormUrlEncoded
  @POST("/3/Jobs/{job_id}/cancel")
  Call<JobsV3> cancel(@Path("job_id") String job_id);

  /**
   * Retrieve information about the current state of a job (v4 endpoint).
   *
   * @param job_id Id of the job to fetch.
   * @param _fields Filter on the set of output fields: if you set _fields="foo,bar,baz", then only those fields will
   *                be included in the output; or you can specify _fields="-goo,gee" to include all fields except goo
   *                and gee. If the result contains nested data structures, then you can refer to the fields within
   *                those structures as well. For example if you specify _fields="foo(oof),bar(-rab)", then only
   *                fields foo and bar will be included, and within foo there will be only field oof, whereas within
   *                bar all fields except rab will be reported.
   */
  @GET("/4/jobs/{job_id}")
  Call<JobV4> getJob4(
    @Path("job_id") String job_id,
    @Query("_fields") String _fields
  );

  /** Convenience overload of {@link #getJob4(String, String)} returning all fields. */
  @GET("/4/jobs/{job_id}")
  Call<JobV4> getJob4(@Path("job_id") String job_id);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/KillMinus3.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/KillMinus3} REST endpoint. */
public interface KillMinus3 {
  /**
   * Kill minus 3 on *this* node
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/KillMinus3")
  Call<KillMinus3V3> killm3(@Query("_exclude_fields") String _exclude_fields);

  /** Overload of {@link #killm3(String)} that omits the optional {@code _exclude_fields} filter. */
  @GET("/3/KillMinus3")
  Call<KillMinus3V3> killm3();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Leaderboards.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /99/Leaderboards} REST endpoints. */
public interface Leaderboards {
  /**
   * Return all the AutoML leaderboards.
   * @param project_name Name of project of interest
   * @param extensions List of extension columns to add to leaderboard
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/99/Leaderboards")
  Call<LeaderboardsV99> list(
    @Query("project_name") String project_name,
    @Query("extensions") String[] extensions,
    @Query("_exclude_fields") String _exclude_fields
  );

  /** Overload of {@link #list(String, String[], String)} that omits every optional query parameter. */
  @GET("/99/Leaderboards")
  Call<LeaderboardsV99> list();

  /**
   * Return the AutoML leaderboard for the given project.
   * @param project_name Name of project of interest
   * @param extensions List of extension columns to add to leaderboard
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/99/Leaderboards/{project_name}")
  Call<LeaderboardV99> fetch(
    @Path("project_name") String project_name,
    @Query("extensions") String[] extensions,
    @Query("_exclude_fields") String _exclude_fields
  );

  /**
   * Overload of {@link #fetch(String, String[], String)} that omits the optional
   * {@code extensions} and {@code _exclude_fields} parameters.
   */
  @GET("/99/Leaderboards/{project_name}")
  Call<LeaderboardV99> fetch(@Path("project_name") String project_name);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/LogAndEcho.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/LogAndEcho} REST endpoint. */
public interface LogAndEcho {
  /**
   * Save a message to the H2O logfile.
   * @param message Message to be Logged and Echoed
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/3/LogAndEcho")
  Call<LogAndEchoV3> echo(
    @Field("message") String message,
    @Field("_exclude_fields") String _exclude_fields
  );

  /**
   * Overload of {@link #echo(String, String)} taking no parameters.
   * NOTE(review): {@code @FormUrlEncoded} is intentionally absent on this variant — Retrofit 2 rejects
   * form-encoded methods declaring no {@code @Field} parameter ("Form-encoded method must contain at
   * least one @Field"), so the generated annotation made this overload throw at invocation time.
   * Without it, Retrofit sends the POST with an empty body instead.
   */
  @POST("/3/LogAndEcho")
  Call<LogAndEchoV3> echo();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Logs.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/Logs} REST endpoint. */
public interface Logs {
  /**
   * Get named log file for a node.
   * @param nodeidx Identifier of the node to get logs from. It can be either node index starting from (0-based),
   *                where -1 means current node, or IP and port.
   * @param name Which specific log file to read from the log file directory. If left unspecified, the system chooses
   *             a default for you.
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/Logs/nodes/{nodeidx}/files/{name}")
  Call<LogsV3> fetch(
    @Path("nodeidx") String nodeidx,
    @Path("name") String name,
    @Query("_exclude_fields") String _exclude_fields
  );

  /**
   * Overload of {@link #fetch(String, String, String)} that omits the optional
   * {@code _exclude_fields} filter.
   */
  @GET("/3/Logs/nodes/{nodeidx}/files/{name}")
  Call<LogsV3> fetch(
    @Path("nodeidx") String nodeidx,
    @Path("name") String name
  );
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/MakeGLMModel.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/MakeGLMModel} REST endpoint. */
public interface MakeGLMModel {
  /**
   * Make a new GLM model based on existing one
   * @param model source model
   * @param dest destination key
   * @param names coefficient names
   * @param beta new glm coefficients
   * @param threshold decision threshold for label-generation
   */
  @FormUrlEncoded
  @POST("/3/MakeGLMModel")
  Call<GLMModelV3> make_model(
    @Field("model") String model,
    @Field("dest") String dest,
    @Field("names") String[] names,
    @Field("beta") double[] beta,
    @Field("threshold") float threshold
  );

  /**
   * Overload of {@link #make_model(String, String, String[], double[], float)} that omits
   * the optional {@code dest} and {@code threshold} parameters.
   */
  @FormUrlEncoded
  @POST("/3/MakeGLMModel")
  Call<GLMModelV3> make_model(
    @Field("model") String model,
    @Field("names") String[] names,
    @Field("beta") double[] beta
  );
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Metadata.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/Metadata} REST endpoints. */
public interface Metadata {
  /**
   * Return the list of (almost) all REST API endpoints.
   * @param num Number for specifying an endpoint
   * @param http_method HTTP method (GET, POST, DELETE) if fetching by path
   * @param path Path for specifying an endpoint
   * @param classname Class name, for fetching docs for a schema (DEPRECATED)
   * @param schemaname Schema name (e.g., DocsV1), for fetching docs for a schema
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/Metadata/endpoints")
  Call<MetadataV3> listRoutes(
    @Query("num") int num,
    @Query("http_method") String http_method,
    @Query("path") String path,
    @Query("classname") String classname,
    @Query("schemaname") String schemaname,
    @Query("_exclude_fields") String _exclude_fields
  );

  /** Overload of {@link #listRoutes(int, String, String, String, String, String)} that omits every optional query parameter. */
  @GET("/3/Metadata/endpoints")
  Call<MetadataV3> listRoutes();

  /**
   * Return the REST API endpoint metadata, including documentation, for the endpoint specified by path or index.
   * @param path Path for specifying an endpoint
   * @param num Number for specifying an endpoint
   * @param http_method HTTP method (GET, POST, DELETE) if fetching by path
   * @param classname Class name, for fetching docs for a schema (DEPRECATED)
   * @param schemaname Schema name (e.g., DocsV1), for fetching docs for a schema
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/Metadata/endpoints/{path}")
  Call<MetadataV3> fetchRoute(
    @Path("path") String path,
    @Query("num") int num,
    @Query("http_method") String http_method,
    @Query("classname") String classname,
    @Query("schemaname") String schemaname,
    @Query("_exclude_fields") String _exclude_fields
  );

  /** Overload of {@link #fetchRoute(String, int, String, String, String, String)} that omits every optional query parameter. */
  @GET("/3/Metadata/endpoints/{path}")
  Call<MetadataV3> fetchRoute(@Path("path") String path);

  /**
   * Return the REST API schema metadata for specified schema class.
   * @param classname Class name, for fetching docs for a schema (DEPRECATED)
   * @param num Number for specifying an endpoint
   * @param http_method HTTP method (GET, POST, DELETE) if fetching by path
   * @param path Path for specifying an endpoint
   * @param schemaname Schema name (e.g., DocsV1), for fetching docs for a schema
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/Metadata/schemaclasses/{classname}")
  Call<MetadataV3> fetchSchemaMetadataByClass(
    @Path("classname") String classname,
    @Query("num") int num,
    @Query("http_method") String http_method,
    @Query("path") String path,
    @Query("schemaname") String schemaname,
    @Query("_exclude_fields") String _exclude_fields
  );

  /** Overload of {@link #fetchSchemaMetadataByClass(String, int, String, String, String, String)} that omits every optional query parameter. */
  @GET("/3/Metadata/schemaclasses/{classname}")
  Call<MetadataV3> fetchSchemaMetadataByClass(@Path("classname") String classname);

  /**
   * Return the REST API schema metadata for specified schema.
   * @param schemaname Schema name (e.g., DocsV1), for fetching docs for a schema
   * @param num Number for specifying an endpoint
   * @param http_method HTTP method (GET, POST, DELETE) if fetching by path
   * @param path Path for specifying an endpoint
   * @param classname Class name, for fetching docs for a schema (DEPRECATED)
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/Metadata/schemas/{schemaname}")
  Call<MetadataV3> fetchSchemaMetadata(
    @Path("schemaname") String schemaname,
    @Query("num") int num,
    @Query("http_method") String http_method,
    @Query("path") String path,
    @Query("classname") String classname,
    @Query("_exclude_fields") String _exclude_fields
  );

  /** Overload of {@link #fetchSchemaMetadata(String, int, String, String, String, String)} that omits every optional query parameter. */
  @GET("/3/Metadata/schemas/{schemaname}")
  Call<MetadataV3> fetchSchemaMetadata(@Path("schemaname") String schemaname);

  /**
   * Return list of all REST API schemas.
   * @param num Number for specifying an endpoint
   * @param http_method HTTP method (GET, POST, DELETE) if fetching by path
   * @param path Path for specifying an endpoint
   * @param classname Class name, for fetching docs for a schema (DEPRECATED)
   * @param schemaname Schema name (e.g., DocsV1), for fetching docs for a schema
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/Metadata/schemas")
  Call<MetadataV3> listSchemas(
    @Query("num") int num,
    @Query("http_method") String http_method,
    @Query("path") String path,
    @Query("classname") String classname,
    @Query("schemaname") String schemaname,
    @Query("_exclude_fields") String _exclude_fields
  );

  /** Overload of {@link #listSchemas(int, String, String, String, String, String)} that omits every optional query parameter. */
  @GET("/3/Metadata/schemas")
  Call<MetadataV3> listSchemas();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/MissingInserter.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/MissingInserter} REST endpoint. */
public interface MissingInserter {
  /**
   * Insert missing values.
   * @param dataset dataset
   * @param fraction Fraction of data to replace with a missing value
   * @param seed Seed
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *                        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/3/MissingInserter")
  Call<JobV3> run(
    @Field("dataset") String dataset,
    @Field("fraction") double fraction,
    @Field("seed") long seed,
    @Field("_exclude_fields") String _exclude_fields
  );

  /**
   * Overload of {@link #run(String, double, long, String)} that omits the optional
   * {@code seed} and {@code _exclude_fields} parameters.
   */
  @FormUrlEncoded
  @POST("/3/MissingInserter")
  Call<JobV3> run(
    @Field("dataset") String dataset,
    @Field("fraction") double fraction
  );
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/ModelBuilders.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
import java.util.Map;
public interface ModelBuilders {
/**
* Train a XGBoost model.
* @param ntrees (same as n_estimators) Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows (same as min_child_weight) Fewest allowed (weighted) observations in a leaf.
* @param min_child_weight (same as min_rows) Fewest allowed (weighted) observations in a leaf.
* @param learn_rate (same as eta) Learning rate (from 0.0 to 1.0)
* @param eta (same as learn_rate) Learning rate (from 0.0 to 1.0)
* @param sample_rate (same as subsample) Row sample rate per tree (from 0.0 to 1.0)
* @param subsample (same as sample_rate) Row sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate (same as colsample_bylevel) Column sample rate (from 0.0 to 1.0)
* @param colsample_bylevel (same as col_sample_rate) Column sample rate (from 0.0 to 1.0)
* @param col_sample_rate_per_tree (same as colsample_bytree) Column sample rate per tree (from 0.0 to 1.0)
* @param colsample_bytree (same as col_sample_rate_per_tree) Column sample rate per tree (from 0.0 to 1.0)
* @param colsample_bynode Column sample rate per tree node (from 0.0 to 1.0)
* @param monotone_constraints A mapping representing monotonic constraints. Use +1 to enforce an increasing
* constraint and -1 to specify a decreasing constraint.
* @param max_abs_leafnode_pred (same as max_delta_step) Maximum absolute value of a leaf node prediction
* @param max_delta_step (same as max_abs_leafnode_pred) Maximum absolute value of a leaf node prediction
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param seed Seed for pseudo random number generator (if applicable)
* @param min_split_improvement (same as gamma) Minimum relative improvement in squared error reduction for a split
* to happen
* @param gamma (same as min_split_improvement) Minimum relative improvement in squared error reduction for a split
* to happen
* @param nthread Number of parallel threads that can be used to run XGBoost. Cannot exceed H2O cluster limits
* (-nthreads parameter). Defaults to maximum available
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param save_matrix_directory Directory where to save matrices passed to XGBoost library. Useful for debugging.
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param max_bins For tree_method=hist only: maximum number of bins
* @param max_leaves For tree_method=hist only: maximum number of leaves
* @param tree_method Tree method
* @param grow_policy Grow policy - depthwise is standard GBM, lossguide is LightGBM
* @param booster Booster type
* @param reg_lambda L2 regularization
* @param reg_alpha L1 regularization
* @param quiet_mode Enable quiet mode
* @param sample_type For booster=dart only: sample_type
* @param normalize_type For booster=dart only: normalize_type
* @param rate_drop For booster=dart only: rate_drop (0..1)
* @param one_drop For booster=dart only: one_drop
* @param skip_drop For booster=dart only: skip_drop (0..1)
* @param dmatrix_type Type of DMatrix. For sparse, NAs and 0 are treated equally.
* @param backend Backend. By default (auto), a GPU is used if available.
* @param gpu_id Which GPU(s) to use.
* @param interaction_constraints A set of allowed column interactions.
* @param scale_pos_weight Controls the effect of observations with positive labels in relation to the observations
* with negative labels on gradient calculation. Useful for imbalanced problems.
* @param eval_metric Specification of evaluation metric that will be passed to the native XGBoost backend.
* @param score_eval_metric_only If enabled, score only the evaluation metric. This can make model training faster
* if scoring is frequent (eg. each iteration).
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/xgboost")
Call<XGBoostV3> trainXgboost(
@Field("ntrees") int ntrees,
@Field("max_depth") int max_depth,
@Field("min_rows") double min_rows,
@Field("min_child_weight") double min_child_weight,
@Field("learn_rate") double learn_rate,
@Field("eta") double eta,
@Field("sample_rate") double sample_rate,
@Field("subsample") double subsample,
@Field("col_sample_rate") double col_sample_rate,
@Field("colsample_bylevel") double colsample_bylevel,
@Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
@Field("colsample_bytree") double colsample_bytree,
@Field("colsample_bynode") double colsample_bynode,
@Field("monotone_constraints") KeyValueV3[] monotone_constraints,
@Field("max_abs_leafnode_pred") float max_abs_leafnode_pred,
@Field("max_delta_step") float max_delta_step,
@Field("score_tree_interval") int score_tree_interval,
@Field("seed") long seed,
@Field("min_split_improvement") float min_split_improvement,
@Field("gamma") float gamma,
@Field("nthread") int nthread,
@Field("build_tree_one_node") boolean build_tree_one_node,
@Field("save_matrix_directory") String save_matrix_directory,
@Field("calibrate_model") boolean calibrate_model,
@Field("calibration_frame") String calibration_frame,
@Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
@Field("max_bins") int max_bins,
@Field("max_leaves") int max_leaves,
@Field("tree_method") TreexgboostXGBoostModelXGBoostParametersTreeMethod tree_method,
@Field("grow_policy") TreexgboostXGBoostModelXGBoostParametersGrowPolicy grow_policy,
@Field("booster") TreexgboostXGBoostModelXGBoostParametersBooster booster,
@Field("reg_lambda") float reg_lambda,
@Field("reg_alpha") float reg_alpha,
@Field("quiet_mode") boolean quiet_mode,
@Field("sample_type") TreexgboostXGBoostModelXGBoostParametersDartSampleType sample_type,
@Field("normalize_type") TreexgboostXGBoostModelXGBoostParametersDartNormalizeType normalize_type,
@Field("rate_drop") float rate_drop,
@Field("one_drop") boolean one_drop,
@Field("skip_drop") float skip_drop,
@Field("dmatrix_type") TreexgboostXGBoostModelXGBoostParametersDMatrixType dmatrix_type,
@Field("backend") TreexgboostXGBoostModelXGBoostParametersBackend backend,
@Field("gpu_id") int[] gpu_id,
@Field("interaction_constraints") String[][] interaction_constraints,
@Field("scale_pos_weight") float scale_pos_weight,
@Field("eval_metric") String eval_metric,
@Field("score_eval_metric_only") boolean score_eval_metric_only,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
@FormUrlEncoded
@POST("/3/ModelBuilders/xgboost")
Call<XGBoostV3> trainXgboost();
/**
* Validate a set of XGBoost model builder parameters.
* @param ntrees (same as n_estimators) Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows (same as min_child_weight) Fewest allowed (weighted) observations in a leaf.
* @param min_child_weight (same as min_rows) Fewest allowed (weighted) observations in a leaf.
* @param learn_rate (same as eta) Learning rate (from 0.0 to 1.0)
* @param eta (same as learn_rate) Learning rate (from 0.0 to 1.0)
* @param sample_rate (same as subsample) Row sample rate per tree (from 0.0 to 1.0)
* @param subsample (same as sample_rate) Row sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate (same as colsample_bylevel) Column sample rate (from 0.0 to 1.0)
* @param colsample_bylevel (same as col_sample_rate) Column sample rate (from 0.0 to 1.0)
* @param col_sample_rate_per_tree (same as colsample_bytree) Column sample rate per tree (from 0.0 to 1.0)
* @param colsample_bytree (same as col_sample_rate_per_tree) Column sample rate per tree (from 0.0 to 1.0)
* @param colsample_bynode Column sample rate per tree node (from 0.0 to 1.0)
* @param monotone_constraints A mapping representing monotonic constraints. Use +1 to enforce an increasing
* constraint and -1 to specify a decreasing constraint.
* @param max_abs_leafnode_pred (same as max_delta_step) Maximum absolute value of a leaf node prediction
* @param max_delta_step (same as max_abs_leafnode_pred) Maximum absolute value of a leaf node prediction
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param seed Seed for pseudo random number generator (if applicable)
* @param min_split_improvement (same as gamma) Minimum relative improvement in squared error reduction for a split
* to happen
* @param gamma (same as min_split_improvement) Minimum relative improvement in squared error reduction for a split
* to happen
* @param nthread Number of parallel threads that can be used to run XGBoost. Cannot exceed H2O cluster limits
* (-nthreads parameter). Defaults to maximum available
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param save_matrix_directory Directory where to save matrices passed to XGBoost library. Useful for debugging.
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param max_bins For tree_method=hist only: maximum number of bins
* @param max_leaves For tree_method=hist only: maximum number of leaves
* @param tree_method Tree method
* @param grow_policy Grow policy - depthwise is standard GBM, lossguide is LightGBM
* @param booster Booster type
* @param reg_lambda L2 regularization
* @param reg_alpha L1 regularization
* @param quiet_mode Enable quiet mode
* @param sample_type For booster=dart only: sample_type
* @param normalize_type For booster=dart only: normalize_type
* @param rate_drop For booster=dart only: rate_drop (0..1)
* @param one_drop For booster=dart only: one_drop
* @param skip_drop For booster=dart only: skip_drop (0..1)
* @param dmatrix_type Type of DMatrix. For sparse, NAs and 0 are treated equally.
* @param backend Backend. By default (auto), a GPU is used if available.
* @param gpu_id Which GPU(s) to use.
* @param interaction_constraints A set of allowed column interactions.
* @param scale_pos_weight Controls the effect of observations with positive labels in relation to the observations
* with negative labels on gradient calculation. Useful for imbalanced problems.
* @param eval_metric Specification of evaluation metric that will be passed to the native XGBoost backend.
* @param score_eval_metric_only If enabled, score only the evaluation metric. This can make model training faster
* if scoring is frequent (eg. each iteration).
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/3/ModelBuilders/xgboost/parameters")
  Call<XGBoostV3> validate_parametersXgboost(
    // --- XGBoost-specific hyperparameters ---
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("min_child_weight") double min_child_weight,
    @Field("learn_rate") double learn_rate,
    @Field("eta") double eta,
    @Field("sample_rate") double sample_rate,
    @Field("subsample") double subsample,
    @Field("col_sample_rate") double col_sample_rate,
    @Field("colsample_bylevel") double colsample_bylevel,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("colsample_bytree") double colsample_bytree,
    @Field("colsample_bynode") double colsample_bynode,
    @Field("monotone_constraints") KeyValueV3[] monotone_constraints,
    @Field("max_abs_leafnode_pred") float max_abs_leafnode_pred,
    @Field("max_delta_step") float max_delta_step,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("seed") long seed,
    @Field("min_split_improvement") float min_split_improvement,
    @Field("gamma") float gamma,
    @Field("nthread") int nthread,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("save_matrix_directory") String save_matrix_directory,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("max_bins") int max_bins,
    @Field("max_leaves") int max_leaves,
    @Field("tree_method") TreexgboostXGBoostModelXGBoostParametersTreeMethod tree_method,
    @Field("grow_policy") TreexgboostXGBoostModelXGBoostParametersGrowPolicy grow_policy,
    @Field("booster") TreexgboostXGBoostModelXGBoostParametersBooster booster,
    @Field("reg_lambda") float reg_lambda,
    @Field("reg_alpha") float reg_alpha,
    @Field("quiet_mode") boolean quiet_mode,
    @Field("sample_type") TreexgboostXGBoostModelXGBoostParametersDartSampleType sample_type,
    @Field("normalize_type") TreexgboostXGBoostModelXGBoostParametersDartNormalizeType normalize_type,
    @Field("rate_drop") float rate_drop,
    @Field("one_drop") boolean one_drop,
    @Field("skip_drop") float skip_drop,
    @Field("dmatrix_type") TreexgboostXGBoostModelXGBoostParametersDMatrixType dmatrix_type,
    @Field("backend") TreexgboostXGBoostModelXGBoostParametersBackend backend,
    @Field("gpu_id") int[] gpu_id,
    @Field("interaction_constraints") String[][] interaction_constraints,
    @Field("scale_pos_weight") float scale_pos_weight,
    @Field("eval_metric") String eval_metric,
    @Field("score_eval_metric_only") boolean score_eval_metric_only,
    // --- common model-builder parameters (shared across all algorithms) ---
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Validate a set of XGBoost model builder parameters, using default values for every parameter.
   *
   * <p>NOTE(review): Retrofit rejects a {@code @FormUrlEncoded} request that carries no
   * {@code @Field} values at call time — confirm this no-argument overload is actually invocable.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/xgboost/parameters")
  Call<XGBoostV3> validate_parametersXgboost();
/**
* Train a Infogram model.
* @param seed Seed for pseudo random number generator (if applicable).
* @param standardize Standardize numeric columns to have zero mean and unit variance.
* @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                    of the training/validation frame, use in conjunction with missing_values_handling = PlugValues).
* @param max_iterations Maximum number of iterations.
* @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
* and the mean of response does not reflect reality.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param algorithm Type of machine learning algorithm used to build the infogram. Options include 'AUTO' (gbm),
* 'deeplearning' (Deep Learning with default parameters), 'drf' (Random Forest with default
* parameters), 'gbm' (GBM with default parameters), 'glm' (GLM with default parameters), or
* 'xgboost' (if available, XGBoost with default parameters).
* @param algorithm_params Customized parameters for the machine learning algorithm specified in the algorithm
* parameter.
* @param protected_columns Columns that contain features that are sensitive and need to be protected (legally, or
* otherwise), if applicable. These features (e.g. race, gender, etc) should not drive the
* prediction of the response.
* @param total_information_threshold A number between 0 and 1 representing a threshold for total information,
* defaulting to 0.1. For a specific feature, if the total information is higher
* than this threshold, and the corresponding net information is also higher than
* the threshold ``net_information_threshold``, that feature will be considered
* admissible. The total information is the x-axis of the Core Infogram. Default
* is -1 which gets set to 0.1.
* @param net_information_threshold A number between 0 and 1 representing a threshold for net information,
* defaulting to 0.1. For a specific feature, if the net information is higher
* than this threshold, and the corresponding total information is also higher than
* the total_information_threshold, that feature will be considered admissible. The
* net information is the y-axis of the Core Infogram. Default is -1 which gets set
* to 0.1.
* @param relevance_index_threshold A number between 0 and 1 representing a threshold for the relevance index,
* defaulting to 0.1. This is only used when ``protected_columns`` is set by the
* user. For a specific feature, if the relevance index value is higher than this
* threshold, and the corresponding safety index is also higher than the
   *                               ``safety_index_threshold``, that feature will be considered admissible. The
* relevance index is the x-axis of the Fair Infogram. Default is -1 which gets set
* to 0.1.
* @param safety_index_threshold A number between 0 and 1 representing a threshold for the safety index, defaulting
* to 0.1. This is only used when protected_columns is set by the user. For a
* specific feature, if the safety index value is higher than this threshold, and the
* corresponding relevance index is also higher than the relevance_index_threshold,
* that feature will be considered admissible. The safety index is the y-axis of the
* Fair Infogram. Default is -1 which gets set to 0.1.
* @param data_fraction The fraction of training frame to use to build the infogram model. Defaults to 1.0, and any
* value greater than 0 and less than or equal to 1.0 is acceptable.
* @param top_n_features An integer specifying the number of columns to evaluate in the infogram. The columns are
* ranked by variable importance, and the top N are evaluated. Defaults to 50.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/3/ModelBuilders/infogram")
  Call<InfogramV3> trainInfogram(
    // --- Infogram-specific parameters ---
    @Field("seed") long seed,
    @Field("standardize") boolean standardize,
    @Field("plug_values") String plug_values,
    @Field("max_iterations") int max_iterations,
    @Field("prior") double prior,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("algorithm") InfogramAlgorithm algorithm,
    @Field("algorithm_params") String algorithm_params,
    @Field("protected_columns") String[] protected_columns,
    @Field("total_information_threshold") double total_information_threshold,
    @Field("net_information_threshold") double net_information_threshold,
    @Field("relevance_index_threshold") double relevance_index_threshold,
    @Field("safety_index_threshold") double safety_index_threshold,
    @Field("data_fraction") double data_fraction,
    @Field("top_n_features") int top_n_features,
    // --- common model-builder parameters (shared across all algorithms) ---
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Train an Infogram model, using default values for every parameter.
   *
   * <p>NOTE(review): Retrofit rejects a {@code @FormUrlEncoded} request that carries no
   * {@code @Field} values at call time — confirm this no-argument overload is actually invocable.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/infogram")
  Call<InfogramV3> trainInfogram();
/**
* Validate a set of Infogram model builder parameters.
* @param seed Seed for pseudo random number generator (if applicable).
* @param standardize Standardize numeric columns to have zero mean and unit variance.
* @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                    of the training/validation frame, use in conjunction with missing_values_handling = PlugValues).
* @param max_iterations Maximum number of iterations.
* @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
* and the mean of response does not reflect reality.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param algorithm Type of machine learning algorithm used to build the infogram. Options include 'AUTO' (gbm),
* 'deeplearning' (Deep Learning with default parameters), 'drf' (Random Forest with default
* parameters), 'gbm' (GBM with default parameters), 'glm' (GLM with default parameters), or
* 'xgboost' (if available, XGBoost with default parameters).
* @param algorithm_params Customized parameters for the machine learning algorithm specified in the algorithm
* parameter.
* @param protected_columns Columns that contain features that are sensitive and need to be protected (legally, or
* otherwise), if applicable. These features (e.g. race, gender, etc) should not drive the
* prediction of the response.
* @param total_information_threshold A number between 0 and 1 representing a threshold for total information,
* defaulting to 0.1. For a specific feature, if the total information is higher
* than this threshold, and the corresponding net information is also higher than
* the threshold ``net_information_threshold``, that feature will be considered
* admissible. The total information is the x-axis of the Core Infogram. Default
* is -1 which gets set to 0.1.
* @param net_information_threshold A number between 0 and 1 representing a threshold for net information,
* defaulting to 0.1. For a specific feature, if the net information is higher
* than this threshold, and the corresponding total information is also higher than
* the total_information_threshold, that feature will be considered admissible. The
* net information is the y-axis of the Core Infogram. Default is -1 which gets set
* to 0.1.
* @param relevance_index_threshold A number between 0 and 1 representing a threshold for the relevance index,
* defaulting to 0.1. This is only used when ``protected_columns`` is set by the
* user. For a specific feature, if the relevance index value is higher than this
* threshold, and the corresponding safety index is also higher than the
   *                               ``safety_index_threshold``, that feature will be considered admissible. The
* relevance index is the x-axis of the Fair Infogram. Default is -1 which gets set
* to 0.1.
* @param safety_index_threshold A number between 0 and 1 representing a threshold for the safety index, defaulting
* to 0.1. This is only used when protected_columns is set by the user. For a
* specific feature, if the safety index value is higher than this threshold, and the
* corresponding relevance index is also higher than the relevance_index_threshold,
* that feature will be considered admissible. The safety index is the y-axis of the
* Fair Infogram. Default is -1 which gets set to 0.1.
* @param data_fraction The fraction of training frame to use to build the infogram model. Defaults to 1.0, and any
* value greater than 0 and less than or equal to 1.0 is acceptable.
* @param top_n_features An integer specifying the number of columns to evaluate in the infogram. The columns are
* ranked by variable importance, and the top N are evaluated. Defaults to 50.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/3/ModelBuilders/infogram/parameters")
  Call<InfogramV3> validate_parametersInfogram(
    // --- Infogram-specific parameters ---
    @Field("seed") long seed,
    @Field("standardize") boolean standardize,
    @Field("plug_values") String plug_values,
    @Field("max_iterations") int max_iterations,
    @Field("prior") double prior,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("algorithm") InfogramAlgorithm algorithm,
    @Field("algorithm_params") String algorithm_params,
    @Field("protected_columns") String[] protected_columns,
    @Field("total_information_threshold") double total_information_threshold,
    @Field("net_information_threshold") double net_information_threshold,
    @Field("relevance_index_threshold") double relevance_index_threshold,
    @Field("safety_index_threshold") double safety_index_threshold,
    @Field("data_fraction") double data_fraction,
    @Field("top_n_features") int top_n_features,
    // --- common model-builder parameters (shared across all algorithms) ---
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Validate a set of Infogram model builder parameters, using default values for every parameter.
   *
   * <p>NOTE(review): Retrofit rejects a {@code @FormUrlEncoded} request that carries no
   * {@code @Field} values at call time — confirm this no-argument overload is actually invocable.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/infogram/parameters")
  Call<InfogramV3> validate_parametersInfogram();
/**
* Train a TargetEncoder model.
* @param columns_to_encode List of categorical columns or groups of categorical columns to encode. When groups of
* columns are specified, each group is encoded as a single column (interactions are
* created internally).
* @param keep_original_categorical_columns If true, the original non-encoded categorical features will remain in
* the result frame.
* @param blending If true, enables blending of posterior probabilities (computed for a given categorical value)
* with prior probabilities (computed on the entire set). This allows to mitigate the effect of
* categorical values with small cardinality. The blending effect can be tuned using the
* `inflection_point` and `smoothing` parameters.
* @param inflection_point Inflection point of the sigmoid used to blend probabilities (see `blending` parameter).
* For a given categorical value, if it appears less that `inflection_point` in a data
* sample, then the influence of the posterior probability will be smaller than the prior.
* @param smoothing Smoothing factor corresponds to the inverse of the slope at the inflection point on the sigmoid
* used to blend probabilities (see `blending` parameter). If smoothing tends towards 0, then the
* sigmoid used for blending turns into a Heaviside step function.
* @param data_leakage_handling Data leakage handling strategy used to generate the encoding. Supported options are:
* 1) "none" (default) - no holdout, using the entire training frame.
* 2) "leave_one_out" - current row's response value is subtracted from the per-level
* frequencies pre-calculated on the entire training frame.
* 3) "k_fold" - encodings for a fold are generated based on out-of-fold data.
* @param noise The amount of noise to add to the encoded column. Use 0 to disable noise, and -1 (=AUTO) to let the
* algorithm determine a reasonable amount of noise.
* @param seed Seed used to generate the noise. By default, the seed is chosen randomly.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Form-encoded POST to the TargetEncoder model-builder endpoint. Each @Field name
  // maps 1:1 to an H2O REST API parameter (see the Javadoc above for descriptions);
  // the call is asynchronous via Retrofit's Call wrapper.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/targetencoder")
  Call<TargetEncoderV3> trainTargetencoder(
    @Field("columns_to_encode") String[][] columns_to_encode,
    @Field("keep_original_categorical_columns") boolean keep_original_categorical_columns,
    @Field("blending") boolean blending,
    @Field("inflection_point") double inflection_point,
    @Field("smoothing") double smoothing,
    @Field("data_leakage_handling") H2otargetencodingTargetEncoderModelDataLeakageHandlingStrategy data_leakage_handling,
    @Field("noise") double noise,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Train a TargetEncoder model. Convenience overload that posts no form fields,
   * so every parameter keeps its default value.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/targetencoder")
  Call<TargetEncoderV3> trainTargetencoder();
/**
* Validate a set of TargetEncoder model builder parameters.
* @param columns_to_encode List of categorical columns or groups of categorical columns to encode. When groups of
* columns are specified, each group is encoded as a single column (interactions are
* created internally).
* @param keep_original_categorical_columns If true, the original non-encoded categorical features will remain in
* the result frame.
* @param blending If true, enables blending of posterior probabilities (computed for a given categorical value)
* with prior probabilities (computed on the entire set). This allows to mitigate the effect of
* categorical values with small cardinality. The blending effect can be tuned using the
* `inflection_point` and `smoothing` parameters.
* @param inflection_point Inflection point of the sigmoid used to blend probabilities (see `blending` parameter).
   *                         For a given categorical value, if it appears less than `inflection_point` in a data
* sample, then the influence of the posterior probability will be smaller than the prior.
* @param smoothing Smoothing factor corresponds to the inverse of the slope at the inflection point on the sigmoid
* used to blend probabilities (see `blending` parameter). If smoothing tends towards 0, then the
* sigmoid used for blending turns into a Heaviside step function.
* @param data_leakage_handling Data leakage handling strategy used to generate the encoding. Supported options are:
* 1) "none" (default) - no holdout, using the entire training frame.
* 2) "leave_one_out" - current row's response value is subtracted from the per-level
* frequencies pre-calculated on the entire training frame.
* 3) "k_fold" - encodings for a fold are generated based on out-of-fold data.
* @param noise The amount of noise to add to the encoded column. Use 0 to disable noise, and -1 (=AUTO) to let the
* algorithm determine a reasonable amount of noise.
* @param seed Seed used to generate the noise. By default, the seed is chosen randomly.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Form-encoded POST that validates TargetEncoder builder parameters without training;
  // same @Field set as trainTargetencoder, but targets the /parameters sub-endpoint.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/targetencoder/parameters")
  Call<TargetEncoderV3> validate_parametersTargetencoder(
    @Field("columns_to_encode") String[][] columns_to_encode,
    @Field("keep_original_categorical_columns") boolean keep_original_categorical_columns,
    @Field("blending") boolean blending,
    @Field("inflection_point") double inflection_point,
    @Field("smoothing") double smoothing,
    @Field("data_leakage_handling") H2otargetencodingTargetEncoderModelDataLeakageHandlingStrategy data_leakage_handling,
    @Field("noise") double noise,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Validate a set of TargetEncoder model builder parameters. Convenience overload
   * that posts no form fields, so every parameter keeps its default value.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/targetencoder/parameters")
  Call<TargetEncoderV3> validate_parametersTargetencoder();
/**
* Train a DeepLearning model.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs.
* @param activation Activation function.
* @param hidden Hidden layer sizes (e.g. [100, 100]).
* @param epochs How many times the dataset should be iterated (streamed), can be fractional.
* @param train_samples_per_iteration Number of training samples (globally) per MapReduce iteration. Special values
* are 0: one epoch, -1: all available data (e.g., replicated training data), -2:
* automatic.
* @param target_ratio_comm_to_comp Target ratio of communication overhead to computation. Only for multi-node
* operation and train_samples_per_iteration = -2 (auto-tuning).
* @param seed Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded.
* @param adaptive_rate Adaptive learning rate.
* @param rho Adaptive learning rate time decay factor (similarity to prior updates).
* @param epsilon Adaptive learning rate smoothing factor (to avoid divisions by zero and allow progress).
* @param rate Learning rate (higher => less stable, lower => slower convergence).
* @param rate_annealing Learning rate annealing: rate / (1 + rate_annealing * samples).
   * @param rate_decay Learning rate decay factor between layers (N-th layer: rate * rate_decay ^ (n - 1)).
* @param momentum_start Initial momentum at the beginning of training (try 0.5).
* @param momentum_ramp Number of training samples for which momentum increases.
* @param momentum_stable Final momentum after the ramp is over (try 0.99).
* @param nesterov_accelerated_gradient Use Nesterov accelerated gradient (recommended).
* @param input_dropout_ratio Input layer dropout ratio (can improve generalization, try 0.1 or 0.2).
* @param hidden_dropout_ratios Hidden layer dropout ratios (can improve generalization), specify one value per
* hidden layer, defaults to 0.5.
* @param l1 L1 regularization (can add stability and improve generalization, causes many weights to become 0).
   * @param l2 L2 regularization (can add stability and improve generalization, causes many weights to be small).
* @param max_w2 Constraint for squared sum of incoming weights per unit (e.g. for Rectifier).
* @param initial_weight_distribution Initial weight distribution.
* @param initial_weight_scale Uniform: -value...value, Normal: stddev.
* @param initial_weights A list of H2OFrame ids to initialize the weight matrices of this model with.
* @param initial_biases A list of H2OFrame ids to initialize the bias vectors of this model with.
* @param loss Loss function.
* @param score_interval Shortest time interval (in seconds) between model scoring.
* @param score_training_samples Number of training set samples for scoring (0 for all).
* @param score_validation_samples Number of validation set samples for scoring (0 for all).
* @param score_duty_cycle Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring).
* @param classification_stop Stopping criterion for classification error fraction on training data (-1 to disable).
* @param regression_stop Stopping criterion for regression error (MSE) on training data (-1 to disable).
* @param quiet_mode Enable quiet mode for less output to standard output.
* @param score_validation_sampling Method used to sample validation dataset for scoring.
* @param overwrite_with_best_model If enabled, override the final model with the best model found during training.
* @param autoencoder Auto-Encoder.
* @param use_all_factor_levels Use all factor levels of categorical variables. Otherwise, the first factor level is
* omitted (without loss of accuracy). Useful for variable importances and auto-enabled
* for autoencoder.
* @param standardize If enabled, automatically standardize the data. If disabled, the user must provide properly
* scaled input data.
* @param diagnostics Enable diagnostics for hidden layers.
* @param variable_importances Compute variable importances for input features (Gedeon method) - can be slow for
* large networks.
* @param fast_mode Enable fast mode (minor approximation in back-propagation).
* @param force_load_balance Force extra load balancing to increase training speed for small datasets (to keep all
* cores busy).
* @param replicate_training_data Replicate the entire training dataset onto every node for faster training on small
* datasets.
* @param single_node_mode Run on a single node for fine-tuning of model parameters.
* @param shuffle_training_data Enable shuffling of training data (recommended if training data is replicated and
   *                              train_samples_per_iteration is close to #nodes x #rows, or if using
* balance_classes).
* @param missing_values_handling Handling of missing values. Either MeanImputation or Skip.
* @param sparse Sparse data handling (more efficient for data with lots of 0 values).
* @param col_major #DEPRECATED Use a column major weight matrix for input layer. Can speed up forward propagation,
* but might slow down backpropagation.
* @param average_activation Average activation for sparse auto-encoder. #Experimental
* @param sparsity_beta Sparsity regularization. #Experimental
* @param max_categorical_features Max. number of categorical features, enforced via hashing. #Experimental
* @param reproducible Force reproducibility on small data (will be slow - only uses 1 thread).
* @param export_weights_and_biases Whether to export Neural Network weights and biases to H2O Frames.
* @param mini_batch_size Mini-batch size (smaller leads to better fit, larger can speed up and generalize better).
* @param elastic_averaging Elastic averaging between compute nodes can improve distributed model convergence.
* #Experimental
* @param elastic_averaging_moving_rate Elastic averaging moving rate (only if elastic averaging is enabled).
* @param elastic_averaging_regularization Elastic averaging regularization strength (only if elastic averaging is
* enabled).
* @param pretrained_autoencoder Pretrained autoencoder model to initialize this model with.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Form-encoded POST to the DeepLearning model-builder endpoint. Each @Field name
  // maps 1:1 to an H2O REST API parameter (see the Javadoc above for descriptions);
  // the call is asynchronous via Retrofit's Call wrapper.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/deeplearning")
  Call<DeepLearningV3> trainDeeplearning(
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("activation") DeepLearningActivation activation,
    @Field("hidden") int[] hidden,
    @Field("epochs") double epochs,
    @Field("train_samples_per_iteration") long train_samples_per_iteration,
    @Field("target_ratio_comm_to_comp") double target_ratio_comm_to_comp,
    @Field("seed") long seed,
    @Field("adaptive_rate") boolean adaptive_rate,
    @Field("rho") double rho,
    @Field("epsilon") double epsilon,
    @Field("rate") double rate,
    @Field("rate_annealing") double rate_annealing,
    @Field("rate_decay") double rate_decay,
    @Field("momentum_start") double momentum_start,
    @Field("momentum_ramp") double momentum_ramp,
    @Field("momentum_stable") double momentum_stable,
    @Field("nesterov_accelerated_gradient") boolean nesterov_accelerated_gradient,
    @Field("input_dropout_ratio") double input_dropout_ratio,
    @Field("hidden_dropout_ratios") double[] hidden_dropout_ratios,
    @Field("l1") double l1,
    @Field("l2") double l2,
    @Field("max_w2") float max_w2,
    @Field("initial_weight_distribution") DeepLearningInitialWeightDistribution initial_weight_distribution,
    @Field("initial_weight_scale") double initial_weight_scale,
    @Field("initial_weights") String[] initial_weights,
    @Field("initial_biases") String[] initial_biases,
    @Field("loss") DeepLearningLoss loss,
    @Field("score_interval") double score_interval,
    @Field("score_training_samples") long score_training_samples,
    @Field("score_validation_samples") long score_validation_samples,
    @Field("score_duty_cycle") double score_duty_cycle,
    @Field("classification_stop") double classification_stop,
    @Field("regression_stop") double regression_stop,
    @Field("quiet_mode") boolean quiet_mode,
    @Field("score_validation_sampling") DeepLearningClassSamplingMethod score_validation_sampling,
    @Field("overwrite_with_best_model") boolean overwrite_with_best_model,
    @Field("autoencoder") boolean autoencoder,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("standardize") boolean standardize,
    @Field("diagnostics") boolean diagnostics,
    @Field("variable_importances") boolean variable_importances,
    @Field("fast_mode") boolean fast_mode,
    @Field("force_load_balance") boolean force_load_balance,
    @Field("replicate_training_data") boolean replicate_training_data,
    @Field("single_node_mode") boolean single_node_mode,
    @Field("shuffle_training_data") boolean shuffle_training_data,
    @Field("missing_values_handling") DeepLearningMissingValuesHandling missing_values_handling,
    @Field("sparse") boolean sparse,
    @Field("col_major") boolean col_major,
    @Field("average_activation") double average_activation,
    @Field("sparsity_beta") double sparsity_beta,
    @Field("max_categorical_features") int max_categorical_features,
    @Field("reproducible") boolean reproducible,
    @Field("export_weights_and_biases") boolean export_weights_and_biases,
    @Field("mini_batch_size") int mini_batch_size,
    @Field("elastic_averaging") boolean elastic_averaging,
    @Field("elastic_averaging_moving_rate") double elastic_averaging_moving_rate,
    @Field("elastic_averaging_regularization") double elastic_averaging_regularization,
    @Field("pretrained_autoencoder") String pretrained_autoencoder,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Train a DeepLearning model. Convenience overload that posts no form fields,
   * so every parameter keeps its default value.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/deeplearning")
  Call<DeepLearningV3> trainDeeplearning();
/**
* Validate a set of DeepLearning model builder parameters.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs.
* @param activation Activation function.
* @param hidden Hidden layer sizes (e.g. [100, 100]).
* @param epochs How many times the dataset should be iterated (streamed), can be fractional.
* @param train_samples_per_iteration Number of training samples (globally) per MapReduce iteration. Special values
* are 0: one epoch, -1: all available data (e.g., replicated training data), -2:
* automatic.
* @param target_ratio_comm_to_comp Target ratio of communication overhead to computation. Only for multi-node
* operation and train_samples_per_iteration = -2 (auto-tuning).
* @param seed Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded.
* @param adaptive_rate Adaptive learning rate.
* @param rho Adaptive learning rate time decay factor (similarity to prior updates).
* @param epsilon Adaptive learning rate smoothing factor (to avoid divisions by zero and allow progress).
* @param rate Learning rate (higher => less stable, lower => slower convergence).
* @param rate_annealing Learning rate annealing: rate / (1 + rate_annealing * samples).
     * @param rate_decay Learning rate decay factor between layers (N-th layer: rate * rate_decay ^ (n - 1)).
* @param momentum_start Initial momentum at the beginning of training (try 0.5).
* @param momentum_ramp Number of training samples for which momentum increases.
* @param momentum_stable Final momentum after the ramp is over (try 0.99).
* @param nesterov_accelerated_gradient Use Nesterov accelerated gradient (recommended).
* @param input_dropout_ratio Input layer dropout ratio (can improve generalization, try 0.1 or 0.2).
* @param hidden_dropout_ratios Hidden layer dropout ratios (can improve generalization), specify one value per
* hidden layer, defaults to 0.5.
* @param l1 L1 regularization (can add stability and improve generalization, causes many weights to become 0).
     * @param l2 L2 regularization (can add stability and improve generalization, causes many weights to be small).
* @param max_w2 Constraint for squared sum of incoming weights per unit (e.g. for Rectifier).
* @param initial_weight_distribution Initial weight distribution.
* @param initial_weight_scale Uniform: -value...value, Normal: stddev.
* @param initial_weights A list of H2OFrame ids to initialize the weight matrices of this model with.
* @param initial_biases A list of H2OFrame ids to initialize the bias vectors of this model with.
* @param loss Loss function.
* @param score_interval Shortest time interval (in seconds) between model scoring.
* @param score_training_samples Number of training set samples for scoring (0 for all).
* @param score_validation_samples Number of validation set samples for scoring (0 for all).
* @param score_duty_cycle Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring).
* @param classification_stop Stopping criterion for classification error fraction on training data (-1 to disable).
* @param regression_stop Stopping criterion for regression error (MSE) on training data (-1 to disable).
* @param quiet_mode Enable quiet mode for less output to standard output.
* @param score_validation_sampling Method used to sample validation dataset for scoring.
* @param overwrite_with_best_model If enabled, override the final model with the best model found during training.
* @param autoencoder Auto-Encoder.
* @param use_all_factor_levels Use all factor levels of categorical variables. Otherwise, the first factor level is
* omitted (without loss of accuracy). Useful for variable importances and auto-enabled
* for autoencoder.
* @param standardize If enabled, automatically standardize the data. If disabled, the user must provide properly
* scaled input data.
* @param diagnostics Enable diagnostics for hidden layers.
* @param variable_importances Compute variable importances for input features (Gedeon method) - can be slow for
* large networks.
* @param fast_mode Enable fast mode (minor approximation in back-propagation).
* @param force_load_balance Force extra load balancing to increase training speed for small datasets (to keep all
* cores busy).
* @param replicate_training_data Replicate the entire training dataset onto every node for faster training on small
* datasets.
* @param single_node_mode Run on a single node for fine-tuning of model parameters.
* @param shuffle_training_data Enable shuffling of training data (recommended if training data is replicated and
     *                              train_samples_per_iteration is close to #nodes x #rows, or if using
* balance_classes).
* @param missing_values_handling Handling of missing values. Either MeanImputation or Skip.
* @param sparse Sparse data handling (more efficient for data with lots of 0 values).
* @param col_major #DEPRECATED Use a column major weight matrix for input layer. Can speed up forward propagation,
* but might slow down backpropagation.
* @param average_activation Average activation for sparse auto-encoder. #Experimental
* @param sparsity_beta Sparsity regularization. #Experimental
* @param max_categorical_features Max. number of categorical features, enforced via hashing. #Experimental
* @param reproducible Force reproducibility on small data (will be slow - only uses 1 thread).
* @param export_weights_and_biases Whether to export Neural Network weights and biases to H2O Frames.
* @param mini_batch_size Mini-batch size (smaller leads to better fit, larger can speed up and generalize better).
* @param elastic_averaging Elastic averaging between compute nodes can improve distributed model convergence.
* #Experimental
* @param elastic_averaging_moving_rate Elastic averaging moving rate (only if elastic averaging is enabled).
* @param elastic_averaging_regularization Elastic averaging regularization strength (only if elastic averaging is
* enabled).
* @param pretrained_autoencoder Pretrained autoencoder model to initialize this model with.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
     * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
    // Validates a DeepLearning parameter set server-side without training a model;
    // the returned DeepLearningV3 carries any validation messages.
    @FormUrlEncoded
    @POST("/3/ModelBuilders/deeplearning/parameters")
    Call<DeepLearningV3> validate_parametersDeeplearning(
      // --- DeepLearning algorithm-specific parameters ---
      @Field("balance_classes") boolean balance_classes,
      @Field("class_sampling_factors") float[] class_sampling_factors,
      @Field("max_after_balance_size") float max_after_balance_size,
      @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
      @Field("activation") DeepLearningActivation activation,
      @Field("hidden") int[] hidden,
      @Field("epochs") double epochs,
      @Field("train_samples_per_iteration") long train_samples_per_iteration,
      @Field("target_ratio_comm_to_comp") double target_ratio_comm_to_comp,
      @Field("seed") long seed,
      // Adaptive learning-rate (ADADELTA) controls; rate* fields apply when adaptive_rate is off.
      @Field("adaptive_rate") boolean adaptive_rate,
      @Field("rho") double rho,
      @Field("epsilon") double epsilon,
      @Field("rate") double rate,
      @Field("rate_annealing") double rate_annealing,
      @Field("rate_decay") double rate_decay,
      @Field("momentum_start") double momentum_start,
      @Field("momentum_ramp") double momentum_ramp,
      @Field("momentum_stable") double momentum_stable,
      @Field("nesterov_accelerated_gradient") boolean nesterov_accelerated_gradient,
      // Regularization and weight-initialization options.
      @Field("input_dropout_ratio") double input_dropout_ratio,
      @Field("hidden_dropout_ratios") double[] hidden_dropout_ratios,
      @Field("l1") double l1,
      @Field("l2") double l2,
      @Field("max_w2") float max_w2,
      @Field("initial_weight_distribution") DeepLearningInitialWeightDistribution initial_weight_distribution,
      @Field("initial_weight_scale") double initial_weight_scale,
      @Field("initial_weights") String[] initial_weights,
      @Field("initial_biases") String[] initial_biases,
      @Field("loss") DeepLearningLoss loss,
      // Scoring cadence and convergence controls.
      @Field("score_interval") double score_interval,
      @Field("score_training_samples") long score_training_samples,
      @Field("score_validation_samples") long score_validation_samples,
      @Field("score_duty_cycle") double score_duty_cycle,
      @Field("classification_stop") double classification_stop,
      @Field("regression_stop") double regression_stop,
      @Field("quiet_mode") boolean quiet_mode,
      @Field("score_validation_sampling") DeepLearningClassSamplingMethod score_validation_sampling,
      @Field("overwrite_with_best_model") boolean overwrite_with_best_model,
      @Field("autoencoder") boolean autoencoder,
      @Field("use_all_factor_levels") boolean use_all_factor_levels,
      @Field("standardize") boolean standardize,
      @Field("diagnostics") boolean diagnostics,
      @Field("variable_importances") boolean variable_importances,
      @Field("fast_mode") boolean fast_mode,
      // Distributed-training behavior (load balancing, replication, shuffling).
      @Field("force_load_balance") boolean force_load_balance,
      @Field("replicate_training_data") boolean replicate_training_data,
      @Field("single_node_mode") boolean single_node_mode,
      @Field("shuffle_training_data") boolean shuffle_training_data,
      @Field("missing_values_handling") DeepLearningMissingValuesHandling missing_values_handling,
      @Field("sparse") boolean sparse,
      @Field("col_major") boolean col_major,
      @Field("average_activation") double average_activation,
      @Field("sparsity_beta") double sparsity_beta,
      @Field("max_categorical_features") int max_categorical_features,
      @Field("reproducible") boolean reproducible,
      @Field("export_weights_and_biases") boolean export_weights_and_biases,
      @Field("mini_batch_size") int mini_batch_size,
      @Field("elastic_averaging") boolean elastic_averaging,
      @Field("elastic_averaging_moving_rate") double elastic_averaging_moving_rate,
      @Field("elastic_averaging_regularization") double elastic_averaging_regularization,
      @Field("pretrained_autoencoder") String pretrained_autoencoder,
      // --- Common model-builder parameters (shared across all H2O algorithms) ---
      @Field("model_id") String model_id,
      @Field("training_frame") String training_frame,
      @Field("validation_frame") String validation_frame,
      @Field("nfolds") int nfolds,
      @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
      @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
      @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
      @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
      @Field("distribution") GenmodelutilsDistributionFamily distribution,
      @Field("tweedie_power") double tweedie_power,
      @Field("quantile_alpha") double quantile_alpha,
      @Field("huber_alpha") double huber_alpha,
      @Field("response_column") String response_column,
      @Field("weights_column") String weights_column,
      @Field("offset_column") String offset_column,
      @Field("fold_column") String fold_column,
      @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
      @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
      @Field("max_categorical_levels") int max_categorical_levels,
      @Field("ignored_columns") String[] ignored_columns,
      @Field("ignore_const_cols") boolean ignore_const_cols,
      @Field("score_each_iteration") boolean score_each_iteration,
      @Field("checkpoint") String checkpoint,
      // Early-stopping and runtime limits.
      @Field("stopping_rounds") int stopping_rounds,
      @Field("max_runtime_secs") double max_runtime_secs,
      @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
      @Field("stopping_tolerance") double stopping_tolerance,
      @Field("gainslift_bins") int gainslift_bins,
      @Field("custom_metric_func") String custom_metric_func,
      @Field("custom_distribution_func") String custom_distribution_func,
      @Field("export_checkpoints_dir") String export_checkpoints_dir,
      @Field("auc_type") MultinomialAucType auc_type
    );
    /**
     * Validate DeepLearning builder parameters sending no fields, so the server
     * checks only its default parameter set (no-argument overload of the
     * fully-parameterized validate_parametersDeeplearning call).
     */
    @FormUrlEncoded
    @POST("/3/ModelBuilders/deeplearning/parameters")
    Call<DeepLearningV3> validate_parametersDeeplearning();
/**
* Train a GLM model.
* @param seed Seed for pseudo random number generator (if applicable).
* @param family Family. Use binomial for classification with logistic regression, others are for regression
* problems.
* @param tweedie_variance_power Tweedie variance power
* @param dispersion_learning_rate Dispersion learning rate is only valid for tweedie family dispersion parameter
* estimation using ml. It must be > 0. This controls how much the dispersion
* parameter estimate is to be changed when the calculated loglikelihood actually
* decreases with the new dispersion. In this case, instead of setting new
* dispersion = dispersion + change, we set new dispersion = dispersion +
* dispersion_learning_rate * change. Defaults to 0.5.
* @param tweedie_link_power Tweedie link power.
* @param theta Theta
     * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
* with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
* datasets with many columns.
* @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
* alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
* specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
* 0.5 otherwise.
* @param lambda Regularization strength
* @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min.
* @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided).
* @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
     *                 set to True, the value of nlambdas is set to 30 (fewer lambdas are needed for ridge regression)
* otherwise it is set to 100.
* @param score_iteration_interval Perform scoring for every score_iteration_interval iterations.
* @param standardize Standardize numeric columns to have zero mean and unit variance.
* @param cold_start Only applicable to multiple alpha/lambda values. If false, build the next model for next set
* of alpha/lambda values starting from the values provided by current model. If true will start
* GLM model from scratch.
* @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
* @param influence If set to dfbetas will calculate the difference in beta when a datarow is included and excluded
* in the dataset.
* @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
* of the training/validation frame, use with conjunction missing_values_handling = PlugValues).
* @param non_negative Restrict coefficients (not intercept) to be non-negative.
* @param max_iterations Maximum number of iterations. Value should >=1. A value of 0 is only set when only the
* model coefficient names and model coefficient dimensions are needed.
     * @param beta_epsilon Converge if beta changes less (using L-infinity norm) than beta epsilon. ONLY applies to IRLSM
* solver.
* @param objective_epsilon Converge if objective value changes less than this. Default (of -1.0) indicates: If
* lambda_search is set to True the value of objective_epsilon is set to .0001. If the
* lambda_search is set to False and lambda is equal to zero, the value of
* objective_epsilon is set to .000001, for any other value of lambda the default value of
* objective_epsilon is set to .0001.
* @param gradient_epsilon Converge if objective changes less (using L-infinity norm) than this, ONLY applies to
* L-BFGS solver. Default (of -1.0) indicates: If lambda_search is set to False and lambda
* is equal to zero, the default value of gradient_epsilon is equal to .000001, otherwise
* the default value is .0001. If lambda_search is set to True, the conditional values above
* are 1E-8 and 1E-6 respectively.
* @param obj_reg Likelihood divider in objective value computation, default (of -1.0) will set it to 1/nobs.
* @param link Link function.
* @param dispersion_parameter_method Method used to estimate the dispersion parameter for Tweedie, Gamma and
* Negative Binomial only.
* @param startval double array to initialize coefficients for GLM. If standardize is true, the standardized
* coefficients should be used. Otherwise, use the regular coefficients.
* @param calc_like if true, will return likelihood function value.
* @param generate_variable_inflation_factors if true, will generate variable inflation factors for numerical
* predictors. Default to false.
* @param intercept Include constant term in the model
* @param build_null_model If set, will build a model with only the intercept. Default to false.
* @param fix_dispersion_parameter Only used for Tweedie, Gamma and Negative Binomial GLM. If set, will use the
     *                                  dispersion parameter in init_dispersion_parameter as the standard error and use
* it to calculate the p-values. Default to false.
* @param init_dispersion_parameter Only used for Tweedie, Gamma and Negative Binomial GLM. Store the initial value
* of dispersion parameter. If fix_dispersion_parameter is set, this value will be
* used in the calculation of p-values.
* @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
* and the mean of response does not reflect reality.
* @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
* lambda that drives all coefficients to zero). Default indicates: if the number of
* observations is greater than the number of variables, then lambda_min_ratio is set to
* 0.0001; if the number of observations is less than the number of variables, then
* lambda_min_ratio is set to 0.01.
* @param beta_constraints Beta constraints
* @param linear_constraints Linear constraints: used to specify linear constraints involving more than one
* coefficients in standard form. It is only supported for solver IRLSM. It contains
* four columns: names (strings for coefficient names or constant), values, types (
* strings of 'Equal' or 'LessThanEqual'), constraint_numbers (0 for first linear
* constraint, 1 for second linear constraint, ...).
* @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
* to prevent expensive model building with many predictors. Default indicates: If the
* IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
* is set to 100000000.
* @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
* for the list.
* @param interaction_pairs A list of pairwise (first order) column interactions.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs.
* @param compute_p_values Request p-values computation, p-values work only with IRLSM solver.
* @param fix_tweedie_variance_power If true, will fix tweedie variance power value to the value set in
* tweedie_variance_power.
* @param remove_collinear_columns In case of linearly dependent columns, remove the dependent columns.
* @param dispersion_epsilon If changes in dispersion parameter estimation or loglikelihood value is smaller than
* dispersion_epsilon, will break out of the dispersion parameter estimation loop using
* maximum likelihood.
* @param tweedie_epsilon In estimating tweedie dispersion parameter using maximum likelihood, this is used to
* choose the lower and upper indices in the approximating of the infinite series summation.
* @param max_iterations_dispersion Control the maximum number of iterations in the dispersion parameter estimation
* loop using maximum likelihood.
* @param generate_scoring_history If set to true, will generate scoring history for GLM. This may significantly
* slow down the algo.
* @param init_optimal_glm If true, will initialize coefficients with values derived from GLM runs without linear
* constraints. Only available for linear constraints.
* @param separate_linear_beta If true, will keep the beta constraints and linear constraints separate. After new
* coefficients are found, first beta constraints will be applied followed by the
* application of linear constraints. Note that the beta constraints in this case will
* not be part of the objective function. If false, will combine the beta and linear
* constraints.
* @param constraint_eta0 For constrained GLM only. It affects the setting of eta_k+1=eta_0/power(ck+1, alpha).
* @param constraint_tau For constrained GLM only. It affects the setting of c_k+1=tau*c_k.
* @param constraint_alpha For constrained GLM only. It affects the setting of eta_k = eta_0/pow(c_0, alpha).
* @param constraint_beta For constrained GLM only. It affects the setting of eta_k+1 = eta_k/pow(c_k, beta).
* @param constraint_c0 For constrained GLM only. It affects the initial setting of epsilon_k = 1/c_0.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/glm")
Call<GLMV3> trainGlm(
@Field("seed") long seed,
@Field("family") GLMFamily family,
@Field("tweedie_variance_power") double tweedie_variance_power,
@Field("dispersion_learning_rate") double dispersion_learning_rate,
@Field("tweedie_link_power") double tweedie_link_power,
@Field("theta") double theta,
@Field("solver") GLMSolver solver,
@Field("alpha") double[] alpha,
@Field("lambda") double[] lambda,
@Field("lambda_search") boolean lambda_search,
@Field("early_stopping") boolean early_stopping,
@Field("nlambdas") int nlambdas,
@Field("score_iteration_interval") int score_iteration_interval,
@Field("standardize") boolean standardize,
@Field("cold_start") boolean cold_start,
@Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
@Field("influence") GLMInfluence influence,
@Field("plug_values") String plug_values,
@Field("non_negative") boolean non_negative,
@Field("max_iterations") int max_iterations,
@Field("beta_epsilon") double beta_epsilon,
@Field("objective_epsilon") double objective_epsilon,
@Field("gradient_epsilon") double gradient_epsilon,
@Field("obj_reg") double obj_reg,
@Field("link") GLMLink link,
@Field("dispersion_parameter_method") GLMDispersionMethod dispersion_parameter_method,
@Field("startval") double[] startval,
@Field("calc_like") boolean calc_like,
@Field("generate_variable_inflation_factors") boolean generate_variable_inflation_factors,
@Field("intercept") boolean intercept,
@Field("build_null_model") boolean build_null_model,
@Field("fix_dispersion_parameter") boolean fix_dispersion_parameter,
@Field("init_dispersion_parameter") double init_dispersion_parameter,
@Field("prior") double prior,
@Field("lambda_min_ratio") double lambda_min_ratio,
@Field("beta_constraints") String beta_constraints,
@Field("linear_constraints") String linear_constraints,
@Field("max_active_predictors") int max_active_predictors,
@Field("interactions") String[] interactions,
@Field("interaction_pairs") StringPairV3[] interaction_pairs,
@Field("balance_classes") boolean balance_classes,
@Field("class_sampling_factors") float[] class_sampling_factors,
@Field("max_after_balance_size") float max_after_balance_size,
@Field("max_confusion_matrix_size") int max_confusion_matrix_size,
@Field("compute_p_values") boolean compute_p_values,
@Field("fix_tweedie_variance_power") boolean fix_tweedie_variance_power,
@Field("remove_collinear_columns") boolean remove_collinear_columns,
@Field("dispersion_epsilon") double dispersion_epsilon,
@Field("tweedie_epsilon") double tweedie_epsilon,
@Field("max_iterations_dispersion") int max_iterations_dispersion,
@Field("generate_scoring_history") boolean generate_scoring_history,
@Field("init_optimal_glm") boolean init_optimal_glm,
@Field("separate_linear_beta") boolean separate_linear_beta,
@Field("constraint_eta0") double constraint_eta0,
@Field("constraint_tau") double constraint_tau,
@Field("constraint_alpha") double constraint_alpha,
@Field("constraint_beta") double constraint_beta,
@Field("constraint_c0") double constraint_c0,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
/**
 * Train a GLM model using server-side default values for every parameter.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/glm")
Call<GLMV3> trainGlm();
/**
* Validate a set of GLM model builder parameters.
* @param seed Seed for pseudo random number generator (if applicable).
* @param family Family. Use binomial for classification with logistic regression, others are for regression
* problems.
* @param tweedie_variance_power Tweedie variance power
* @param dispersion_learning_rate Dispersion learning rate is only valid for tweedie family dispersion parameter
* estimation using ml. It must be > 0. This controls how much the dispersion
* parameter estimate is to be changed when the calculated loglikelihood actually
* decreases with the new dispersion. In this case, instead of setting new
* dispersion = dispersion + change, we set new dispersion = dispersion +
* dispersion_learning_rate * change. Defaults to 0.5.
* @param tweedie_link_power Tweedie link power.
* @param theta Theta
 * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
* with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
* datasets with many columns.
* @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
* alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
* specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
* 0.5 otherwise.
* @param lambda Regularization strength
* @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min.
* @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided).
* @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
 *                 set to True, the value of nlambdas is set to 30 (fewer lambdas are needed for ridge regression)
* otherwise it is set to 100.
* @param score_iteration_interval Perform scoring for every score_iteration_interval iterations.
* @param standardize Standardize numeric columns to have zero mean and unit variance.
* @param cold_start Only applicable to multiple alpha/lambda values. If false, build the next model for next set
* of alpha/lambda values starting from the values provided by current model. If true will start
* GLM model from scratch.
* @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
* @param influence If set to dfbetas will calculate the difference in beta when a datarow is included and excluded
* in the dataset.
* @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
* of the training/validation frame, use with conjunction missing_values_handling = PlugValues).
* @param non_negative Restrict coefficients (not intercept) to be non-negative.
 * @param max_iterations Maximum number of iterations. Value should be >= 1. A value of 0 is only set when only the
* model coefficient names and model coefficient dimensions are needed.
 * @param beta_epsilon Converge if beta changes less (using L-infinity norm) than beta epsilon. ONLY applies to IRLSM
* solver.
* @param objective_epsilon Converge if objective value changes less than this. Default (of -1.0) indicates: If
* lambda_search is set to True the value of objective_epsilon is set to .0001. If the
* lambda_search is set to False and lambda is equal to zero, the value of
* objective_epsilon is set to .000001, for any other value of lambda the default value of
* objective_epsilon is set to .0001.
* @param gradient_epsilon Converge if objective changes less (using L-infinity norm) than this, ONLY applies to
* L-BFGS solver. Default (of -1.0) indicates: If lambda_search is set to False and lambda
* is equal to zero, the default value of gradient_epsilon is equal to .000001, otherwise
* the default value is .0001. If lambda_search is set to True, the conditional values above
* are 1E-8 and 1E-6 respectively.
* @param obj_reg Likelihood divider in objective value computation, default (of -1.0) will set it to 1/nobs.
* @param link Link function.
* @param dispersion_parameter_method Method used to estimate the dispersion parameter for Tweedie, Gamma and
* Negative Binomial only.
* @param startval double array to initialize coefficients for GLM. If standardize is true, the standardized
* coefficients should be used. Otherwise, use the regular coefficients.
* @param calc_like if true, will return likelihood function value.
* @param generate_variable_inflation_factors if true, will generate variable inflation factors for numerical
* predictors. Default to false.
* @param intercept Include constant term in the model
* @param build_null_model If set, will build a model with only the intercept. Default to false.
* @param fix_dispersion_parameter Only used for Tweedie, Gamma and Negative Binomial GLM. If set, will use the
 *                                 dispersion parameter in init_dispersion_parameter as the standard error and use
* it to calculate the p-values. Default to false.
* @param init_dispersion_parameter Only used for Tweedie, Gamma and Negative Binomial GLM. Store the initial value
* of dispersion parameter. If fix_dispersion_parameter is set, this value will be
* used in the calculation of p-values.
* @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
* and the mean of response does not reflect reality.
* @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
* lambda that drives all coefficients to zero). Default indicates: if the number of
* observations is greater than the number of variables, then lambda_min_ratio is set to
* 0.0001; if the number of observations is less than the number of variables, then
* lambda_min_ratio is set to 0.01.
* @param beta_constraints Beta constraints
* @param linear_constraints Linear constraints: used to specify linear constraints involving more than one
* coefficients in standard form. It is only supported for solver IRLSM. It contains
* four columns: names (strings for coefficient names or constant), values, types (
* strings of 'Equal' or 'LessThanEqual'), constraint_numbers (0 for first linear
* constraint, 1 for second linear constraint, ...).
* @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
* to prevent expensive model building with many predictors. Default indicates: If the
* IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
* is set to 100000000.
* @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
* for the list.
* @param interaction_pairs A list of pairwise (first order) column interactions.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs.
* @param compute_p_values Request p-values computation, p-values work only with IRLSM solver.
* @param fix_tweedie_variance_power If true, will fix tweedie variance power value to the value set in
* tweedie_variance_power.
* @param remove_collinear_columns In case of linearly dependent columns, remove the dependent columns.
* @param dispersion_epsilon If changes in dispersion parameter estimation or loglikelihood value is smaller than
* dispersion_epsilon, will break out of the dispersion parameter estimation loop using
* maximum likelihood.
* @param tweedie_epsilon In estimating tweedie dispersion parameter using maximum likelihood, this is used to
* choose the lower and upper indices in the approximating of the infinite series summation.
* @param max_iterations_dispersion Control the maximum number of iterations in the dispersion parameter estimation
* loop using maximum likelihood.
* @param generate_scoring_history If set to true, will generate scoring history for GLM. This may significantly
* slow down the algo.
* @param init_optimal_glm If true, will initialize coefficients with values derived from GLM runs without linear
* constraints. Only available for linear constraints.
* @param separate_linear_beta If true, will keep the beta constraints and linear constraints separate. After new
* coefficients are found, first beta constraints will be applied followed by the
* application of linear constraints. Note that the beta constraints in this case will
* not be part of the objective function. If false, will combine the beta and linear
* constraints.
* @param constraint_eta0 For constrained GLM only. It affects the setting of eta_k+1=eta_0/power(ck+1, alpha).
* @param constraint_tau For constrained GLM only. It affects the setting of c_k+1=tau*c_k.
* @param constraint_alpha For constrained GLM only. It affects the setting of eta_k = eta_0/pow(c_0, alpha).
* @param constraint_beta For constrained GLM only. It affects the setting of eta_k+1 = eta_k/pow(c_k, beta).
* @param constraint_c0 For constrained GLM only. It affects the initial setting of epsilon_k = 1/c_0.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/glm/parameters")
Call<GLMV3> validate_parametersGlm(
    // --- GLM-specific parameters ---
    @Field("seed") long seed,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("dispersion_learning_rate") double dispersion_learning_rate,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("solver") GLMSolver solver,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("lambda_search") boolean lambda_search,
    @Field("early_stopping") boolean early_stopping,
    @Field("nlambdas") int nlambdas,
    @Field("score_iteration_interval") int score_iteration_interval,
    @Field("standardize") boolean standardize,
    @Field("cold_start") boolean cold_start,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("influence") GLMInfluence influence,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("max_iterations") int max_iterations,
    @Field("beta_epsilon") double beta_epsilon,
    @Field("objective_epsilon") double objective_epsilon,
    @Field("gradient_epsilon") double gradient_epsilon,
    @Field("obj_reg") double obj_reg,
    @Field("link") GLMLink link,
    @Field("dispersion_parameter_method") GLMDispersionMethod dispersion_parameter_method,
    @Field("startval") double[] startval,
    @Field("calc_like") boolean calc_like,
    @Field("generate_variable_inflation_factors") boolean generate_variable_inflation_factors,
    @Field("intercept") boolean intercept,
    @Field("build_null_model") boolean build_null_model,
    @Field("fix_dispersion_parameter") boolean fix_dispersion_parameter,
    @Field("init_dispersion_parameter") double init_dispersion_parameter,
    @Field("prior") double prior,
    @Field("lambda_min_ratio") double lambda_min_ratio,
    @Field("beta_constraints") String beta_constraints,
    @Field("linear_constraints") String linear_constraints,
    @Field("max_active_predictors") int max_active_predictors,
    @Field("interactions") String[] interactions,
    @Field("interaction_pairs") StringPairV3[] interaction_pairs,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("fix_tweedie_variance_power") boolean fix_tweedie_variance_power,
    @Field("remove_collinear_columns") boolean remove_collinear_columns,
    @Field("dispersion_epsilon") double dispersion_epsilon,
    @Field("tweedie_epsilon") double tweedie_epsilon,
    @Field("max_iterations_dispersion") int max_iterations_dispersion,
    @Field("generate_scoring_history") boolean generate_scoring_history,
    @Field("init_optimal_glm") boolean init_optimal_glm,
    @Field("separate_linear_beta") boolean separate_linear_beta,
    @Field("constraint_eta0") double constraint_eta0,
    @Field("constraint_tau") double constraint_tau,
    @Field("constraint_alpha") double constraint_alpha,
    @Field("constraint_beta") double constraint_beta,
    @Field("constraint_c0") double constraint_c0,
    // --- common model-builder parameters (shared by the other ModelBuilders endpoints in this file) ---
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
/**
 * Validate GLM model builder parameters using server-side default values for every parameter.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/glm/parameters")
Call<GLMV3> validate_parametersGlm();
/**
* Train a GLRM model.
* @param transform Transformation of training data
* @param k Rank of matrix approximation
* @param loss Numeric loss function
* @param multi_loss Categorical loss function
* @param loss_by_col Loss function by column (override)
* @param loss_by_col_idx Loss function by column index (override)
* @param period Length of period (only used with periodic loss function)
* @param regularization_x Regularization function for X matrix
* @param regularization_y Regularization function for Y matrix
* @param gamma_x Regularization weight on X matrix
* @param gamma_y Regularization weight on Y matrix
* @param max_iterations Maximum number of iterations
* @param max_updates Maximum number of updates, defaults to 2*max_iterations
* @param init_step_size Initial step size
* @param min_step_size Minimum step size
* @param seed RNG seed for initialization
* @param init Initialization mode
* @param svd_method Method for computing SVD during initialization (Caution: Randomized is currently experimental
* and unstable)
* @param user_y User-specified initial Y
* @param user_x User-specified initial X
* @param loading_name [Deprecated] Use representation_name instead. Frame key to save resulting X.
* @param representation_name Frame key to save resulting X
* @param expand_user_y Expand categorical columns in user-specified initial Y
* @param impute_original Reconstruct original training data by reversing transform
* @param recover_svd Recover singular values and eigenvectors of XY
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/glrm")
Call<GLRMV3> trainGlrm(
    // --- GLRM-specific parameters ---
    @Field("transform") DataInfoTransformType transform,
    @Field("k") int k,
    @Field("loss") GenmodelalgosglrmGlrmLoss loss,
    @Field("multi_loss") GenmodelalgosglrmGlrmLoss multi_loss,
    @Field("loss_by_col") GenmodelalgosglrmGlrmLoss[] loss_by_col,
    @Field("loss_by_col_idx") int[] loss_by_col_idx,
    @Field("period") int period,
    @Field("regularization_x") GenmodelalgosglrmGlrmRegularizer regularization_x,
    @Field("regularization_y") GenmodelalgosglrmGlrmRegularizer regularization_y,
    @Field("gamma_x") double gamma_x,
    @Field("gamma_y") double gamma_y,
    @Field("max_iterations") int max_iterations,
    @Field("max_updates") int max_updates,
    @Field("init_step_size") double init_step_size,
    @Field("min_step_size") double min_step_size,
    @Field("seed") long seed,
    @Field("init") GenmodelalgosglrmGlrmInitialization init,
    @Field("svd_method") SVDMethod svd_method,
    @Field("user_y") String user_y,
    @Field("user_x") String user_x,
    @Field("loading_name") String loading_name,
    @Field("representation_name") String representation_name,
    @Field("expand_user_y") boolean expand_user_y,
    @Field("impute_original") boolean impute_original,
    @Field("recover_svd") boolean recover_svd,
    // --- common model-builder parameters (shared by the other ModelBuilders endpoints in this file) ---
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
/**
 * Train a GLRM model specifying only the required parameter k (rank of the matrix approximation);
 * all other parameters use server-side default values.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/glrm")
Call<GLRMV3> trainGlrm(@Field("k") int k);
/**
* Validate a set of GLRM model builder parameters.
* @param transform Transformation of training data
* @param k Rank of matrix approximation
* @param loss Numeric loss function
* @param multi_loss Categorical loss function
* @param loss_by_col Loss function by column (override)
* @param loss_by_col_idx Loss function by column index (override)
* @param period Length of period (only used with periodic loss function)
* @param regularization_x Regularization function for X matrix
* @param regularization_y Regularization function for Y matrix
* @param gamma_x Regularization weight on X matrix
* @param gamma_y Regularization weight on Y matrix
* @param max_iterations Maximum number of iterations
* @param max_updates Maximum number of updates, defaults to 2*max_iterations
* @param init_step_size Initial step size
* @param min_step_size Minimum step size
* @param seed RNG seed for initialization
* @param init Initialization mode
* @param svd_method Method for computing SVD during initialization (Caution: Randomized is currently experimental
* and unstable)
* @param user_y User-specified initial Y
* @param user_x User-specified initial X
* @param loading_name [Deprecated] Use representation_name instead. Frame key to save resulting X.
* @param representation_name Frame key to save resulting X
* @param expand_user_y Expand categorical columns in user-specified initial Y
* @param impute_original Reconstruct original training data by reversing transform
* @param recover_svd Recover singular values and eigenvectors of XY
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/glrm/parameters")
Call<GLRMV3> validate_parametersGlrm(
@Field("transform") DataInfoTransformType transform,
@Field("k") int k,
@Field("loss") GenmodelalgosglrmGlrmLoss loss,
@Field("multi_loss") GenmodelalgosglrmGlrmLoss multi_loss,
@Field("loss_by_col") GenmodelalgosglrmGlrmLoss[] loss_by_col,
@Field("loss_by_col_idx") int[] loss_by_col_idx,
@Field("period") int period,
@Field("regularization_x") GenmodelalgosglrmGlrmRegularizer regularization_x,
@Field("regularization_y") GenmodelalgosglrmGlrmRegularizer regularization_y,
@Field("gamma_x") double gamma_x,
@Field("gamma_y") double gamma_y,
@Field("max_iterations") int max_iterations,
@Field("max_updates") int max_updates,
@Field("init_step_size") double init_step_size,
@Field("min_step_size") double min_step_size,
@Field("seed") long seed,
@Field("init") GenmodelalgosglrmGlrmInitialization init,
@Field("svd_method") SVDMethod svd_method,
@Field("user_y") String user_y,
@Field("user_x") String user_x,
@Field("loading_name") String loading_name,
@Field("representation_name") String representation_name,
@Field("expand_user_y") boolean expand_user_y,
@Field("impute_original") boolean impute_original,
@Field("recover_svd") boolean recover_svd,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
@FormUrlEncoded
@POST("/3/ModelBuilders/glrm/parameters")
Call<GLRMV3> validate_parametersGlrm(@Field("k") int k);
/**
* Train a KMeans model.
* @param user_points This option allows you to specify a dataframe, where each row represents an initial cluster
* center. The user-specified points must have the same number of columns as the training
* observations. The number of rows must equal the number of clusters
* @param max_iterations Maximum training iterations (if estimate_k is enabled, then this is for each inner Lloyds
* iteration)
* @param standardize Standardize columns before computing distances
* @param seed RNG Seed
* @param init Initialization mode
* @param estimate_k Whether to estimate the number of clusters (<=k) iteratively and deterministically.
* @param cluster_size_constraints An array specifying the minimum number of points that should be in each cluster.
* The length of the constraints array has to be the same as the number of clusters.
* @param k The max. number of clusters. If estimate_k is disabled, the model will find k centroids, otherwise it
* will find up to k centroids.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/kmeans")
Call<KMeansV3> trainKmeans(
@Field("user_points") String user_points,
@Field("max_iterations") int max_iterations,
@Field("standardize") boolean standardize,
@Field("seed") long seed,
@Field("init") KMeansInitialization init,
@Field("estimate_k") boolean estimate_k,
@Field("cluster_size_constraints") int[] cluster_size_constraints,
@Field("k") int k,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
@FormUrlEncoded
@POST("/3/ModelBuilders/kmeans")
Call<KMeansV3> trainKmeans();
/**
* Validate a set of KMeans model builder parameters.
* @param user_points This option allows you to specify a dataframe, where each row represents an initial cluster
* center. The user-specified points must have the same number of columns as the training
* observations. The number of rows must equal the number of clusters
* @param max_iterations Maximum training iterations (if estimate_k is enabled, then this is for each inner Lloyds
* iteration)
* @param standardize Standardize columns before computing distances
* @param seed RNG Seed
* @param init Initialization mode
* @param estimate_k Whether to estimate the number of clusters (<=k) iteratively and deterministically.
* @param cluster_size_constraints An array specifying the minimum number of points that should be in each cluster.
* The length of the constraints array has to be the same as the number of clusters.
* @param k The max. number of clusters. If estimate_k is disabled, the model will find k centroids, otherwise it
* will find up to k centroids.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/kmeans/parameters")
Call<KMeansV3> validate_parametersKmeans(
@Field("user_points") String user_points,
@Field("max_iterations") int max_iterations,
@Field("standardize") boolean standardize,
@Field("seed") long seed,
@Field("init") KMeansInitialization init,
@Field("estimate_k") boolean estimate_k,
@Field("cluster_size_constraints") int[] cluster_size_constraints,
@Field("k") int k,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
@FormUrlEncoded
@POST("/3/ModelBuilders/kmeans/parameters")
Call<KMeansV3> validate_parametersKmeans();
/**
* Train a NaiveBayes model.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param laplace Laplace smoothing parameter
* @param min_sdev Min. standard deviation to use for observations with not enough data
* @param eps_sdev Cutoff below which standard deviation is replaced with min_sdev
* @param min_prob Min. probability to use for observations with not enough data
* @param eps_prob Cutoff below which probability is replaced with min_prob
* @param compute_metrics Compute metrics on training data
* @param seed Seed for pseudo random number generator (only used for cross-validation and fold_assignment="Random"
* or "AUTO")
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/naivebayes")
Call<NaiveBayesV3> trainNaivebayes(
@Field("balance_classes") boolean balance_classes,
@Field("class_sampling_factors") float[] class_sampling_factors,
@Field("max_after_balance_size") float max_after_balance_size,
@Field("max_confusion_matrix_size") int max_confusion_matrix_size,
@Field("laplace") double laplace,
@Field("min_sdev") double min_sdev,
@Field("eps_sdev") double eps_sdev,
@Field("min_prob") double min_prob,
@Field("eps_prob") double eps_prob,
@Field("compute_metrics") boolean compute_metrics,
@Field("seed") long seed,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
@FormUrlEncoded
@POST("/3/ModelBuilders/naivebayes")
Call<NaiveBayesV3> trainNaivebayes();
/**
* Validate a set of NaiveBayes model builder parameters.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param laplace Laplace smoothing parameter
* @param min_sdev Min. standard deviation to use for observations with not enough data
* @param eps_sdev Cutoff below which standard deviation is replaced with min_sdev
* @param min_prob Min. probability to use for observations with not enough data
* @param eps_prob Cutoff below which probability is replaced with min_prob
* @param compute_metrics Compute metrics on training data
* @param seed Seed for pseudo random number generator (only used for cross-validation and fold_assignment="Random"
* or "AUTO")
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/naivebayes/parameters")
Call<NaiveBayesV3> validate_parametersNaivebayes(
// Naive Bayes-specific parameters (per-field semantics are described in the Javadoc above).
@Field("balance_classes") boolean balance_classes,
@Field("class_sampling_factors") float[] class_sampling_factors,
@Field("max_after_balance_size") float max_after_balance_size,
@Field("max_confusion_matrix_size") int max_confusion_matrix_size,
@Field("laplace") double laplace,
@Field("min_sdev") double min_sdev,
@Field("eps_sdev") double eps_sdev,
@Field("min_prob") double min_prob,
@Field("eps_prob") double eps_prob,
@Field("compute_metrics") boolean compute_metrics,
@Field("seed") long seed,
// Parameters common to every model-builder endpoint in this interface.
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
// Zero-argument overload: sends no form fields, so the server-side defaults
// are used for every Naive Bayes builder parameter.
@FormUrlEncoded
@POST("/3/ModelBuilders/naivebayes/parameters")
Call<NaiveBayesV3> validate_parametersNaivebayes();
/**
* Train a PCA model.
* @param transform Transformation of training data
* @param pca_method Specify the algorithm to use for computing the principal components: GramSVD - uses a
* distributed computation of the Gram matrix, followed by a local SVD; Power - computes the SVD
* using the power iteration method (experimental); Randomized - uses randomized subspace
* iteration method; GLRM - fits a generalized low-rank model with L2 loss function and no
* regularization and solves for the SVD using local matrix algebra (experimental)
* @param pca_impl Specify the implementation to use for computing PCA (via SVD or EVD): MTJ_EVD_DENSEMATRIX -
* eigenvalue decompositions for dense matrix using MTJ; MTJ_EVD_SYMMMATRIX - eigenvalue
* decompositions for symmetric matrix using MTJ; MTJ_SVD_DENSEMATRIX - singular-value
* decompositions for dense matrix using MTJ; JAMA - eigenvalue decompositions for dense matrix
* using JAMA. References: JAMA - http://math.nist.gov/javanumerics/jama/; MTJ -
* https://github.com/fommil/matrix-toolkits-java/
* @param k Rank of matrix approximation
* @param max_iterations Maximum training iterations
* @param seed RNG seed for initialization
* @param use_all_factor_levels Whether first factor level is included in each categorical expansion
* @param compute_metrics Whether to compute metrics on the training data
* @param impute_missing Whether to impute missing entries with the column mean
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/pca")
Call<PCAV3> trainPca(
// PCA-specific parameters (per-field semantics are described in the Javadoc above).
@Field("transform") DataInfoTransformType transform,
@Field("pca_method") PCAMethod pca_method,
@Field("pca_impl") PCAImplementation pca_impl,
@Field("k") int k,
@Field("max_iterations") int max_iterations,
@Field("seed") long seed,
@Field("use_all_factor_levels") boolean use_all_factor_levels,
@Field("compute_metrics") boolean compute_metrics,
@Field("impute_missing") boolean impute_missing,
// Parameters common to every model-builder endpoint in this interface.
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
// Minimal overload: only the rank of the matrix approximation (k) is sent;
// every other builder parameter falls back to its server-side default.
@FormUrlEncoded
@POST("/3/ModelBuilders/pca")
Call<PCAV3> trainPca(@Field("k") int k);
/**
* Validate a set of PCA model builder parameters.
* @param transform Transformation of training data
* @param pca_method Specify the algorithm to use for computing the principal components: GramSVD - uses a
* distributed computation of the Gram matrix, followed by a local SVD; Power - computes the SVD
* using the power iteration method (experimental); Randomized - uses randomized subspace
* iteration method; GLRM - fits a generalized low-rank model with L2 loss function and no
* regularization and solves for the SVD using local matrix algebra (experimental)
* @param pca_impl Specify the implementation to use for computing PCA (via SVD or EVD): MTJ_EVD_DENSEMATRIX -
* eigenvalue decompositions for dense matrix using MTJ; MTJ_EVD_SYMMMATRIX - eigenvalue
* decompositions for symmetric matrix using MTJ; MTJ_SVD_DENSEMATRIX - singular-value
* decompositions for dense matrix using MTJ; JAMA - eigenvalue decompositions for dense matrix
* using JAMA. References: JAMA - http://math.nist.gov/javanumerics/jama/; MTJ -
* https://github.com/fommil/matrix-toolkits-java/
* @param k Rank of matrix approximation
* @param max_iterations Maximum training iterations
* @param seed RNG seed for initialization
* @param use_all_factor_levels Whether first factor level is included in each categorical expansion
* @param compute_metrics Whether to compute metrics on the training data
* @param impute_missing Whether to impute missing entries with the column mean
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/pca/parameters")
Call<PCAV3> validate_parametersPca(
// PCA-specific parameters (per-field semantics are described in the Javadoc above).
@Field("transform") DataInfoTransformType transform,
@Field("pca_method") PCAMethod pca_method,
@Field("pca_impl") PCAImplementation pca_impl,
@Field("k") int k,
@Field("max_iterations") int max_iterations,
@Field("seed") long seed,
@Field("use_all_factor_levels") boolean use_all_factor_levels,
@Field("compute_metrics") boolean compute_metrics,
@Field("impute_missing") boolean impute_missing,
// Parameters common to every model-builder endpoint in this interface.
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
// Minimal overload: validates with only the rank (k) supplied; every other
// builder parameter falls back to its server-side default.
@FormUrlEncoded
@POST("/3/ModelBuilders/pca/parameters")
Call<PCAV3> validate_parametersPca(@Field("k") int k);
/**
* Train a SVD model.
* @param transform Transformation of training data
* @param svd_method Method for computing SVD (Caution: Randomized is currently experimental and unstable)
* @param nv Number of right singular vectors
* @param max_iterations Maximum iterations
* @param seed RNG seed for k-means++ initialization
* @param keep_u Save left singular vectors?
* @param u_name Frame key to save left singular vectors
* @param use_all_factor_levels Whether first factor level is included in each categorical expansion
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/99/ModelBuilders/svd")
Call<SVDV99> trainSvd(
// SVD-specific parameters (per-field semantics are described in the Javadoc above).
// Note: this endpoint lives under the experimental /99 API version, unlike the /3 builders.
@Field("transform") DataInfoTransformType transform,
@Field("svd_method") SVDMethod svd_method,
@Field("nv") int nv,
@Field("max_iterations") int max_iterations,
@Field("seed") long seed,
@Field("keep_u") boolean keep_u,
@Field("u_name") String u_name,
@Field("use_all_factor_levels") boolean use_all_factor_levels,
// Parameters common to every model-builder endpoint in this interface.
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
// Zero-argument overload: sends no form fields, so the server-side defaults
// are used for every SVD builder parameter.
@FormUrlEncoded
@POST("/99/ModelBuilders/svd")
Call<SVDV99> trainSvd();
/**
* Validate a set of SVD model builder parameters.
* @param transform Transformation of training data
* @param svd_method Method for computing SVD (Caution: Randomized is currently experimental and unstable)
* @param nv Number of right singular vectors
* @param max_iterations Maximum iterations
* @param seed RNG seed for k-means++ initialization
* @param keep_u Save left singular vectors?
* @param u_name Frame key to save left singular vectors
* @param use_all_factor_levels Whether first factor level is included in each categorical expansion
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/99/ModelBuilders/svd/parameters")
Call<SVDV99> validate_parametersSvd(
// SVD-specific parameters (per-field semantics are described in the Javadoc above).
// Note: this endpoint lives under the experimental /99 API version, unlike the /3 builders.
@Field("transform") DataInfoTransformType transform,
@Field("svd_method") SVDMethod svd_method,
@Field("nv") int nv,
@Field("max_iterations") int max_iterations,
@Field("seed") long seed,
@Field("keep_u") boolean keep_u,
@Field("u_name") String u_name,
@Field("use_all_factor_levels") boolean use_all_factor_levels,
// Parameters common to every model-builder endpoint in this interface.
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
// Zero-argument overload: sends no form fields, so the server-side defaults
// are used for every SVD builder parameter.
@FormUrlEncoded
@POST("/99/ModelBuilders/svd/parameters")
Call<SVDV99> validate_parametersSvd();
/**
 * Train a DRF model.
 * @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p}
 *               for classification and p/3 for regression (where p is the # of predictors
 * @param binomial_double_trees For binary classification: Build 2x as many trees (one per class) - can lead to
 *                              higher accuracy.
 * @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
 * @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
 * @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
 *                               specified, sampling factors will be automatically computed to obtain class balance
 *                               during training. Requires balance_classes.
 * @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
 *                               less than 1.0). Requires balance_classes.
 * @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
 *                                  the Logs
 * @param ntrees Number of trees.
 * @param max_depth Maximum tree depth (0 for unlimited).
 * @param min_rows Fewest allowed (weighted) observations in a leaf.
 * @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
 *              best point
 * @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
 *                        root level, then decrease by factor of two per level
 * @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
 *                   point. Higher values can lead to more overfitting.
 * @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
 *                    stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
 *                    trees when the R^2 metric equals or exceeds this
 * @param seed Seed for pseudo random number generator (if applicable)
 * @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
 *                            datasets.
 * @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
 *                              1.0), for each tree
 * @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
 * @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
 *                                         0.0 and <= 2.0)
 * @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
 * @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
 * @param histogram_type What type of histogram to use for finding optimal split points
 * @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
 *                        probabilities. Calibration can provide more accurate estimates of class probabilities.
 * @param calibration_frame Data for model calibration
 * @param calibration_method Calibration method to use
 * @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
 *                                the response column is a constant value. If disabled, then model will train
 *                                regardless of the response column being a constant value or not.
 * @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
 *                                    running. In case of cluster shutdown, this checkpoint can be used to restart
 *                                    training.
 * @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
 *                                              only when in_training_checkpoints_dir is defined
 * @param model_id Destination id for this model; auto-generated if not specified.
 * @param training_frame Id of the training data frame.
 * @param validation_frame Id of the validation data frame.
 * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
 * @param keep_cross_validation_models Whether to keep the cross-validation models.
 * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
 * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
 * @param parallelize_cross_validation Allow parallel training of cross-validation models
 * @param distribution Distribution function
 * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
 * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
 * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
 *                    between 0 and 1).
 * @param response_column Response variable column.
 * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
 *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
 *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
 *                       observation weights and do not increase the size of the data frame. This is typically the
 *                       number of times a row is repeated, but non-integer values are supported as well. During
 *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
 *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
 *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
 * @param offset_column Offset column. This will be added to the combination of columns before applying the link
 *                      function.
 * @param fold_column Column with cross-validation fold index assignment per observation.
 * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
 *                        option will stratify the folds based on the response variable, for classification
 *                        problems.
 * @param categorical_encoding Encoding scheme for categorical features
 * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
 *                               for model training. Only used for categorical_encoding == EnumLimited.
 * @param ignored_columns Names of columns to ignore for training.
 * @param ignore_const_cols Ignore constant columns.
 * @param score_each_iteration Whether to score during each iteration of model training.
 * @param checkpoint Model checkpoint to resume training with.
 * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
 *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
 *                        to disable)
 * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
 * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
 *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
 *                        can only be used in GBM and DRF with the Python client.
 * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
 *                           not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
 * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
 * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
 * @param export_checkpoints_dir Automatically export generated models to this directory.
 * @param auc_type Set default multinomial AUC type.
 * @return a {@code Call} that, when executed, POSTs the form-encoded parameters to /3/ModelBuilders/drf and
 *         yields the server's DRF response schema.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/drf")
Call<DRFV3> trainDrf(
  @Field("mtries") int mtries,
  @Field("binomial_double_trees") boolean binomial_double_trees,
  @Field("sample_rate") double sample_rate,
  @Field("balance_classes") boolean balance_classes,
  @Field("class_sampling_factors") float[] class_sampling_factors,
  @Field("max_after_balance_size") float max_after_balance_size,
  @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
  @Field("ntrees") int ntrees,
  @Field("max_depth") int max_depth,
  @Field("min_rows") double min_rows,
  @Field("nbins") int nbins,
  @Field("nbins_top_level") int nbins_top_level,
  @Field("nbins_cats") int nbins_cats,
  @Field("r2_stopping") double r2_stopping,
  @Field("seed") long seed,
  @Field("build_tree_one_node") boolean build_tree_one_node,
  @Field("sample_rate_per_class") double[] sample_rate_per_class,
  @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
  @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
  @Field("score_tree_interval") int score_tree_interval,
  @Field("min_split_improvement") double min_split_improvement,
  @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
  @Field("calibrate_model") boolean calibrate_model,
  @Field("calibration_frame") String calibration_frame,
  @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
  @Field("check_constant_response") boolean check_constant_response,
  @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
  @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
  @Field("model_id") String model_id,
  @Field("training_frame") String training_frame,
  @Field("validation_frame") String validation_frame,
  @Field("nfolds") int nfolds,
  @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
  @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
  @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
  @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
  @Field("distribution") GenmodelutilsDistributionFamily distribution,
  @Field("tweedie_power") double tweedie_power,
  @Field("quantile_alpha") double quantile_alpha,
  @Field("huber_alpha") double huber_alpha,
  @Field("response_column") String response_column,
  @Field("weights_column") String weights_column,
  @Field("offset_column") String offset_column,
  @Field("fold_column") String fold_column,
  @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
  @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
  @Field("max_categorical_levels") int max_categorical_levels,
  @Field("ignored_columns") String[] ignored_columns,
  @Field("ignore_const_cols") boolean ignore_const_cols,
  @Field("score_each_iteration") boolean score_each_iteration,
  @Field("checkpoint") String checkpoint,
  @Field("stopping_rounds") int stopping_rounds,
  @Field("max_runtime_secs") double max_runtime_secs,
  @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
  @Field("stopping_tolerance") double stopping_tolerance,
  @Field("gainslift_bins") int gainslift_bins,
  @Field("custom_metric_func") String custom_metric_func,
  @Field("custom_distribution_func") String custom_distribution_func,
  @Field("export_checkpoints_dir") String export_checkpoints_dir,
  @Field("auc_type") MultinomialAucType auc_type
);
/**
 * Train a DRF model using server-side default parameter values
 * (this overload sends no form fields with the request).
 * @return a {@code Call} that, when executed, POSTs to /3/ModelBuilders/drf.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/drf")
Call<DRFV3> trainDrf();
/**
 * Validate a set of DRF model builder parameters.
 * @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p}
 *               for classification and p/3 for regression (where p is the # of predictors
 * @param binomial_double_trees For binary classification: Build 2x as many trees (one per class) - can lead to
 *                              higher accuracy.
 * @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
 * @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
 * @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
 *                               specified, sampling factors will be automatically computed to obtain class balance
 *                               during training. Requires balance_classes.
 * @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
 *                               less than 1.0). Requires balance_classes.
 * @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
 *                                  the Logs
 * @param ntrees Number of trees.
 * @param max_depth Maximum tree depth (0 for unlimited).
 * @param min_rows Fewest allowed (weighted) observations in a leaf.
 * @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
 *              best point
 * @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
 *                        root level, then decrease by factor of two per level
 * @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
 *                   point. Higher values can lead to more overfitting.
 * @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
 *                    stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
 *                    trees when the R^2 metric equals or exceeds this
 * @param seed Seed for pseudo random number generator (if applicable)
 * @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
 *                            datasets.
 * @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
 *                              1.0), for each tree
 * @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
 * @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
 *                                         0.0 and <= 2.0)
 * @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
 * @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
 * @param histogram_type What type of histogram to use for finding optimal split points
 * @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
 *                        probabilities. Calibration can provide more accurate estimates of class probabilities.
 * @param calibration_frame Data for model calibration
 * @param calibration_method Calibration method to use
 * @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
 *                                the response column is a constant value. If disabled, then model will train
 *                                regardless of the response column being a constant value or not.
 * @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
 *                                    running. In case of cluster shutdown, this checkpoint can be used to restart
 *                                    training.
 * @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
 *                                              only when in_training_checkpoints_dir is defined
 * @param model_id Destination id for this model; auto-generated if not specified.
 * @param training_frame Id of the training data frame.
 * @param validation_frame Id of the validation data frame.
 * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
 * @param keep_cross_validation_models Whether to keep the cross-validation models.
 * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
 * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
 * @param parallelize_cross_validation Allow parallel training of cross-validation models
 * @param distribution Distribution function
 * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
 * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
 * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
 *                    between 0 and 1).
 * @param response_column Response variable column.
 * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
 *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
 *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
 *                       observation weights and do not increase the size of the data frame. This is typically the
 *                       number of times a row is repeated, but non-integer values are supported as well. During
 *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
 *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
 *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
 * @param offset_column Offset column. This will be added to the combination of columns before applying the link
 *                      function.
 * @param fold_column Column with cross-validation fold index assignment per observation.
 * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
 *                        option will stratify the folds based on the response variable, for classification
 *                        problems.
 * @param categorical_encoding Encoding scheme for categorical features
 * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
 *                               for model training. Only used for categorical_encoding == EnumLimited.
 * @param ignored_columns Names of columns to ignore for training.
 * @param ignore_const_cols Ignore constant columns.
 * @param score_each_iteration Whether to score during each iteration of model training.
 * @param checkpoint Model checkpoint to resume training with.
 * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
 *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
 *                        to disable)
 * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
 * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
 *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
 *                        can only be used in GBM and DRF with the Python client.
 * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
 *                           not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
 * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
 * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
 * @param export_checkpoints_dir Automatically export generated models to this directory.
 * @param auc_type Set default multinomial AUC type.
 * @return a {@code Call} that, when executed, POSTs the form-encoded parameters to
 *         /3/ModelBuilders/drf/parameters for validation (no model is trained).
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/drf/parameters")
Call<DRFV3> validate_parametersDrf(
  @Field("mtries") int mtries,
  @Field("binomial_double_trees") boolean binomial_double_trees,
  @Field("sample_rate") double sample_rate,
  @Field("balance_classes") boolean balance_classes,
  @Field("class_sampling_factors") float[] class_sampling_factors,
  @Field("max_after_balance_size") float max_after_balance_size,
  @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
  @Field("ntrees") int ntrees,
  @Field("max_depth") int max_depth,
  @Field("min_rows") double min_rows,
  @Field("nbins") int nbins,
  @Field("nbins_top_level") int nbins_top_level,
  @Field("nbins_cats") int nbins_cats,
  @Field("r2_stopping") double r2_stopping,
  @Field("seed") long seed,
  @Field("build_tree_one_node") boolean build_tree_one_node,
  @Field("sample_rate_per_class") double[] sample_rate_per_class,
  @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
  @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
  @Field("score_tree_interval") int score_tree_interval,
  @Field("min_split_improvement") double min_split_improvement,
  @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
  @Field("calibrate_model") boolean calibrate_model,
  @Field("calibration_frame") String calibration_frame,
  @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
  @Field("check_constant_response") boolean check_constant_response,
  @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
  @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
  @Field("model_id") String model_id,
  @Field("training_frame") String training_frame,
  @Field("validation_frame") String validation_frame,
  @Field("nfolds") int nfolds,
  @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
  @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
  @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
  @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
  @Field("distribution") GenmodelutilsDistributionFamily distribution,
  @Field("tweedie_power") double tweedie_power,
  @Field("quantile_alpha") double quantile_alpha,
  @Field("huber_alpha") double huber_alpha,
  @Field("response_column") String response_column,
  @Field("weights_column") String weights_column,
  @Field("offset_column") String offset_column,
  @Field("fold_column") String fold_column,
  @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
  @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
  @Field("max_categorical_levels") int max_categorical_levels,
  @Field("ignored_columns") String[] ignored_columns,
  @Field("ignore_const_cols") boolean ignore_const_cols,
  @Field("score_each_iteration") boolean score_each_iteration,
  @Field("checkpoint") String checkpoint,
  @Field("stopping_rounds") int stopping_rounds,
  @Field("max_runtime_secs") double max_runtime_secs,
  @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
  @Field("stopping_tolerance") double stopping_tolerance,
  @Field("gainslift_bins") int gainslift_bins,
  @Field("custom_metric_func") String custom_metric_func,
  @Field("custom_distribution_func") String custom_distribution_func,
  @Field("export_checkpoints_dir") String export_checkpoints_dir,
  @Field("auc_type") MultinomialAucType auc_type
);
/**
 * Validate a set of DRF model builder parameters using server-side default values
 * (this overload sends no form fields with the request).
 * @return a {@code Call} that, when executed, POSTs to /3/ModelBuilders/drf/parameters.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/drf/parameters")
Call<DRFV3> validate_parametersDrf();
/**
* Train a GBM model.
* @param learn_rate Learning rate (from 0.0 to 1.0)
* @param learn_rate_annealing Scale the learning rate by this factor after each tree (e.g., 0.99 or 0.999)
* @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate Column sample rate (from 0.0 to 1.0)
* @param monotone_constraints A mapping representing monotonic constraints. Use +1 to enforce an increasing
* constraint and -1 to specify a decreasing constraint.
* @param max_abs_leafnode_pred Maximum absolute value of a leaf node prediction
* @param pred_noise_bandwidth Bandwidth (sigma) of Gaussian multiplicative noise ~N(1,sigma) for tree node
* predictions
* @param interaction_constraints A set of allowed column interactions.
* @param auto_rebalance Allow automatic rebalancing of training and validation datasets
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param ntrees Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
* best point
* @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
* root level, then decrease by factor of two per level
* @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
* point. Higher values can lead to more overfitting.
* @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
* stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
* trees when the R^2 metric equals or exceeds this
* @param seed Seed for pseudo random number generator (if applicable)
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
* 1.0), for each tree
* @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
* 0.0 and <= 2.0)
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
* @param histogram_type What type of histogram to use for finding optimal split points
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
*                                the response column is a constant value. If disabled, then model will train
*                                regardless of the response column being a constant value or not.
* @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
* running. In case of cluster shutdown, this checkpoint can be used to restart
* training.
* @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
* only when in_training_checkpoints_dir is defined
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
*                       binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Trains a GBM model via POST /3/ModelBuilders/gbm, sending every builder
  // parameter as a form-encoded field. Per-field semantics are documented in
  // the Javadoc immediately above; field names mirror the H2O REST schema.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/gbm")
  Call<GBMV3> trainGbm(
    // GBM-specific hyperparameters
    @Field("learn_rate") double learn_rate,
    @Field("learn_rate_annealing") double learn_rate_annealing,
    @Field("sample_rate") double sample_rate,
    @Field("col_sample_rate") double col_sample_rate,
    @Field("monotone_constraints") KeyValueV3[] monotone_constraints,
    @Field("max_abs_leafnode_pred") double max_abs_leafnode_pred,
    @Field("pred_noise_bandwidth") double pred_noise_bandwidth,
    @Field("interaction_constraints") String[][] interaction_constraints,
    @Field("auto_rebalance") boolean auto_rebalance,
    // Shared tree-model parameters (class balancing, histograms, calibration)
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    // Common model-builder parameters (frames, CV, stopping, encoding)
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Train a GBM model without supplying any builder parameters: the request is
   * sent with an empty form body, leaving all fields at the server's defaults.
   */
  // NOTE(review): Retrofit 2 throws at service creation for @FormUrlEncoded
  // methods with no @Field parameter — confirm the Retrofit version in use
  // tolerates an empty form body.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/gbm")
  Call<GBMV3> trainGbm();
/**
* Validate a set of GBM model builder parameters.
* @param learn_rate Learning rate (from 0.0 to 1.0)
* @param learn_rate_annealing Scale the learning rate by this factor after each tree (e.g., 0.99 or 0.999)
* @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate Column sample rate (from 0.0 to 1.0)
* @param monotone_constraints A mapping representing monotonic constraints. Use +1 to enforce an increasing
* constraint and -1 to specify a decreasing constraint.
* @param max_abs_leafnode_pred Maximum absolute value of a leaf node prediction
* @param pred_noise_bandwidth Bandwidth (sigma) of Gaussian multiplicative noise ~N(1,sigma) for tree node
* predictions
* @param interaction_constraints A set of allowed column interactions.
* @param auto_rebalance Allow automatic rebalancing of training and validation datasets
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param ntrees Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
* best point
* @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
* root level, then decrease by factor of two per level
* @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
* point. Higher values can lead to more overfitting.
* @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
* stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
* trees when the R^2 metric equals or exceeds this
* @param seed Seed for pseudo random number generator (if applicable)
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
* 1.0), for each tree
* @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
* 0.0 and <= 2.0)
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
* @param histogram_type What type of histogram to use for finding optimal split points
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
* the response column is a constant value.If disabled, then model will train
* regardless of the response column being a constant value or not.
* @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
* running. In case of cluster shutdown, this checkpoint can be used to restart
* training.
* @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
* only when in_training_checkpoints_dir is defined
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Validates a set of GBM builder parameters via POST
  // /3/ModelBuilders/gbm/parameters without training a model. The parameter
  // list is identical to trainGbm; per-field semantics are documented in the
  // Javadoc immediately above.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/gbm/parameters")
  Call<GBMV3> validate_parametersGbm(
    // GBM-specific hyperparameters
    @Field("learn_rate") double learn_rate,
    @Field("learn_rate_annealing") double learn_rate_annealing,
    @Field("sample_rate") double sample_rate,
    @Field("col_sample_rate") double col_sample_rate,
    @Field("monotone_constraints") KeyValueV3[] monotone_constraints,
    @Field("max_abs_leafnode_pred") double max_abs_leafnode_pred,
    @Field("pred_noise_bandwidth") double pred_noise_bandwidth,
    @Field("interaction_constraints") String[][] interaction_constraints,
    @Field("auto_rebalance") boolean auto_rebalance,
    // Shared tree-model parameters (class balancing, histograms, calibration)
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    // Common model-builder parameters (frames, CV, stopping, encoding)
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Validate GBM builder parameters without supplying any: the request is sent
   * with an empty form body, leaving all fields at the server's defaults.
   */
  // NOTE(review): Retrofit 2 throws at service creation for @FormUrlEncoded
  // methods with no @Field parameter — confirm the Retrofit version in use
  // tolerates an empty form body.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/gbm/parameters")
  Call<GBMV3> validate_parametersGbm();
/**
* Train a IsolationForest model.
* @param sample_size Number of randomly sampled observations used to train each Isolation Forest tree. Only one of
* parameters sample_size and sample_rate should be defined. If sample_rate is defined,
* sample_size will be ignored.
* @param sample_rate Rate of randomly sampled observations used to train each Isolation Forest tree. Needs to be in
* range from 0.0 to 1.0. If set to -1, sample_rate is disabled and sample_size will be used
* instead.
* @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults (number of
* predictors)/3.
* @param contamination Contamination ratio - the proportion of anomalies in the input dataset. If undefined (-1)
* the predict function will not mark observations as anomalies and only anomaly score will be
* returned. Defaults to -1 (undefined).
* @param validation_response_column (experimental) Name of the response column in the validation frame. Response
* column should be binary and indicate not anomaly/anomaly.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param ntrees Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
* best point
* @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
* root level, then decrease by factor of two per level
* @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
* point. Higher values can lead to more overfitting.
* @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
* stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
* trees when the R^2 metric equals or exceeds this
* @param seed Seed for pseudo random number generator (if applicable)
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
* 1.0), for each tree
* @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
* 0.0 and <= 2.0)
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
* @param histogram_type What type of histogram to use for finding optimal split points
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
* the response column is a constant value.If disabled, then model will train
* regardless of the response column being a constant value or not.
* @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
* running. In case of cluster shutdown, this checkpoint can be used to restart
* training.
* @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
* only when in_training_checkpoints_dir is defined
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Trains an Isolation Forest model via POST /3/ModelBuilders/isolationforest,
  // sending every builder parameter as a form-encoded field. Per-field
  // semantics are documented in the Javadoc immediately above.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/isolationforest")
  Call<IsolationForestV3> trainIsolationforest(
    // Isolation-Forest-specific parameters
    @Field("sample_size") long sample_size,
    @Field("sample_rate") double sample_rate,
    @Field("mtries") int mtries,
    @Field("contamination") double contamination,
    @Field("validation_response_column") String validation_response_column,
    // Shared tree-model parameters (class balancing, histograms, calibration)
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    // Common model-builder parameters (frames, CV, stopping, encoding)
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Train an Isolation Forest model without supplying any builder parameters:
   * the request is sent with an empty form body, leaving all fields at the
   * server's defaults.
   */
  // NOTE(review): Retrofit 2 throws at service creation for @FormUrlEncoded
  // methods with no @Field parameter — confirm the Retrofit version in use
  // tolerates an empty form body.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/isolationforest")
  Call<IsolationForestV3> trainIsolationforest();
/**
* Validate a set of IsolationForest model builder parameters.
* @param sample_size Number of randomly sampled observations used to train each Isolation Forest tree. Only one of
* parameters sample_size and sample_rate should be defined. If sample_rate is defined,
* sample_size will be ignored.
* @param sample_rate Rate of randomly sampled observations used to train each Isolation Forest tree. Needs to be in
* range from 0.0 to 1.0. If set to -1, sample_rate is disabled and sample_size will be used
* instead.
* @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults (number of
* predictors)/3.
* @param contamination Contamination ratio - the proportion of anomalies in the input dataset. If undefined (-1)
* the predict function will not mark observations as anomalies and only anomaly score will be
* returned. Defaults to -1 (undefined).
* @param validation_response_column (experimental) Name of the response column in the validation frame. Response
* column should be binary and indicate not anomaly/anomaly.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param ntrees Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
* best point
* @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
* root level, then decrease by factor of two per level
* @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
* point. Higher values can lead to more overfitting.
* @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
* stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
* trees when the R^2 metric equals or exceeds this
* @param seed Seed for pseudo random number generator (if applicable)
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
* 1.0), for each tree
* @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
* 0.0 and <= 2.0)
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
* @param histogram_type What type of histogram to use for finding optimal split points
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
* the response column is a constant value.If disabled, then model will train
* regardless of the response column being a constant value or not.
* @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
* running. In case of cluster shutdown, this checkpoint can be used to restart
* training.
* @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
* only when in_training_checkpoints_dir is defined
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/isolationforest/parameters")
Call<IsolationForestV3> validate_parametersIsolationforest(
@Field("sample_size") long sample_size,
@Field("sample_rate") double sample_rate,
@Field("mtries") int mtries,
@Field("contamination") double contamination,
@Field("validation_response_column") String validation_response_column,
@Field("balance_classes") boolean balance_classes,
@Field("class_sampling_factors") float[] class_sampling_factors,
@Field("max_after_balance_size") float max_after_balance_size,
@Field("max_confusion_matrix_size") int max_confusion_matrix_size,
@Field("ntrees") int ntrees,
@Field("max_depth") int max_depth,
@Field("min_rows") double min_rows,
@Field("nbins") int nbins,
@Field("nbins_top_level") int nbins_top_level,
@Field("nbins_cats") int nbins_cats,
@Field("r2_stopping") double r2_stopping,
@Field("seed") long seed,
@Field("build_tree_one_node") boolean build_tree_one_node,
@Field("sample_rate_per_class") double[] sample_rate_per_class,
@Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
@Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
@Field("score_tree_interval") int score_tree_interval,
@Field("min_split_improvement") double min_split_improvement,
@Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
@Field("calibrate_model") boolean calibrate_model,
@Field("calibration_frame") String calibration_frame,
@Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
@Field("check_constant_response") boolean check_constant_response,
@Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
@Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
  /**
   * Validate a set of IsolationForest model builder parameters, sending no form fields.
   * NOTE(review): Retrofit 2 rejects {@code @FormUrlEncoded} methods that declare no {@code @Field}
   * parameter at call time — confirm this zero-argument overload is actually invokable with the
   * Retrofit version this project builds against.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/isolationforest/parameters")
  Call<IsolationForestV3> validate_parametersIsolationforest();
  /**
   * Train an ExtendedIsolationForest model.
   * @param ntrees Number of Extended Isolation Forest trees.
   * @param sample_size Number of randomly sampled observations used to train each Extended Isolation Forest tree.
   * @param extension_level Maximum is N - 1 (N = numCols). Minimum is 0. Extended Isolation Forest with
   *                        extension_Level = 0 behaves like Isolation Forest.
   * @param seed Seed for pseudo random number generator (if applicable)
   * @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   * @param disable_training_metrics Disable calculating training metrics (expensive on large datasets)
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/extendedisolationforest")
  Call<ExtendedIsolationForestV3> trainExtendedisolationforest(
    @Field("ntrees") int ntrees,
    @Field("sample_size") int sample_size,
    @Field("extension_level") int extension_level,
    @Field("seed") long seed,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("disable_training_metrics") boolean disable_training_metrics,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Train an ExtendedIsolationForest model, sending no form fields.
   * NOTE(review): Retrofit 2 rejects {@code @FormUrlEncoded} methods that declare no {@code @Field}
   * parameter at call time — confirm this zero-argument overload is actually invokable with the
   * Retrofit version this project builds against.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/extendedisolationforest")
  Call<ExtendedIsolationForestV3> trainExtendedisolationforest();
  /**
   * Validate a set of ExtendedIsolationForest model builder parameters.
   * @param ntrees Number of Extended Isolation Forest trees.
   * @param sample_size Number of randomly sampled observations used to train each Extended Isolation Forest tree.
   * @param extension_level Maximum is N - 1 (N = numCols). Minimum is 0. Extended Isolation Forest with
   *                        extension_Level = 0 behaves like Isolation Forest.
   * @param seed Seed for pseudo random number generator (if applicable)
   * @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
   * @param disable_training_metrics Disable calculating training metrics (expensive on large datasets)
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/extendedisolationforest/parameters")
  Call<ExtendedIsolationForestV3> validate_parametersExtendedisolationforest(
    @Field("ntrees") int ntrees,
    @Field("sample_size") int sample_size,
    @Field("extension_level") int extension_level,
    @Field("seed") long seed,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("disable_training_metrics") boolean disable_training_metrics,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Validate a set of ExtendedIsolationForest model builder parameters, sending no form fields.
   * NOTE(review): Retrofit 2 rejects {@code @FormUrlEncoded} methods that declare no {@code @Field}
   * parameter at call time — confirm this zero-argument overload is actually invokable with the
   * Retrofit version this project builds against.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/extendedisolationforest/parameters")
  Call<ExtendedIsolationForestV3> validate_parametersExtendedisolationforest();
  /**
   * Train an Aggregator model.
   * @param transform Transformation of training data
   * @param pca_method Method for computing PCA (Caution: GLRM is currently experimental and unstable)
   * @param k Rank of matrix approximation
   * @param max_iterations Maximum number of iterations for PCA
   * @param target_num_exemplars Targeted number of exemplars
   * @param rel_tol_num_exemplars Relative tolerance for number of exemplars (e.g., 0.5 is +/- 50 percent)
   * @param seed RNG seed for initialization
   * @param use_all_factor_levels Whether first factor level is included in each categorical expansion
   * @param save_mapping_frame Whether to export the mapping of the aggregated frame
   * @param num_iteration_without_new_exemplar The number of iterations to run before aggregator exits if the number
   *                                           of exemplars collected didn't change
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/ModelBuilders/aggregator")
  Call<AggregatorV99> trainAggregator(
    @Field("transform") DataInfoTransformType transform,
    @Field("pca_method") PCAMethod pca_method,
    @Field("k") int k,
    @Field("max_iterations") int max_iterations,
    @Field("target_num_exemplars") int target_num_exemplars,
    @Field("rel_tol_num_exemplars") double rel_tol_num_exemplars,
    @Field("seed") long seed,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("save_mapping_frame") boolean save_mapping_frame,
    @Field("num_iteration_without_new_exemplar") int num_iteration_without_new_exemplar,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Train an Aggregator model, sending no form fields.
   * NOTE(review): Retrofit 2 rejects {@code @FormUrlEncoded} methods that declare no {@code @Field}
   * parameter at call time — confirm this zero-argument overload is actually invokable with the
   * Retrofit version this project builds against.
   */
  @FormUrlEncoded
  @POST("/99/ModelBuilders/aggregator")
  Call<AggregatorV99> trainAggregator();
  /**
   * Validate a set of Aggregator model builder parameters.
   * @param transform Transformation of training data
   * @param pca_method Method for computing PCA (Caution: GLRM is currently experimental and unstable)
   * @param k Rank of matrix approximation
   * @param max_iterations Maximum number of iterations for PCA
   * @param target_num_exemplars Targeted number of exemplars
   * @param rel_tol_num_exemplars Relative tolerance for number of exemplars (e.g., 0.5 is +/- 50 percent)
   * @param seed RNG seed for initialization
   * @param use_all_factor_levels Whether first factor level is included in each categorical expansion
   * @param save_mapping_frame Whether to export the mapping of the aggregated frame
   * @param num_iteration_without_new_exemplar The number of iterations to run before aggregator exits if the number
   *                                           of exemplars collected didn't change
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/99/ModelBuilders/aggregator/parameters")
  Call<AggregatorV99> validate_parametersAggregator(
    @Field("transform") DataInfoTransformType transform,
    @Field("pca_method") PCAMethod pca_method,
    @Field("k") int k,
    @Field("max_iterations") int max_iterations,
    @Field("target_num_exemplars") int target_num_exemplars,
    @Field("rel_tol_num_exemplars") double rel_tol_num_exemplars,
    @Field("seed") long seed,
    @Field("use_all_factor_levels") boolean use_all_factor_levels,
    @Field("save_mapping_frame") boolean save_mapping_frame,
    @Field("num_iteration_without_new_exemplar") int num_iteration_without_new_exemplar,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
/**
 * Validate a set of Aggregator model builder parameters, sending no explicit
 * fields; server-side defaults apply to every parameter.
 */
@FormUrlEncoded
@POST("/99/ModelBuilders/aggregator/parameters")
Call<AggregatorV99> validate_parametersAggregator();
/**
 * Train a Word2Vec model.
 * @param vec_size Set size of word vectors
 * @param window_size Set max skip length between words
 * @param sent_sample_rate Set threshold for occurrence of words. Those that appear with higher frequency in the
 *                         training data
 *                         will be randomly down-sampled; useful range is (0, 1e-5)
 * @param norm_model Use Hierarchical Softmax
 * @param epochs Number of training iterations to run
 * @param min_word_freq This will discard words that appear less than {@code <int>} times
 * @param init_learning_rate Set the starting learning rate
 * @param word_model The word model to use (SkipGram or CBOW)
 * @param pre_trained Id of a data frame that contains a pre-trained (external) word2vec model
 * @param model_id Destination id for this model; auto-generated if not specified.
 * @param training_frame Id of the training data frame.
 * @param validation_frame Id of the validation data frame.
 * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
 * @param keep_cross_validation_models Whether to keep the cross-validation models.
 * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
 * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
 * @param parallelize_cross_validation Allow parallel training of cross-validation models
 * @param distribution Distribution function
 * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
 * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
 * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
 *                    between 0 and 1).
 * @param response_column Response variable column.
 * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
 *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
 *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
 *                       observation weights and do not increase the size of the data frame. This is typically the
 *                       number of times a row is repeated, but non-integer values are supported as well. During
 *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
 *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
 *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
 * @param offset_column Offset column. This will be added to the combination of columns before applying the link
 *                      function.
 * @param fold_column Column with cross-validation fold index assignment per observation.
 * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
 *                        option will stratify the folds based on the response variable, for classification
 *                        problems.
 * @param categorical_encoding Encoding scheme for categorical features
 * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
 *                               for model training. Only used for categorical_encoding == EnumLimited.
 * @param ignored_columns Names of columns to ignore for training.
 * @param ignore_const_cols Ignore constant columns.
 * @param score_each_iteration Whether to score during each iteration of model training.
 * @param checkpoint Model checkpoint to resume training with.
 * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
 *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
 *                        to disable)
 * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
 * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
 *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
 *                        can only be used in GBM and DRF with the Python client.
 * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
 *                           not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
 * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
 * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
 * @param export_checkpoints_dir Automatically export generated models to this directory.
 * @param auc_type Set default multinomial AUC type.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/word2vec")
Call<Word2VecV3> trainWord2vec(
  @Field("vec_size") int vec_size,
  @Field("window_size") int window_size,
  @Field("sent_sample_rate") float sent_sample_rate,
  @Field("norm_model") Word2VecNormModel norm_model,
  @Field("epochs") int epochs,
  @Field("min_word_freq") int min_word_freq,
  @Field("init_learning_rate") float init_learning_rate,
  @Field("word_model") Word2VecWordModel word_model,
  @Field("pre_trained") String pre_trained,
  @Field("model_id") String model_id,
  @Field("training_frame") String training_frame,
  @Field("validation_frame") String validation_frame,
  @Field("nfolds") int nfolds,
  @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
  @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
  @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
  @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
  @Field("distribution") GenmodelutilsDistributionFamily distribution,
  @Field("tweedie_power") double tweedie_power,
  @Field("quantile_alpha") double quantile_alpha,
  @Field("huber_alpha") double huber_alpha,
  @Field("response_column") String response_column,
  @Field("weights_column") String weights_column,
  @Field("offset_column") String offset_column,
  @Field("fold_column") String fold_column,
  @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
  @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
  @Field("max_categorical_levels") int max_categorical_levels,
  @Field("ignored_columns") String[] ignored_columns,
  @Field("ignore_const_cols") boolean ignore_const_cols,
  @Field("score_each_iteration") boolean score_each_iteration,
  @Field("checkpoint") String checkpoint,
  @Field("stopping_rounds") int stopping_rounds,
  @Field("max_runtime_secs") double max_runtime_secs,
  @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
  @Field("stopping_tolerance") double stopping_tolerance,
  @Field("gainslift_bins") int gainslift_bins,
  @Field("custom_metric_func") String custom_metric_func,
  @Field("custom_distribution_func") String custom_distribution_func,
  @Field("export_checkpoints_dir") String export_checkpoints_dir,
  @Field("auc_type") MultinomialAucType auc_type
);
/**
 * Train a Word2Vec model sending no explicit fields; server-side defaults
 * apply to every parameter.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/word2vec")
Call<Word2VecV3> trainWord2vec();
/**
 * Validate a set of Word2Vec model builder parameters.
 * @param vec_size Set size of word vectors
 * @param window_size Set max skip length between words
 * @param sent_sample_rate Set threshold for occurrence of words. Those that appear with higher frequency in the
 *                         training data
 *                         will be randomly down-sampled; useful range is (0, 1e-5)
 * @param norm_model Use Hierarchical Softmax
 * @param epochs Number of training iterations to run
 * @param min_word_freq This will discard words that appear less than {@code <int>} times
 * @param init_learning_rate Set the starting learning rate
 * @param word_model The word model to use (SkipGram or CBOW)
 * @param pre_trained Id of a data frame that contains a pre-trained (external) word2vec model
 * @param model_id Destination id for this model; auto-generated if not specified.
 * @param training_frame Id of the training data frame.
 * @param validation_frame Id of the validation data frame.
 * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
 * @param keep_cross_validation_models Whether to keep the cross-validation models.
 * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
 * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
 * @param parallelize_cross_validation Allow parallel training of cross-validation models
 * @param distribution Distribution function
 * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
 * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
 * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
 *                    between 0 and 1).
 * @param response_column Response variable column.
 * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
 *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
 *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
 *                       observation weights and do not increase the size of the data frame. This is typically the
 *                       number of times a row is repeated, but non-integer values are supported as well. During
 *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
 *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
 *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
 * @param offset_column Offset column. This will be added to the combination of columns before applying the link
 *                      function.
 * @param fold_column Column with cross-validation fold index assignment per observation.
 * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
 *                        option will stratify the folds based on the response variable, for classification
 *                        problems.
 * @param categorical_encoding Encoding scheme for categorical features
 * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
 *                               for model training. Only used for categorical_encoding == EnumLimited.
 * @param ignored_columns Names of columns to ignore for training.
 * @param ignore_const_cols Ignore constant columns.
 * @param score_each_iteration Whether to score during each iteration of model training.
 * @param checkpoint Model checkpoint to resume training with.
 * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
 *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
 *                        to disable)
 * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
 * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
 *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
 *                        can only be used in GBM and DRF with the Python client.
 * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
 *                           not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
 * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
 * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
 * @param export_checkpoints_dir Automatically export generated models to this directory.
 * @param auc_type Set default multinomial AUC type.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/word2vec/parameters")
Call<Word2VecV3> validate_parametersWord2vec(
  @Field("vec_size") int vec_size,
  @Field("window_size") int window_size,
  @Field("sent_sample_rate") float sent_sample_rate,
  @Field("norm_model") Word2VecNormModel norm_model,
  @Field("epochs") int epochs,
  @Field("min_word_freq") int min_word_freq,
  @Field("init_learning_rate") float init_learning_rate,
  @Field("word_model") Word2VecWordModel word_model,
  @Field("pre_trained") String pre_trained,
  @Field("model_id") String model_id,
  @Field("training_frame") String training_frame,
  @Field("validation_frame") String validation_frame,
  @Field("nfolds") int nfolds,
  @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
  @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
  @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
  @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
  @Field("distribution") GenmodelutilsDistributionFamily distribution,
  @Field("tweedie_power") double tweedie_power,
  @Field("quantile_alpha") double quantile_alpha,
  @Field("huber_alpha") double huber_alpha,
  @Field("response_column") String response_column,
  @Field("weights_column") String weights_column,
  @Field("offset_column") String offset_column,
  @Field("fold_column") String fold_column,
  @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
  @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
  @Field("max_categorical_levels") int max_categorical_levels,
  @Field("ignored_columns") String[] ignored_columns,
  @Field("ignore_const_cols") boolean ignore_const_cols,
  @Field("score_each_iteration") boolean score_each_iteration,
  @Field("checkpoint") String checkpoint,
  @Field("stopping_rounds") int stopping_rounds,
  @Field("max_runtime_secs") double max_runtime_secs,
  @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
  @Field("stopping_tolerance") double stopping_tolerance,
  @Field("gainslift_bins") int gainslift_bins,
  @Field("custom_metric_func") String custom_metric_func,
  @Field("custom_distribution_func") String custom_distribution_func,
  @Field("export_checkpoints_dir") String export_checkpoints_dir,
  @Field("auc_type") MultinomialAucType auc_type
);
/**
 * Validate a set of Word2Vec model builder parameters, sending no explicit
 * fields; server-side defaults apply to every parameter.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/word2vec/parameters")
Call<Word2VecV3> validate_parametersWord2vec();
/**
 * Train a StackedEnsemble model.
 * @param base_models List of models or grids (or their ids) to ensemble/stack together. Grids are expanded to
 *                    individual models. If not using blending frame, then models must have been cross-validated
 *                    using nfolds > 1, and folds must be identical across models.
 * @param metalearner_algorithm Type of algorithm to use as the metalearner. Options include 'AUTO' (GLM with non
 *                              negative weights; if validation_frame is present, a lambda search is performed),
 *                              'deeplearning' (Deep Learning with default parameters), 'drf' (Random Forest with
 *                              default parameters), 'gbm' (GBM with default parameters), 'glm' (GLM with default
 *                              parameters), 'naivebayes' (NaiveBayes with default parameters), or 'xgboost' (if
 *                              available, XGBoost with default parameters).
 * @param metalearner_nfolds Number of folds for K-fold cross-validation of the metalearner algorithm (0 to disable
 *                           or >= 2).
 * @param metalearner_fold_assignment Cross-validation fold assignment scheme for metalearner cross-validation.
 *                                    Defaults to AUTO (which is currently set to Random). The 'Stratified' option
 *                                    will stratify the folds based on the response variable, for classification
 *                                    problems.
 * @param metalearner_fold_column Column with cross-validation fold index assignment per observation for cross-
 *                                validation of the metalearner.
 * @param metalearner_transform Transformation used for the level one frame.
 * @param keep_levelone_frame Keep level one frame used for metalearner training.
 * @param metalearner_params Parameters for metalearner algorithm
 * @param blending_frame Frame used to compute the predictions that serve as the training frame for the metalearner
 *                       (triggers blending mode if provided)
 * @param seed Seed for random numbers; passed through to the metalearner algorithm. Defaults to -1 (time-based
 *             random number)
 * @param score_training_samples Specify the number of training set samples for scoring. The value must be >= 0. To
 *                               use all training samples, enter 0.
 * @param model_id Destination id for this model; auto-generated if not specified.
 * @param training_frame Id of the training data frame.
 * @param validation_frame Id of the validation data frame.
 * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
 * @param keep_cross_validation_models Whether to keep the cross-validation models.
 * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
 * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
 * @param parallelize_cross_validation Allow parallel training of cross-validation models
 * @param distribution Distribution function
 * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
 * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
 * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
 *                    between 0 and 1).
 * @param response_column Response variable column.
 * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
 *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
 *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
 *                       observation weights and do not increase the size of the data frame. This is typically the
 *                       number of times a row is repeated, but non-integer values are supported as well. During
 *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
 *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
 *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
 * @param offset_column Offset column. This will be added to the combination of columns before applying the link
 *                      function.
 * @param fold_column Column with cross-validation fold index assignment per observation.
 * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
 *                        option will stratify the folds based on the response variable, for classification
 *                        problems.
 * @param categorical_encoding Encoding scheme for categorical features
 * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
 *                               for model training. Only used for categorical_encoding == EnumLimited.
 * @param ignored_columns Names of columns to ignore for training.
 * @param ignore_const_cols Ignore constant columns.
 * @param score_each_iteration Whether to score during each iteration of model training.
 * @param checkpoint Model checkpoint to resume training with.
 * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
 *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
 *                        to disable)
 * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
 * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
 *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
 *                        can only be used in GBM and DRF with the Python client.
 * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
 *                           not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
 * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
 * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
 * @param export_checkpoints_dir Automatically export generated models to this directory.
 * @param auc_type Set default multinomial AUC type.
 */
@FormUrlEncoded
@POST("/99/ModelBuilders/stackedensemble")
Call<StackedEnsembleV99> trainStackedensemble(
  @Field("base_models") String[] base_models,
  @Field("metalearner_algorithm") EnsembleMetalearnerAlgorithm metalearner_algorithm,
  @Field("metalearner_nfolds") int metalearner_nfolds,
  @Field("metalearner_fold_assignment") ModelParametersFoldAssignmentScheme metalearner_fold_assignment,
  @Field("metalearner_fold_column") String metalearner_fold_column,
  @Field("metalearner_transform") EnsembleStackedEnsembleModelStackedEnsembleParametersMetalearnerTransform metalearner_transform,
  @Field("keep_levelone_frame") boolean keep_levelone_frame,
  @Field("metalearner_params") String metalearner_params,
  @Field("blending_frame") String blending_frame,
  @Field("seed") long seed,
  @Field("score_training_samples") long score_training_samples,
  @Field("model_id") String model_id,
  @Field("training_frame") String training_frame,
  @Field("validation_frame") String validation_frame,
  @Field("nfolds") int nfolds,
  @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
  @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
  @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
  @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
  @Field("distribution") GenmodelutilsDistributionFamily distribution,
  @Field("tweedie_power") double tweedie_power,
  @Field("quantile_alpha") double quantile_alpha,
  @Field("huber_alpha") double huber_alpha,
  @Field("response_column") String response_column,
  @Field("weights_column") String weights_column,
  @Field("offset_column") String offset_column,
  @Field("fold_column") String fold_column,
  @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
  @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
  @Field("max_categorical_levels") int max_categorical_levels,
  @Field("ignored_columns") String[] ignored_columns,
  @Field("ignore_const_cols") boolean ignore_const_cols,
  @Field("score_each_iteration") boolean score_each_iteration,
  @Field("checkpoint") String checkpoint,
  @Field("stopping_rounds") int stopping_rounds,
  @Field("max_runtime_secs") double max_runtime_secs,
  @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
  @Field("stopping_tolerance") double stopping_tolerance,
  @Field("gainslift_bins") int gainslift_bins,
  @Field("custom_metric_func") String custom_metric_func,
  @Field("custom_distribution_func") String custom_distribution_func,
  @Field("export_checkpoints_dir") String export_checkpoints_dir,
  @Field("auc_type") MultinomialAucType auc_type
);
/**
 * Train a StackedEnsemble model supplying only the required base_models field;
 * server-side defaults apply to every other parameter.
 * @param base_models List of models or grids (or their ids) to ensemble/stack together.
 */
@FormUrlEncoded
@POST("/99/ModelBuilders/stackedensemble")
Call<StackedEnsembleV99> trainStackedensemble(@Field("base_models") String[] base_models);
/**
 * Validate a set of StackedEnsemble model builder parameters.
 * @param base_models List of models or grids (or their ids) to ensemble/stack together. Grids are expanded to
 *                    individual models. If not using blending frame, then models must have been cross-validated
 *                    using nfolds > 1, and folds must be identical across models.
 * @param metalearner_algorithm Type of algorithm to use as the metalearner. Options include 'AUTO' (GLM with non
 *                              negative weights; if validation_frame is present, a lambda search is performed),
 *                              'deeplearning' (Deep Learning with default parameters), 'drf' (Random Forest with
 *                              default parameters), 'gbm' (GBM with default parameters), 'glm' (GLM with default
 *                              parameters), 'naivebayes' (NaiveBayes with default parameters), or 'xgboost' (if
 *                              available, XGBoost with default parameters).
 * @param metalearner_nfolds Number of folds for K-fold cross-validation of the metalearner algorithm (0 to disable
 *                           or >= 2).
 * @param metalearner_fold_assignment Cross-validation fold assignment scheme for metalearner cross-validation.
 *                                    Defaults to AUTO (which is currently set to Random). The 'Stratified' option
 *                                    will stratify the folds based on the response variable, for classification
 *                                    problems.
 * @param metalearner_fold_column Column with cross-validation fold index assignment per observation for cross-
 *                                validation of the metalearner.
 * @param metalearner_transform Transformation used for the level one frame.
 * @param keep_levelone_frame Keep level one frame used for metalearner training.
 * @param metalearner_params Parameters for metalearner algorithm
 * @param blending_frame Frame used to compute the predictions that serve as the training frame for the metalearner
 *                       (triggers blending mode if provided)
 * @param seed Seed for random numbers; passed through to the metalearner algorithm. Defaults to -1 (time-based
 *             random number)
 * @param score_training_samples Specify the number of training set samples for scoring. The value must be >= 0. To
 *                               use all training samples, enter 0.
 * @param model_id Destination id for this model; auto-generated if not specified.
 * @param training_frame Id of the training data frame.
 * @param validation_frame Id of the validation data frame.
 * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
 * @param keep_cross_validation_models Whether to keep the cross-validation models.
 * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
 * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
 * @param parallelize_cross_validation Allow parallel training of cross-validation models
 * @param distribution Distribution function
 * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
 * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
 * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
 *                    between 0 and 1).
 * @param response_column Response variable column.
 * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
 *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
 *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
 *                       observation weights and do not increase the size of the data frame. This is typically the
 *                       number of times a row is repeated, but non-integer values are supported as well. During
 *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
 *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
 *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
 * @param offset_column Offset column. This will be added to the combination of columns before applying the link
 *                      function.
 * @param fold_column Column with cross-validation fold index assignment per observation.
 * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
 *                        option will stratify the folds based on the response variable, for classification
 *                        problems.
 * @param categorical_encoding Encoding scheme for categorical features
 * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
 *                               for model training. Only used for categorical_encoding == EnumLimited.
 * @param ignored_columns Names of columns to ignore for training.
 * @param ignore_const_cols Ignore constant columns.
 * @param score_each_iteration Whether to score during each iteration of model training.
 * @param checkpoint Model checkpoint to resume training with.
 * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
 *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
 *                        to disable)
 * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
 * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
 *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
 *                        can only be used in GBM and DRF with the Python client.
 * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
 *                           not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
 * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
 * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
 * @param export_checkpoints_dir Automatically export generated models to this directory.
 * @param auc_type Set default multinomial AUC type.
 */
@FormUrlEncoded
@POST("/99/ModelBuilders/stackedensemble/parameters")
Call<StackedEnsembleV99> validate_parametersStackedensemble(
  @Field("base_models") String[] base_models,
  @Field("metalearner_algorithm") EnsembleMetalearnerAlgorithm metalearner_algorithm,
  @Field("metalearner_nfolds") int metalearner_nfolds,
  @Field("metalearner_fold_assignment") ModelParametersFoldAssignmentScheme metalearner_fold_assignment,
  @Field("metalearner_fold_column") String metalearner_fold_column,
  @Field("metalearner_transform") EnsembleStackedEnsembleModelStackedEnsembleParametersMetalearnerTransform metalearner_transform,
  @Field("keep_levelone_frame") boolean keep_levelone_frame,
  @Field("metalearner_params") String metalearner_params,
  @Field("blending_frame") String blending_frame,
  @Field("seed") long seed,
  @Field("score_training_samples") long score_training_samples,
  @Field("model_id") String model_id,
  @Field("training_frame") String training_frame,
  @Field("validation_frame") String validation_frame,
  @Field("nfolds") int nfolds,
  @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
  @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
  @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
  @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
  @Field("distribution") GenmodelutilsDistributionFamily distribution,
  @Field("tweedie_power") double tweedie_power,
  @Field("quantile_alpha") double quantile_alpha,
  @Field("huber_alpha") double huber_alpha,
  @Field("response_column") String response_column,
  @Field("weights_column") String weights_column,
  @Field("offset_column") String offset_column,
  @Field("fold_column") String fold_column,
  @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
  @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
  @Field("max_categorical_levels") int max_categorical_levels,
  @Field("ignored_columns") String[] ignored_columns,
  @Field("ignore_const_cols") boolean ignore_const_cols,
  @Field("score_each_iteration") boolean score_each_iteration,
  @Field("checkpoint") String checkpoint,
  @Field("stopping_rounds") int stopping_rounds,
  @Field("max_runtime_secs") double max_runtime_secs,
  @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
  @Field("stopping_tolerance") double stopping_tolerance,
  @Field("gainslift_bins") int gainslift_bins,
  @Field("custom_metric_func") String custom_metric_func,
  @Field("custom_distribution_func") String custom_distribution_func,
  @Field("export_checkpoints_dir") String export_checkpoints_dir,
  @Field("auc_type") MultinomialAucType auc_type
);
/**
 * Validate a set of StackedEnsemble model builder parameters, sending only the {@code base_models}
 * form field; all other fields are omitted from the request.
 */
@FormUrlEncoded
@POST("/99/ModelBuilders/stackedensemble/parameters")
Call<StackedEnsembleV99> validate_parametersStackedensemble(@Field("base_models") String[] base_models);
/**
* Train a CoxPH model.
* @param start_column Start Time Column.
* @param stop_column Stop Time Column.
* @param stratify_by List of columns to use for stratification.
* @param ties Method for Handling Ties.
* @param init Coefficient starting value.
* @param lre_min Minimum log-relative error.
* @param max_iterations Maximum number of iterations.
 * @param interactions_only A list of columns that should only be used to create interactions but should not
 *                          themselves participate in model training.
* @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
* for the list.
* @param interaction_pairs A list of pairwise (first order) column interactions.
* @param use_all_factor_levels (Internal. For development only!) Indicates whether to use all factor levels.
* @param single_node_mode Run on a single node to reduce the effect of network overhead (for smaller datasets)
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/coxph")
Call<CoxPHV3> trainCoxph(
// --- CoxPH-specific fields (documented in the Javadoc above) ---
@Field("start_column") String start_column,
@Field("stop_column") String stop_column,
@Field("stratify_by") String[] stratify_by,
@Field("ties") CoxPHTies ties,
@Field("init") double init,
@Field("lre_min") double lre_min,
@Field("max_iterations") int max_iterations,
@Field("interactions_only") String[] interactions_only,
@Field("interactions") String[] interactions,
@Field("interaction_pairs") StringPairV3[] interaction_pairs,
@Field("use_all_factor_levels") boolean use_all_factor_levels,
@Field("single_node_mode") boolean single_node_mode,
// --- common model-builder fields: this trailing block is identical across the
// train*/validate_parameters* methods in this interface ---
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
/**
 * Overload of {@code trainCoxph} that sends no form fields with the request.
 * NOTE(review): Retrofit 2 rejects {@code @FormUrlEncoded} methods that declare no
 * {@code @Field} parameter ("Form-encoded method must contain at least one @Field") —
 * confirm this overload is actually invocable with the Retrofit version in use.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/coxph")
Call<CoxPHV3> trainCoxph();
/**
* Validate a set of CoxPH model builder parameters.
* @param start_column Start Time Column.
* @param stop_column Stop Time Column.
* @param stratify_by List of columns to use for stratification.
* @param ties Method for Handling Ties.
* @param init Coefficient starting value.
* @param lre_min Minimum log-relative error.
* @param max_iterations Maximum number of iterations.
 * @param interactions_only A list of columns that should only be used to create interactions but should not
 *                          themselves participate in model training.
* @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
* for the list.
* @param interaction_pairs A list of pairwise (first order) column interactions.
* @param use_all_factor_levels (Internal. For development only!) Indicates whether to use all factor levels.
* @param single_node_mode Run on a single node to reduce the effect of network overhead (for smaller datasets)
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/coxph/parameters")
Call<CoxPHV3> validate_parametersCoxph(
// --- CoxPH-specific fields (documented in the Javadoc above) ---
@Field("start_column") String start_column,
@Field("stop_column") String stop_column,
@Field("stratify_by") String[] stratify_by,
@Field("ties") CoxPHTies ties,
@Field("init") double init,
@Field("lre_min") double lre_min,
@Field("max_iterations") int max_iterations,
@Field("interactions_only") String[] interactions_only,
@Field("interactions") String[] interactions,
@Field("interaction_pairs") StringPairV3[] interaction_pairs,
@Field("use_all_factor_levels") boolean use_all_factor_levels,
@Field("single_node_mode") boolean single_node_mode,
// --- common model-builder fields: this trailing block is identical across the
// train*/validate_parameters* methods in this interface ---
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
/**
 * Overload of {@code validate_parametersCoxph} that sends no form fields with the request.
 * NOTE(review): Retrofit 2 rejects {@code @FormUrlEncoded} methods that declare no
 * {@code @Field} parameter ("Form-encoded method must contain at least one @Field") —
 * confirm this overload is actually invocable with the Retrofit version in use.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/coxph/parameters")
Call<CoxPHV3> validate_parametersCoxph();
/**
* Train a Generic model.
* @param path Path to file with self-contained model archive.
* @param model_key Key to the self-contained model archive already uploaded to H2O.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/generic")
Call<GenericV3> trainGeneric(
// --- Generic-model-specific fields (documented in the Javadoc above) ---
@Field("path") String path,
@Field("model_key") String model_key,
// --- common model-builder fields: this trailing block is identical across the
// train*/validate_parameters* methods in this interface ---
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
/**
 * Overload of {@code trainGeneric} that sends no form fields with the request.
 * NOTE(review): Retrofit 2 rejects {@code @FormUrlEncoded} methods that declare no
 * {@code @Field} parameter ("Form-encoded method must contain at least one @Field") —
 * confirm this overload is actually invocable with the Retrofit version in use.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/generic")
Call<GenericV3> trainGeneric();
/**
* Validate a set of Generic model builder parameters.
* @param path Path to file with self-contained model archive.
* @param model_key Key to the self-contained model archive already uploaded to H2O.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/generic/parameters")
Call<GenericV3> validate_parametersGeneric(
// --- Generic-model-specific fields (documented in the Javadoc above) ---
@Field("path") String path,
@Field("model_key") String model_key,
// --- common model-builder fields: this trailing block is identical across the
// train*/validate_parameters* methods in this interface ---
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
/**
 * Overload of {@code validate_parametersGeneric} that sends no form fields with the request.
 * NOTE(review): Retrofit 2 rejects {@code @FormUrlEncoded} methods that declare no
 * {@code @Field} parameter ("Form-encoded method must contain at least one @Field") —
 * confirm this overload is actually invocable with the Retrofit version in use.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/generic/parameters")
Call<GenericV3> validate_parametersGeneric();
/**
* Train a GAM model.
* @param seed Seed for pseudo random number generator (if applicable)
* @param family Family. Use binomial for classification with logistic regression, others are for regression
* problems.
* @param tweedie_variance_power Tweedie variance power
* @param tweedie_link_power Tweedie link power
* @param theta Theta
   * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
* with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
* datasets with many columns.
* @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
* alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
* specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
* 0.5 otherwise.
* @param lambda Regularization strength
* @param startval double array to initialize coefficients for GAM.
* @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
* @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided)
* @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
   *                 set to True, the value of nlambdas is set to 30 (fewer lambdas are needed for ridge regression)
* otherwise it is set to 100.
* @param standardize Standardize numeric columns to have zero mean and unit variance
* @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
* @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                    of the training/validation frame, use in conjunction with missing_values_handling = PlugValues)
* @param non_negative Restrict coefficients (not intercept) to be non-negative
* @param max_iterations Maximum number of iterations
   * @param beta_epsilon Converge if beta changes less (using L-infinity norm) than beta epsilon, ONLY applies to
* IRLSM solver
* @param objective_epsilon Converge if objective value changes less than this. Default indicates: If lambda_search
* is set to True the value of objective_epsilon is set to .0001. If the lambda_search is
* set to False and lambda is equal to zero, the value of objective_epsilon is set to
* .000001, for any other value of lambda the default value of objective_epsilon is set to
* .0001.
* @param gradient_epsilon Converge if objective changes less (using L-infinity norm) than this, ONLY applies to
* L-BFGS solver. Default indicates: If lambda_search is set to False and lambda is equal to
* zero, the default value of gradient_epsilon is equal to .000001, otherwise the default
* value is .0001. If lambda_search is set to True, the conditional values above are 1E-8
* and 1E-6 respectively.
* @param obj_reg Likelihood divider in objective value computation, default is 1/nobs
* @param link Link function.
* @param intercept Include constant term in the model
* @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
* and the mean of response does not reflect reality.
* @param cold_start Only applicable to multiple alpha/lambda values when calling GLM from GAM. If false, build the
* next model for next set of alpha/lambda values starting from the values provided by current
* model. If true will start GLM model from scratch.
* @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
* lambda that drives all coefficients to zero). Default indicates: if the number of
* observations is greater than the number of variables, then lambda_min_ratio is set to
* 0.0001; if the number of observations is less than the number of variables, then
* lambda_min_ratio is set to 0.01.
* @param beta_constraints Beta constraints
* @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
* to prevent expensive model building with many predictors. Default indicates: If the
* IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
* is set to 100000000.
* @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
* for the list.
* @param interaction_pairs A list of pairwise (first order) column interactions.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
* @param remove_collinear_columns In case of linearly dependent columns, remove some of the dependent columns
* @param store_knot_locations If set to true, will return knot locations as double[][] array for gam column names
* found knots_for_gam. Default to false.
* @param num_knots Number of knots for gam predictors. If specified, must specify one for each gam predictor. For
   *                  monotone I-splines, minimum = 2, for cs spline, minimum = 3. For thin plate, minimum is size of
* polynomial basis + 2.
* @param spline_orders Order of I-splines or NBSplineTypeI M-splines used for gam predictors. If specified, must be
* the same size as gam_columns. For I-splines, the spline_orders will be the same as the
* polynomials used to generate the splines. For M-splines, the polynomials used to generate
* the splines will be spline_order-1. Values for bs=0 or 1 will be ignored.
* @param splines_non_negative Valid for I-spline (bs=2) only. True if the I-splines are monotonically increasing
* (and monotonically non-decreasing) and False if the I-splines are monotonically
* decreasing (and monotonically non-increasing). If specified, must be the same size
* as gam_columns. Values for other spline types will be ignored. Default to true.
* @param gam_columns Arrays of predictor column names for gam for smoothers using single or multiple predictors
* like {{'c1'},{'c2','c3'},{'c4'},...}
* @param scale Smoothing parameter for gam predictors. If specified, must be of the same length as gam_columns
* @param bs Basis function type for each gam predictors, 0 for cr, 1 for thin plate regression with knots, 2 for
* monotone I-splines, 3 for NBSplineTypeI M-splines (refer to doc here:
* https://github.com/h2oai/h2o-3/issues/6926). If specified, must be the same size as gam_columns
* @param keep_gam_cols Save keys of model matrix
* @param standardize_tp_gam_cols standardize tp (thin plate) predictor columns
* @param scale_tp_penalty_mat Scale penalty matrix for tp (thin plate) smoothers as in R
* @param knot_ids Array storing frame keys of knots. One for each gam column set specified in gam_columns
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/3/ModelBuilders/gam")
  Call<GAMV3> trainGam(
    @Field("seed") long seed,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("solver") GLMSolver solver,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("startval") double[] startval,
    @Field("lambda_search") boolean lambda_search,
    @Field("early_stopping") boolean early_stopping,
    @Field("nlambdas") int nlambdas,
    @Field("standardize") boolean standardize,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("max_iterations") int max_iterations,
    @Field("beta_epsilon") double beta_epsilon,
    @Field("objective_epsilon") double objective_epsilon,
    @Field("gradient_epsilon") double gradient_epsilon,
    @Field("obj_reg") double obj_reg,
    @Field("link") GLMLink link,
    @Field("intercept") boolean intercept,
    @Field("prior") double prior,
    @Field("cold_start") boolean cold_start,
    @Field("lambda_min_ratio") double lambda_min_ratio,
    @Field("beta_constraints") String beta_constraints,
    @Field("max_active_predictors") int max_active_predictors,
    @Field("interactions") String[] interactions,
    @Field("interaction_pairs") StringPairV3[] interaction_pairs,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("remove_collinear_columns") boolean remove_collinear_columns,
    @Field("store_knot_locations") boolean store_knot_locations,
    @Field("num_knots") int[] num_knots,
    @Field("spline_orders") int[] spline_orders,
    @Field("splines_non_negative") boolean[] splines_non_negative,
    @Field("gam_columns") String[][] gam_columns,  // the one parameter also carried by the minimal trainGam overload
    @Field("scale") double[] scale,
    @Field("bs") int[] bs,
    @Field("keep_gam_cols") boolean keep_gam_cols,
    @Field("standardize_tp_gam_cols") boolean standardize_tp_gam_cols,
    @Field("scale_tp_penalty_mat") boolean scale_tp_penalty_mat,
    @Field("knot_ids") String[] knot_ids,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Minimal {@code trainGam} overload supplying only {@code gam_columns}; every other builder
   * parameter is simply omitted from the request (presumably the server applies its defaults —
   * TODO confirm against the H2O REST API).
   * @param gam_columns Arrays of predictor column names for gam smoothers using single or
   *                    multiple predictors, e.g. {{'c1'},{'c2','c3'},{'c4'},...}
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/gam")
  Call<GAMV3> trainGam(@Field("gam_columns") String[][] gam_columns);
/**
* Validate a set of GAM model builder parameters.
* @param seed Seed for pseudo random number generator (if applicable)
* @param family Family. Use binomial for classification with logistic regression, others are for regression
* problems.
* @param tweedie_variance_power Tweedie variance power
* @param tweedie_link_power Tweedie link power
* @param theta Theta
   * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
* with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
* datasets with many columns.
* @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
* alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
* specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
* 0.5 otherwise.
* @param lambda Regularization strength
* @param startval double array to initialize coefficients for GAM.
* @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
* @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided)
* @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
   *                 set to True, the value of nlambdas is set to 30 (fewer lambdas are needed for ridge regression)
* otherwise it is set to 100.
* @param standardize Standardize numeric columns to have zero mean and unit variance
* @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
* @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                    of the training/validation frame, use in conjunction with missing_values_handling = PlugValues)
* @param non_negative Restrict coefficients (not intercept) to be non-negative
* @param max_iterations Maximum number of iterations
   * @param beta_epsilon Converge if beta changes less (using L-infinity norm) than beta epsilon, ONLY applies to
* IRLSM solver
* @param objective_epsilon Converge if objective value changes less than this. Default indicates: If lambda_search
* is set to True the value of objective_epsilon is set to .0001. If the lambda_search is
* set to False and lambda is equal to zero, the value of objective_epsilon is set to
* .000001, for any other value of lambda the default value of objective_epsilon is set to
* .0001.
* @param gradient_epsilon Converge if objective changes less (using L-infinity norm) than this, ONLY applies to
* L-BFGS solver. Default indicates: If lambda_search is set to False and lambda is equal to
* zero, the default value of gradient_epsilon is equal to .000001, otherwise the default
* value is .0001. If lambda_search is set to True, the conditional values above are 1E-8
* and 1E-6 respectively.
* @param obj_reg Likelihood divider in objective value computation, default is 1/nobs
* @param link Link function.
* @param intercept Include constant term in the model
* @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
* and the mean of response does not reflect reality.
* @param cold_start Only applicable to multiple alpha/lambda values when calling GLM from GAM. If false, build the
* next model for next set of alpha/lambda values starting from the values provided by current
* model. If true will start GLM model from scratch.
* @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
* lambda that drives all coefficients to zero). Default indicates: if the number of
* observations is greater than the number of variables, then lambda_min_ratio is set to
* 0.0001; if the number of observations is less than the number of variables, then
* lambda_min_ratio is set to 0.01.
* @param beta_constraints Beta constraints
* @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
* to prevent expensive model building with many predictors. Default indicates: If the
* IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
* is set to 100000000.
* @param interactions A list of predictor column indices to interact. All pairwise combinations will be computed
* for the list.
* @param interaction_pairs A list of pairwise (first order) column interactions.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
* @param remove_collinear_columns In case of linearly dependent columns, remove some of the dependent columns
* @param store_knot_locations If set to true, will return knot locations as double[][] array for gam column names
* found knots_for_gam. Default to false.
* @param num_knots Number of knots for gam predictors. If specified, must specify one for each gam predictor. For
   *                  monotone I-splines, minimum = 2, for cs spline, minimum = 3. For thin plate, minimum is size of
* polynomial basis + 2.
* @param spline_orders Order of I-splines or NBSplineTypeI M-splines used for gam predictors. If specified, must be
* the same size as gam_columns. For I-splines, the spline_orders will be the same as the
* polynomials used to generate the splines. For M-splines, the polynomials used to generate
* the splines will be spline_order-1. Values for bs=0 or 1 will be ignored.
* @param splines_non_negative Valid for I-spline (bs=2) only. True if the I-splines are monotonically increasing
* (and monotonically non-decreasing) and False if the I-splines are monotonically
* decreasing (and monotonically non-increasing). If specified, must be the same size
* as gam_columns. Values for other spline types will be ignored. Default to true.
* @param gam_columns Arrays of predictor column names for gam for smoothers using single or multiple predictors
* like {{'c1'},{'c2','c3'},{'c4'},...}
* @param scale Smoothing parameter for gam predictors. If specified, must be of the same length as gam_columns
* @param bs Basis function type for each gam predictors, 0 for cr, 1 for thin plate regression with knots, 2 for
* monotone I-splines, 3 for NBSplineTypeI M-splines (refer to doc here:
* https://github.com/h2oai/h2o-3/issues/6926). If specified, must be the same size as gam_columns
* @param keep_gam_cols Save keys of model matrix
* @param standardize_tp_gam_cols standardize tp (thin plate) predictor columns
* @param scale_tp_penalty_mat Scale penalty matrix for tp (thin plate) smoothers as in R
* @param knot_ids Array storing frame keys of knots. One for each gam column set specified in gam_columns
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/3/ModelBuilders/gam/parameters")
  Call<GAMV3> validate_parametersGam(
    @Field("seed") long seed,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("solver") GLMSolver solver,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("startval") double[] startval,
    @Field("lambda_search") boolean lambda_search,
    @Field("early_stopping") boolean early_stopping,
    @Field("nlambdas") int nlambdas,
    @Field("standardize") boolean standardize,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("max_iterations") int max_iterations,
    @Field("beta_epsilon") double beta_epsilon,
    @Field("objective_epsilon") double objective_epsilon,
    @Field("gradient_epsilon") double gradient_epsilon,
    @Field("obj_reg") double obj_reg,
    @Field("link") GLMLink link,
    @Field("intercept") boolean intercept,
    @Field("prior") double prior,
    @Field("cold_start") boolean cold_start,
    @Field("lambda_min_ratio") double lambda_min_ratio,
    @Field("beta_constraints") String beta_constraints,
    @Field("max_active_predictors") int max_active_predictors,
    @Field("interactions") String[] interactions,
    @Field("interaction_pairs") StringPairV3[] interaction_pairs,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("remove_collinear_columns") boolean remove_collinear_columns,
    @Field("store_knot_locations") boolean store_knot_locations,
    @Field("num_knots") int[] num_knots,
    @Field("spline_orders") int[] spline_orders,
    @Field("splines_non_negative") boolean[] splines_non_negative,
    @Field("gam_columns") String[][] gam_columns,  // the one parameter also carried by the minimal validate_parametersGam overload
    @Field("scale") double[] scale,
    @Field("bs") int[] bs,
    @Field("keep_gam_cols") boolean keep_gam_cols,
    @Field("standardize_tp_gam_cols") boolean standardize_tp_gam_cols,
    @Field("scale_tp_penalty_mat") boolean scale_tp_penalty_mat,
    @Field("knot_ids") String[] knot_ids,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Minimal {@code validate_parametersGam} overload supplying only {@code gam_columns}; every
   * other builder parameter is simply omitted from the request (presumably the server applies
   * its defaults — TODO confirm against the H2O REST API).
   * @param gam_columns Arrays of predictor column names for gam smoothers using single or
   *                    multiple predictors, e.g. {{'c1'},{'c2','c3'},{'c4'},...}
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/gam/parameters")
  Call<GAMV3> validate_parametersGam(@Field("gam_columns") String[][] gam_columns);
  /**
   * Train an ANOVAGLM model.
   * @param seed Seed for pseudo random number generator (if applicable)
   * @param standardize Standardize numeric columns to have zero mean and unit variance
   * @param family Family. Use binomial for classification with logistic regression, others are for regression
   *               problems.
   * @param tweedie_variance_power Tweedie variance power
   * @param tweedie_link_power Tweedie link power
   * @param theta Theta
   * @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
   *              alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
   *              specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
   *              0.5 otherwise.
   * @param lambda Regularization strength
   * @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
   * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
   *               with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *               datasets with many columns.
   * @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
   * @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                    of the training/validation frame, use in conjunction with missing_values_handling = PlugValues)
   * @param non_negative Restrict coefficients (not intercept) to be non-negative
   * @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
   * @param max_iterations Maximum number of iterations
   * @param link Link function.
   * @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *              and the mean of response does not reflect reality.
   * @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   * @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                               specified, sampling factors will be automatically computed to obtain class balance
   *                               during training. Requires balance_classes.
   * @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                               less than 1.0). Requires balance_classes.
   * @param highest_interaction_term Limit the number of interaction terms, if 2 means interaction between 2 columns
   *                                 only, 3 for three columns and so on... Default to 2.
   * @param type Refer to the SS type 1, 2, 3, or 4. We are currently only supporting 3
   * @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided).
   * @param save_transformed_framekeys true to save the keys of transformed predictors and interaction column.
   * @param nparallelism Number of models to build in parallel. Default to 4. Adjust according to your system.
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/anovaglm")
  Call<ANOVAGLMV3> trainAnovaglm(
    @Field("seed") long seed,
    @Field("standardize") boolean standardize,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("lambda_search") boolean lambda_search,
    @Field("solver") GLMSolver solver,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("max_iterations") int max_iterations,
    @Field("link") GLMLink link,
    @Field("prior") double prior,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("highest_interaction_term") int highest_interaction_term,
    @Field("type") int type,
    @Field("early_stopping") boolean early_stopping,
    @Field("save_transformed_framekeys") boolean save_transformed_framekeys,
    @Field("nparallelism") int nparallelism,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Train an ANOVAGLM model using default values for all parameters (no form fields are sent).
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/anovaglm")
  Call<ANOVAGLMV3> trainAnovaglm();
  /**
   * Validate a set of ANOVAGLM model builder parameters.
   * @param seed Seed for pseudo random number generator (if applicable)
   * @param standardize Standardize numeric columns to have zero mean and unit variance
   * @param family Family. Use binomial for classification with logistic regression, others are for regression
   *               problems.
   * @param tweedie_variance_power Tweedie variance power
   * @param tweedie_link_power Tweedie link power
   * @param theta Theta
   * @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
   *              alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
   *              specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
   *              0.5 otherwise.
   * @param lambda Regularization strength
   * @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
   * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
   *               with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
   *               datasets with many columns.
   * @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
   * @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                    of the training/validation frame, use in conjunction with missing_values_handling = PlugValues)
   * @param non_negative Restrict coefficients (not intercept) to be non-negative
   * @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
   * @param max_iterations Maximum number of iterations
   * @param link Link function.
   * @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
   *              and the mean of response does not reflect reality.
   * @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
   * @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
   *                               specified, sampling factors will be automatically computed to obtain class balance
   *                               during training. Requires balance_classes.
   * @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
   *                               less than 1.0). Requires balance_classes.
   * @param highest_interaction_term Limit the number of interaction terms, if 2 means interaction between 2 columns
   *                                 only, 3 for three columns and so on... Default to 2.
   * @param type Refer to the SS type 1, 2, 3, or 4. We are currently only supporting 3
   * @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided).
   * @param save_transformed_framekeys true to save the keys of transformed predictors and interaction column.
   * @param nparallelism Number of models to build in parallel. Default to 4. Adjust according to your system.
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/anovaglm/parameters")
  Call<ANOVAGLMV3> validate_parametersAnovaglm(
    @Field("seed") long seed,
    @Field("standardize") boolean standardize,
    @Field("family") GLMFamily family,
    @Field("tweedie_variance_power") double tweedie_variance_power,
    @Field("tweedie_link_power") double tweedie_link_power,
    @Field("theta") double theta,
    @Field("alpha") double[] alpha,
    @Field("lambda") double[] lambda,
    @Field("lambda_search") boolean lambda_search,
    @Field("solver") GLMSolver solver,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("non_negative") boolean non_negative,
    @Field("compute_p_values") boolean compute_p_values,
    @Field("max_iterations") int max_iterations,
    @Field("link") GLMLink link,
    @Field("prior") double prior,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("highest_interaction_term") int highest_interaction_term,
    @Field("type") int type,
    @Field("early_stopping") boolean early_stopping,
    @Field("save_transformed_framekeys") boolean save_transformed_framekeys,
    @Field("nparallelism") int nparallelism,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Validate a set of ANOVAGLM model builder parameters using default values for all parameters (no form fields are
   * sent).
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/anovaglm/parameters")
  Call<ANOVAGLMV3> validate_parametersAnovaglm();
  /**
   * Train a PSVM model.
   * @param hyper_param Penalty parameter C of the error term
   * @param kernel_type Type of used kernel
   * @param gamma Coefficient of the kernel (currently RBF gamma for gaussian kernel, -1 means 1/#features)
   * @param rank_ratio Desired rank of the ICF matrix expressed as a ratio of number of input rows (-1 means use
   *                   sqrt(#rows)).
   * @param positive_weight Weight of positive (+1) class of observations
   * @param negative_weight Weight of negative (-1) class of observations
   * @param disable_training_metrics Disable calculating training metrics (expensive on large datasets)
   * @param sv_threshold Threshold for accepting a candidate observation into the set of support vectors
   * @param max_iterations Maximum number of iteration of the algorithm
   * @param fact_threshold Convergence threshold of the Incomplete Cholesky Factorization (ICF)
   * @param feasible_threshold Convergence threshold for primal-dual residuals in the IPM iteration
   * @param surrogate_gap_threshold Feasibility criterion of the surrogate duality gap (eta)
   * @param mu_factor Increasing factor mu
   * @param seed Seed for pseudo random number generator (if applicable)
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/psvm")
  Call<PSVMV3> trainPsvm(
    @Field("hyper_param") double hyper_param,
    @Field("kernel_type") GenmodelalgospsvmKernelType kernel_type,
    @Field("gamma") double gamma,
    @Field("rank_ratio") double rank_ratio,
    @Field("positive_weight") double positive_weight,
    @Field("negative_weight") double negative_weight,
    @Field("disable_training_metrics") boolean disable_training_metrics,
    @Field("sv_threshold") double sv_threshold,
    @Field("max_iterations") int max_iterations,
    @Field("fact_threshold") double fact_threshold,
    @Field("feasible_threshold") double feasible_threshold,
    @Field("surrogate_gap_threshold") double surrogate_gap_threshold,
    @Field("mu_factor") double mu_factor,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Train a PSVM model using default values for all parameters (no form fields are sent).
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/psvm")
  Call<PSVMV3> trainPsvm();
/**
* Validate a set of PSVM model builder parameters.
* @param hyper_param Penalty parameter C of the error term
* @param kernel_type Type of used kernel
* @param gamma Coefficient of the kernel (currently RBF gamma for gaussian kernel, -1 means 1/#features)
* @param rank_ratio Desired rank of the ICF matrix expressed as an ration of number of input rows (-1 means use
* sqrt(#rows)).
* @param positive_weight Weight of positive (+1) class of observations
* @param negative_weight Weight of positive (-1) class of observations
* @param disable_training_metrics Disable calculating training metrics (expensive on large datasets)
* @param sv_threshold Threshold for accepting a candidate observation into the set of support vectors
* @param max_iterations Maximum number of iteration of the algorithm
* @param fact_threshold Convergence threshold of the Incomplete Cholesky Factorization (ICF)
* @param feasible_threshold Convergence threshold for primal-dual residuals in the IPM iteration
* @param surrogate_gap_threshold Feasibility criterion of the surrogate duality gap (eta)
* @param mu_factor Increasing factor mu
* @param seed Seed for pseudo random number generator (if applicable)
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Validates a set of PSVM model-builder parameters on the server without launching a training
  // job; every parameter is posted as an individual form-url-encoded field. Enum-typed fields
  // (kernel_type, distribution, fold_assignment, ...) are presumably serialized by name —
  // NOTE(review): confirm against the generated Retrofit converter configuration.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/psvm/parameters")
  Call<PSVMV3> validate_parametersPsvm(
    @Field("hyper_param") double hyper_param,
    @Field("kernel_type") GenmodelalgospsvmKernelType kernel_type,
    @Field("gamma") double gamma,
    @Field("rank_ratio") double rank_ratio,
    @Field("positive_weight") double positive_weight,
    @Field("negative_weight") double negative_weight,
    @Field("disable_training_metrics") boolean disable_training_metrics,
    @Field("sv_threshold") double sv_threshold,
    @Field("max_iterations") int max_iterations,
    @Field("fact_threshold") double fact_threshold,
    @Field("feasible_threshold") double feasible_threshold,
    @Field("surrogate_gap_threshold") double surrogate_gap_threshold,
    @Field("mu_factor") double mu_factor,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Validate a set of PSVM model builder parameters, relying on server-side defaults for every
   * field (no form fields are sent in the request body).
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/psvm/parameters")
  Call<PSVMV3> validate_parametersPsvm();
/**
* Train a RuleFit model.
* @param seed Seed for pseudo random number generator (if applicable).
* @param algorithm The algorithm to use to generate rules.
* @param min_rule_length Minimum length of rules. Defaults to 3.
* @param max_rule_length Maximum length of rules. Defaults to 3.
 * @param max_num_rules The maximum number of rules to return. Defaults to -1, which means the number of rules
 *                      is selected by diminishing returns in model deviance.
* @param model_type Specifies type of base learners in the ensemble.
* @param rule_generation_ntrees Specifies the number of trees to build in the tree model. Defaults to 50.
* @param remove_duplicates Whether to remove rules which are identical to an earlier rule. Defaults to true.
* @param lambda Lambda for LASSO regressor.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Submits a RuleFit training job; every parameter is posted as an individual form-url-encoded
  // field (see the javadoc above for per-parameter semantics). The returned Call resolves to the
  // job/model schema, not to a completed model — polling for completion happens elsewhere.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/rulefit")
  Call<RuleFitV3> trainRulefit(
    @Field("seed") long seed,
    @Field("algorithm") RuleFitModelAlgorithm algorithm,
    @Field("min_rule_length") int min_rule_length,
    @Field("max_rule_length") int max_rule_length,
    @Field("max_num_rules") int max_num_rules,
    @Field("model_type") RuleFitModelModelType model_type,
    @Field("rule_generation_ntrees") int rule_generation_ntrees,
    @Field("remove_duplicates") boolean remove_duplicates,
    @Field("lambda") double[] lambda,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Train a RuleFit model, relying on server-side defaults for every parameter
   * (no form fields are sent in the request body).
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/rulefit")
  Call<RuleFitV3> trainRulefit();
/**
* Validate a set of RuleFit model builder parameters.
* @param seed Seed for pseudo random number generator (if applicable).
* @param algorithm The algorithm to use to generate rules.
* @param min_rule_length Minimum length of rules. Defaults to 3.
* @param max_rule_length Maximum length of rules. Defaults to 3.
 * @param max_num_rules The maximum number of rules to return. Defaults to -1, which means the number of rules
 *                      is selected by diminishing returns in model deviance.
* @param model_type Specifies type of base learners in the ensemble.
* @param rule_generation_ntrees Specifies the number of trees to build in the tree model. Defaults to 50.
* @param remove_duplicates Whether to remove rules which are identical to an earlier rule. Defaults to true.
* @param lambda Lambda for LASSO regressor.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Validates a set of RuleFit model-builder parameters on the server without launching a
  // training job; same parameter list as trainRulefit, posted to the .../parameters endpoint.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/rulefit/parameters")
  Call<RuleFitV3> validate_parametersRulefit(
    @Field("seed") long seed,
    @Field("algorithm") RuleFitModelAlgorithm algorithm,
    @Field("min_rule_length") int min_rule_length,
    @Field("max_rule_length") int max_rule_length,
    @Field("max_num_rules") int max_num_rules,
    @Field("model_type") RuleFitModelModelType model_type,
    @Field("rule_generation_ntrees") int rule_generation_ntrees,
    @Field("remove_duplicates") boolean remove_duplicates,
    @Field("lambda") double[] lambda,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Validate a set of RuleFit model builder parameters, relying on server-side defaults for
   * every field (no form fields are sent in the request body).
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/rulefit/parameters")
  Call<RuleFitV3> validate_parametersRulefit();
/**
* Train a UpliftDRF model.
 * @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p}
 *               for classification and p/3 for regression (where p is the # of predictors).
* @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
* @param treatment_column Define the column which will be used for computing uplift gain to select best split for a
* tree. The column has to divide the dataset into treatment (value 1) and control (value 0)
* groups.
* @param uplift_metric Divergence metric used to find best split when building an uplift tree.
* @param auuc_type Metric used to calculate Area Under Uplift Curve.
* @param auuc_nbins Number of bins to calculate Area Under Uplift Curve.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param ntrees Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
* best point
* @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
* root level, then decrease by factor of two per level
* @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
* point. Higher values can lead to more overfitting.
* @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
* stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
* trees when the R^2 metric equals or exceeds this
* @param seed Seed for pseudo random number generator (if applicable)
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
* 1.0), for each tree
* @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
* 0.0 and <= 2.0)
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
* @param histogram_type What type of histogram to use for finding optimal split points
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
* the response column is a constant value.If disabled, then model will train
* regardless of the response column being a constant value or not.
* @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
* running. In case of cluster shutdown, this checkpoint can be used to restart
* training.
* @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
* only when in_training_checkpoints_dir is defined
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Submits an Uplift Distributed Random Forest training job; every parameter is posted as an
  // individual form-url-encoded field (see the javadoc above for per-parameter semantics).
  // treatment_column splits the data into treatment (1) and control (0) groups per the javadoc.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/upliftdrf")
  Call<UpliftDRFV3> trainUpliftdrf(
    @Field("mtries") int mtries,
    @Field("sample_rate") double sample_rate,
    @Field("treatment_column") String treatment_column,
    @Field("uplift_metric") TreeupliftUpliftDRFModelUpliftDRFParametersUpliftMetricType uplift_metric,
    @Field("auuc_type") AUUCType auuc_type,
    @Field("auuc_nbins") int auuc_nbins,
    @Field("balance_classes") boolean balance_classes,
    @Field("class_sampling_factors") float[] class_sampling_factors,
    @Field("max_after_balance_size") float max_after_balance_size,
    @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
    @Field("ntrees") int ntrees,
    @Field("max_depth") int max_depth,
    @Field("min_rows") double min_rows,
    @Field("nbins") int nbins,
    @Field("nbins_top_level") int nbins_top_level,
    @Field("nbins_cats") int nbins_cats,
    @Field("r2_stopping") double r2_stopping,
    @Field("seed") long seed,
    @Field("build_tree_one_node") boolean build_tree_one_node,
    @Field("sample_rate_per_class") double[] sample_rate_per_class,
    @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
    @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
    @Field("score_tree_interval") int score_tree_interval,
    @Field("min_split_improvement") double min_split_improvement,
    @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
    @Field("calibrate_model") boolean calibrate_model,
    @Field("calibration_frame") String calibration_frame,
    @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
    @Field("check_constant_response") boolean check_constant_response,
    @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
    @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  /**
   * Train an UpliftDRF model with server-side defaults for all other parameters; only
   * treatment_column is sent (presumably the one field with no usable default — TODO confirm).
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/upliftdrf")
  Call<UpliftDRFV3> trainUpliftdrf(@Field("treatment_column") String treatment_column);
/**
* Validate a set of UpliftDRF model builder parameters.
* @param mtries Number of variables randomly sampled as candidates at each split. If set to -1, defaults to sqrt{p}
* for classification and p/3 for regression (where p is the # of predictors).
* @param sample_rate Row sample rate per tree (from 0.0 to 1.0)
* @param treatment_column Define the column which will be used for computing uplift gain to select best split for a
* tree. The column has to divide the dataset into treatment (value 1) and control (value 0)
* groups.
* @param uplift_metric Divergence metric used to find best split when building an uplift tree.
* @param auuc_type Metric used to calculate Area Under Uplift Curve.
* @param auuc_nbins Number of bins to calculate Area Under Uplift Curve.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param ntrees Number of trees.
* @param max_depth Maximum tree depth (0 for unlimited).
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param nbins For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the
* best point
* @param nbins_top_level For numerical columns (real/int), build a histogram of (at most) this many bins at the
* root level, then decrease by factor of two per level
* @param nbins_cats For categorical columns (factors), build a histogram of this many bins, then split at the best
* point. Higher values can lead to more overfitting.
* @param r2_stopping r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds,
* stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making
* trees when the R^2 metric equals or exceeds this
* @param seed Seed for pseudo random number generator (if applicable)
* @param build_tree_one_node Run on one node only; no network overhead but fewer cpus used. Suitable for small
* datasets.
* @param sample_rate_per_class A list of row sample rates per class (relative fraction for each class, from 0.0 to
* 1.0), for each tree
* @param col_sample_rate_per_tree Column sample rate per tree (from 0.0 to 1.0)
* @param col_sample_rate_change_per_level Relative change of the column sampling rate for every level (must be >
* 0.0 and <= 2.0)
* @param score_tree_interval Score the model after every so many trees. Disabled if set to 0.
* @param min_split_improvement Minimum relative improvement in squared error reduction for a split to happen
* @param histogram_type What type of histogram to use for finding optimal split points
* @param calibrate_model Use Platt Scaling (default) or Isotonic Regression to calculate calibrated class
* probabilities. Calibration can provide more accurate estimates of class probabilities.
* @param calibration_frame Data for model calibration
* @param calibration_method Calibration method to use
* @param check_constant_response Check if response column is constant. If enabled, then an exception is thrown if
* the response column is a constant value.If disabled, then model will train
* regardless of the response column being a constant value or not.
* @param in_training_checkpoints_dir Create checkpoints into defined directory while training process is still
* running. In case of cluster shutdown, this checkpoint can be used to restart
* training.
* @param in_training_checkpoints_tree_interval Checkpoint the model after every so many trees. Parameter is used
* only when in_training_checkpoints_dir is defined
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
// Sends the full UpliftDRF parameter set as form fields to the /parameters endpoint; the server
// validates them without training and returns any validation messages in the UpliftDRFV3 schema.
// Per-parameter descriptions are in the javadoc block above.
@FormUrlEncoded
@POST("/3/ModelBuilders/upliftdrf/parameters")
Call<UpliftDRFV3> validate_parametersUpliftdrf(
  @Field("mtries") int mtries,
  @Field("sample_rate") double sample_rate,
  @Field("treatment_column") String treatment_column,
  @Field("uplift_metric") TreeupliftUpliftDRFModelUpliftDRFParametersUpliftMetricType uplift_metric,
  @Field("auuc_type") AUUCType auuc_type,
  @Field("auuc_nbins") int auuc_nbins,
  @Field("balance_classes") boolean balance_classes,
  @Field("class_sampling_factors") float[] class_sampling_factors,
  @Field("max_after_balance_size") float max_after_balance_size,
  @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
  @Field("ntrees") int ntrees,
  @Field("max_depth") int max_depth,
  @Field("min_rows") double min_rows,
  @Field("nbins") int nbins,
  @Field("nbins_top_level") int nbins_top_level,
  @Field("nbins_cats") int nbins_cats,
  @Field("r2_stopping") double r2_stopping,
  @Field("seed") long seed,
  @Field("build_tree_one_node") boolean build_tree_one_node,
  @Field("sample_rate_per_class") double[] sample_rate_per_class,
  @Field("col_sample_rate_per_tree") double col_sample_rate_per_tree,
  @Field("col_sample_rate_change_per_level") double col_sample_rate_change_per_level,
  @Field("score_tree_interval") int score_tree_interval,
  @Field("min_split_improvement") double min_split_improvement,
  @Field("histogram_type") TreeSharedTreeModelSharedTreeParametersHistogramType histogram_type,
  @Field("calibrate_model") boolean calibrate_model,
  @Field("calibration_frame") String calibration_frame,
  @Field("calibration_method") TreeCalibrationHelperCalibrationMethod calibration_method,
  @Field("check_constant_response") boolean check_constant_response,
  @Field("in_training_checkpoints_dir") String in_training_checkpoints_dir,
  @Field("in_training_checkpoints_tree_interval") int in_training_checkpoints_tree_interval,
  @Field("model_id") String model_id,
  @Field("training_frame") String training_frame,
  @Field("validation_frame") String validation_frame,
  @Field("nfolds") int nfolds,
  @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
  @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
  @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
  @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
  @Field("distribution") GenmodelutilsDistributionFamily distribution,
  @Field("tweedie_power") double tweedie_power,
  @Field("quantile_alpha") double quantile_alpha,
  @Field("huber_alpha") double huber_alpha,
  @Field("response_column") String response_column,
  @Field("weights_column") String weights_column,
  @Field("offset_column") String offset_column,
  @Field("fold_column") String fold_column,
  @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
  @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
  @Field("max_categorical_levels") int max_categorical_levels,
  @Field("ignored_columns") String[] ignored_columns,
  @Field("ignore_const_cols") boolean ignore_const_cols,
  @Field("score_each_iteration") boolean score_each_iteration,
  @Field("checkpoint") String checkpoint,
  @Field("stopping_rounds") int stopping_rounds,
  @Field("max_runtime_secs") double max_runtime_secs,
  @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
  @Field("stopping_tolerance") double stopping_tolerance,
  @Field("gainslift_bins") int gainslift_bins,
  @Field("custom_metric_func") String custom_metric_func,
  @Field("custom_distribution_func") String custom_distribution_func,
  @Field("export_checkpoints_dir") String export_checkpoints_dir,
  @Field("auc_type") MultinomialAucType auc_type
);
/**
 * Validate UpliftDRF builder parameters supplying only the required treatment_column; every other
 * hyper-parameter takes its server-side default.
 * @param treatment_column Define the column which will be used for computing uplift gain to select best split for a
 *                         tree. The column has to divide the dataset into treatment (value 1) and control (value 0)
 *                         groups.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/upliftdrf/parameters")
Call<UpliftDRFV3> validate_parametersUpliftdrf(@Field("treatment_column") String treatment_column);
/**
* Train a ModelSelection model.
* @param seed Seed for pseudo random number generator (if applicable)
* @param family Family. For maxr/maxrsweep, only gaussian. For backward, ordinal and multinomial families are not
* supported
* @param tweedie_variance_power Tweedie variance power
* @param tweedie_link_power Tweedie link power
* @param theta Theta
* @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
* with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
* datasets with many columns.
* @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
* alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
* specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
* 0.5 otherwise.
* @param lambda Regularization strength
* @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
* @param multinode_mode For maxrsweep only. If enabled, will attempt to perform sweeping action using multiple
* nodes in the cluster. Defaults to false.
* @param build_glm_model For maxrsweep mode only. If true, will return full blown GLM models with the desired
* predictor subsets. If false, only the predictor subsets and predictor coefficients are
* returned. This is for speeding up the model selection process. The users can choose to
* build the GLM models themselves by using the predictor subsets. Defaults to
* false.
* @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided)
* @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
* set to True, the value of nlambdas is set to 30 (fewer lambdas are needed for ridge regression)
* otherwise it is set to 100.
* @param score_iteration_interval Perform scoring for every score_iteration_interval iterations
* @param standardize Standardize numeric columns to have zero mean and unit variance
* @param cold_start Only applicable to multiple alpha/lambda values. If false, build the next model for next set
* of alpha/lambda values starting from the values provided by current model. If true will start
* GLM model from scratch.
* @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
* @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
* of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
* @param non_negative Restrict coefficients (not intercept) to be non-negative
* @param max_iterations Maximum number of iterations
* @param beta_epsilon Converge if beta changes less (using L-infinity norm) than beta epsilon, ONLY applies to
* IRLSM solver
* @param objective_epsilon Converge if objective value changes less than this. Default (of -1.0) indicates: If
* lambda_search is set to True the value of objective_epsilon is set to .0001. If the
* lambda_search is set to False and lambda is equal to zero, the value of
* objective_epsilon is set to .000001, for any other value of lambda the default value of
* objective_epsilon is set to .0001.
* @param gradient_epsilon Converge if objective changes less (using L-infinity norm) than this, ONLY applies to
* L-BFGS solver. Default (of -1.0) indicates: If lambda_search is set to False and lambda
* is equal to zero, the default value of gradient_epsilon is equal to .000001, otherwise
* the default value is .0001. If lambda_search is set to True, the conditional values above
* are 1E-8 and 1E-6 respectively.
* @param obj_reg Likelihood divider in objective value computation, default (of -1.0) will set it to 1/nobs
* @param link Link function.
* @param startval Double array to initialize coefficients for GLM.
* @param calc_like If true, will return likelihood function value for GLM.
* @param mode Mode: Used to choose model selection algorithms to use. Options include 'allsubsets' for all
* subsets, 'maxr' that uses sequential replacement and GLM to build all models, slow but works with
* cross-validation, validation frames for more robust results, 'maxrsweep' that uses sequential
* replacement and sweeping action, much faster than 'maxr', 'backward' for backward selection.
* @param intercept Include constant term in the model
* @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
* and the mean of response does not reflect reality.
* @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
* lambda that drives all coefficients to zero). Default indicates: if the number of
* observations is greater than the number of variables, then lambda_min_ratio is set to
* 0.0001; if the number of observations is less than the number of variables, then
* lambda_min_ratio is set to 0.01.
* @param beta_constraints Beta constraints
* @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
* to prevent expensive model building with many predictors. Default indicates: If the
* IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
* is set to 100000000.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
* @param remove_collinear_columns In case of linearly dependent columns, remove some of the dependent columns
* @param max_predictor_number Maximum number of predictors to be considered when building GLM models. Defaults to
* 1.
* @param min_predictor_number For mode = 'backward' only. Minimum number of predictors to be considered when
* building GLM models starting with all predictors to be included. Defaults to 1.
* @param nparallelism number of models to build in parallel. Defaults to 0.0 which is adaptive to the system
* capability
* @param p_values_threshold For mode='backward' only. If specified, will stop the model building process when all
* coefficients' p-values drop below this threshold
* @param influence If set to dfbetas will calculate the difference in beta when a datarow is included and excluded
* in the dataset.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
// Starts a ModelSelection build by POSTing the full parameter set as form fields; the returned
// ModelSelectionV3 schema tracks the job. Per-parameter descriptions are in the javadoc block above.
@FormUrlEncoded
@POST("/3/ModelBuilders/modelselection")
Call<ModelSelectionV3> trainModelselection(
  @Field("seed") long seed,
  @Field("family") GLMFamily family,
  @Field("tweedie_variance_power") double tweedie_variance_power,
  @Field("tweedie_link_power") double tweedie_link_power,
  @Field("theta") double theta,
  @Field("solver") GLMSolver solver,
  @Field("alpha") double[] alpha,
  @Field("lambda") double[] lambda,
  @Field("lambda_search") boolean lambda_search,
  @Field("multinode_mode") boolean multinode_mode,
  @Field("build_glm_model") boolean build_glm_model,
  @Field("early_stopping") boolean early_stopping,
  @Field("nlambdas") int nlambdas,
  @Field("score_iteration_interval") int score_iteration_interval,
  @Field("standardize") boolean standardize,
  @Field("cold_start") boolean cold_start,
  @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
  @Field("plug_values") String plug_values,
  @Field("non_negative") boolean non_negative,
  @Field("max_iterations") int max_iterations,
  @Field("beta_epsilon") double beta_epsilon,
  @Field("objective_epsilon") double objective_epsilon,
  @Field("gradient_epsilon") double gradient_epsilon,
  @Field("obj_reg") double obj_reg,
  @Field("link") GLMLink link,
  @Field("startval") double[] startval,
  @Field("calc_like") boolean calc_like,
  @Field("mode") ModelSelectionMode mode,
  @Field("intercept") boolean intercept,
  @Field("prior") double prior,
  @Field("lambda_min_ratio") double lambda_min_ratio,
  @Field("beta_constraints") String beta_constraints,
  @Field("max_active_predictors") int max_active_predictors,
  @Field("balance_classes") boolean balance_classes,
  @Field("class_sampling_factors") float[] class_sampling_factors,
  @Field("max_after_balance_size") float max_after_balance_size,
  @Field("max_confusion_matrix_size") int max_confusion_matrix_size,
  @Field("compute_p_values") boolean compute_p_values,
  @Field("remove_collinear_columns") boolean remove_collinear_columns,
  @Field("max_predictor_number") int max_predictor_number,
  @Field("min_predictor_number") int min_predictor_number,
  @Field("nparallelism") int nparallelism,
  @Field("p_values_threshold") double p_values_threshold,
  @Field("influence") GLMInfluence influence,
  @Field("model_id") String model_id,
  @Field("training_frame") String training_frame,
  @Field("validation_frame") String validation_frame,
  @Field("nfolds") int nfolds,
  @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
  @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
  @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
  @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
  @Field("distribution") GenmodelutilsDistributionFamily distribution,
  @Field("tweedie_power") double tweedie_power,
  @Field("quantile_alpha") double quantile_alpha,
  @Field("huber_alpha") double huber_alpha,
  @Field("response_column") String response_column,
  @Field("weights_column") String weights_column,
  @Field("offset_column") String offset_column,
  @Field("fold_column") String fold_column,
  @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
  @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
  @Field("max_categorical_levels") int max_categorical_levels,
  @Field("ignored_columns") String[] ignored_columns,
  @Field("ignore_const_cols") boolean ignore_const_cols,
  @Field("score_each_iteration") boolean score_each_iteration,
  @Field("checkpoint") String checkpoint,
  @Field("stopping_rounds") int stopping_rounds,
  @Field("max_runtime_secs") double max_runtime_secs,
  @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
  @Field("stopping_tolerance") double stopping_tolerance,
  @Field("gainslift_bins") int gainslift_bins,
  @Field("custom_metric_func") String custom_metric_func,
  @Field("custom_distribution_func") String custom_distribution_func,
  @Field("export_checkpoints_dir") String export_checkpoints_dir,
  @Field("auc_type") MultinomialAucType auc_type
);
/**
 * Train a ModelSelection model with no explicit parameters; every hyper-parameter takes its
 * server-side default.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/modelselection")
Call<ModelSelectionV3> trainModelselection();
/**
* Validate a set of ModelSelection model builder parameters.
* @param seed Seed for pseudo random number generator (if applicable)
* @param family Family. For maxr/maxrsweep, only gaussian. For backward, ordinal and multinomial families are not
* supported
* @param tweedie_variance_power Tweedie variance power
* @param tweedie_link_power Tweedie link power
* @param theta Theta
 * @param solver AUTO will set the solver based on given data and the other parameters. IRLSM is fast on problems
 *               with small number of predictors and for lambda-search with L1 penalty, L_BFGS scales better for
 *               datasets with many columns.
* @param alpha Distribution of regularization between the L1 (Lasso) and L2 (Ridge) penalties. A value of 1 for
* alpha represents Lasso regression, a value of 0 produces Ridge regression, and anything in between
* specifies the amount of mixing between the two. Default value of alpha is 0 when SOLVER = 'L-BFGS';
* 0.5 otherwise.
* @param lambda Regularization strength
* @param lambda_search Use lambda search starting at lambda max, given lambda is then interpreted as lambda min
* @param multinode_mode For maxrsweep only. If enabled, will attempt to perform sweeping action using multiple
* nodes in the cluster. Defaults to false.
 * @param build_glm_model For maxrsweep mode only. If true, will return full blown GLM models with the desired
 *                        predictor subsets. If false, only the predictor subsets and predictor coefficients are
 *                        returned. This is for speeding up the model selection process. The users can choose to
 *                        build the GLM models themselves by using the predictor subsets. Defaults to
 *                        false.
* @param early_stopping Stop early when there is no more relative improvement on train or validation (if provided)
 * @param nlambdas Number of lambdas to be used in a search. Default indicates: If alpha is zero, with lambda search
 *                 set to True, the value of nlambdas is set to 30 (fewer lambdas are needed for ridge regression)
 *                 otherwise it is set to 100.
* @param score_iteration_interval Perform scoring for every score_iteration_interval iterations
* @param standardize Standardize numeric columns to have zero mean and unit variance
* @param cold_start Only applicable to multiple alpha/lambda values. If false, build the next model for next set
* of alpha/lambda values starting from the values provided by current model. If true will start
* GLM model from scratch.
* @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
* @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
* of the training/validation frame, use with conjunction missing_values_handling = PlugValues)
* @param non_negative Restrict coefficients (not intercept) to be non-negative
* @param max_iterations Maximum number of iterations
 * @param beta_epsilon Converge if beta changes less (using L-infinity norm) than beta epsilon, ONLY applies to
 *                     IRLSM solver
* @param objective_epsilon Converge if objective value changes less than this. Default (of -1.0) indicates: If
* lambda_search is set to True the value of objective_epsilon is set to .0001. If the
* lambda_search is set to False and lambda is equal to zero, the value of
* objective_epsilon is set to .000001, for any other value of lambda the default value of
* objective_epsilon is set to .0001.
* @param gradient_epsilon Converge if objective changes less (using L-infinity norm) than this, ONLY applies to
* L-BFGS solver. Default (of -1.0) indicates: If lambda_search is set to False and lambda
* is equal to zero, the default value of gradient_epsilon is equal to .000001, otherwise
* the default value is .0001. If lambda_search is set to True, the conditional values above
* are 1E-8 and 1E-6 respectively.
* @param obj_reg Likelihood divider in objective value computation, default (of -1.0) will set it to 1/nobs
* @param link Link function.
* @param startval Double array to initialize coefficients for GLM.
* @param calc_like If true, will return likelihood function value for GLM.
* @param mode Mode: Used to choose model selection algorithms to use. Options include 'allsubsets' for all
* subsets, 'maxr' that uses sequential replacement and GLM to build all models, slow but works with
* cross-validation, validation frames for more robust results, 'maxrsweep' that uses sequential
* replacement and sweeping action, much faster than 'maxr', 'backward' for backward selection.
* @param intercept Include constant term in the model
* @param prior Prior probability for y==1. To be used only for logistic regression iff the data has been sampled
* and the mean of response does not reflect reality.
* @param lambda_min_ratio Minimum lambda used in lambda search, specified as a ratio of lambda_max (the smallest
* lambda that drives all coefficients to zero). Default indicates: if the number of
* observations is greater than the number of variables, then lambda_min_ratio is set to
* 0.0001; if the number of observations is less than the number of variables, then
* lambda_min_ratio is set to 0.01.
* @param beta_constraints Beta constraints
* @param max_active_predictors Maximum number of active predictors during computation. Use as a stopping criterion
* to prevent expensive model building with many predictors. Default indicates: If the
* IRLSM solver is used, the value of max_active_predictors is set to 5000 otherwise it
* is set to 100000000.
* @param balance_classes Balance training data class counts via over/under-sampling (for imbalanced data).
* @param class_sampling_factors Desired over/under-sampling ratios per class (in lexicographic order). If not
* specified, sampling factors will be automatically computed to obtain class balance
* during training. Requires balance_classes.
* @param max_after_balance_size Maximum relative size of the training data after balancing class counts (can be
* less than 1.0). Requires balance_classes.
* @param max_confusion_matrix_size [Deprecated] Maximum size (# classes) for confusion matrices to be printed in
* the Logs
* @param compute_p_values Request p-values computation, p-values work only with IRLSM solver and no regularization
* @param remove_collinear_columns In case of linearly dependent columns, remove some of the dependent columns
* @param max_predictor_number Maximum number of predictors to be considered when building GLM models. Defaults to
* 1.
* @param min_predictor_number For mode = 'backward' only. Minimum number of predictors to be considered when
* building GLM models starting with all predictors to be included. Defaults to 1.
* @param nparallelism number of models to build in parallel. Defaults to 0.0 which is adaptive to the system
* capability
 * @param p_values_threshold For mode='backward' only. If specified, will stop the model building process when all
 *                           coefficients' p-values drop below this threshold
* @param influence If set to dfbetas will calculate the difference in beta when a datarow is included and excluded
* in the dataset.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/modelselection/parameters")
Call<ModelSelectionV3> validate_parametersModelselection(
// --- ModelSelection / GLM algorithm-specific parameters (see Javadoc above) ---
@Field("seed") long seed,
@Field("family") GLMFamily family,
@Field("tweedie_variance_power") double tweedie_variance_power,
@Field("tweedie_link_power") double tweedie_link_power,
@Field("theta") double theta,
@Field("solver") GLMSolver solver,
@Field("alpha") double[] alpha,
@Field("lambda") double[] lambda,
@Field("lambda_search") boolean lambda_search,
@Field("multinode_mode") boolean multinode_mode,
@Field("build_glm_model") boolean build_glm_model,
@Field("early_stopping") boolean early_stopping,
@Field("nlambdas") int nlambdas,
@Field("score_iteration_interval") int score_iteration_interval,
@Field("standardize") boolean standardize,
@Field("cold_start") boolean cold_start,
@Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
@Field("plug_values") String plug_values,
@Field("non_negative") boolean non_negative,
@Field("max_iterations") int max_iterations,
@Field("beta_epsilon") double beta_epsilon,
@Field("objective_epsilon") double objective_epsilon,
@Field("gradient_epsilon") double gradient_epsilon,
@Field("obj_reg") double obj_reg,
@Field("link") GLMLink link,
@Field("startval") double[] startval,
@Field("calc_like") boolean calc_like,
@Field("mode") ModelSelectionMode mode,
@Field("intercept") boolean intercept,
@Field("prior") double prior,
@Field("lambda_min_ratio") double lambda_min_ratio,
@Field("beta_constraints") String beta_constraints,
@Field("max_active_predictors") int max_active_predictors,
@Field("balance_classes") boolean balance_classes,
@Field("class_sampling_factors") float[] class_sampling_factors,
@Field("max_after_balance_size") float max_after_balance_size,
@Field("max_confusion_matrix_size") int max_confusion_matrix_size,
@Field("compute_p_values") boolean compute_p_values,
@Field("remove_collinear_columns") boolean remove_collinear_columns,
@Field("max_predictor_number") int max_predictor_number,
@Field("min_predictor_number") int min_predictor_number,
@Field("nparallelism") int nparallelism,
@Field("p_values_threshold") double p_values_threshold,
@Field("influence") GLMInfluence influence,
// --- Common model-builder parameters (this trailing group is identical across
// --- the builder endpoints declared in this interface) ---
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
/**
 * Validate a set of ModelSelection model builder parameters (no-argument overload).
 * Sends an empty form to the same endpoint as the parameterized overload; parameters
 * not supplied are presumably filled in with server-side defaults — confirm against
 * the H2O REST API behavior.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/modelselection/parameters")
Call<ModelSelectionV3> validate_parametersModelselection();
/**
* Train a IsotonicRegression model.
* @param out_of_bounds Method of handling values of X predictor that are outside of the bounds seen in training.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/isotonicregression")
Call<IsotonicRegressionV3> trainIsotonicregression(
// --- IsotonicRegression-specific parameter (see Javadoc above) ---
@Field("out_of_bounds") IsotonicRegressionModelOutOfBoundsHandling out_of_bounds,
// --- Common model-builder parameters (this trailing group is identical across
// --- the builder endpoints declared in this interface) ---
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
/**
 * Train an IsotonicRegression model (no-argument overload).
 * Sends an empty form to the same endpoint as the parameterized overload; parameters
 * not supplied are presumably filled in with server-side defaults — confirm against
 * the H2O REST API behavior.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/isotonicregression")
Call<IsotonicRegressionV3> trainIsotonicregression();
/**
* Validate a set of IsotonicRegression model builder parameters.
* @param out_of_bounds Method of handling values of X predictor that are outside of the bounds seen in training.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
@FormUrlEncoded
@POST("/3/ModelBuilders/isotonicregression/parameters")
Call<IsotonicRegressionV3> validate_parametersIsotonicregression(
// --- IsotonicRegression-specific parameter (see Javadoc above) ---
@Field("out_of_bounds") IsotonicRegressionModelOutOfBoundsHandling out_of_bounds,
// --- Common model-builder parameters (this trailing group is identical across
// --- the builder endpoints declared in this interface) ---
@Field("model_id") String model_id,
@Field("training_frame") String training_frame,
@Field("validation_frame") String validation_frame,
@Field("nfolds") int nfolds,
@Field("keep_cross_validation_models") boolean keep_cross_validation_models,
@Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
@Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
@Field("parallelize_cross_validation") boolean parallelize_cross_validation,
@Field("distribution") GenmodelutilsDistributionFamily distribution,
@Field("tweedie_power") double tweedie_power,
@Field("quantile_alpha") double quantile_alpha,
@Field("huber_alpha") double huber_alpha,
@Field("response_column") String response_column,
@Field("weights_column") String weights_column,
@Field("offset_column") String offset_column,
@Field("fold_column") String fold_column,
@Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
@Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
@Field("max_categorical_levels") int max_categorical_levels,
@Field("ignored_columns") String[] ignored_columns,
@Field("ignore_const_cols") boolean ignore_const_cols,
@Field("score_each_iteration") boolean score_each_iteration,
@Field("checkpoint") String checkpoint,
@Field("stopping_rounds") int stopping_rounds,
@Field("max_runtime_secs") double max_runtime_secs,
@Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
@Field("stopping_tolerance") double stopping_tolerance,
@Field("gainslift_bins") int gainslift_bins,
@Field("custom_metric_func") String custom_metric_func,
@Field("custom_distribution_func") String custom_distribution_func,
@Field("export_checkpoints_dir") String export_checkpoints_dir,
@Field("auc_type") MultinomialAucType auc_type
);
/**
 * Validate a set of IsotonicRegression model builder parameters (no-argument overload).
 * Sends an empty form to the same endpoint as the parameterized overload; parameters
 * not supplied are presumably filled in with server-side defaults — confirm against
 * the H2O REST API behavior.
 */
@FormUrlEncoded
@POST("/3/ModelBuilders/isotonicregression/parameters")
Call<IsotonicRegressionV3> validate_parametersIsotonicregression();
/**
* Train a DT model.
* @param seed Seed for random numbers (affects sampling)
* @param max_depth Max depth of tree.
* @param min_rows Fewest allowed (weighted) observations in a leaf.
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
 * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
 *                       binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  @FormUrlEncoded
  @POST("/3/ModelBuilders/dt")
  Call<DTV3> trainDt(
    @Field("seed") long seed,
    @Field("max_depth") int max_depth,
    @Field("min_rows") int min_rows,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  /** Train a DT model using the server-side default value for every builder parameter. */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/dt")
  Call<DTV3> trainDt();
  /**
   * Validate a set of DT model builder parameters.
   * @param seed Seed for random numbers (affects sampling)
   * @param max_depth Max depth of tree.
   * @param min_rows Fewest allowed (weighted) observations in a leaf.
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/dt/parameters")
  Call<DTV3> validate_parametersDt(
    @Field("seed") long seed,
    @Field("max_depth") int max_depth,
    @Field("min_rows") int min_rows,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  /** Validate DT model builder parameters using the server-side default value for every parameter. */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/dt/parameters")
  Call<DTV3> validate_parametersDt();
  /**
   * Train a HGLM model.
   * @param score_iteration_interval Perform scoring for every score_iteration_interval iterations.
   * @param seed Seed for pseudo random number generator (if applicable).
   * @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
   * @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                    of the training/validation frame, use with conjunction missing_values_handling = PlugValues).
   * @param family Family. Only gaussian is supported now.
   * @param rand_family Set distribution of random effects. Only Gaussian is implemented now.
   * @param max_iterations Maximum number of iterations. Value should be >= 1. A value of 0 is only set when only the
   *                       model coefficient names and model coefficient dimensions are needed.
   * @param initial_fixed_effects An array that contains initial values of the fixed effects coefficient.
   * @param initial_random_effects A H2OFrame id that contains initial values of the random effects coefficient. The
   *                               row names should be the random coefficient names. If you are not sure what the
   *                               random coefficient names are, build HGLM model with max_iterations = 0 and checkout
   *                               the model output field random_coefficient_names. The number of rows of this frame
   *                               should be the number of level 2 units. Again, to figure this out, build HGLM model
   *                               with max_iterations=0 and check out the model output field group_column_names. The
   *                               number of rows should equal the length of the group_column_names.
   * @param initial_t_matrix A H2OFrame id that contains initial values of the T matrix. It should be a positive
   *                         symmetric matrix.
   * @param tau_u_var_init Initial variance of random coefficient effects. If set, should provide a value > 0.0. If
   *                       not set, will be randomly set in the model building process.
   * @param tau_e_var_init Initial variance of random noise. If set, should provide a value > 0.0. If not set, will
   *                       be randomly set in the model building process.
   * @param random_columns Random columns indices for HGLM.
   * @param method We only implemented EM as a method to obtain the fixed, random coefficients and the various
   *               variances.
   * @param em_epsilon Converge if beta/ubeta/tmat/tauEVar changes less (using L-infinity norm) than em_epsilon. ONLY
   *                   applies to EM method.
   * @param random_intercept If true, will allow random component to the GLM coefficients.
   * @param group_column Group column is the column that is categorical and used to generate the groups in HGLM
   * @param gen_syn_data If true, add gaussian noise with variance specified in parms._tau_e_var_init.
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/hglm")
  Call<HGLMV3> trainHglm(
    @Field("score_iteration_interval") int score_iteration_interval,
    @Field("seed") long seed,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("family") GLMFamily family,
    @Field("rand_family") GLMFamily rand_family,
    @Field("max_iterations") int max_iterations,
    @Field("initial_fixed_effects") double[] initial_fixed_effects,
    @Field("initial_random_effects") String initial_random_effects,
    @Field("initial_t_matrix") String initial_t_matrix,
    @Field("tau_u_var_init") double tau_u_var_init,
    @Field("tau_e_var_init") double tau_e_var_init,
    @Field("random_columns") String[] random_columns,
    @Field("method") HGLMMethod method,
    @Field("em_epsilon") double em_epsilon,
    @Field("random_intercept") boolean random_intercept,
    @Field("group_column") String group_column,
    @Field("gen_syn_data") boolean gen_syn_data,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  /** Train a HGLM model using the server-side default value for every builder parameter. */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/hglm")
  Call<HGLMV3> trainHglm();
  /**
   * Validate a set of HGLM model builder parameters.
   * @param score_iteration_interval Perform scoring for every score_iteration_interval iterations.
   * @param seed Seed for pseudo random number generator (if applicable).
   * @param missing_values_handling Handling of missing values. Either MeanImputation, Skip or PlugValues.
   * @param plug_values Plug Values (a single row frame containing values that will be used to impute missing values
   *                    of the training/validation frame, use with conjunction missing_values_handling = PlugValues).
   * @param family Family. Only gaussian is supported now.
   * @param rand_family Set distribution of random effects. Only Gaussian is implemented now.
   * @param max_iterations Maximum number of iterations. Value should be >= 1. A value of 0 is only set when only the
   *                       model coefficient names and model coefficient dimensions are needed.
   * @param initial_fixed_effects An array that contains initial values of the fixed effects coefficient.
   * @param initial_random_effects A H2OFrame id that contains initial values of the random effects coefficient. The
   *                               row names should be the random coefficient names. If you are not sure what the
   *                               random coefficient names are, build HGLM model with max_iterations = 0 and checkout
   *                               the model output field random_coefficient_names. The number of rows of this frame
   *                               should be the number of level 2 units. Again, to figure this out, build HGLM model
   *                               with max_iterations=0 and check out the model output field group_column_names. The
   *                               number of rows should equal the length of the group_column_names.
   * @param initial_t_matrix A H2OFrame id that contains initial values of the T matrix. It should be a positive
   *                         symmetric matrix.
   * @param tau_u_var_init Initial variance of random coefficient effects. If set, should provide a value > 0.0. If
   *                       not set, will be randomly set in the model building process.
   * @param tau_e_var_init Initial variance of random noise. If set, should provide a value > 0.0. If not set, will
   *                       be randomly set in the model building process.
   * @param random_columns Random columns indices for HGLM.
   * @param method We only implemented EM as a method to obtain the fixed, random coefficients and the various
   *               variances.
   * @param em_epsilon Converge if beta/ubeta/tmat/tauEVar changes less (using L-infinity norm) than em_epsilon. ONLY
   *                   applies to EM method.
   * @param random_intercept If true, will allow random component to the GLM coefficients.
   * @param group_column Group column is the column that is categorical and used to generate the groups in HGLM
   * @param gen_syn_data If true, add gaussian noise with variance specified in parms._tau_e_var_init.
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/hglm/parameters")
  Call<HGLMV3> validate_parametersHglm(
    @Field("score_iteration_interval") int score_iteration_interval,
    @Field("seed") long seed,
    @Field("missing_values_handling") GLMMissingValuesHandling missing_values_handling,
    @Field("plug_values") String plug_values,
    @Field("family") GLMFamily family,
    @Field("rand_family") GLMFamily rand_family,
    @Field("max_iterations") int max_iterations,
    @Field("initial_fixed_effects") double[] initial_fixed_effects,
    @Field("initial_random_effects") String initial_random_effects,
    @Field("initial_t_matrix") String initial_t_matrix,
    @Field("tau_u_var_init") double tau_u_var_init,
    @Field("tau_e_var_init") double tau_e_var_init,
    @Field("random_columns") String[] random_columns,
    @Field("method") HGLMMethod method,
    @Field("em_epsilon") double em_epsilon,
    @Field("random_intercept") boolean random_intercept,
    @Field("group_column") String group_column,
    @Field("gen_syn_data") boolean gen_syn_data,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  /** Validate HGLM model builder parameters using the server-side default value for every parameter. */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/hglm/parameters")
  Call<HGLMV3> validate_parametersHglm();
  /**
   * Train a AdaBoost model.
   * @param nlearners Number of AdaBoost weak learners.
   * @param weak_learner Choose a weak learner type. Defaults to AUTO, which means DRF.
   * @param learn_rate Learning rate (from 0.0 to 1.0)
   * @param weak_learner_params Customized parameters for the weak_learner algorithm.
   * @param seed Seed for pseudo random number generator (if applicable)
   * @param model_id Destination id for this model; auto-generated if not specified.
   * @param training_frame Id of the training data frame.
   * @param validation_frame Id of the validation data frame.
   * @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
   * @param keep_cross_validation_models Whether to keep the cross-validation models.
   * @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
   * @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
   * @param parallelize_cross_validation Allow parallel training of cross-validation models
   * @param distribution Distribution function
   * @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
   * @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
   * @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
   *                    between 0 and 1).
   * @param response_column Response variable column.
   * @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
   *                       excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
   *                       to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
   *                       observation weights and do not increase the size of the data frame. This is typically the
   *                       number of times a row is repeated, but non-integer values are supported as well. During
   *                       training, rows with higher weights matter more, due to the larger loss function pre-factor.
   *                       If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
   *                       is incorrect. To get an accurate prediction, remove all rows with weight == 0.
   * @param offset_column Offset column. This will be added to the combination of columns before applying the link
   *                      function.
   * @param fold_column Column with cross-validation fold index assignment per observation.
   * @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
   *                        option will stratify the folds based on the response variable, for classification
   *                        problems.
   * @param categorical_encoding Encoding scheme for categorical features
   * @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
   *                               for model training. Only used for categorical_encoding == EnumLimited.
   * @param ignored_columns Names of columns to ignore for training.
   * @param ignore_const_cols Ignore constant columns.
   * @param score_each_iteration Whether to score during each iteration of model training.
   * @param checkpoint Model checkpoint to resume training with.
   * @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
   *                        length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
   *                        to disable)
   * @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
   * @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
   *                        regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
   *                        can only be used in GBM and DRF with the Python client.
   * @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
   *                           not at least this much)
   * @param gainslift_bins Gains/Lift table number of bins. 0 means disabled. Default value -1 means automatic
   *                       binning.
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
   * @param export_checkpoints_dir Automatically export generated models to this directory.
   * @param auc_type Set default multinomial AUC type.
   */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/adaboost")
  Call<AdaBoostV3> trainAdaboost(
    @Field("nlearners") int nlearners,
    @Field("weak_learner") AdaBoostModelAlgorithm weak_learner,
    @Field("learn_rate") double learn_rate,
    @Field("weak_learner_params") String weak_learner_params,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );

  /** Train a AdaBoost model using the server-side default value for every builder parameter. */
  @FormUrlEncoded
  @POST("/3/ModelBuilders/adaboost")
  Call<AdaBoostV3> trainAdaboost();
/**
* Validate a set of AdaBoost model builder parameters.
* @param nlearners Number of AdaBoost weak learners.
* @param weak_learner Choose a weak learner type. Defaults to AUTO, which means DRF.
* @param learn_rate Learning rate (from 0.0 to 1.0)
* @param weak_learner_params Customized parameters for the weak_learner algorithm.
* @param seed Seed for pseudo random number generator (if applicable)
* @param model_id Destination id for this model; auto-generated if not specified.
* @param training_frame Id of the training data frame.
* @param validation_frame Id of the validation data frame.
* @param nfolds Number of folds for K-fold cross-validation (0 to disable or >= 2).
* @param keep_cross_validation_models Whether to keep the cross-validation models.
* @param keep_cross_validation_predictions Whether to keep the predictions of the cross-validation models.
* @param keep_cross_validation_fold_assignment Whether to keep the cross-validation fold assignment.
* @param parallelize_cross_validation Allow parallel training of cross-validation models
* @param distribution Distribution function
* @param tweedie_power Tweedie power for Tweedie regression, must be between 1 and 2.
* @param quantile_alpha Desired quantile for Quantile regression, must be between 0 and 1.
* @param huber_alpha Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be
* between 0 and 1).
* @param response_column Response variable column.
* @param weights_column Column with observation weights. Giving some observation a weight of zero is equivalent to
* excluding it from the dataset; giving an observation a relative weight of 2 is equivalent
* to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row
* observation weights and do not increase the size of the data frame. This is typically the
* number of times a row is repeated, but non-integer values are supported as well. During
* training, rows with higher weights matter more, due to the larger loss function pre-factor.
* If you set weight = 0 for a row, the returned prediction frame at that row is zero and this
* is incorrect. To get an accurate prediction, remove all rows with weight == 0.
* @param offset_column Offset column. This will be added to the combination of columns before applying the link
* function.
* @param fold_column Column with cross-validation fold index assignment per observation.
* @param fold_assignment Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified'
* option will stratify the folds based on the response variable, for classification
* problems.
* @param categorical_encoding Encoding scheme for categorical features
* @param max_categorical_levels For every categorical feature, only use this many most frequent categorical levels
* for model training. Only used for categorical_encoding == EnumLimited.
* @param ignored_columns Names of columns to ignore for training.
* @param ignore_const_cols Ignore constant columns.
* @param score_each_iteration Whether to score during each iteration of model training.
* @param checkpoint Model checkpoint to resume training with.
* @param stopping_rounds Early stopping based on convergence of stopping_metric. Stop if simple moving average of
* length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0
* to disable)
* @param max_runtime_secs Maximum allowed runtime in seconds for model training. Use 0 to disable.
* @param stopping_metric Metric to use for early stopping (AUTO: logloss for classification, deviance for
* regression and anomaly_score for Isolation Forest). Note that custom and custom_increasing
* can only be used in GBM and DRF with the Python client.
* @param stopping_tolerance Relative tolerance for metric-based stopping criterion (stop if relative improvement is
* not at least this much)
* @param gainslift_bins Gains/Lift table number of bins. 0 means disabled.. Default value -1 means automatic
* binning.
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param custom_distribution_func Reference to custom distribution, format: `language:keyName=funcName`
* @param export_checkpoints_dir Automatically export generated models to this directory.
* @param auc_type Set default multinomial AUC type.
*/
  // Validate AdaBoost builder parameters without starting a training job. The request shape is
  // identical to trainAdaboost (see the javadoc immediately above); only the endpoint differs.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/adaboost/parameters")
  Call<AdaBoostV3> validate_parametersAdaboost(
    @Field("nlearners") int nlearners,
    @Field("weak_learner") AdaBoostModelAlgorithm weak_learner,
    @Field("learn_rate") double learn_rate,
    @Field("weak_learner_params") String weak_learner_params,
    @Field("seed") long seed,
    @Field("model_id") String model_id,
    @Field("training_frame") String training_frame,
    @Field("validation_frame") String validation_frame,
    @Field("nfolds") int nfolds,
    @Field("keep_cross_validation_models") boolean keep_cross_validation_models,
    @Field("keep_cross_validation_predictions") boolean keep_cross_validation_predictions,
    @Field("keep_cross_validation_fold_assignment") boolean keep_cross_validation_fold_assignment,
    @Field("parallelize_cross_validation") boolean parallelize_cross_validation,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("tweedie_power") double tweedie_power,
    @Field("quantile_alpha") double quantile_alpha,
    @Field("huber_alpha") double huber_alpha,
    @Field("response_column") String response_column,
    @Field("weights_column") String weights_column,
    @Field("offset_column") String offset_column,
    @Field("fold_column") String fold_column,
    @Field("fold_assignment") ModelParametersFoldAssignmentScheme fold_assignment,
    @Field("categorical_encoding") ModelParametersCategoricalEncodingScheme categorical_encoding,
    @Field("max_categorical_levels") int max_categorical_levels,
    @Field("ignored_columns") String[] ignored_columns,
    @Field("ignore_const_cols") boolean ignore_const_cols,
    @Field("score_each_iteration") boolean score_each_iteration,
    @Field("checkpoint") String checkpoint,
    @Field("stopping_rounds") int stopping_rounds,
    @Field("max_runtime_secs") double max_runtime_secs,
    @Field("stopping_metric") ScoreKeeperStoppingMetric stopping_metric,
    @Field("stopping_tolerance") double stopping_tolerance,
    @Field("gainslift_bins") int gainslift_bins,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("custom_distribution_func") String custom_distribution_func,
    @Field("export_checkpoints_dir") String export_checkpoints_dir,
    @Field("auc_type") MultinomialAucType auc_type
  );
  // Zero-argument overload: validates with no explicit fields, i.e. the server-side defaults.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/adaboost/parameters")
  Call<AdaBoostV3> validate_parametersAdaboost();
/**
* Return a new unique model_id for the specified algorithm.
* @param algo Algo of ModelBuilder of interest
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
  // Ask the server for a fresh, unique model_id for the given algorithm. {algo} is substituted
  // into the URL path; _exclude_fields trims the returned JSON (see javadoc above).
  @FormUrlEncoded
  @POST("/3/ModelBuilders/{algo}/model_id")
  Call<ModelIdV3> calcModelId(
    @Path("algo") String algo,
    @Field("_exclude_fields") String _exclude_fields
  );
  // Overload without _exclude_fields: the full response is returned.
  @FormUrlEncoded
  @POST("/3/ModelBuilders/{algo}/model_id")
  Call<ModelIdV3> calcModelId(@Path("algo") String algo);
/**
* Return the Model Builder metadata for the specified algorithm.
* @param algo Algo of ModelBuilder of interest
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
  // Fetch the ModelBuilder metadata for one algorithm. {algo} is a path segment;
  // _exclude_fields is a query parameter trimming the returned JSON (see javadoc above).
  @GET("/3/ModelBuilders/{algo}")
  Call<ModelBuildersV3> fetch(
    @Path("algo") String algo,
    @Query("_exclude_fields") String _exclude_fields
  );
  // Overload without _exclude_fields: the full metadata response is returned.
  @GET("/3/ModelBuilders/{algo}")
  Call<ModelBuildersV3> fetch(@Path("algo") String algo);
/**
* Return the Model Builder metadata for all available algorithms.
* @param algo Algo of ModelBuilder of interest
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
  // List ModelBuilder metadata for all available algorithms. Unlike fetch(), both the algo
  // filter and _exclude_fields are query parameters here (see javadoc above).
  @GET("/3/ModelBuilders")
  Call<ModelBuildersV3> list(
    @Query("algo") String algo,
    @Query("_exclude_fields") String _exclude_fields
  );
  // Overload without filters: returns metadata for every algorithm, unabridged.
  @GET("/3/ModelBuilders")
  Call<ModelBuildersV3> list();
@SuppressWarnings("unused")
class Helper {
    /**
     * Train a XGBoost model.
     * <p>Flattens the {@code XGBoostParametersV3} POJO {@code p} into the positional argument
     * list of {@code z.trainXgboost(...)}. Key wrappers (frames, model id, checkpoint) are
     * unwrapped to their {@code .name}, column specifiers to their {@code .columnName}; unset
     * wrappers are passed as {@code null} so the server applies its defaults. The argument
     * order must match the interface declaration exactly — do not edit by hand (generated code).
     */
    public static Call<XGBoostV3> trainXgboost(ModelBuilders z, XGBoostParametersV3 p) {
      return z.trainXgboost(
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.minChildWeight,
        p.learnRate,
        p.eta,
        p.sampleRate,
        p.subsample,
        p.colSampleRate,
        p.colsampleBylevel,
        p.colSampleRatePerTree,
        p.colsampleBytree,
        p.colsampleBynode,
        p.monotoneConstraints,
        p.maxAbsLeafnodePred,
        p.maxDeltaStep,
        p.scoreTreeInterval,
        p.seed,
        p.minSplitImprovement,
        p.gamma,
        p.nthread,
        p.buildTreeOneNode,
        p.saveMatrixDirectory,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.maxBins,
        p.maxLeaves,
        p.treeMethod,
        p.growPolicy,
        p.booster,
        p.regLambda,
        p.regAlpha,
        p.quietMode,
        p.sampleType,
        p.normalizeType,
        p.rateDrop,
        p.oneDrop,
        p.skipDrop,
        p.dmatrixType,
        p.backend,
        p.gpuId,
        p.interactionConstraints,
        p.scalePosWeight,
        p.evalMetric,
        p.scoreEvalMetricOnly,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Validate a set of XGBoost model builder parameters.
     * <p>Identical POJO-to-positional flattening as {@code trainXgboost}, but targets the
     * parameter-validation endpoint instead of starting a training job. Key wrappers are
     * unwrapped to {@code .name}, column specifiers to {@code .columnName}, {@code null}
     * for unset values. Generated code — keep argument order in sync with the interface.
     */
    public static Call<XGBoostV3> validate_parametersXgboost(ModelBuilders z, XGBoostParametersV3 p) {
      return z.validate_parametersXgboost(
        p.ntrees,
        p.maxDepth,
        p.minRows,
        p.minChildWeight,
        p.learnRate,
        p.eta,
        p.sampleRate,
        p.subsample,
        p.colSampleRate,
        p.colsampleBylevel,
        p.colSampleRatePerTree,
        p.colsampleBytree,
        p.colsampleBynode,
        p.monotoneConstraints,
        p.maxAbsLeafnodePred,
        p.maxDeltaStep,
        p.scoreTreeInterval,
        p.seed,
        p.minSplitImprovement,
        p.gamma,
        p.nthread,
        p.buildTreeOneNode,
        p.saveMatrixDirectory,
        p.calibrateModel,
        (p.calibrationFrame == null? null : p.calibrationFrame.name),
        p.calibrationMethod,
        p.maxBins,
        p.maxLeaves,
        p.treeMethod,
        p.growPolicy,
        p.booster,
        p.regLambda,
        p.regAlpha,
        p.quietMode,
        p.sampleType,
        p.normalizeType,
        p.rateDrop,
        p.oneDrop,
        p.skipDrop,
        p.dmatrixType,
        p.backend,
        p.gpuId,
        p.interactionConstraints,
        p.scalePosWeight,
        p.evalMetric,
        p.scoreEvalMetricOnly,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Train a Infogram model.
     * <p>Flattens {@code InfogramParametersV3} into the positional argument list of
     * {@code z.trainInfogram(...)}. Key wrappers are unwrapped to {@code .name}, column
     * specifiers to {@code .columnName}, {@code null} for unset values. Generated code —
     * argument order must match the interface declaration.
     */
    public static Call<InfogramV3> trainInfogram(ModelBuilders z, InfogramParametersV3 p) {
      return z.trainInfogram(
        p.seed,
        p.standardize,
        (p.plugValues == null? null : p.plugValues.name),
        p.maxIterations,
        p.prior,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.algorithm,
        p.algorithmParams,
        p.protectedColumns,
        p.totalInformationThreshold,
        p.netInformationThreshold,
        p.relevanceIndexThreshold,
        p.safetyIndexThreshold,
        p.dataFraction,
        p.topNFeatures,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Validate a set of Infogram model builder parameters.
     * <p>Same POJO flattening as {@code trainInfogram}, but posts to the parameter-validation
     * endpoint instead of starting a training job. Generated code — argument order must match
     * the interface declaration.
     */
    public static Call<InfogramV3> validate_parametersInfogram(ModelBuilders z, InfogramParametersV3 p) {
      return z.validate_parametersInfogram(
        p.seed,
        p.standardize,
        (p.plugValues == null? null : p.plugValues.name),
        p.maxIterations,
        p.prior,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.algorithm,
        p.algorithmParams,
        p.protectedColumns,
        p.totalInformationThreshold,
        p.netInformationThreshold,
        p.relevanceIndexThreshold,
        p.safetyIndexThreshold,
        p.dataFraction,
        p.topNFeatures,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Train a TargetEncoder model.
     * <p>Flattens {@code TargetEncoderParametersV3} into the positional argument list of
     * {@code z.trainTargetencoder(...)}. Key wrappers are unwrapped to {@code .name}, column
     * specifiers to {@code .columnName}, {@code null} for unset values. Generated code —
     * argument order must match the interface declaration.
     */
    public static Call<TargetEncoderV3> trainTargetencoder(ModelBuilders z, TargetEncoderParametersV3 p) {
      return z.trainTargetencoder(
        p.columnsToEncode,
        p.keepOriginalCategoricalColumns,
        p.blending,
        p.inflectionPoint,
        p.smoothing,
        p.dataLeakageHandling,
        p.noise,
        p.seed,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Validate a set of TargetEncoder model builder parameters.
     * <p>Same POJO flattening as {@code trainTargetencoder}, but posts to the
     * parameter-validation endpoint instead of starting a training job. Generated code —
     * argument order must match the interface declaration.
     */
    public static Call<TargetEncoderV3> validate_parametersTargetencoder(ModelBuilders z, TargetEncoderParametersV3 p) {
      return z.validate_parametersTargetencoder(
        p.columnsToEncode,
        p.keepOriginalCategoricalColumns,
        p.blending,
        p.inflectionPoint,
        p.smoothing,
        p.dataLeakageHandling,
        p.noise,
        p.seed,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Train a DeepLearning model.
     * <p>Flattens {@code DeepLearningParametersV3} into the positional argument list of
     * {@code z.trainDeeplearning(...)}. Key wrappers are unwrapped to {@code .name}, column
     * specifiers to {@code .columnName}, {@code null} for unset values. Key arrays
     * (initial weights/biases) go through {@code keyArrayToStringArray} — presumably a
     * Helper-private utility defined elsewhere in this class that maps each key to its
     * string name (not visible in this chunk; verify there). Generated code — argument
     * order must match the interface declaration.
     */
    public static Call<DeepLearningV3> trainDeeplearning(ModelBuilders z, DeepLearningParametersV3 p) {
      return z.trainDeeplearning(
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.activation,
        p.hidden,
        p.epochs,
        p.trainSamplesPerIteration,
        p.targetRatioCommToComp,
        p.seed,
        p.adaptiveRate,
        p.rho,
        p.epsilon,
        p.rate,
        p.rateAnnealing,
        p.rateDecay,
        p.momentumStart,
        p.momentumRamp,
        p.momentumStable,
        p.nesterovAcceleratedGradient,
        p.inputDropoutRatio,
        p.hiddenDropoutRatios,
        p.l1,
        p.l2,
        p.maxW2,
        p.initialWeightDistribution,
        p.initialWeightScale,
        (p.initialWeights == null? null : keyArrayToStringArray(p.initialWeights)),
        (p.initialBiases == null? null : keyArrayToStringArray(p.initialBiases)),
        p.loss,
        p.scoreInterval,
        p.scoreTrainingSamples,
        p.scoreValidationSamples,
        p.scoreDutyCycle,
        p.classificationStop,
        p.regressionStop,
        p.quietMode,
        p.scoreValidationSampling,
        p.overwriteWithBestModel,
        p.autoencoder,
        p.useAllFactorLevels,
        p.standardize,
        p.diagnostics,
        p.variableImportances,
        p.fastMode,
        p.forceLoadBalance,
        p.replicateTrainingData,
        p.singleNodeMode,
        p.shuffleTrainingData,
        p.missingValuesHandling,
        p.sparse,
        p.colMajor,
        p.averageActivation,
        p.sparsityBeta,
        p.maxCategoricalFeatures,
        p.reproducible,
        p.exportWeightsAndBiases,
        p.miniBatchSize,
        p.elasticAveraging,
        p.elasticAveragingMovingRate,
        p.elasticAveragingRegularization,
        (p.pretrainedAutoencoder == null? null : p.pretrainedAutoencoder.name),
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Validate a set of DeepLearning model builder parameters.
     * <p>Same POJO flattening as {@code trainDeeplearning}, but posts to the
     * parameter-validation endpoint instead of starting a training job. Key arrays go through
     * {@code keyArrayToStringArray} — presumably a Helper-private utility defined elsewhere in
     * this class (not visible in this chunk; verify there). Generated code — argument order
     * must match the interface declaration.
     */
    public static Call<DeepLearningV3> validate_parametersDeeplearning(ModelBuilders z, DeepLearningParametersV3 p) {
      return z.validate_parametersDeeplearning(
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.activation,
        p.hidden,
        p.epochs,
        p.trainSamplesPerIteration,
        p.targetRatioCommToComp,
        p.seed,
        p.adaptiveRate,
        p.rho,
        p.epsilon,
        p.rate,
        p.rateAnnealing,
        p.rateDecay,
        p.momentumStart,
        p.momentumRamp,
        p.momentumStable,
        p.nesterovAcceleratedGradient,
        p.inputDropoutRatio,
        p.hiddenDropoutRatios,
        p.l1,
        p.l2,
        p.maxW2,
        p.initialWeightDistribution,
        p.initialWeightScale,
        (p.initialWeights == null? null : keyArrayToStringArray(p.initialWeights)),
        (p.initialBiases == null? null : keyArrayToStringArray(p.initialBiases)),
        p.loss,
        p.scoreInterval,
        p.scoreTrainingSamples,
        p.scoreValidationSamples,
        p.scoreDutyCycle,
        p.classificationStop,
        p.regressionStop,
        p.quietMode,
        p.scoreValidationSampling,
        p.overwriteWithBestModel,
        p.autoencoder,
        p.useAllFactorLevels,
        p.standardize,
        p.diagnostics,
        p.variableImportances,
        p.fastMode,
        p.forceLoadBalance,
        p.replicateTrainingData,
        p.singleNodeMode,
        p.shuffleTrainingData,
        p.missingValuesHandling,
        p.sparse,
        p.colMajor,
        p.averageActivation,
        p.sparsityBeta,
        p.maxCategoricalFeatures,
        p.reproducible,
        p.exportWeightsAndBiases,
        p.miniBatchSize,
        p.elasticAveraging,
        p.elasticAveragingMovingRate,
        p.elasticAveragingRegularization,
        (p.pretrainedAutoencoder == null? null : p.pretrainedAutoencoder.name),
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Train a GLM model.
     * <p>Flattens {@code GLMParametersV3} into the positional argument list of
     * {@code z.trainGlm(...)}. Key wrappers (frames, model id, constraints, plug values,
     * checkpoint) are unwrapped to {@code .name}, column specifiers to {@code .columnName},
     * {@code null} for unset values. Generated code — argument order must match the
     * interface declaration.
     */
    public static Call<GLMV3> trainGlm(ModelBuilders z, GLMParametersV3 p) {
      return z.trainGlm(
        p.seed,
        p.family,
        p.tweedieVariancePower,
        p.dispersionLearningRate,
        p.tweedieLinkPower,
        p.theta,
        p.solver,
        p.alpha,
        p.lambda,
        p.lambdaSearch,
        p.earlyStopping,
        p.nlambdas,
        p.scoreIterationInterval,
        p.standardize,
        p.coldStart,
        p.missingValuesHandling,
        p.influence,
        (p.plugValues == null? null : p.plugValues.name),
        p.nonNegative,
        p.maxIterations,
        p.betaEpsilon,
        p.objectiveEpsilon,
        p.gradientEpsilon,
        p.objReg,
        p.link,
        p.dispersionParameterMethod,
        p.startval,
        p.calcLike,
        p.generateVariableInflationFactors,
        p.intercept,
        p.buildNullModel,
        p.fixDispersionParameter,
        p.initDispersionParameter,
        p.prior,
        p.lambdaMinRatio,
        (p.betaConstraints == null? null : p.betaConstraints.name),
        (p.linearConstraints == null? null : p.linearConstraints.name),
        p.maxActivePredictors,
        p.interactions,
        p.interactionPairs,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.computePValues,
        p.fixTweedieVariancePower,
        p.removeCollinearColumns,
        p.dispersionEpsilon,
        p.tweedieEpsilon,
        p.maxIterationsDispersion,
        p.generateScoringHistory,
        p.initOptimalGlm,
        p.separateLinearBeta,
        p.constraintEta0,
        p.constraintTau,
        p.constraintAlpha,
        p.constraintBeta,
        p.constraintC0,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Validate a set of GLM model builder parameters.
     * <p>Same POJO flattening as {@code trainGlm}, but posts to the parameter-validation
     * endpoint instead of starting a training job. Generated code — argument order must
     * match the interface declaration.
     */
    public static Call<GLMV3> validate_parametersGlm(ModelBuilders z, GLMParametersV3 p) {
      return z.validate_parametersGlm(
        p.seed,
        p.family,
        p.tweedieVariancePower,
        p.dispersionLearningRate,
        p.tweedieLinkPower,
        p.theta,
        p.solver,
        p.alpha,
        p.lambda,
        p.lambdaSearch,
        p.earlyStopping,
        p.nlambdas,
        p.scoreIterationInterval,
        p.standardize,
        p.coldStart,
        p.missingValuesHandling,
        p.influence,
        (p.plugValues == null? null : p.plugValues.name),
        p.nonNegative,
        p.maxIterations,
        p.betaEpsilon,
        p.objectiveEpsilon,
        p.gradientEpsilon,
        p.objReg,
        p.link,
        p.dispersionParameterMethod,
        p.startval,
        p.calcLike,
        p.generateVariableInflationFactors,
        p.intercept,
        p.buildNullModel,
        p.fixDispersionParameter,
        p.initDispersionParameter,
        p.prior,
        p.lambdaMinRatio,
        (p.betaConstraints == null? null : p.betaConstraints.name),
        (p.linearConstraints == null? null : p.linearConstraints.name),
        p.maxActivePredictors,
        p.interactions,
        p.interactionPairs,
        p.balanceClasses,
        p.classSamplingFactors,
        p.maxAfterBalanceSize,
        p.maxConfusionMatrixSize,
        p.computePValues,
        p.fixTweedieVariancePower,
        p.removeCollinearColumns,
        p.dispersionEpsilon,
        p.tweedieEpsilon,
        p.maxIterationsDispersion,
        p.generateScoringHistory,
        p.initOptimalGlm,
        p.separateLinearBeta,
        p.constraintEta0,
        p.constraintTau,
        p.constraintAlpha,
        p.constraintBeta,
        p.constraintC0,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Train a GLRM model.
     * <p>Flattens {@code GLRMParametersV3} into the positional argument list of
     * {@code z.trainGlrm(...)}. Key wrappers (user X/Y frames, model id, checkpoint) are
     * unwrapped to {@code .name}, column specifiers to {@code .columnName}, {@code null}
     * for unset values. Generated code — argument order must match the interface declaration.
     */
    public static Call<GLRMV3> trainGlrm(ModelBuilders z, GLRMParametersV3 p) {
      return z.trainGlrm(
        p.transform,
        p.k,
        p.loss,
        p.multiLoss,
        p.lossByCol,
        p.lossByColIdx,
        p.period,
        p.regularizationX,
        p.regularizationY,
        p.gammaX,
        p.gammaY,
        p.maxIterations,
        p.maxUpdates,
        p.initStepSize,
        p.minStepSize,
        p.seed,
        p.init,
        p.svdMethod,
        (p.userY == null? null : p.userY.name),
        (p.userX == null? null : p.userX.name),
        p.loadingName,
        p.representationName,
        p.expandUserY,
        p.imputeOriginal,
        p.recoverSvd,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Validate a set of GLRM model builder parameters.
     * <p>Same POJO flattening as {@code trainGlrm}, but posts to the parameter-validation
     * endpoint instead of starting a training job. Generated code — argument order must
     * match the interface declaration.
     */
    public static Call<GLRMV3> validate_parametersGlrm(ModelBuilders z, GLRMParametersV3 p) {
      return z.validate_parametersGlrm(
        p.transform,
        p.k,
        p.loss,
        p.multiLoss,
        p.lossByCol,
        p.lossByColIdx,
        p.period,
        p.regularizationX,
        p.regularizationY,
        p.gammaX,
        p.gammaY,
        p.maxIterations,
        p.maxUpdates,
        p.initStepSize,
        p.minStepSize,
        p.seed,
        p.init,
        p.svdMethod,
        (p.userY == null? null : p.userY.name),
        (p.userX == null? null : p.userX.name),
        p.loadingName,
        p.representationName,
        p.expandUserY,
        p.imputeOriginal,
        p.recoverSvd,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
    /**
     * Train a KMeans model.
     * <p>Flattens {@code KMeansParametersV3} into the positional argument list of
     * {@code z.trainKmeans(...)}. Key wrappers (user points, frames, model id, checkpoint)
     * are unwrapped to {@code .name}, column specifiers to {@code .columnName}, {@code null}
     * for unset values. Generated code — argument order must match the interface declaration.
     */
    public static Call<KMeansV3> trainKmeans(ModelBuilders z, KMeansParametersV3 p) {
      return z.trainKmeans(
        (p.userPoints == null? null : p.userPoints.name),
        p.maxIterations,
        p.standardize,
        p.seed,
        p.init,
        p.estimateK,
        p.clusterSizeConstraints,
        p.k,
        (p.modelId == null? null : p.modelId.name),
        (p.trainingFrame == null? null : p.trainingFrame.name),
        (p.validationFrame == null? null : p.validationFrame.name),
        p.nfolds,
        p.keepCrossValidationModels,
        p.keepCrossValidationPredictions,
        p.keepCrossValidationFoldAssignment,
        p.parallelizeCrossValidation,
        p.distribution,
        p.tweediePower,
        p.quantileAlpha,
        p.huberAlpha,
        (p.responseColumn == null? null : p.responseColumn.columnName),
        (p.weightsColumn == null? null : p.weightsColumn.columnName),
        (p.offsetColumn == null? null : p.offsetColumn.columnName),
        (p.foldColumn == null? null : p.foldColumn.columnName),
        p.foldAssignment,
        p.categoricalEncoding,
        p.maxCategoricalLevels,
        p.ignoredColumns,
        p.ignoreConstCols,
        p.scoreEachIteration,
        (p.checkpoint == null? null : p.checkpoint.name),
        p.stoppingRounds,
        p.maxRuntimeSecs,
        p.stoppingMetric,
        p.stoppingTolerance,
        p.gainsliftBins,
        p.customMetricFunc,
        p.customDistributionFunc,
        p.exportCheckpointsDir,
        p.aucType
      );
    }
/**
* Validate a set of KMeans model builder parameters.
*/
public static Call<KMeansV3> validate_parametersKmeans(ModelBuilders z, KMeansParametersV3 p) {
return z.validate_parametersKmeans(
(p.userPoints == null? null : p.userPoints.name),
p.maxIterations,
p.standardize,
p.seed,
p.init,
p.estimateK,
p.clusterSizeConstraints,
p.k,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Train a NaiveBayes model.
*/
public static Call<NaiveBayesV3> trainNaivebayes(ModelBuilders z, NaiveBayesParametersV3 p) {
return z.trainNaivebayes(
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.laplace,
p.minSdev,
p.epsSdev,
p.minProb,
p.epsProb,
p.computeMetrics,
p.seed,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Validate a set of NaiveBayes model builder parameters.
*/
public static Call<NaiveBayesV3> validate_parametersNaivebayes(ModelBuilders z, NaiveBayesParametersV3 p) {
return z.validate_parametersNaivebayes(
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.laplace,
p.minSdev,
p.epsSdev,
p.minProb,
p.epsProb,
p.computeMetrics,
p.seed,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Train a PCA model.
*/
public static Call<PCAV3> trainPca(ModelBuilders z, PCAParametersV3 p) {
return z.trainPca(
p.transform,
p.pcaMethod,
p.pcaImpl,
p.k,
p.maxIterations,
p.seed,
p.useAllFactorLevels,
p.computeMetrics,
p.imputeMissing,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Validate a set of PCA model builder parameters.
*/
public static Call<PCAV3> validate_parametersPca(ModelBuilders z, PCAParametersV3 p) {
return z.validate_parametersPca(
p.transform,
p.pcaMethod,
p.pcaImpl,
p.k,
p.maxIterations,
p.seed,
p.useAllFactorLevels,
p.computeMetrics,
p.imputeMissing,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Train a SVD model.
*/
public static Call<SVDV99> trainSvd(ModelBuilders z, SVDParametersV99 p) {
return z.trainSvd(
p.transform,
p.svdMethod,
p.nv,
p.maxIterations,
p.seed,
p.keepU,
p.uName,
p.useAllFactorLevels,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Validate a set of SVD model builder parameters.
*/
public static Call<SVDV99> validate_parametersSvd(ModelBuilders z, SVDParametersV99 p) {
return z.validate_parametersSvd(
p.transform,
p.svdMethod,
p.nv,
p.maxIterations,
p.seed,
p.keepU,
p.uName,
p.useAllFactorLevels,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Train a DRF model.
*/
public static Call<DRFV3> trainDrf(ModelBuilders z, DRFParametersV3 p) {
return z.trainDrf(
p.mtries,
p.binomialDoubleTrees,
p.sampleRate,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.ntrees,
p.maxDepth,
p.minRows,
p.nbins,
p.nbinsTopLevel,
p.nbinsCats,
p.r2Stopping,
p.seed,
p.buildTreeOneNode,
p.sampleRatePerClass,
p.colSampleRatePerTree,
p.colSampleRateChangePerLevel,
p.scoreTreeInterval,
p.minSplitImprovement,
p.histogramType,
p.calibrateModel,
(p.calibrationFrame == null? null : p.calibrationFrame.name),
p.calibrationMethod,
p.checkConstantResponse,
p.inTrainingCheckpointsDir,
p.inTrainingCheckpointsTreeInterval,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Validate a set of DRF model builder parameters.
*/
public static Call<DRFV3> validate_parametersDrf(ModelBuilders z, DRFParametersV3 p) {
return z.validate_parametersDrf(
p.mtries,
p.binomialDoubleTrees,
p.sampleRate,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.ntrees,
p.maxDepth,
p.minRows,
p.nbins,
p.nbinsTopLevel,
p.nbinsCats,
p.r2Stopping,
p.seed,
p.buildTreeOneNode,
p.sampleRatePerClass,
p.colSampleRatePerTree,
p.colSampleRateChangePerLevel,
p.scoreTreeInterval,
p.minSplitImprovement,
p.histogramType,
p.calibrateModel,
(p.calibrationFrame == null? null : p.calibrationFrame.name),
p.calibrationMethod,
p.checkConstantResponse,
p.inTrainingCheckpointsDir,
p.inTrainingCheckpointsTreeInterval,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Train a GBM model.
*/
public static Call<GBMV3> trainGbm(ModelBuilders z, GBMParametersV3 p) {
return z.trainGbm(
p.learnRate,
p.learnRateAnnealing,
p.sampleRate,
p.colSampleRate,
p.monotoneConstraints,
p.maxAbsLeafnodePred,
p.predNoiseBandwidth,
p.interactionConstraints,
p.autoRebalance,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.ntrees,
p.maxDepth,
p.minRows,
p.nbins,
p.nbinsTopLevel,
p.nbinsCats,
p.r2Stopping,
p.seed,
p.buildTreeOneNode,
p.sampleRatePerClass,
p.colSampleRatePerTree,
p.colSampleRateChangePerLevel,
p.scoreTreeInterval,
p.minSplitImprovement,
p.histogramType,
p.calibrateModel,
(p.calibrationFrame == null? null : p.calibrationFrame.name),
p.calibrationMethod,
p.checkConstantResponse,
p.inTrainingCheckpointsDir,
p.inTrainingCheckpointsTreeInterval,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Validate a set of GBM model builder parameters.
*/
public static Call<GBMV3> validate_parametersGbm(ModelBuilders z, GBMParametersV3 p) {
return z.validate_parametersGbm(
p.learnRate,
p.learnRateAnnealing,
p.sampleRate,
p.colSampleRate,
p.monotoneConstraints,
p.maxAbsLeafnodePred,
p.predNoiseBandwidth,
p.interactionConstraints,
p.autoRebalance,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.ntrees,
p.maxDepth,
p.minRows,
p.nbins,
p.nbinsTopLevel,
p.nbinsCats,
p.r2Stopping,
p.seed,
p.buildTreeOneNode,
p.sampleRatePerClass,
p.colSampleRatePerTree,
p.colSampleRateChangePerLevel,
p.scoreTreeInterval,
p.minSplitImprovement,
p.histogramType,
p.calibrateModel,
(p.calibrationFrame == null? null : p.calibrationFrame.name),
p.calibrationMethod,
p.checkConstantResponse,
p.inTrainingCheckpointsDir,
p.inTrainingCheckpointsTreeInterval,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Train a IsolationForest model.
*/
public static Call<IsolationForestV3> trainIsolationforest(ModelBuilders z, IsolationForestParametersV3 p) {
return z.trainIsolationforest(
p.sampleSize,
p.sampleRate,
p.mtries,
p.contamination,
(p.validationResponseColumn == null? null : p.validationResponseColumn.columnName),
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.ntrees,
p.maxDepth,
p.minRows,
p.nbins,
p.nbinsTopLevel,
p.nbinsCats,
p.r2Stopping,
p.seed,
p.buildTreeOneNode,
p.sampleRatePerClass,
p.colSampleRatePerTree,
p.colSampleRateChangePerLevel,
p.scoreTreeInterval,
p.minSplitImprovement,
p.histogramType,
p.calibrateModel,
(p.calibrationFrame == null? null : p.calibrationFrame.name),
p.calibrationMethod,
p.checkConstantResponse,
p.inTrainingCheckpointsDir,
p.inTrainingCheckpointsTreeInterval,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Validate a set of IsolationForest model builder parameters.
*/
public static Call<IsolationForestV3> validate_parametersIsolationforest(ModelBuilders z, IsolationForestParametersV3 p) {
return z.validate_parametersIsolationforest(
p.sampleSize,
p.sampleRate,
p.mtries,
p.contamination,
(p.validationResponseColumn == null? null : p.validationResponseColumn.columnName),
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.ntrees,
p.maxDepth,
p.minRows,
p.nbins,
p.nbinsTopLevel,
p.nbinsCats,
p.r2Stopping,
p.seed,
p.buildTreeOneNode,
p.sampleRatePerClass,
p.colSampleRatePerTree,
p.colSampleRateChangePerLevel,
p.scoreTreeInterval,
p.minSplitImprovement,
p.histogramType,
p.calibrateModel,
(p.calibrationFrame == null? null : p.calibrationFrame.name),
p.calibrationMethod,
p.checkConstantResponse,
p.inTrainingCheckpointsDir,
p.inTrainingCheckpointsTreeInterval,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Train a ExtendedIsolationForest model.
*/
public static Call<ExtendedIsolationForestV3> trainExtendedisolationforest(ModelBuilders z, ExtendedIsolationForestParametersV3 p) {
return z.trainExtendedisolationforest(
p.ntrees,
p.sampleSize,
p.extensionLevel,
p.seed,
p.scoreTreeInterval,
p.disableTrainingMetrics,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Validate a set of ExtendedIsolationForest model builder parameters.
*/
public static Call<ExtendedIsolationForestV3> validate_parametersExtendedisolationforest(ModelBuilders z, ExtendedIsolationForestParametersV3 p) {
return z.validate_parametersExtendedisolationforest(
p.ntrees,
p.sampleSize,
p.extensionLevel,
p.seed,
p.scoreTreeInterval,
p.disableTrainingMetrics,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Train a Aggregator model.
*/
public static Call<AggregatorV99> trainAggregator(ModelBuilders z, AggregatorParametersV99 p) {
return z.trainAggregator(
p.transform,
p.pcaMethod,
p.k,
p.maxIterations,
p.targetNumExemplars,
p.relTolNumExemplars,
p.seed,
p.useAllFactorLevels,
p.saveMappingFrame,
p.numIterationWithoutNewExemplar,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Validate a set of Aggregator model builder parameters.
*/
public static Call<AggregatorV99> validate_parametersAggregator(ModelBuilders z, AggregatorParametersV99 p) {
return z.validate_parametersAggregator(
p.transform,
p.pcaMethod,
p.k,
p.maxIterations,
p.targetNumExemplars,
p.relTolNumExemplars,
p.seed,
p.useAllFactorLevels,
p.saveMappingFrame,
p.numIterationWithoutNewExemplar,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Train a Word2Vec model.
*/
public static Call<Word2VecV3> trainWord2vec(ModelBuilders z, Word2VecParametersV3 p) {
return z.trainWord2vec(
p.vecSize,
p.windowSize,
p.sentSampleRate,
p.normModel,
p.epochs,
p.minWordFreq,
p.initLearningRate,
p.wordModel,
(p.preTrained == null? null : p.preTrained.name),
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Validate a set of Word2Vec model builder parameters.
*/
public static Call<Word2VecV3> validate_parametersWord2vec(ModelBuilders z, Word2VecParametersV3 p) {
return z.validate_parametersWord2vec(
p.vecSize,
p.windowSize,
p.sentSampleRate,
p.normModel,
p.epochs,
p.minWordFreq,
p.initLearningRate,
p.wordModel,
(p.preTrained == null? null : p.preTrained.name),
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Train a StackedEnsemble model.
*/
public static Call<StackedEnsembleV99> trainStackedensemble(ModelBuilders z, StackedEnsembleParametersV99 p) {
return z.trainStackedensemble(
(p.baseModels == null? null : keyArrayToStringArray(p.baseModels)),
p.metalearnerAlgorithm,
p.metalearnerNfolds,
p.metalearnerFoldAssignment,
(p.metalearnerFoldColumn == null? null : p.metalearnerFoldColumn.columnName),
p.metalearnerTransform,
p.keepLeveloneFrame,
p.metalearnerParams,
(p.blendingFrame == null? null : p.blendingFrame.name),
p.seed,
p.scoreTrainingSamples,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
* Validate a set of StackedEnsemble model builder parameters.
*/
public static Call<StackedEnsembleV99> validate_parametersStackedensemble(ModelBuilders z, StackedEnsembleParametersV99 p) {
return z.validate_parametersStackedensemble(
(p.baseModels == null? null : keyArrayToStringArray(p.baseModels)),
p.metalearnerAlgorithm,
p.metalearnerNfolds,
p.metalearnerFoldAssignment,
(p.metalearnerFoldColumn == null? null : p.metalearnerFoldColumn.columnName),
p.metalearnerTransform,
p.keepLeveloneFrame,
p.metalearnerParams,
(p.blendingFrame == null? null : p.blendingFrame.name),
p.seed,
p.scoreTrainingSamples,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Train a CoxPH model.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p CoxPH builder parameters to submit
 * @return a {@code Call} that launches the training job when executed
 */
public static Call<CoxPHV3> trainCoxph(ModelBuilders z, CoxPHParametersV3 p) {
return z.trainCoxph(
// NOTE: argument order is positional and generator-defined; do not reorder.
(p.startColumn == null? null : p.startColumn.columnName),
(p.stopColumn == null? null : p.stopColumn.columnName),
p.stratifyBy,
p.ties,
p.init,
p.lreMin,
p.maxIterations,
p.interactionsOnly,
p.interactions,
p.interactionPairs,
p.useAllFactorLevels,
p.singleNodeMode,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Validate a set of CoxPH model builder parameters.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p CoxPH builder parameters to validate
 * @return a {@code Call} that performs the validation when executed
 */
public static Call<CoxPHV3> validate_parametersCoxph(ModelBuilders z, CoxPHParametersV3 p) {
return z.validate_parametersCoxph(
// NOTE: argument order is positional and generator-defined; do not reorder.
(p.startColumn == null? null : p.startColumn.columnName),
(p.stopColumn == null? null : p.stopColumn.columnName),
p.stratifyBy,
p.ties,
p.init,
p.lreMin,
p.maxIterations,
p.interactionsOnly,
p.interactions,
p.interactionPairs,
p.useAllFactorLevels,
p.singleNodeMode,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Train a Generic model.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p Generic builder parameters to submit
 * @return a {@code Call} that launches the training job when executed
 */
public static Call<GenericV3> trainGeneric(ModelBuilders z, GenericParametersV3 p) {
return z.trainGeneric(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.path,
(p.modelKey == null? null : p.modelKey.name),
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Validate a set of Generic model builder parameters.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p Generic builder parameters to validate
 * @return a {@code Call} that performs the validation when executed
 */
public static Call<GenericV3> validate_parametersGeneric(ModelBuilders z, GenericParametersV3 p) {
return z.validate_parametersGeneric(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.path,
(p.modelKey == null? null : p.modelKey.name),
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Train a GAM model.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p GAM builder parameters to submit
 * @return a {@code Call} that launches the training job when executed
 */
public static Call<GAMV3> trainGam(ModelBuilders z, GAMParametersV3 p) {
return z.trainGam(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.seed,
p.family,
p.tweedieVariancePower,
p.tweedieLinkPower,
p.theta,
p.solver,
p.alpha,
p.lambda,
p.startval,
p.lambdaSearch,
p.earlyStopping,
p.nlambdas,
p.standardize,
p.missingValuesHandling,
(p.plugValues == null? null : p.plugValues.name),
p.nonNegative,
p.maxIterations,
p.betaEpsilon,
p.objectiveEpsilon,
p.gradientEpsilon,
p.objReg,
p.link,
p.intercept,
p.prior,
p.coldStart,
p.lambdaMinRatio,
(p.betaConstraints == null? null : p.betaConstraints.name),
p.maxActivePredictors,
p.interactions,
p.interactionPairs,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.computePValues,
p.removeCollinearColumns,
p.storeKnotLocations,
p.numKnots,
p.splineOrders,
p.splinesNonNegative,
p.gamColumns,
p.scale,
p.bs,
p.keepGamCols,
p.standardizeTpGamCols,
p.scaleTpPenaltyMat,
p.knotIds,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Validate a set of GAM model builder parameters.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p GAM builder parameters to validate
 * @return a {@code Call} that performs the validation when executed
 */
public static Call<GAMV3> validate_parametersGam(ModelBuilders z, GAMParametersV3 p) {
return z.validate_parametersGam(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.seed,
p.family,
p.tweedieVariancePower,
p.tweedieLinkPower,
p.theta,
p.solver,
p.alpha,
p.lambda,
p.startval,
p.lambdaSearch,
p.earlyStopping,
p.nlambdas,
p.standardize,
p.missingValuesHandling,
(p.plugValues == null? null : p.plugValues.name),
p.nonNegative,
p.maxIterations,
p.betaEpsilon,
p.objectiveEpsilon,
p.gradientEpsilon,
p.objReg,
p.link,
p.intercept,
p.prior,
p.coldStart,
p.lambdaMinRatio,
(p.betaConstraints == null? null : p.betaConstraints.name),
p.maxActivePredictors,
p.interactions,
p.interactionPairs,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.computePValues,
p.removeCollinearColumns,
p.storeKnotLocations,
p.numKnots,
p.splineOrders,
p.splinesNonNegative,
p.gamColumns,
p.scale,
p.bs,
p.keepGamCols,
p.standardizeTpGamCols,
p.scaleTpPenaltyMat,
p.knotIds,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Train a ANOVAGLM model.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p ANOVAGLM builder parameters to submit
 * @return a {@code Call} that launches the training job when executed
 */
public static Call<ANOVAGLMV3> trainAnovaglm(ModelBuilders z, ANOVAGLMParametersV3 p) {
return z.trainAnovaglm(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.seed,
p.standardize,
p.family,
p.tweedieVariancePower,
p.tweedieLinkPower,
p.theta,
p.alpha,
p.lambda,
p.lambdaSearch,
p.solver,
p.missingValuesHandling,
(p.plugValues == null? null : p.plugValues.name),
p.nonNegative,
p.computePValues,
p.maxIterations,
p.link,
p.prior,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.highestInteractionTerm,
p.type,
p.earlyStopping,
p.saveTransformedFramekeys,
p.nparallelism,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Validate a set of ANOVAGLM model builder parameters.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p ANOVAGLM builder parameters to validate
 * @return a {@code Call} that performs the validation when executed
 */
public static Call<ANOVAGLMV3> validate_parametersAnovaglm(ModelBuilders z, ANOVAGLMParametersV3 p) {
return z.validate_parametersAnovaglm(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.seed,
p.standardize,
p.family,
p.tweedieVariancePower,
p.tweedieLinkPower,
p.theta,
p.alpha,
p.lambda,
p.lambdaSearch,
p.solver,
p.missingValuesHandling,
(p.plugValues == null? null : p.plugValues.name),
p.nonNegative,
p.computePValues,
p.maxIterations,
p.link,
p.prior,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.highestInteractionTerm,
p.type,
p.earlyStopping,
p.saveTransformedFramekeys,
p.nparallelism,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Train a PSVM model.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p PSVM builder parameters to submit
 * @return a {@code Call} that launches the training job when executed
 */
public static Call<PSVMV3> trainPsvm(ModelBuilders z, PSVMParametersV3 p) {
return z.trainPsvm(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.hyperParam,
p.kernelType,
p.gamma,
p.rankRatio,
p.positiveWeight,
p.negativeWeight,
p.disableTrainingMetrics,
p.svThreshold,
p.maxIterations,
p.factThreshold,
p.feasibleThreshold,
p.surrogateGapThreshold,
p.muFactor,
p.seed,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Validate a set of PSVM model builder parameters.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p PSVM builder parameters to validate
 * @return a {@code Call} that performs the validation when executed
 */
public static Call<PSVMV3> validate_parametersPsvm(ModelBuilders z, PSVMParametersV3 p) {
return z.validate_parametersPsvm(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.hyperParam,
p.kernelType,
p.gamma,
p.rankRatio,
p.positiveWeight,
p.negativeWeight,
p.disableTrainingMetrics,
p.svThreshold,
p.maxIterations,
p.factThreshold,
p.feasibleThreshold,
p.surrogateGapThreshold,
p.muFactor,
p.seed,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Train a RuleFit model.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p RuleFit builder parameters to submit
 * @return a {@code Call} that launches the training job when executed
 */
public static Call<RuleFitV3> trainRulefit(ModelBuilders z, RuleFitParametersV3 p) {
return z.trainRulefit(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.seed,
p.algorithm,
p.minRuleLength,
p.maxRuleLength,
p.maxNumRules,
p.modelType,
p.ruleGenerationNtrees,
p.removeDuplicates,
p.lambda,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Validate a set of RuleFit model builder parameters.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p RuleFit builder parameters to validate
 * @return a {@code Call} that performs the validation when executed
 */
public static Call<RuleFitV3> validate_parametersRulefit(ModelBuilders z, RuleFitParametersV3 p) {
return z.validate_parametersRulefit(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.seed,
p.algorithm,
p.minRuleLength,
p.maxRuleLength,
p.maxNumRules,
p.modelType,
p.ruleGenerationNtrees,
p.removeDuplicates,
p.lambda,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Train a UpliftDRF model.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p UpliftDRF builder parameters to submit
 * @return a {@code Call} that launches the training job when executed
 */
public static Call<UpliftDRFV3> trainUpliftdrf(ModelBuilders z, UpliftDRFParametersV3 p) {
return z.trainUpliftdrf(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.mtries,
p.sampleRate,
p.treatmentColumn,
p.upliftMetric,
p.auucType,
p.auucNbins,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.ntrees,
p.maxDepth,
p.minRows,
p.nbins,
p.nbinsTopLevel,
p.nbinsCats,
p.r2Stopping,
p.seed,
p.buildTreeOneNode,
p.sampleRatePerClass,
p.colSampleRatePerTree,
p.colSampleRateChangePerLevel,
p.scoreTreeInterval,
p.minSplitImprovement,
p.histogramType,
p.calibrateModel,
(p.calibrationFrame == null? null : p.calibrationFrame.name),
p.calibrationMethod,
p.checkConstantResponse,
p.inTrainingCheckpointsDir,
p.inTrainingCheckpointsTreeInterval,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Validate a set of UpliftDRF model builder parameters.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p UpliftDRF builder parameters to validate
 * @return a {@code Call} that performs the validation when executed
 */
public static Call<UpliftDRFV3> validate_parametersUpliftdrf(ModelBuilders z, UpliftDRFParametersV3 p) {
return z.validate_parametersUpliftdrf(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.mtries,
p.sampleRate,
p.treatmentColumn,
p.upliftMetric,
p.auucType,
p.auucNbins,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.ntrees,
p.maxDepth,
p.minRows,
p.nbins,
p.nbinsTopLevel,
p.nbinsCats,
p.r2Stopping,
p.seed,
p.buildTreeOneNode,
p.sampleRatePerClass,
p.colSampleRatePerTree,
p.colSampleRateChangePerLevel,
p.scoreTreeInterval,
p.minSplitImprovement,
p.histogramType,
p.calibrateModel,
(p.calibrationFrame == null? null : p.calibrationFrame.name),
p.calibrationMethod,
p.checkConstantResponse,
p.inTrainingCheckpointsDir,
p.inTrainingCheckpointsTreeInterval,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Train a ModelSelection model.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p ModelSelection builder parameters to submit
 * @return a {@code Call} that launches the training job when executed
 */
public static Call<ModelSelectionV3> trainModelselection(ModelBuilders z, ModelSelectionParametersV3 p) {
return z.trainModelselection(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.seed,
p.family,
p.tweedieVariancePower,
p.tweedieLinkPower,
p.theta,
p.solver,
p.alpha,
p.lambda,
p.lambdaSearch,
p.multinodeMode,
p.buildGlmModel,
p.earlyStopping,
p.nlambdas,
p.scoreIterationInterval,
p.standardize,
p.coldStart,
p.missingValuesHandling,
(p.plugValues == null? null : p.plugValues.name),
p.nonNegative,
p.maxIterations,
p.betaEpsilon,
p.objectiveEpsilon,
p.gradientEpsilon,
p.objReg,
p.link,
p.startval,
p.calcLike,
p.mode,
p.intercept,
p.prior,
p.lambdaMinRatio,
(p.betaConstraints == null? null : p.betaConstraints.name),
p.maxActivePredictors,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.computePValues,
p.removeCollinearColumns,
p.maxPredictorNumber,
p.minPredictorNumber,
p.nparallelism,
p.pValuesThreshold,
p.influence,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Validate a set of ModelSelection model builder parameters.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p ModelSelection builder parameters to validate
 * @return a {@code Call} that performs the validation when executed
 */
public static Call<ModelSelectionV3> validate_parametersModelselection(ModelBuilders z, ModelSelectionParametersV3 p) {
return z.validate_parametersModelselection(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.seed,
p.family,
p.tweedieVariancePower,
p.tweedieLinkPower,
p.theta,
p.solver,
p.alpha,
p.lambda,
p.lambdaSearch,
p.multinodeMode,
p.buildGlmModel,
p.earlyStopping,
p.nlambdas,
p.scoreIterationInterval,
p.standardize,
p.coldStart,
p.missingValuesHandling,
(p.plugValues == null? null : p.plugValues.name),
p.nonNegative,
p.maxIterations,
p.betaEpsilon,
p.objectiveEpsilon,
p.gradientEpsilon,
p.objReg,
p.link,
p.startval,
p.calcLike,
p.mode,
p.intercept,
p.prior,
p.lambdaMinRatio,
(p.betaConstraints == null? null : p.betaConstraints.name),
p.maxActivePredictors,
p.balanceClasses,
p.classSamplingFactors,
p.maxAfterBalanceSize,
p.maxConfusionMatrixSize,
p.computePValues,
p.removeCollinearColumns,
p.maxPredictorNumber,
p.minPredictorNumber,
p.nparallelism,
p.pValuesThreshold,
p.influence,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Train a IsotonicRegression model.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p IsotonicRegression builder parameters to submit
 * @return a {@code Call} that launches the training job when executed
 */
public static Call<IsotonicRegressionV3> trainIsotonicregression(ModelBuilders z, IsotonicRegressionParametersV3 p) {
return z.trainIsotonicregression(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.outOfBounds,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Validate a set of IsotonicRegression model builder parameters.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p IsotonicRegression builder parameters to validate
 * @return a {@code Call} that performs the validation when executed
 */
public static Call<IsotonicRegressionV3> validate_parametersIsotonicregression(ModelBuilders z, IsotonicRegressionParametersV3 p) {
return z.validate_parametersIsotonicregression(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.outOfBounds,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Train a DT model.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p DT builder parameters to submit
 * @return a {@code Call} that launches the training job when executed
 */
public static Call<DTV3> trainDt(ModelBuilders z, DTParametersV3 p) {
return z.trainDt(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.seed,
p.maxDepth,
p.minRows,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Validate a set of DT model builder parameters.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p DT builder parameters to validate
 * @return a {@code Call} that performs the validation when executed
 */
public static Call<DTV3> validate_parametersDt(ModelBuilders z, DTParametersV3 p) {
return z.validate_parametersDt(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.seed,
p.maxDepth,
p.minRows,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Train a HGLM model.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p HGLM builder parameters to submit
 * @return a {@code Call} that launches the training job when executed
 */
public static Call<HGLMV3> trainHglm(ModelBuilders z, HGLMParametersV3 p) {
return z.trainHglm(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.scoreIterationInterval,
p.seed,
p.missingValuesHandling,
(p.plugValues == null? null : p.plugValues.name),
p.family,
p.randFamily,
p.maxIterations,
p.initialFixedEffects,
(p.initialRandomEffects == null? null : p.initialRandomEffects.name),
(p.initialTMatrix == null? null : p.initialTMatrix.name),
p.tauUVarInit,
p.tauEVarInit,
p.randomColumns,
p.method,
p.emEpsilon,
p.randomIntercept,
p.groupColumn,
p.genSynData,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Validate a set of HGLM model builder parameters.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p HGLM builder parameters to validate
 * @return a {@code Call} that performs the validation when executed
 */
public static Call<HGLMV3> validate_parametersHglm(ModelBuilders z, HGLMParametersV3 p) {
return z.validate_parametersHglm(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.scoreIterationInterval,
p.seed,
p.missingValuesHandling,
(p.plugValues == null? null : p.plugValues.name),
p.family,
p.randFamily,
p.maxIterations,
p.initialFixedEffects,
(p.initialRandomEffects == null? null : p.initialRandomEffects.name),
(p.initialTMatrix == null? null : p.initialTMatrix.name),
p.tauUVarInit,
p.tauEVarInit,
p.randomColumns,
p.method,
p.emEpsilon,
p.randomIntercept,
p.groupColumn,
p.genSynData,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Train a AdaBoost model.
 *
 * <p>Adapter that flattens the parameter schema {@code p} into the flat,
 * positional argument list of the service method. Key and column wrapper
 * fields are unwrapped to their plain string names via null-safe ternaries.
 *
 * @param z service proxy exposing the ModelBuilders endpoints
 * @param p AdaBoost builder parameters to submit
 * @return a {@code Call} that launches the training job when executed
 */
public static Call<AdaBoostV3> trainAdaboost(ModelBuilders z, AdaBoostParametersV3 p) {
return z.trainAdaboost(
// NOTE: argument order is positional and generator-defined; do not reorder.
p.nlearners,
p.weakLearner,
p.learnRate,
p.weakLearnerParams,
p.seed,
// Common Model.Parameters fields shared by all algorithms.
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Validate a set of AdaBoost model builder parameters.
 *
 * <p>Thin convenience wrapper: flattens the {@code AdaBoostParametersV3} POJO into the
 * positional argument list of the generated Retrofit proxy method.  Key, frame and
 * column references are unwrapped to their plain string names, with null guards so
 * that unset references are transmitted as nulls.  The argument order mirrors the
 * proxy signature exactly and must not be changed.
 *
 * @param z Retrofit proxy for the model-builder endpoints
 * @param p parameter POJO to flatten and send
 * @return a {@code Call} yielding the server-side validation result
 */
public static Call<AdaBoostV3> validate_parametersAdaboost(ModelBuilders z, AdaBoostParametersV3 p) {
// Positional mapping onto the generated proxy; order is significant.
return z.validate_parametersAdaboost(
p.nlearners,
p.weakLearner,
p.learnRate,
p.weakLearnerParams,
p.seed,
(p.modelId == null? null : p.modelId.name),
(p.trainingFrame == null? null : p.trainingFrame.name),
(p.validationFrame == null? null : p.validationFrame.name),
p.nfolds,
p.keepCrossValidationModels,
p.keepCrossValidationPredictions,
p.keepCrossValidationFoldAssignment,
p.parallelizeCrossValidation,
p.distribution,
p.tweediePower,
p.quantileAlpha,
p.huberAlpha,
(p.responseColumn == null? null : p.responseColumn.columnName),
(p.weightsColumn == null? null : p.weightsColumn.columnName),
(p.offsetColumn == null? null : p.offsetColumn.columnName),
(p.foldColumn == null? null : p.foldColumn.columnName),
p.foldAssignment,
p.categoricalEncoding,
p.maxCategoricalLevels,
p.ignoredColumns,
p.ignoreConstCols,
p.scoreEachIteration,
(p.checkpoint == null? null : p.checkpoint.name),
p.stoppingRounds,
p.maxRuntimeSecs,
p.stoppingMetric,
p.stoppingTolerance,
p.gainsliftBins,
p.customMetricFunc,
p.customDistributionFunc,
p.exportCheckpointsDir,
p.aucType
);
}
/**
 * Return an array of Strings for an array of keys.
 *
 * @param keys keys to convert; may be {@code null}
 * @return an array of the keys' names, the same length and order as {@code keys};
 *         {@code null} when {@code keys} is {@code null}.  A {@code null} entry maps
 *         to a {@code null} name.
 */
public static String[] keyArrayToStringArray(KeyV3[] keys) {
  if (keys == null) return null;
  String[] ids = new String[keys.length];
  for (int i = 0; i < keys.length; i++) {
    // Tolerate null elements: the previous implementation dereferenced key.name
    // unconditionally and threw a NullPointerException on a null entry.
    ids[i] = keys[i] == null ? null : keys[i].name;
  }
  return ids;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/ModelMetrics.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface ModelMetrics {
  /**
   * Return the saved scoring metrics for the specified Model and Frame.
   *
   * @param model Key of Model of interest (optional)
   * @param frame Key of Frame of interest (optional)
   * @param predictions_frame Key of predictions frame, if predictions are requested (optional)
   * @param deviances_frame Key for the frame containing per-observation deviances (optional)
   * @param reconstruction_error Compute reconstruction error (optional, only for Deep Learning AutoEncoder models)
   * @param reconstruction_error_per_feature Compute reconstruction error per feature (optional, only for Deep
   *        Learning AutoEncoder models)
   * @param deep_features_hidden_layer Extract Deep Features for given hidden layer (optional, only for Deep Learning
   *        models)
   * @param deep_features_hidden_layer_name Extract Deep Features for given hidden layer by name (optional, only for
   *        Deep Water models)
   * @param reconstruct_train Reconstruct original training frame (optional, only for GLRM models)
   * @param project_archetypes Project GLRM archetypes back into original feature space (optional, only for GLRM
   *        models)
   * @param reverse_transform Reverse transformation applied during training to model output (optional, only for GLRM
   *        models)
   * @param leaf_node_assignment Return the leaf node assignment (optional, only for DRF/GBM models)
   * @param leaf_node_assignment_type Type of the leaf node assignment (optional, only for DRF/GBM models)
   * @param predict_staged_proba Predict the class probabilities at each stage (optional, only for GBM models)
   * @param predict_contributions Predict the feature contributions - Shapley values (optional, only for DRF, GBM and
   *        XGBoost models)
   * @param row_to_tree_assignment Return which row is used in which tree (optional, only for GBM models)
   * @param predict_contributions_output_format Specify how to output feature contributions in XGBoost - XGBoost by
   *        default outputs contributions for 1-hot encoded features, specifying a Compact output format will produce
   *        a per-feature contribution
   * @param top_n Only for predict_contributions function - sort Shapley values and return top_n highest (optional)
   * @param bottom_n Only for predict_contributions function - sort Shapley values and return bottom_n lowest
   *        (optional)
   * @param compare_abs Only for predict_contributions function - sort absolute Shapley values (optional)
   * @param feature_frequencies Retrieve the feature frequencies on paths in trees in tree-based models (optional,
   *        only for GBM, DRF and Isolation Forest)
   * @param exemplar_index Retrieve all members for a given exemplar (optional, only for Aggregator models)
   * @param deviances Compute the deviances per row (optional, only for classification or regression models)
   * @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
   * @param auc_type Set default multinomial AUC type. Must be one of: "AUTO", "NONE", "MACRO_OVR", "WEIGHTED_OVR",
   *        "MACRO_OVO", "WEIGHTED_OVO". Default is "NONE" (optional, only for multinomial classification).
   * @param auuc_type Set default AUUC type for uplift binomial classification. Must be one of: "AUTO", "qini",
   *        "lift", "gain". Default is "AUTO" (optional, only for uplift binomial classification).
   * @param custom_auuc_thresholds Custom AUUC thresholds (for uplift binomial classification).
   * @param auuc_nbins Set number of bins to calculate AUUC. Must be -1 or higher than 0. Default is -1 which means
   *        1000 (optional, only for uplift binomial classification).
   * @param background_frame Specify background frame used as a reference for calculating SHAP.
   * @param output_space If true, transform contributions so that they sum up to the difference in the output space
   *        (applicable iff contributions are in link space). Note that this transformation is an approximation and
   *        the contributions won't be exact SHAP values.
   * @param output_per_reference If true, return contributions against each background sample (aka reference), i.e.
   *        phi(feature, x, bg), otherwise return contributions averaged over the background sample
   *        (phi(feature, x) = E_{bg} phi(feature, x, bg))
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/ModelMetrics/models/{model}/frames/{frame}")
  Call<ModelMetricsListSchemaV3> fetch(
    @Path("model") String model,
    @Path("frame") String frame,
    @Query("predictions_frame") String predictions_frame,
    @Query("deviances_frame") String deviances_frame,
    @Query("reconstruction_error") boolean reconstruction_error,
    @Query("reconstruction_error_per_feature") boolean reconstruction_error_per_feature,
    @Query("deep_features_hidden_layer") int deep_features_hidden_layer,
    @Query("deep_features_hidden_layer_name") String deep_features_hidden_layer_name,
    @Query("reconstruct_train") boolean reconstruct_train,
    @Query("project_archetypes") boolean project_archetypes,
    @Query("reverse_transform") boolean reverse_transform,
    @Query("leaf_node_assignment") boolean leaf_node_assignment,
    @Query("leaf_node_assignment_type") ModelLeafNodeAssignmentLeafNodeAssignmentType leaf_node_assignment_type,
    @Query("predict_staged_proba") boolean predict_staged_proba,
    @Query("predict_contributions") boolean predict_contributions,
    @Query("row_to_tree_assignment") boolean row_to_tree_assignment,
    @Query("predict_contributions_output_format") ModelContributionsContributionsOutputFormat predict_contributions_output_format,
    @Query("top_n") int top_n,
    @Query("bottom_n") int bottom_n,
    @Query("compare_abs") boolean compare_abs,
    @Query("feature_frequencies") boolean feature_frequencies,
    @Query("exemplar_index") int exemplar_index,
    @Query("deviances") boolean deviances,
    @Query("custom_metric_func") String custom_metric_func,
    @Query("auc_type") String auc_type,
    @Query("auuc_type") String auuc_type,
    @Query("custom_auuc_thresholds") double[] custom_auuc_thresholds,
    @Query("auuc_nbins") int auuc_nbins,
    @Query("background_frame") String background_frame,
    @Query("output_space") boolean output_space,
    @Query("output_per_reference") boolean output_per_reference,
    @Query("_exclude_fields") String _exclude_fields
  );

  /** Convenience overload of {@code fetch}: server-side defaults are used for all optional parameters. */
  @GET("/3/ModelMetrics/models/{model}/frames/{frame}")
  Call<ModelMetricsListSchemaV3> fetch(
    @Path("model") String model,
    @Path("frame") String frame
  );

  /**
   * Delete the saved scoring metrics for the specified Model and Frame.
   *
   * <p>The optional parameters are identical to those of {@code fetch}; see that method's
   * documentation for the parameter semantics.
   *
   * <p>NOTE: parameters are sent as {@code @Query} parameters, not {@code @Field}s — Retrofit
   * rejects {@code @Field} on methods lacking {@code @FormUrlEncoded}, and a {@code @DELETE}
   * request cannot carry a form-encoded body.
   *
   * @param model Key of Model of interest (optional)
   * @param frame Key of Frame of interest (optional)
   */
  @DELETE("/3/ModelMetrics/models/{model}/frames/{frame}")
  Call<ModelMetricsListSchemaV3> delete(
    @Path("model") String model,
    @Path("frame") String frame,
    @Query("predictions_frame") String predictions_frame,
    @Query("deviances_frame") String deviances_frame,
    @Query("reconstruction_error") boolean reconstruction_error,
    @Query("reconstruction_error_per_feature") boolean reconstruction_error_per_feature,
    @Query("deep_features_hidden_layer") int deep_features_hidden_layer,
    @Query("deep_features_hidden_layer_name") String deep_features_hidden_layer_name,
    @Query("reconstruct_train") boolean reconstruct_train,
    @Query("project_archetypes") boolean project_archetypes,
    @Query("reverse_transform") boolean reverse_transform,
    @Query("leaf_node_assignment") boolean leaf_node_assignment,
    @Query("leaf_node_assignment_type") ModelLeafNodeAssignmentLeafNodeAssignmentType leaf_node_assignment_type,
    @Query("predict_staged_proba") boolean predict_staged_proba,
    @Query("predict_contributions") boolean predict_contributions,
    @Query("row_to_tree_assignment") boolean row_to_tree_assignment,
    @Query("predict_contributions_output_format") ModelContributionsContributionsOutputFormat predict_contributions_output_format,
    @Query("top_n") int top_n,
    @Query("bottom_n") int bottom_n,
    @Query("compare_abs") boolean compare_abs,
    @Query("feature_frequencies") boolean feature_frequencies,
    @Query("exemplar_index") int exemplar_index,
    @Query("deviances") boolean deviances,
    @Query("custom_metric_func") String custom_metric_func,
    @Query("auc_type") String auc_type,
    @Query("auuc_type") String auuc_type,
    @Query("custom_auuc_thresholds") double[] custom_auuc_thresholds,
    @Query("auuc_nbins") int auuc_nbins,
    @Query("background_frame") String background_frame,
    @Query("output_space") boolean output_space,
    @Query("output_per_reference") boolean output_per_reference,
    @Query("_exclude_fields") String _exclude_fields
  );

  /** Convenience overload of {@code delete}: server-side defaults are used for all optional parameters. */
  @DELETE("/3/ModelMetrics/models/{model}/frames/{frame}")
  Call<ModelMetricsListSchemaV3> delete(
    @Path("model") String model,
    @Path("frame") String frame
  );

  /**
   * Return the scoring metrics for the specified Frame with the specified Model. If the Frame has already been scored
   * with the Model then cached results will be returned; otherwise predictions for all rows in the Frame will be
   * generated and the metrics will be returned.
   *
   * <p>The optional parameters are identical to those of {@code fetch}; see that method's
   * documentation for the parameter semantics.
   *
   * @param model Key of Model of interest (optional)
   * @param frame Key of Frame of interest (optional)
   */
  @FormUrlEncoded
  @POST("/3/ModelMetrics/models/{model}/frames/{frame}")
  Call<ModelMetricsListSchemaV3> score(
    @Path("model") String model,
    @Path("frame") String frame,
    @Field("predictions_frame") String predictions_frame,
    @Field("deviances_frame") String deviances_frame,
    @Field("reconstruction_error") boolean reconstruction_error,
    @Field("reconstruction_error_per_feature") boolean reconstruction_error_per_feature,
    @Field("deep_features_hidden_layer") int deep_features_hidden_layer,
    @Field("deep_features_hidden_layer_name") String deep_features_hidden_layer_name,
    @Field("reconstruct_train") boolean reconstruct_train,
    @Field("project_archetypes") boolean project_archetypes,
    @Field("reverse_transform") boolean reverse_transform,
    @Field("leaf_node_assignment") boolean leaf_node_assignment,
    @Field("leaf_node_assignment_type") ModelLeafNodeAssignmentLeafNodeAssignmentType leaf_node_assignment_type,
    @Field("predict_staged_proba") boolean predict_staged_proba,
    @Field("predict_contributions") boolean predict_contributions,
    @Field("row_to_tree_assignment") boolean row_to_tree_assignment,
    @Field("predict_contributions_output_format") ModelContributionsContributionsOutputFormat predict_contributions_output_format,
    @Field("top_n") int top_n,
    @Field("bottom_n") int bottom_n,
    @Field("compare_abs") boolean compare_abs,
    @Field("feature_frequencies") boolean feature_frequencies,
    @Field("exemplar_index") int exemplar_index,
    @Field("deviances") boolean deviances,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("auc_type") String auc_type,
    @Field("auuc_type") String auuc_type,
    @Field("custom_auuc_thresholds") double[] custom_auuc_thresholds,
    @Field("auuc_nbins") int auuc_nbins,
    @Field("background_frame") String background_frame,
    @Field("output_space") boolean output_space,
    @Field("output_per_reference") boolean output_per_reference,
    @Field("_exclude_fields") String _exclude_fields
  );

  /**
   * Convenience overload of {@code score}: server-side defaults are used for all optional parameters.
   * NOTE: deliberately not annotated {@code @FormUrlEncoded} — Retrofit rejects form-encoded methods
   * that declare no {@code @Field} parameter; an empty request body is sent instead.
   */
  @POST("/3/ModelMetrics/models/{model}/frames/{frame}")
  Call<ModelMetricsListSchemaV3> score(
    @Path("model") String model,
    @Path("frame") String frame
  );

  /**
   * Create a ModelMetrics object from the predicted and actual values, and a domain for classification problems or a
   * distribution family for regression problems.
   *
   * @param predictions_frame Predictions Frame.
   * @param actuals_frame Actuals Frame.
   * @param weights_frame Weights Frame.
   * @param treatment_frame Treatment Frame.
   * @param domain Domain (for classification).
   * @param distribution Distribution (for regression).
   * @param auc_type Default AUC type (for multinomial classification).
   * @param auuc_type Default AUUC type (for uplift binomial classification).
   * @param auuc_nbins Number of bins to calculate AUUC (for uplift binomial classification).
   * @param custom_auuc_thresholds Custom AUUC thresholds (for uplift binomial classification).
   */
  @FormUrlEncoded
  @POST("/3/ModelMetrics/predictions_frame/{predictions_frame}/actuals_frame/{actuals_frame}")
  Call<ModelMetricsMakerSchemaV3> make(
    @Path("predictions_frame") String predictions_frame,
    @Path("actuals_frame") String actuals_frame,
    @Field("weights_frame") String weights_frame,
    @Field("treatment_frame") String treatment_frame,
    @Field("domain") String[] domain,
    @Field("distribution") GenmodelutilsDistributionFamily distribution,
    @Field("auc_type") MultinomialAucType auc_type,
    @Field("auuc_type") AUUCType auuc_type,
    @Field("auuc_nbins") int auuc_nbins,
    @Field("custom_auuc_thresholds") double[] custom_auuc_thresholds
  );

  /**
   * Convenience overload of {@code make}: server-side defaults are used for all optional parameters.
   * NOTE: deliberately not annotated {@code @FormUrlEncoded} — Retrofit rejects form-encoded methods
   * that declare no {@code @Field} parameter; an empty request body is sent instead.
   */
  @POST("/3/ModelMetrics/predictions_frame/{predictions_frame}/actuals_frame/{actuals_frame}")
  Call<ModelMetricsMakerSchemaV3> make(
    @Path("predictions_frame") String predictions_frame,
    @Path("actuals_frame") String actuals_frame
  );
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Models.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface Models {
/**
 * Return the specified Model from the H2O distributed K/V store, optionally with the list of compatible Frames.
 * @param model_id Name of Model of interest
 * @param preview Return potentially abridged model suitable for viewing in a browser
 * @param find_compatible_frames Find and return compatible frames?
 * @param export_cross_validation_predictions Flag indicating whether the exported model artifact should also
 *        include CV Holdout Frame predictions
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/3/Models/{model_id}")
Call<ModelsV3> fetch(
@Path("model_id") String model_id,
@Query("preview") boolean preview,
@Query("find_compatible_frames") boolean find_compatible_frames,
@Query("export_cross_validation_predictions") boolean export_cross_validation_predictions,
@Query("_exclude_fields") String _exclude_fields
);
/** Convenience overload of {@code fetch}: server-side defaults are used for all optional parameters. */
@GET("/3/Models/{model_id}")
Call<ModelsV3> fetch(@Path("model_id") String model_id);
/**
 * Return all Models from the H2O distributed K/V store.
 * @param model_id Name of Model of interest
 * @param preview Return potentially abridged model suitable for viewing in a browser
 * @param find_compatible_frames Find and return compatible frames?
 * @param export_cross_validation_predictions Flag indicating whether the exported model artifact should also
 *        include CV Holdout Frame predictions
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/3/Models")
Call<ModelsV3> list(
@Query("model_id") String model_id,
@Query("preview") boolean preview,
@Query("find_compatible_frames") boolean find_compatible_frames,
@Query("export_cross_validation_predictions") boolean export_cross_validation_predictions,
@Query("_exclude_fields") String _exclude_fields
);
/** Convenience overload of {@code list}: server-side defaults are used for all optional parameters. */
@GET("/3/Models")
Call<ModelsV3> list();
/**
 * Delete the specified Model from the H2O distributed K/V store.
 *
 * <p>NOTE: parameters are sent as {@code @Query} parameters, not {@code @Field}s — Retrofit
 * rejects {@code @Field} on methods lacking {@code @FormUrlEncoded}, and a {@code @DELETE}
 * request cannot carry a form-encoded body.
 *
 * @param model_id Name of Model of interest
 * @param preview Return potentially abridged model suitable for viewing in a browser
 * @param find_compatible_frames Find and return compatible frames?
 * @param export_cross_validation_predictions Flag indicating whether the exported model artifact should also
 *        include CV Holdout Frame predictions
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@DELETE("/3/Models/{model_id}")
Call<ModelsV3> delete(
@Path("model_id") String model_id,
@Query("preview") boolean preview,
@Query("find_compatible_frames") boolean find_compatible_frames,
@Query("export_cross_validation_predictions") boolean export_cross_validation_predictions,
@Query("_exclude_fields") String _exclude_fields
);
/** Convenience overload of {@code delete}: server-side defaults are used for all optional parameters. */
@DELETE("/3/Models/{model_id}")
Call<ModelsV3> delete(@Path("model_id") String model_id);
/**
 * Delete all Models from the H2O distributed K/V store.
 *
 * <p>NOTE: parameters are sent as {@code @Query} parameters, not {@code @Field}s — Retrofit
 * rejects {@code @Field} on methods lacking {@code @FormUrlEncoded}, and a {@code @DELETE}
 * request cannot carry a form-encoded body.
 *
 * @param model_id Name of Model of interest
 * @param preview Return potentially abridged model suitable for viewing in a browser
 * @param find_compatible_frames Find and return compatible frames?
 * @param export_cross_validation_predictions Flag indicating whether the exported model artifact should also
 *        include CV Holdout Frame predictions
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@DELETE("/3/Models")
Call<ModelsV3> deleteAll(
@Query("model_id") String model_id,
@Query("preview") boolean preview,
@Query("find_compatible_frames") boolean find_compatible_frames,
@Query("export_cross_validation_predictions") boolean export_cross_validation_predictions,
@Query("_exclude_fields") String _exclude_fields
);
/** Convenience overload of {@code deleteAll}: server-side defaults are used for all optional parameters. */
@DELETE("/3/Models")
Call<ModelsV3> deleteAll();
/**
 * Return potentially abridged model suitable for viewing in a browser (currently only used for java model code).
 * @param model_id Name of Model of interest
 * @param preview Return potentially abridged model suitable for viewing in a browser
 * @param find_compatible_frames Find and return compatible frames?
 * @param export_cross_validation_predictions Flag indicating whether the exported model artifact should also
 *        include CV Holdout Frame predictions
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/3/Models.java/{model_id}/preview")
Call<StreamingSchema> fetchPreview(
@Path("model_id") String model_id,
@Query("preview") boolean preview,
@Query("find_compatible_frames") boolean find_compatible_frames,
@Query("export_cross_validation_predictions") boolean export_cross_validation_predictions,
@Query("_exclude_fields") String _exclude_fields
);
/** Convenience overload of {@code fetchPreview}: server-side defaults are used for all optional parameters. */
@GET("/3/Models.java/{model_id}/preview")
Call<StreamingSchema> fetchPreview(@Path("model_id") String model_id);
/**
 * [DEPRECATED] Return the stream containing model implementation in Java code.
 * @param model_id Name of Model of interest
 * @param preview Return potentially abridged model suitable for viewing in a browser
 * @param find_compatible_frames Find and return compatible frames?
 * @param export_cross_validation_predictions Flag indicating whether the exported model artifact should also
 *        include CV Holdout Frame predictions
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/3/Models.java/{model_id}")
Call<StreamingSchema> fetchJavaCode(
@Path("model_id") String model_id,
@Query("preview") boolean preview,
@Query("find_compatible_frames") boolean find_compatible_frames,
@Query("export_cross_validation_predictions") boolean export_cross_validation_predictions,
@Query("_exclude_fields") String _exclude_fields
);
/** Convenience overload of {@code fetchJavaCode}: server-side defaults are used for all optional parameters. */
@GET("/3/Models.java/{model_id}")
Call<StreamingSchema> fetchJavaCode(@Path("model_id") String model_id);
/**
 * Return the model in the MOJO format. This format can then be interpreted by gen_model.jar in order to perform
 * prediction / scoring. Currently works for GBM and DRF algos only.
 * @param model_id Name of Model of interest
 * @param preview Return potentially abridged model suitable for viewing in a browser
 * @param find_compatible_frames Find and return compatible frames?
 * @param export_cross_validation_predictions Flag indicating whether the exported model artifact should also
 *        include CV Holdout Frame predictions
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/3/Models/{model_id}/mojo")
Call<StreamingSchema> fetchMojo(
@Path("model_id") String model_id,
@Query("preview") boolean preview,
@Query("find_compatible_frames") boolean find_compatible_frames,
@Query("export_cross_validation_predictions") boolean export_cross_validation_predictions,
@Query("_exclude_fields") String _exclude_fields
);
/** Convenience overload of {@code fetchMojo}: server-side defaults are used for all optional parameters. */
@GET("/3/Models/{model_id}/mojo")
Call<StreamingSchema> fetchMojo(@Path("model_id") String model_id);
/**
 * Return the model in the binary format.
 * @param model_id Name of Model of interest
 * @param preview Return potentially abridged model suitable for viewing in a browser
 * @param find_compatible_frames Find and return compatible frames?
 * @param export_cross_validation_predictions Flag indicating whether the exported model artifact should also
 *        include CV Holdout Frame predictions
 * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
 *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
 */
@GET("/3/Models.fetch.bin/{model_id}")
Call<StreamingSchema> fetchBinaryModel(
@Path("model_id") String model_id,
@Query("preview") boolean preview,
@Query("find_compatible_frames") boolean find_compatible_frames,
@Query("export_cross_validation_predictions") boolean export_cross_validation_predictions,
@Query("_exclude_fields") String _exclude_fields
);
/** Convenience overload of {@code fetchBinaryModel}: server-side defaults are used for all optional parameters. */
@GET("/3/Models.fetch.bin/{model_id}")
Call<StreamingSchema> fetchBinaryModel(@Path("model_id") String model_id);
/**
* Import given binary model into H2O.
* @param model_id Save imported model under given key into DKV.
* @param dir Source directory (hdfs, s3, local) containing serialized model
* @param force Override existing model in case it exists or throw exception if set to false
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
@FormUrlEncoded
@POST("/99/Models.bin/{model_id}")
Call<ModelsV3> importModel(
@Path("model_id") String model_id,
@Field("dir") String dir,
@Field("force") boolean force,
@Field("_exclude_fields") String _exclude_fields
);
// Convenience overload: import using server-side defaults for all optional form fields.
@FormUrlEncoded
@POST("/99/Models.bin/{model_id}")
Call<ModelsV3> importModel(@Path("model_id") String model_id);
/**
* Export given model.
* @param model_id Name of Model of interest
* @param dir Destination file (hdfs, s3, local)
* @param force Overwrite destination file in case it exists or throw exception if set to false.
* @param export_cross_validation_predictions Flag indicating whether the exported model artifact should also
* include CV Holdout Frame predictions
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
@GET("/99/Models.bin/{model_id}")
Call<ModelExportV3> exportModel(
@Path("model_id") String model_id,
@Query("dir") String dir,
@Query("force") boolean force,
@Query("export_cross_validation_predictions") boolean export_cross_validation_predictions,
@Query("_exclude_fields") String _exclude_fields
);
// Convenience overload: export using server-side defaults for all optional query parameters.
@GET("/99/Models.bin/{model_id}")
Call<ModelExportV3> exportModel(@Path("model_id") String model_id);
/**
* Upload given binary model into H2O.
* @param model_id Save imported model under given key into DKV.
* @param dir Source directory (hdfs, s3, local) containing serialized model
* @param force Override existing model in case it exists or throw exception if set to false
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
@FormUrlEncoded
@POST("/99/Models.upload.bin/{model_id}")
Call<ModelsV3> uploadModel(
@Path("model_id") String model_id,
@Field("dir") String dir,
@Field("force") boolean force,
@Field("_exclude_fields") String _exclude_fields
);
// Convenience overload: upload using server-side defaults for all optional form fields.
@FormUrlEncoded
@POST("/99/Models.upload.bin/{model_id}")
Call<ModelsV3> uploadModel(@Path("model_id") String model_id);
/**
* Export given model as Mojo.
* @param model_id Name of Model of interest
* @param dir Destination file (hdfs, s3, local)
* @param force Overwrite destination file in case it exists or throw exception if set to false.
* @param export_cross_validation_predictions Flag indicating whether the exported model artifact should also
* include CV Holdout Frame predictions
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
@GET("/99/Models.mojo/{model_id}")
Call<ModelExportV3> exportMojo(
@Path("model_id") String model_id,
@Query("dir") String dir,
@Query("force") boolean force,
@Query("export_cross_validation_predictions") boolean export_cross_validation_predictions,
@Query("_exclude_fields") String _exclude_fields
);
// Convenience overload: export the MOJO using server-side defaults for all optional query parameters.
@GET("/99/Models.mojo/{model_id}")
Call<ModelExportV3> exportMojo(@Path("model_id") String model_id);
/**
* Export given model details in json format.
* @param model_id Name of Model of interest
* @param dir Destination file (hdfs, s3, local)
* @param force Overwrite destination file in case it exists or throw exception if set to false.
* @param export_cross_validation_predictions Flag indicating whether the exported model artifact should also
* include CV Holdout Frame predictions
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
@GET("/99/Models/{model_id}/json")
Call<ModelExportV3> exportModelDetails(
@Path("model_id") String model_id,
@Query("dir") String dir,
@Query("force") boolean force,
@Query("export_cross_validation_predictions") boolean export_cross_validation_predictions,
@Query("_exclude_fields") String _exclude_fields
);
// Convenience overload: export model details using server-side defaults for all optional query parameters.
@GET("/99/Models/{model_id}/json")
Call<ModelExportV3> exportModelDetails(@Path("model_id") String model_id);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Modelsinfo.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
// Retrofit proxy for the /4/modelsinfo endpoint (auto-generated H2O REST binding).
public interface Modelsinfo {
/**
* Return basic information about all models available to train.
* @param __schema Url describing the schema of the current object.
*/
@GET("/4/modelsinfo")
Call<ModelsInfoV4> modelsInfo(@Query("__schema") String __schema);
// Convenience overload: omit the optional __schema query parameter.
@GET("/4/modelsinfo")
Call<ModelsInfoV4> modelsInfo();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/NetworkTest.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
// Retrofit proxy for the /3/NetworkTest endpoint (auto-generated H2O REST binding).
public interface NetworkTest {
/**
* Run a network test to measure the performance of the cluster interconnect.
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
@GET("/3/NetworkTest")
Call<NetworkTestV3> fetch(@Query("_exclude_fields") String _exclude_fields);
// Convenience overload: fetch the full result without excluding any fields.
@GET("/3/NetworkTest")
Call<NetworkTestV3> fetch();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/NodePersistentStorage.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/**
 * Retrofit proxy for the /3/NodePersistentStorage endpoints (auto-generated H2O REST binding).
 *
 * FIX over the generated code: {@code delete} declared {@code @Field} parameters on a plain
 * {@code @DELETE} method without {@code @FormUrlEncoded}. Retrofit rejects that combination at
 * runtime ("@Field parameters can only be used with form encoding"), and {@code @FormUrlEncoded}
 * itself is only legal on HTTP methods that allow a request body. The fix uses
 * {@code @HTTP(method = "DELETE", hasBody = true)} + {@code @FormUrlEncoded} so the optional
 * fields are still sent as a form-encoded body, preserving the wire format the generator intended.
 */
public interface NodePersistentStorage {
  /**
   * Return true or false.
   * @param category Category name
   * @param name Key name
   * @param value Value
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/NodePersistentStorage/categories/{category}/names/{name}/exists")
  Call<NodePersistentStorageV3> exists(
    @Path("category") String category,
    @Path("name") String name,
    @Query("value") String value,
    @Query("_exclude_fields") String _exclude_fields
  );
  // Convenience overload: only the required path parameters.
  @GET("/3/NodePersistentStorage/categories/{category}/names/{name}/exists")
  Call<NodePersistentStorageV3> exists(
    @Path("category") String category,
    @Path("name") String name
  );
  /**
   * Return true or false.
   * @param category Category name
   * @param name Key name
   * @param value Value
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/NodePersistentStorage/configured")
  Call<NodePersistentStorageV3> configured(
    @Query("category") String category,
    @Query("name") String name,
    @Query("value") String value,
    @Query("_exclude_fields") String _exclude_fields
  );
  // Convenience overload: no query parameters.
  @GET("/3/NodePersistentStorage/configured")
  Call<NodePersistentStorageV3> configured();
  /**
   * Store a named value.
   * @param category Category name
   * @param name Key name
   * @param value Value
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/3/NodePersistentStorage/{category}/{name}")
  Call<NodePersistentStorageV3> put_with_name(
    @Path("category") String category,
    @Path("name") String name,
    @Field("value") String value,
    @Field("_exclude_fields") String _exclude_fields
  );
  // Convenience overload: only the required path parameters.
  @FormUrlEncoded
  @POST("/3/NodePersistentStorage/{category}/{name}")
  Call<NodePersistentStorageV3> put_with_name(
    @Path("category") String category,
    @Path("name") String name
  );
  /**
   * Return value for a given name.
   * @param category Category name
   * @param name Key name
   * @param value Value
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/NodePersistentStorage/{category}/{name}")
  Call<NodePersistentStorageV3> get_as_string(
    @Path("category") String category,
    @Path("name") String name,
    @Query("value") String value,
    @Query("_exclude_fields") String _exclude_fields
  );
  // Convenience overload: only the required path parameters.
  @GET("/3/NodePersistentStorage/{category}/{name}")
  Call<NodePersistentStorageV3> get_as_string(
    @Path("category") String category,
    @Path("name") String name
  );
  /**
   * Delete a key.
   * @param category Category name
   * @param name Key name
   * @param value Value
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  // NOTE(review): DELETE with a form-encoded body assumes the H2O server reads the body of
  // DELETE requests — confirm against the server-side handler.
  @FormUrlEncoded
  @HTTP(method = "DELETE", path = "/3/NodePersistentStorage/{category}/{name}", hasBody = true)
  Call<NodePersistentStorageV3> delete(
    @Path("category") String category,
    @Path("name") String name,
    @Field("value") String value,
    @Field("_exclude_fields") String _exclude_fields
  );
  // Convenience overload: only the required path parameters; no body is sent.
  @DELETE("/3/NodePersistentStorage/{category}/{name}")
  Call<NodePersistentStorageV3> delete(
    @Path("category") String category,
    @Path("name") String name
  );
  /**
   * Store a value.
   * @param category Category name
   * @param name Key name
   * @param value Value
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/3/NodePersistentStorage/{category}")
  Call<NodePersistentStorageV3> put(
    @Path("category") String category,
    @Field("name") String name,
    @Field("value") String value,
    @Field("_exclude_fields") String _exclude_fields
  );
  // Convenience overload: only the required path parameter.
  @FormUrlEncoded
  @POST("/3/NodePersistentStorage/{category}")
  Call<NodePersistentStorageV3> put(@Path("category") String category);
  /**
   * Return all keys stored for a given category.
   * @param category Category name
   * @param name Key name
   * @param value Value
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/NodePersistentStorage/{category}")
  Call<NodePersistentStorageV3> list(
    @Path("category") String category,
    @Query("name") String name,
    @Query("value") String value,
    @Query("_exclude_fields") String _exclude_fields
  );
  // Convenience overload: only the required path parameter.
  @GET("/3/NodePersistentStorage/{category}")
  Call<NodePersistentStorageV3> list(@Path("category") String category);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Parse.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
// Retrofit proxy for the /3/Parse endpoint (auto-generated H2O REST binding).
public interface Parse {
/**
* Parse a raw byte-oriented Frame into a useful columnar data Frame.
* @param destination_frame Final frame name
* @param source_frames Source frames
* @param parse_type Parser type
* @param separator Field separator
* @param single_quotes Single Quotes
* @param check_header Check header: 0 means guess, +1 means 1st line is header not data, -1 means 1st line is data
* not header
* @param number_columns Number of columns
* @param column_names Column names
* @param column_types Value types for columns
* @param skipped_columns Skipped columns indices
* @param force_col_types If true, will force the column types to be either the ones in Parquet schema for Parquet
* files or the ones specified in column_types. This parameter is used for numerical columns
* only. Other column settings will happen without setting this parameter. Defaults to
* false.
* @param domains Domains for categorical columns
* @param na_strings NA strings for columns
* @param chunk_size Size of individual parse tasks
* @param delete_on_done Delete input key after parse
* @param blocking Block until the parse completes (as opposed to returning early and requiring polling
* @param decrypt_tool Key-reference to an initialized instance of a Decryption Tool
* @param custom_non_data_line_markers Custom characters to be treated as non-data line markers
* @param partition_by Name of the column the persisted dataset has been partitioned by.
* @param escapechar One ASCII character used to escape other characters.
* @param tz_adjust_to_local Adjust the imported time from GMT timezone to cluster timezone.
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
@FormUrlEncoded
@POST("/3/Parse")
Call<ParseV3> parse(
@Field("destination_frame") String destination_frame,
@Field("source_frames") String[] source_frames,
@Field("parse_type") ApiParseTypeValuesProvider parse_type,
@Field("separator") byte separator,
@Field("single_quotes") boolean single_quotes,
@Field("check_header") int check_header,
@Field("number_columns") int number_columns,
@Field("column_names") String[] column_names,
@Field("column_types") String[] column_types,
@Field("skipped_columns") int[] skipped_columns,
@Field("force_col_types") boolean force_col_types,
@Field("domains") String[][] domains,
@Field("na_strings") String[][] na_strings,
@Field("chunk_size") int chunk_size,
@Field("delete_on_done") boolean delete_on_done,
@Field("blocking") boolean blocking,
@Field("decrypt_tool") String decrypt_tool,
@Field("custom_non_data_line_markers") String custom_non_data_line_markers,
@Field("partition_by") String[] partition_by,
@Field("escapechar") byte escapechar,
@Field("tz_adjust_to_local") boolean tz_adjust_to_local,
@Field("_exclude_fields") String _exclude_fields
);
// Convenience overload: only the required frame fields; all other parse settings use server-side defaults.
@FormUrlEncoded
@POST("/3/Parse")
Call<ParseV3> parse(
@Field("destination_frame") String destination_frame,
@Field("source_frames") String[] source_frames
);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/ParseSVMLight.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
// Retrofit proxy for the /3/ParseSVMLight endpoint (auto-generated H2O REST binding).
public interface ParseSVMLight {
/**
* Parse a raw byte-oriented Frame into a useful columnar data Frame.
* @param destination_frame Final frame name
* @param source_frames Source frames
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
@FormUrlEncoded
@POST("/3/ParseSVMLight")
Call<JobV3> parseSVMLight(
@Field("destination_frame") String destination_frame,
@Field("source_frames") String[] source_frames,
@Field("_exclude_fields") String _exclude_fields
);
// Convenience overload omitting destination_frame — NOTE(review): unlike sibling bindings, the
// required-looking first parameter is dropped here; presumably the server assigns a default
// destination frame name — confirm against the endpoint handler.
@FormUrlEncoded
@POST("/3/ParseSVMLight")
Call<JobV3> parseSVMLight(@Field("source_frames") String[] source_frames);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/ParseSetup.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
// Retrofit proxy for the /3/ParseSetup endpoint (auto-generated H2O REST binding).
public interface ParseSetup {
/**
* Guess the parameters for parsing raw byte-oriented data into an H2O Frame.
* @param source_frames Source frames
* @param parse_type Parser type
* @param separator Field separator
* @param single_quotes Single quotes
* @param check_header Check header: 0 means guess, +1 means 1st line is header not data, -1 means 1st line is data
* not header
* @param column_names Column names
* @param skipped_columns Skipped columns indices
* @param column_types Value types for columns
* @param na_strings NA strings for columns
* @param column_name_filter Regex for names of columns to return
* @param column_offset Column offset to return
* @param column_count Number of columns to return
* @param total_filtered_column_count Total number of columns we would return with no column pagination
* @param custom_non_data_line_markers Custom characters to be treated as non-data line markers
* @param decrypt_tool Key-reference to an initialized instance of a Decryption Tool
* @param partition_by Names of the columns the persisted dataset has been partitioned by.
* @param escapechar One ASCII character used to escape other characters.
* @param force_col_types If true, will force the column types to be either the ones in Parquet schema for Parquet
* files or the ones specified in column_types. This parameter is used for numerical columns
* only. Other column settings will happen without setting this parameter. Defaults to
* false.
* @param tz_adjust_to_local Adjust the imported time from GMT timezone to cluster timezone.
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
@FormUrlEncoded
@POST("/3/ParseSetup")
Call<ParseSetupV3> guessSetup(
@Field("source_frames") String[] source_frames,
@Field("parse_type") ApiParseTypeValuesProvider parse_type,
@Field("separator") byte separator,
@Field("single_quotes") boolean single_quotes,
@Field("check_header") int check_header,
@Field("column_names") String[] column_names,
@Field("skipped_columns") int[] skipped_columns,
@Field("column_types") String[] column_types,
@Field("na_strings") String[][] na_strings,
@Field("column_name_filter") String column_name_filter,
@Field("column_offset") int column_offset,
@Field("column_count") int column_count,
@Field("total_filtered_column_count") int total_filtered_column_count,
@Field("custom_non_data_line_markers") String custom_non_data_line_markers,
@Field("decrypt_tool") String decrypt_tool,
@Field("partition_by") String[] partition_by,
@Field("escapechar") byte escapechar,
@Field("force_col_types") boolean force_col_types,
@Field("tz_adjust_to_local") boolean tz_adjust_to_local,
@Field("_exclude_fields") String _exclude_fields
);
// Convenience overload: only the required source frames; all other settings use server-side defaults.
@FormUrlEncoded
@POST("/3/ParseSetup")
Call<ParseSetupV3> guessSetup(@Field("source_frames") String[] source_frames);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/PartialDependence.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
// Retrofit proxy for the /3/PartialDependence endpoints (auto-generated H2O REST binding).
public interface PartialDependence {
/**
* Create data for partial dependence plot(s) for the specified model and frame.
* @param model_id Model
* @param frame_id Frame
* @param row_index Row Index
* @param cols Column(s)
* @param weight_column_index weight_column_index
* @param add_missing_na add_missing_na
* @param nbins Number of bins
* @param user_splits User define split points
* @param user_cols Column(s) of user defined splits
* @param num_user_splits Number of user defined splits per column
* @param col_pairs_2dpdp lists of column name pairs to plot 2D pdp for
* @param destination_key Key to store the destination
* @param targets Target classes for multinomial classification
*/
@FormUrlEncoded
@POST("/3/PartialDependence/")
Call<JobV3> makePartialDependence(
@Field("model_id") String model_id,
@Field("frame_id") String frame_id,
@Field("row_index") long row_index,
@Field("cols") String[] cols,
@Field("weight_column_index") int weight_column_index,
@Field("add_missing_na") boolean add_missing_na,
@Field("nbins") int nbins,
@Field("user_splits") double[] user_splits,
@Field("user_cols") String[] user_cols,
@Field("num_user_splits") int[] num_user_splits,
@Field("col_pairs_2dpdp") String[][] col_pairs_2dpdp,
@Field("destination_key") String destination_key,
@Field("targets") String[] targets
);
// Convenience overload: no fields; all settings use server-side defaults.
@FormUrlEncoded
@POST("/3/PartialDependence/")
Call<JobV3> makePartialDependence();
/**
* Fetch partial dependence data.
* @param name Name (string representation) for this Key.
* @param type Name (string representation) for the type of Keyed this Key points to.
* @param URL URL for the resource that this Key points to, if one exists.
*/
@GET("/3/PartialDependence/{name}")
Call<PartialDependenceV3> fetchPartialDependence(
@Path("name") String name,
@Query("type") String type,
@Query("URL") String URL
);
// Convenience overload: only the required key name.
@GET("/3/PartialDependence/{name}")
Call<PartialDependenceV3> fetchPartialDependence(@Path("name") String name);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/PersistS3.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/**
 * Retrofit proxy for the /3/PersistS3 credential endpoints (auto-generated H2O REST binding).
 *
 * FIX over the generated code: both {@code removeS3Credentials} overloads declared {@code @Field}
 * parameters on a plain {@code @DELETE} method without {@code @FormUrlEncoded}. Retrofit rejects
 * that combination at runtime ("@Field parameters can only be used with form encoding"), and
 * {@code @FormUrlEncoded} is only legal on HTTP methods that allow a request body. The fix uses
 * {@code @HTTP(method = "DELETE", hasBody = true)} + {@code @FormUrlEncoded}, which keeps the
 * credentials in a form-encoded body (never in the URL/query string, where they could be logged).
 */
public interface PersistS3 {
  /**
   * Set Amazon S3 credentials (Secret Key ID, Secret Access Key)
   * @param secret_key_id S3 Secret Key ID
   * @param secret_access_key S3 Secret Key
   * @param session_token S3 Session token
   */
  @FormUrlEncoded
  @POST("/3/PersistS3")
  Call<PersistS3CredentialsV3> setS3Credentials(
    @Field("secret_key_id") String secret_key_id,
    @Field("secret_access_key") String secret_access_key,
    @Field("session_token") String session_token
  );
  // Convenience overload: set credentials without a session token.
  @FormUrlEncoded
  @POST("/3/PersistS3")
  Call<PersistS3CredentialsV3> setS3Credentials(
    @Field("secret_key_id") String secret_key_id,
    @Field("secret_access_key") String secret_access_key
  );
  /**
   * Remove store Amazon S3 credentials
   * @param secret_key_id S3 Secret Key ID
   * @param secret_access_key S3 Secret Key
   * @param session_token S3 Session token
   */
  // NOTE(review): DELETE with a form-encoded body assumes the H2O server reads the body of
  // DELETE requests — confirm against the server-side handler.
  @FormUrlEncoded
  @HTTP(method = "DELETE", path = "/3/PersistS3", hasBody = true)
  Call<PersistS3CredentialsV3> removeS3Credentials(
    @Field("secret_key_id") String secret_key_id,
    @Field("secret_access_key") String secret_access_key,
    @Field("session_token") String session_token
  );
  // Convenience overload: remove credentials without a session token.
  @FormUrlEncoded
  @HTTP(method = "DELETE", path = "/3/PersistS3", hasBody = true)
  Call<PersistS3CredentialsV3> removeS3Credentials(
    @Field("secret_key_id") String secret_key_id,
    @Field("secret_access_key") String secret_access_key
  );
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Ping.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
// Retrofit proxy for the /3/Ping keep-alive endpoint (auto-generated H2O REST binding).
public interface Ping {
/**
* The endpoint used to let H2O know from external services that it should keep running.
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
@GET("/3/Ping")
Call<PingV3> ping(@Query("_exclude_fields") String _exclude_fields);
// Convenience overload: ping without excluding any fields from the response.
@GET("/3/Ping")
Call<PingV3> ping();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Predictions.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
public interface Predictions {
/**
* Score (generate predictions) for the specified Frame with the specified Model. Both the Frame of predictions and
* the metrics will be returned.
* @param model Key of Model of interest (optional)
* @param frame Key of Frame of interest (optional)
* @param predictions_frame Key of predictions frame, if predictions are requested (optional)
* @param deviances_frame Key for the frame containing per-observation deviances (optional)
* @param reconstruction_error Compute reconstruction error (optional, only for Deep Learning AutoEncoder models)
* @param reconstruction_error_per_feature Compute reconstruction error per feature (optional, only for Deep
* Learning AutoEncoder models)
* @param deep_features_hidden_layer Extract Deep Features for given hidden layer (optional, only for Deep Learning
* models)
* @param deep_features_hidden_layer_name Extract Deep Features for given hidden layer by name (optional, only for
* Deep Water models)
* @param reconstruct_train Reconstruct original training frame (optional, only for GLRM models)
* @param project_archetypes Project GLRM archetypes back into original feature space (optional, only for GLRM
* models)
* @param reverse_transform Reverse transformation applied during training to model output (optional, only for GLRM
* models)
* @param leaf_node_assignment Return the leaf node assignment (optional, only for DRF/GBM models)
* @param leaf_node_assignment_type Type of the leaf node assignment (optional, only for DRF/GBM models)
* @param predict_staged_proba Predict the class probabilities at each stage (optional, only for GBM models)
* @param predict_contributions Predict the feature contributions - Shapley values (optional, only for DRF, GBM and
* XGBoost models)
* @param row_to_tree_assignment Return which row is used in which tree (optional, only for GBM models)
* @param predict_contributions_output_format Specify how to output feature contributions in XGBoost - XGBoost by
* default outputs contributions for 1-hot encoded features, specifying a
* Compact output format will produce a per-feature contribution
* @param top_n Only for predict_contributions function - sort Shapley values and return top_n highest (optional)
* @param bottom_n Only for predict_contributions function - sort Shapley values and return bottom_n lowest
* (optional)
* @param compare_abs Only for predict_contributions function - sort absolute Shapley values (optional)
* @param feature_frequencies Retrieve the feature frequencies on paths in trees in tree-based models (optional,
* only for GBM, DRF and Isolation Forest)
* @param exemplar_index Retrieve all members for a given exemplar (optional, only for Aggregator models)
* @param deviances Compute the deviances per row (optional, only for classification or regression models)
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param auc_type Set default multinomial AUC type. Must be one of: "AUTO", "NONE", "MACRO_OVR", "WEIGHTED_OVR",
* "MACRO_OVO", "WEIGHTED_OVO". Default is "NONE" (optional, only for multinomial classification).
* @param auuc_type Set default AUUC type for uplift binomial classification. Must be one of: "AUTO", "qini",
* "lift", "gain". Default is "AUTO" (optional, only for uplift binomial classification).
* @param custom_auuc_thresholds Custom AUUC thresholds (for uplift binomial classification).
* @param auuc_nbins Set number of bins to calculate AUUC. Must be -1 or higher than 0. Default is -1 which means
* 1000 (optional, only for uplift binomial classification).
* @param background_frame Specify background frame used as a reference for calculating SHAP.
* @param output_space If true, transform contributions so that they sum up to the difference in the output space
* (applicable iff contributions are in link space). Note that this transformation is an
* approximation and the contributions won't be exact SHAP values.
* @param output_per_reference If true, return contributions against each background sample (aka reference), i.e.
* phi(feature, x, bg), otherwise return contributions averaged over the background
* sample (phi(feature, x) = E_{bg} phi(feature, x, bg))
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
@FormUrlEncoded
@POST("/3/Predictions/models/{model}/frames/{frame}")
Call<ModelMetricsListSchemaV3> predict(
@Path("model") String model,
@Path("frame") String frame,
@Field("predictions_frame") String predictions_frame,
@Field("deviances_frame") String deviances_frame,
@Field("reconstruction_error") boolean reconstruction_error,
@Field("reconstruction_error_per_feature") boolean reconstruction_error_per_feature,
@Field("deep_features_hidden_layer") int deep_features_hidden_layer,
@Field("deep_features_hidden_layer_name") String deep_features_hidden_layer_name,
@Field("reconstruct_train") boolean reconstruct_train,
@Field("project_archetypes") boolean project_archetypes,
@Field("reverse_transform") boolean reverse_transform,
@Field("leaf_node_assignment") boolean leaf_node_assignment,
@Field("leaf_node_assignment_type") ModelLeafNodeAssignmentLeafNodeAssignmentType leaf_node_assignment_type,
@Field("predict_staged_proba") boolean predict_staged_proba,
@Field("predict_contributions") boolean predict_contributions,
@Field("row_to_tree_assignment") boolean row_to_tree_assignment,
@Field("predict_contributions_output_format") ModelContributionsContributionsOutputFormat predict_contributions_output_format,
@Field("top_n") int top_n,
@Field("bottom_n") int bottom_n,
@Field("compare_abs") boolean compare_abs,
@Field("feature_frequencies") boolean feature_frequencies,
@Field("exemplar_index") int exemplar_index,
@Field("deviances") boolean deviances,
@Field("custom_metric_func") String custom_metric_func,
@Field("auc_type") String auc_type,
@Field("auuc_type") String auuc_type,
@Field("custom_auuc_thresholds") double[] custom_auuc_thresholds,
@Field("auuc_nbins") int auuc_nbins,
@Field("background_frame") String background_frame,
@Field("output_space") boolean output_space,
@Field("output_per_reference") boolean output_per_reference,
@Field("_exclude_fields") String _exclude_fields
);
  // Minimal overload of predict: only the model/frame path parameters are sent; every
  // optional form field from the full overload above is omitted.
  @FormUrlEncoded
  @POST("/3/Predictions/models/{model}/frames/{frame}")
  Call<ModelMetricsListSchemaV3> predict(
    @Path("model") String model,
    @Path("frame") String frame
  );
/**
* Score (generate predictions) for the specified Frame with the specified Model. Both the Frame of predictions and
* the metrics will be returned.
* @param model Key of Model of interest (optional)
* @param frame Key of Frame of interest (optional)
* @param predictions_frame Key of predictions frame, if predictions are requested (optional)
* @param deviances_frame Key for the frame containing per-observation deviances (optional)
* @param reconstruction_error Compute reconstruction error (optional, only for Deep Learning AutoEncoder models)
* @param reconstruction_error_per_feature Compute reconstruction error per feature (optional, only for Deep
* Learning AutoEncoder models)
* @param deep_features_hidden_layer Extract Deep Features for given hidden layer (optional, only for Deep Learning
* models)
* @param deep_features_hidden_layer_name Extract Deep Features for given hidden layer by name (optional, only for
* Deep Water models)
* @param reconstruct_train Reconstruct original training frame (optional, only for GLRM models)
* @param project_archetypes Project GLRM archetypes back into original feature space (optional, only for GLRM
* models)
* @param reverse_transform Reverse transformation applied during training to model output (optional, only for GLRM
* models)
* @param leaf_node_assignment Return the leaf node assignment (optional, only for DRF/GBM models)
* @param leaf_node_assignment_type Type of the leaf node assignment (optional, only for DRF/GBM models)
* @param predict_staged_proba Predict the class probabilities at each stage (optional, only for GBM models)
* @param predict_contributions Predict the feature contributions - Shapley values (optional, only for DRF, GBM and
* XGBoost models)
* @param row_to_tree_assignment Return which row is used in which tree (optional, only for GBM models)
* @param predict_contributions_output_format Specify how to output feature contributions in XGBoost - XGBoost by
* default outputs contributions for 1-hot encoded features, specifying a
* Compact output format will produce a per-feature contribution
* @param top_n Only for predict_contributions function - sort Shapley values and return top_n highest (optional)
* @param bottom_n Only for predict_contributions function - sort Shapley values and return bottom_n lowest
* (optional)
* @param compare_abs Only for predict_contributions function - sort absolute Shapley values (optional)
* @param feature_frequencies Retrieve the feature frequencies on paths in trees in tree-based models (optional,
* only for GBM, DRF and Isolation Forest)
* @param exemplar_index Retrieve all members for a given exemplar (optional, only for Aggregator models)
* @param deviances Compute the deviances per row (optional, only for classification or regression models)
* @param custom_metric_func Reference to custom evaluation function, format: `language:keyName=funcName`
* @param auc_type Set default multinomial AUC type. Must be one of: "AUTO", "NONE", "MACRO_OVR", "WEIGHTED_OVR",
* "MACRO_OVO", "WEIGHTED_OVO". Default is "NONE" (optional, only for multinomial classification).
* @param auuc_type Set default AUUC type for uplift binomial classification. Must be one of: "AUTO", "qini",
* "lift", "gain". Default is "AUTO" (optional, only for uplift binomial classification).
* @param custom_auuc_thresholds Custom AUUC thresholds (for uplift binomial classification).
* @param auuc_nbins Set number of bins to calculate AUUC. Must be -1 or higher than 0. Default is -1 which means
* 1000 (optional, only for uplift binomial classification).
* @param background_frame Specify background frame used as a reference for calculating SHAP.
* @param output_space If true, transform contributions so that they sum up to the difference in the output space
* (applicable iff contributions are in link space). Note that this transformation is an
* approximation and the contributions won't be exact SHAP values.
* @param output_per_reference If true, return contributions against each background sample (aka reference), i.e.
* phi(feature, x, bg), otherwise return contributions averaged over the background
* sample (phi(feature, x) = E_{bg} phi(feature, x, bg))
* @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
* "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
*/
  // v4 asynchronous scoring: same form fields as predict, but returns a JobV3 job handle
  // (see the javadoc above for the meaning of each field).
  @FormUrlEncoded
  @POST("/4/Predictions/models/{model}/frames/{frame}")
  Call<JobV3> predictAsync(
    @Path("model") String model,
    @Path("frame") String frame,
    @Field("predictions_frame") String predictions_frame,
    @Field("deviances_frame") String deviances_frame,
    @Field("reconstruction_error") boolean reconstruction_error,
    @Field("reconstruction_error_per_feature") boolean reconstruction_error_per_feature,
    @Field("deep_features_hidden_layer") int deep_features_hidden_layer,
    @Field("deep_features_hidden_layer_name") String deep_features_hidden_layer_name,
    @Field("reconstruct_train") boolean reconstruct_train,
    @Field("project_archetypes") boolean project_archetypes,
    @Field("reverse_transform") boolean reverse_transform,
    @Field("leaf_node_assignment") boolean leaf_node_assignment,
    @Field("leaf_node_assignment_type") ModelLeafNodeAssignmentLeafNodeAssignmentType leaf_node_assignment_type,
    @Field("predict_staged_proba") boolean predict_staged_proba,
    @Field("predict_contributions") boolean predict_contributions,
    @Field("row_to_tree_assignment") boolean row_to_tree_assignment,
    @Field("predict_contributions_output_format") ModelContributionsContributionsOutputFormat predict_contributions_output_format,
    @Field("top_n") int top_n,
    @Field("bottom_n") int bottom_n,
    @Field("compare_abs") boolean compare_abs,
    @Field("feature_frequencies") boolean feature_frequencies,
    @Field("exemplar_index") int exemplar_index,
    @Field("deviances") boolean deviances,
    @Field("custom_metric_func") String custom_metric_func,
    @Field("auc_type") String auc_type,
    @Field("auuc_type") String auuc_type,
    @Field("custom_auuc_thresholds") double[] custom_auuc_thresholds,
    @Field("auuc_nbins") int auuc_nbins,
    @Field("background_frame") String background_frame,
    @Field("output_space") boolean output_space,
    @Field("output_per_reference") boolean output_per_reference,
    @Field("_exclude_fields") String _exclude_fields
  );
  // Minimal overload of predictAsync: only the model/frame path parameters are sent;
  // every optional form field from the full overload above is omitted.
  @FormUrlEncoded
  @POST("/4/Predictions/models/{model}/frames/{frame}")
  Call<JobV3> predictAsync(
    @Path("model") String model,
    @Path("frame") String frame
  );
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Profiler.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/Profiler} REST endpoint. */
public interface Profiler {

  /**
   * Report real-time profiling information for all nodes (sorted, aggregated stack traces).
   * @param depth Stack trace depth
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/Profiler")
  Call<ProfilerV3> fetch(
    @Query("depth") int depth,
    @Query("_exclude_fields") String _exclude_fields
  );
  // Overload omitting the optional _exclude_fields parameter.
  @GET("/3/Profiler")
  Call<ProfilerV3> fetch(@Query("depth") int depth);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Rapids.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /99/Rapids} REST endpoints. */
public interface Rapids {

  /**
   * Execute a Rapids AstRoot expression.
   * @param ast A Rapids AstRoot expression
   * @param session_id Session key
   * @param id [DEPRECATED] Key name to assign Frame results
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/99/Rapids")
  Call<RapidsSchemaV3> rapidsExec(
    @Field("ast") String ast,
    @Field("session_id") String session_id,
    @Field("id") String id,
    @Field("_exclude_fields") String _exclude_fields
  );
  // Overload sending only the expression; optional fields omitted.
  @FormUrlEncoded
  @POST("/99/Rapids")
  Call<RapidsSchemaV3> rapidsExec(@Field("ast") String ast);
  /**
   * Produce help for Rapids AstRoot language.
   */
  @GET("/99/Rapids/help")
  Call<RapidsHelpV3> genHelp();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Recovery.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/Recovery/resume} REST endpoint. */
public interface Recovery {

  /**
   * Recover stored state and resume interrupted job.
   * @param recovery_dir Full path to the directory with recovery data
   */
  @FormUrlEncoded
  @POST("/3/Recovery/resume")
  Call<ResumeV3> resume(@Field("recovery_dir") String recovery_dir);
  // Overload with the recovery_dir field omitted.
  @FormUrlEncoded
  @POST("/3/Recovery/resume")
  Call<ResumeV3> resume();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Sample.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the experimental H2O {@code /99/Sample} REST endpoint. */
public interface Sample {

  /**
   * Example of an experimental endpoint. Call via /EXPERIMENTAL/Sample. Experimental endpoints can change at any
   * moment.
   * @param skip_ticks whether to skip ticks
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/99/Sample")
  Call<CloudV3> status(
    @Query("skip_ticks") boolean skip_ticks,
    @Query("_exclude_fields") String _exclude_fields
  );
  // Overload with all query parameters omitted.
  @GET("/99/Sample")
  Call<CloudV3> status();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/SaveToHiveTable.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/SaveToHiveTable} REST endpoint. */
public interface SaveToHiveTable {

  /**
   * Save an H2O Frame contents into a Hive table.
   * @param frame_id H2O Frame ID
   * @param jdbc_url HIVE JDBC URL
   * @param table_name Name of table to save data to.
   * @param table_path HDFS Path to where the table should be stored.
   * @param format Storage format of the created table.
   * @param tmp_path HDFS Path where to store temporary data.
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/3/SaveToHiveTable")
  Call<SaveToHiveTableV3> saveToHiveTable(
    @Field("frame_id") String frame_id,
    @Field("jdbc_url") String jdbc_url,
    @Field("table_name") String table_name,
    @Field("table_path") String table_path,
    @Field("format") ApiSaveToHiveTableHandlerHiveFrameSaverFormat format,
    @Field("tmp_path") String tmp_path,
    @Field("_exclude_fields") String _exclude_fields
  );
  // Overload sending only the required identifiers; optional fields omitted.
  @FormUrlEncoded
  @POST("/3/SaveToHiveTable")
  Call<SaveToHiveTableV3> saveToHiveTable(
    @Field("frame_id") String frame_id,
    @Field("jdbc_url") String jdbc_url,
    @Field("table_name") String table_name
  );
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/SessionProperties.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/SessionProperties} REST endpoints. */
public interface SessionProperties {

  /**
   * Set session property.
   * @param session_key Session ID
   * @param key Property Key
   * @param value Property Value
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/3/SessionProperties")
  Call<SessionPropertyV3> setSessionProperty(
    @Field("session_key") String session_key,
    @Field("key") String key,
    @Field("value") String value,
    @Field("_exclude_fields") String _exclude_fields
  );
  // Overload with every form field omitted.
  @FormUrlEncoded
  @POST("/3/SessionProperties")
  Call<SessionPropertyV3> setSessionProperty();
  /**
   * Get session property.
   * @param session_key Session ID
   * @param key Property Key
   * @param value Property Value
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/SessionProperties")
  Call<SessionPropertyV3> getSessionProperty(
    @Query("session_key") String session_key,
    @Query("key") String key,
    @Query("value") String value,
    @Query("_exclude_fields") String _exclude_fields
  );
  // Overload with every query parameter omitted.
  @GET("/3/SessionProperties")
  Call<SessionPropertyV3> getSessionProperty();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Sessions.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /4/sessions} REST endpoints. */
public interface Sessions {

  /**
   * Start a new Rapids session, and return the session id.
   * @param _fields Filter on the set of output fields: if you set _fields="foo,bar,baz", then only those fields will
   *        be included in the output; or you can specify _fields="-goo,gee" to include all fields except goo
   *        and gee. If the result contains nested data structures, then you can refer to the fields within
   *        those structures as well. For example if you specify _fields="foo(oof),bar(-rab)", then only
   *        fields foo and bar will be included, and within foo there will be only field oof, whereas within
   *        bar all fields except rab will be reported.
   */
  @FormUrlEncoded
  @POST("/4/sessions")
  Call<SessionIdV4> newSession4(@Field("_fields") String _fields);
  // Overload with the optional _fields filter omitted.
  @FormUrlEncoded
  @POST("/4/sessions")
  Call<SessionIdV4> newSession4();
  /**
   * Close the Rapids session.
   * @param session_key Session ID
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  // FIX: this method originally declared @Field("_exclude_fields") on a @DELETE request.
  // Retrofit 2 rejects @Field parameters on methods without @FormUrlEncoded, and a DELETE
  // request carries no form body, so the call failed at service-method creation. The filter
  // is now sent as a query parameter with the same wire name; callers are unaffected.
  @DELETE("/4/sessions/{session_key}")
  Call<InitIDV3> endSession(
    @Path("session_key") String session_key,
    @Query("_exclude_fields") String _exclude_fields
  );
  @DELETE("/4/sessions/{session_key}")
  Call<InitIDV3> endSession(@Path("session_key") String session_key);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Shutdown.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/Shutdown} REST endpoint. */
public interface Shutdown {

  /**
   * Shut down the cluster.
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/3/Shutdown")
  Call<ShutdownV3> shutdown(@Field("_exclude_fields") String _exclude_fields);
  // Overload with the optional _exclude_fields parameter omitted.
  @FormUrlEncoded
  @POST("/3/Shutdown")
  Call<ShutdownV3> shutdown();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/SignificantRules.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/SignificantRules} REST endpoint. */
public interface SignificantRules {

  /**
   * Fetch significant rules table.
   * @param model_id Model id of interest
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/3/SignificantRules")
  Call<SignificantRulesV3> makeSignificantRulesTable(
    @Field("model_id") String model_id,
    @Field("_exclude_fields") String _exclude_fields
  );
  // Overload with every form field omitted.
  @FormUrlEncoded
  @POST("/3/SignificantRules")
  Call<SignificantRulesV3> makeSignificantRulesTable();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/SplitFrame.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/SplitFrame} REST endpoint. */
public interface SplitFrame {

  /**
   * Split an H2O Frame.
   * @param key Job Key
   * @param dataset Dataset
   * @param ratios Split ratios - resulting number of split is ratios.length+1
   * @param destination_frames Destination keys for each output frame split.
   */
  @FormUrlEncoded
  @POST("/3/SplitFrame")
  Call<SplitFrameV3> run(
    @Field("key") String key,
    @Field("dataset") String dataset,
    @Field("ratios") double[] ratios,
    @Field("destination_frames") String[] destination_frames
  );
  // Overload with every form field omitted.
  @FormUrlEncoded
  @POST("/3/SplitFrame")
  Call<SplitFrameV3> run();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/SteamMetrics.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/SteamMetrics} REST endpoint. */
public interface SteamMetrics {

  /**
   * Get metrics for Steam from H2O.
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/SteamMetrics")
  Call<SteamMetricsV3> fetch(@Query("_exclude_fields") String _exclude_fields);
  // Overload with the optional _exclude_fields parameter omitted.
  @GET("/3/SteamMetrics")
  Call<SteamMetricsV3> fetch();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Tabulate.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /99/Tabulate} REST endpoint. */
public interface Tabulate {

  /**
   * Tabulate one column vs another.
   * @param dataset Dataset
   * @param predictor Predictor
   * @param response Response
   * @param weight Observation weights (optional)
   * @param nbins_predictor Number of bins for predictor column
   * @param nbins_response Number of bins for response column
   */
  @FormUrlEncoded
  @POST("/99/Tabulate")
  Call<TabulateV3> run(
    @Field("dataset") String dataset,
    @Field("predictor") String predictor,
    @Field("response") String response,
    @Field("weight") String weight,
    @Field("nbins_predictor") int nbins_predictor,
    @Field("nbins_response") int nbins_response
  );
  // Overload sending only the required columns; optional fields omitted.
  @FormUrlEncoded
  @POST("/99/Tabulate")
  Call<TabulateV3> run(
    @Field("dataset") String dataset,
    @Field("predictor") String predictor,
    @Field("response") String response
  );
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/TargetEncoderTransform.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/TargetEncoderTransform} REST endpoint. */
public interface TargetEncoderTransform {

  /**
   * Transform using a given TargetEncoderModel
   * @param model Target Encoder model to use.
   * @param frame Frame to transform.
   * @param as_training Force encoding mode for training data: when using a leakage handling strategy different from
   *        None, training data should be transformed with this flag set to true (Defaults to false).
   * @param blending Enables or disables blending. Defaults to the value assigned at model creation.
   * @param inflection_point Inflection point. Defaults to the value assigned at model creation.
   * @param smoothing Smoothing. Defaults to the value assigned at model creation.
   * @param noise Noise. Defaults to the value assigned at model creation.
   */
  @GET("/3/TargetEncoderTransform")
  Call<FrameKeyV3> transform(
    @Query("model") String model,
    @Query("frame") String frame,
    @Query("as_training") boolean as_training,
    @Query("blending") boolean blending,
    @Query("inflection_point") double inflection_point,
    @Query("smoothing") double smoothing,
    @Query("noise") double noise
  );
  // Overload with every query parameter omitted.
  @GET("/3/TargetEncoderTransform")
  Call<FrameKeyV3> transform();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Timeline.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/Timeline} REST endpoint. */
public interface Timeline {

  /**
   * Debugging tool that provides information on current communication between nodes.
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/Timeline")
  Call<TimelineV3> fetch(@Query("_exclude_fields") String _exclude_fields);
  // Overload with the optional _exclude_fields parameter omitted.
  @GET("/3/Timeline")
  Call<TimelineV3> fetch();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Tree.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/Tree} REST endpoint. */
public interface Tree {

  /**
   * Obtain a traversable representation of a specific tree
   * @param model Key of the model the desired tree belongs to
   * @param tree_number Index of the tree in the model.
   * @param tree_class Name of the class of the tree. Ignored for regression and binomial.
   * @param plain_language_rules Whether to generate plain language rules.
   */
  @GET("/3/Tree")
  Call<TreeV3> getTree(
    @Query("model") String model,
    @Query("tree_number") int tree_number,
    @Query("tree_class") String tree_class,
    @Query("plain_language_rules") TreeHandlerPlainLanguageRules plain_language_rules
  );
  // Overload omitting the optional tree_class and plain_language_rules parameters.
  @GET("/3/Tree")
  Call<TreeV3> getTree(
    @Query("model") String model,
    @Query("tree_number") int tree_number
  );
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Typeahead.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/Typeahead/files} REST endpoint. */
public interface Typeahead {

  /**
   * Typeahead handler for filename completion.
   * @param src training_frame
   * @param limit limit
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/Typeahead/files")
  Call<TypeaheadV3> files(
    @Query("src") String src,
    @Query("limit") int limit,
    @Query("_exclude_fields") String _exclude_fields
  );
  // Overload omitting the optional limit and _exclude_fields parameters.
  @GET("/3/Typeahead/files")
  Call<TypeaheadV3> files(@Query("src") String src);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/UnlockKeys.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/UnlockKeys} REST endpoint. */
public interface UnlockKeys {

  /**
   * Unlock all keys in the H2O distributed K/V store, to attempt to recover from a crash.
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @FormUrlEncoded
  @POST("/3/UnlockKeys")
  Call<UnlockKeysV3> unlock(@Field("_exclude_fields") String _exclude_fields);
  // Overload with the optional _exclude_fields parameter omitted.
  @FormUrlEncoded
  @POST("/3/UnlockKeys")
  Call<UnlockKeysV3> unlock();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/WaterMeterCpuTicks.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/WaterMeterCpuTicks} REST endpoint. */
public interface WaterMeterCpuTicks {

  /**
   * Return a CPU usage snapshot of all cores of all nodes in the H2O cluster.
   * @param nodeidx Index of node to query ticks for (0-based)
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/WaterMeterCpuTicks/{nodeidx}")
  Call<WaterMeterCpuTicksV3> fetch(
    @Path("nodeidx") int nodeidx,
    @Query("_exclude_fields") String _exclude_fields
  );
  // Overload with the optional _exclude_fields parameter omitted.
  @GET("/3/WaterMeterCpuTicks/{nodeidx}")
  Call<WaterMeterCpuTicksV3> fetch(@Path("nodeidx") int nodeidx);
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/WaterMeterIo.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/WaterMeterIo} REST endpoints. */
public interface WaterMeterIo {

  /**
   * Return IO usage snapshot of all nodes in the H2O cluster.
   * @param nodeidx Index of node to query ticks for (0-based)
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/WaterMeterIo/{nodeidx}")
  Call<WaterMeterIoV3> fetch(
    @Path("nodeidx") int nodeidx,
    @Query("_exclude_fields") String _exclude_fields
  );
  // Overload with the optional _exclude_fields parameter omitted.
  @GET("/3/WaterMeterIo/{nodeidx}")
  Call<WaterMeterIoV3> fetch(@Path("nodeidx") int nodeidx);
  /**
   * Return IO usage snapshot of all nodes in the H2O cluster.
   * @param nodeidx Index of node to query ticks for (0-based)
   * @param _exclude_fields Comma-separated list of JSON field paths to exclude from the result, used like:
   *        "/3/Frames?_exclude_fields=frames/frame_id/URL,__meta"
   */
  @GET("/3/WaterMeterIo")
  Call<WaterMeterIoV3> fetch_all(
    @Query("nodeidx") int nodeidx,
    @Query("_exclude_fields") String _exclude_fields
  );
  // Overload with every query parameter omitted.
  @GET("/3/WaterMeterIo")
  Call<WaterMeterIoV3> fetch_all();
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Word2VecSynonyms.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/Word2VecSynonyms} REST endpoint. */
public interface Word2VecSynonyms {

  /**
   * Find synonyms using a word2vec model
   * @param model Source word2vec Model
   * @param word Target word to find synonyms for
   * @param count Number of synonyms
   * @param synonyms Synonymous words
   * @param scores Similarity scores
   */
  @GET("/3/Word2VecSynonyms")
  Call<Word2VecSynonymsV3> findSynonyms(
    @Query("model") String model,
    @Query("word") String word,
    @Query("count") int count,
    @Query("synonyms") String[] synonyms,
    @Query("scores") double[] scores
  );
  // Overload omitting the synonyms/scores output parameters.
  @GET("/3/Word2VecSynonyms")
  Call<Word2VecSynonymsV3> findSynonyms(
    @Query("model") String model,
    @Query("word") String word,
    @Query("count") int count
  );
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/Word2VecTransform.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/** Auto-generated Retrofit proxy for the H2O {@code /3/Word2VecTransform} REST endpoint. */
public interface Word2VecTransform {

  /**
   * Transform words to vectors using a word2vec model
   * @param model Source word2vec Model
   * @param words_frame Words Frame
   * @param aggregate_method Method of aggregating word-vector sequences into a single vector
   */
  @GET("/3/Word2VecTransform")
  Call<Word2VecTransformV3> transform(
    @Query("model") String model,
    @Query("words_frame") String words_frame,
    @Query("aggregate_method") Word2VecModelAggregateMethod aggregate_method
  );
  // Overload omitting the optional aggregate_method parameter.
  @GET("/3/Word2VecTransform")
  Call<Word2VecTransformV3> transform(
    @Query("model") String model,
    @Query("words_frame") String words_frame
  );
}
|
0
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies
|
java-sources/ai/h2o/h2o-bindings/3.46.0.7/water/bindings/proxies/retrofit/XGBoostExecutor.java
|
/*
* This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py
* Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
*/
package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import retrofit2.*;
import retrofit2.http.*;
/**
 * Auto-generated Retrofit proxy for the H2O remote-XGBoost execution endpoints
 * ({@code /3/XGBoostExecutor.*}). Each operation has a no-argument overload that
 * sends no form fields.
 */
public interface XGBoostExecutor {

  /**
   * Remote XGBoost execution - init
   * @param key Identifier
   * @param data Arbitrary request data stored as Base64 encoded binary
   */
  @FormUrlEncoded
  @POST("/3/XGBoostExecutor.init")
  Call<XGBoostExecRespV3> init(
    @Field("key") String key,
    @Field("data") String data
  );
  @FormUrlEncoded
  @POST("/3/XGBoostExecutor.init")
  Call<XGBoostExecRespV3> init();
  /**
   * Remote XGBoost execution - setup
   * @param key Identifier
   * @param data Arbitrary request data stored as Base64 encoded binary
   */
  @FormUrlEncoded
  @POST("/3/XGBoostExecutor.setup")
  Call<StreamingSchema> setup(
    @Field("key") String key,
    @Field("data") String data
  );
  @FormUrlEncoded
  @POST("/3/XGBoostExecutor.setup")
  Call<StreamingSchema> setup();
  /**
   * Remote XGBoost execution - update
   * @param key Identifier
   * @param data Arbitrary request data stored as Base64 encoded binary
   */
  @FormUrlEncoded
  @POST("/3/XGBoostExecutor.update")
  Call<XGBoostExecRespV3> update(
    @Field("key") String key,
    @Field("data") String data
  );
  @FormUrlEncoded
  @POST("/3/XGBoostExecutor.update")
  Call<XGBoostExecRespV3> update();
  /**
   * Remote XGBoost execution - getEvalMetric
   * @param key Identifier
   * @param data Arbitrary request data stored as Base64 encoded binary
   */
  @FormUrlEncoded
  @POST("/3/XGBoostExecutor.getEvalMetric")
  Call<XGBoostExecRespV3> getEvalMetric(
    @Field("key") String key,
    @Field("data") String data
  );
  @FormUrlEncoded
  @POST("/3/XGBoostExecutor.getEvalMetric")
  Call<XGBoostExecRespV3> getEvalMetric();
  /**
   * Remote XGBoost execution - get booster
   * @param key Identifier
   * @param data Arbitrary request data stored as Base64 encoded binary
   */
  @FormUrlEncoded
  @POST("/3/XGBoostExecutor.getBooster")
  Call<StreamingSchema> getBooster(
    @Field("key") String key,
    @Field("data") String data
  );
  @FormUrlEncoded
  @POST("/3/XGBoostExecutor.getBooster")
  Call<StreamingSchema> getBooster();
  /**
   * Remote XGBoost execution - cleanup
   * @param key Identifier
   * @param data Arbitrary request data stored as Base64 encoded binary
   */
  @FormUrlEncoded
  @POST("/3/XGBoostExecutor.cleanup")
  Call<XGBoostExecRespV3> cleanup(
    @Field("key") String key,
    @Field("data") String data
  );
  @FormUrlEncoded
  @POST("/3/XGBoostExecutor.cleanup")
  Call<XGBoostExecRespV3> cleanup();
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/ConfusionMatrix.java
|
package hex;
import static water.api.DocGen.FieldDoc;
import static water.util.Utils.printConfusionMatrix;
import dontweave.gson.JsonArray;
import dontweave.gson.JsonPrimitive;
import water.Iced;
import water.api.Request.API;
import java.util.Arrays;
/**
 * An n x n classification confusion matrix, indexed as [actual][predicted],
 * with cached per-class / overall error statistics and the usual
 * binary-classification metrics (precision, recall, specificity, F-scores,
 * MCC).  Binary metrics treat class 0 as negative and class 1 as positive.
 */
public class ConfusionMatrix extends Iced {
  static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
  static public FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.

  @API(help="Confusion matrix (Actual/Predicted)")
  public long[][] _arr; // [actual][predicted]
  @API(help = "Prediction error by class")
  public final double[] _classErr;
  @API(help = "Prediction error")
  public double _predErr;

  /**
   * Deep copy of this matrix.
   * FIX: the copy is now constructed with the correct dimension.  The previous
   * implementation used {@code new ConfusionMatrix(0)}, which left the clone's
   * final {@code _classErr} as a zero-length array (so a later
   * {@link #reComputeErrors()} on the clone threw
   * {@code ArrayIndexOutOfBoundsException}) and left {@code _predErr} stale.
   */
  @Override public ConfusionMatrix clone() {
    ConfusionMatrix res = new ConfusionMatrix(_arr.length);
    for( int i = 0; i < _arr.length; ++i )
      res._arr[i] = _arr[i].clone();
    res.reComputeErrors(); // refresh _classErr/_predErr from the copied counts
    return res;
  }

  /** Strategies for collapsing per-class errors into a single scalar. */
  public enum ErrMetric {
    MAXC, SUMC, TOTAL;

    /** @return the chosen aggregate of {@code cm}'s classification error */
    public double computeErr(ConfusionMatrix cm) {
      double[] cerr = cm.classErr();
      double res = 0;
      switch( this ) {
        case MAXC:   // worst per-class error
          res = cerr[0];
          for( double d : cerr )
            if( d > res )
              res = d;
          break;
        case SUMC:   // sum of per-class errors
          for( double d : cerr )
            res += d;
          break;
        case TOTAL:  // overall error rate
          res = cm.err();
          break;
        default:
          throw new RuntimeException("unexpected err metric " + this);
      }
      return res;
    }
  }

  /** Creates an empty (all-zero) n x n matrix. */
  public ConfusionMatrix(int n) {
    _arr = new long[n][n];
    _classErr = classErr();
    _predErr = err();
  }

  /** Wraps (does not copy) the given counts. */
  public ConfusionMatrix(long[][] value) {
    _arr = value;
    _classErr = classErr();
    _predErr = err();
  }

  /** Copies the top-left {@code dim} x {@code dim} corner of {@code value}. */
  public ConfusionMatrix(long[][] value, int dim) {
    _arr = new long[dim][dim];
    for (int i = 0; i < dim; ++i)
      System.arraycopy(value[i], 0, _arr[i], 0, dim);
    _classErr = classErr();
    _predErr = err();
  }

  /** Records one observation with actual class {@code i} predicted as class {@code j}. */
  public void add(int i, int j) {
    _arr[i][j]++;
  }

  /** @return per-class error rates, one entry per actual class */
  public double[] classErr() {
    double[] res = new double[_arr.length];
    for( int i = 0; i < res.length; ++i )
      res[i] = classErr(i);
    return res;
  }

  /** @return number of classes */
  public final int size() {
    return _arr.length;
  }

  /** Refreshes the cached {@code _classErr}/{@code _predErr} fields from the counts. */
  public void reComputeErrors(){
    for(int i = 0; i < _arr.length; ++i)
      _classErr[i] = classErr(i);
    _predErr = err();
  }

  /** @return number of misclassified rows whose actual class is {@code c} */
  public final long classErrCount(int c) {
    long s = 0;
    for( long x : _arr[c] )
      s += x;
    return s - _arr[c][c];
  }

  /** @return error rate for actual class {@code c}; 0 when the class has no rows */
  public final double classErr(int c) {
    long s = 0;
    for( long x : _arr[c] )
      s += x;
    if( s == 0 )
      return 0.0; // Either 0 or NaN, but 0 is nicer
    return (double) (s - _arr[c][c]) / s;
  }

  /** @return total number of observations in the matrix */
  public long totalRows() {
    long n = 0;
    for( int a = 0; a < _arr.length; ++a )
      for( int p = 0; p < _arr[a].length; ++p )
        n += _arr[a][p];
    return n;
  }

  /** Element-wise accumulation of another matrix (same dimensions assumed). */
  public void add(ConfusionMatrix other) {
    water.util.Utils.add(_arr, other._arr);
  }

  /**
   * @return overall classification error (NaN when the matrix is empty)
   */
  public double err() {
    long n = totalRows();
    long err = n;
    for( int d = 0; d < _arr.length; ++d )
      err -= _arr[d][d];
    return (double) err / n;
  }

  /** @return total number of misclassified observations */
  public long errCount() {
    long n = totalRows();
    long err = n;
    for( int d = 0; d < _arr.length; ++d )
      err -= _arr[d][d];
    return err;
  }

  /**
   * The percentage of predictions that are correct.
   */
  public double accuracy() { return 1-err(); }

  /**
   * The percentage of negative labeled instances that were predicted as negative.
   * @return TNR / Specificity
   */
  public double specificity() {
    if(!isBinary())throw new UnsupportedOperationException("specificity is only implemented for 2 class problems.");
    double tn = _arr[0][0];
    double fp = _arr[0][1];
    return tn / (tn + fp);
  }

  /**
   * The percentage of positive labeled instances that were predicted as positive.
   * @return Recall / TPR / Sensitivity
   */
  public double recall() {
    if(!isBinary())throw new UnsupportedOperationException("recall is only implemented for 2 class problems.");
    double tp = _arr[1][1];
    double fn = _arr[1][0];
    return tp / (tp + fn);
  }

  /**
   * The percentage of positive predictions that are correct.
   * @return Precision
   */
  public double precision() {
    if(!isBinary())throw new UnsupportedOperationException("precision is only implemented for 2 class problems.");
    double tp = _arr[1][1];
    double fp = _arr[0][1];
    return tp / (tp + fp);
  }

  /**
   * The Matthews Correlation Coefficient, takes true negatives into account in contrast to F-Score
   * See <a href="http://en.wikipedia.org/wiki/Matthews_correlation_coefficient">MCC</a>
   * MCC = Correlation between observed and predicted binary classification
   * @return mcc ranges from -1 (total disagreement) ... 0 (no better than random) ... 1 (perfect)
   */
  public double mcc() {
    // FIX: the exception message previously said "precision", copied from precision().
    if(!isBinary())throw new UnsupportedOperationException("mcc is only implemented for 2 class problems.");
    double tn = _arr[0][0];
    double fp = _arr[0][1];
    double tp = _arr[1][1];
    double fn = _arr[1][0];
    double mcc = (tp*tn - fp*fn)/Math.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn));
    return mcc;
  }

  /**
   * The maximum per-class error
   * @return max(classErr(i))
   */
  public double max_per_class_error() {
    int n = nclasses();
    if(n == 0)throw new UnsupportedOperationException("max per class error is only defined for classification problems");
    double res = classErr(0);
    for(int i = 1; i < n; ++i)
      res = Math.max(res,classErr(i));
    return res;
  }

  /** @return number of classes, 0 when no matrix is attached */
  public final int nclasses(){return _arr == null?0:_arr.length;}

  /** @return true for a 2-class (binomial) matrix */
  public final boolean isBinary(){return nclasses() == 2;}

  /**
   * Returns the F-measure which combines precision and recall in a balanced way. <br>
   * See <a href="http://en.wikipedia.org/wiki/Precision_and_recall.">Precision_and_recall</a>
   */
  public double F1() {
    final double precision = precision();
    final double recall = recall();
    return 2. * (precision * recall) / (precision + recall);
  }

  /**
   * Returns the F-measure which combines precision and recall and weights recall higher than precision. <br>
   * See <a href="http://en.wikipedia.org/wiki/F1_score.">F1_score</a>
   */
  public double F2() {
    final double precision = precision();
    final double recall = recall();
    return 5. * (precision * recall) / (4. * precision + recall);
  }

  /**
   * Returns the F-measure which combines precision and recall and weights precision higher than recall. <br>
   * See <a href="http://en.wikipedia.org/wiki/F1_score.">F1_score</a>
   */
  public double F0point5() {
    final double precision = precision();
    final double recall = recall();
    return 1.25 * (precision * recall) / (.25 * precision + recall);
  }

  /** One bracketed row of counts per line. */
  @Override public String toString() {
    StringBuilder sb = new StringBuilder();
    for( long[] r : _arr )
      sb.append(Arrays.toString(r)).append('\n');
    return sb.toString();
  }

  /**
   * JSON table: header row, one row per actual class (counts + row error),
   * then a totals row with per-predicted-class column sums and overall error.
   */
  public JsonArray toJson() {
    JsonArray res = new JsonArray();
    JsonArray header = new JsonArray();
    header.add(new JsonPrimitive("Actual / Predicted"));
    for( int i = 0; i < _arr.length; ++i )
      header.add(new JsonPrimitive("class " + i));
    header.add(new JsonPrimitive("Error"));
    res.add(header);
    for( int i = 0; i < _arr.length; ++i ) {
      JsonArray row = new JsonArray();
      row.add(new JsonPrimitive("class " + i));
      long s = 0;
      for( int j = 0; j < _arr.length; ++j ) {
        s += _arr[i][j];
        row.add(new JsonPrimitive(_arr[i][j]));
      }
      double err = s - _arr[i][i];
      err /= s;
      row.add(new JsonPrimitive(err));
      res.add(row);
    }
    JsonArray totals = new JsonArray();
    totals.add(new JsonPrimitive("Totals"));
    long S = 0;  // grand total
    long DS = 0; // diagonal (correct) total
    for( int i = 0; i < _arr.length; ++i ) {
      long s = 0;
      for( int j = 0; j < _arr.length; ++j )
        s += _arr[j][i]; // column sum: rows predicted as class i
      totals.add(new JsonPrimitive(s));
      S += s;
      DS += _arr[i][i];
    }
    double err = (S - DS) / (double) S;
    totals.add(new JsonPrimitive(err));
    res.add(totals);
    return res;
  }

  /** Renders the matrix as an HTML table via the shared utility printer. */
  public void toHTML(StringBuilder sb, String[] domain) {
    long[][] cm = _arr;
    printConfusionMatrix(sb, cm, domain, true);
  }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/CoxPH.java
|
package hex;
import Jama.Matrix;
import java.util.Arrays;
import jsr166y.ForkJoinTask;
import jsr166y.RecursiveAction;
import hex.FrameTask.DataInfo;
import water.DKV;
import water.Futures;
import water.Job;
import water.Key;
import water.MemoryManager;
import water.Model;
import water.Request2;
import water.api.CoxPHProgressPage;
import water.api.DocGen;
import water.fvec.Frame;
import water.fvec.Vec;
import water.fvec.Vec.CollectDomain;
import water.util.Utils;
public class CoxPH extends Job {
@API(help="Data Frame", required=true, filter=Default.class, json=true)
public Frame source;
@API(help="Start Time Column", required=false, filter=CoxPHVecSelect.class, json=true)
public Vec start_column = null;
@API(help="Stop Time Column", required=true, filter=CoxPHVecSelect.class, json=true)
public Vec stop_column;
@API(help="Event Column", required=true, filter=CoxPHVecSelect.class, json=true)
public Vec event_column;
@API(help="X Columns", required=true, filter=CoxPHMultiVecSelect.class, json=true)
public int[] x_columns;
@API(help="Weights Column", required=false, filter=CoxPHVecSelect.class, json=true)
public Vec weights_column = null;
@API(help="Offset Columns", required=false, filter=CoxPHMultiVecSelect.class, json=true)
public int[] offset_columns;
@API(help="Method for Handling Ties", required=true, filter=Default.class, json=true)
public CoxPHTies ties = CoxPHTies.efron;
@API(help="coefficient starting value", required=true, filter=Default.class, json=true)
public double init = 0;
@API(help="minimum log-relative error", required=true, filter=Default.class, json=true)
public double lre_min = 9;
@API(help="maximum number of iterations", required=true, filter=Default.class, json=true)
public int iter_max = 20;
  /** Request argument filter: choose a single Vec from the "source" frame. */
  private class CoxPHVecSelect extends VecSelect { CoxPHVecSelect() { super("source"); } }
  /** Request argument filter: choose multiple Vecs from the "source" frame. */
  private class CoxPHMultiVecSelect extends MultiVecSelect { CoxPHMultiVecSelect() { super("source"); } }
public static final int MAX_TIME_BINS = 10000;
  /** Approximation used for tied event times; efron is the default (see the {@code ties} field). */
  public static enum CoxPHTies { efron, breslow }
public static double[][] malloc2DArray(final int d1, final int d2) {
final double[][] array = new double[d1][];
for (int j = 0; j < d1; ++j)
array[j] = MemoryManager.malloc8d(d2);
return array;
}
public static double[][][] malloc3DArray(final int d1, final int d2, final int d3) {
final double[][][] array = new double[d1][d2][];
for (int j = 0; j < d1; ++j)
for (int k = 0; k < d2; ++k)
array[j][k] = MemoryManager.malloc8d(d3);
return array;
}
  /**
   * Fitted Cox Proportional Hazards model: coefficient estimates with their
   * standard errors and covariance, fit statistics (log-likelihood and the
   * likelihood-ratio / Wald / score tests), and the baseline cumulative hazard
   * used for scoring.  Scoring emits, per row, the cumulative hazard and its
   * standard error at every observed event time.
   */
  public static class CoxPHModel extends Model implements Job.Progress {
    static final int API_WEAVER = 1; // This file has auto-generated doc & JSON fields
    static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from auto-generated code.
    @API(help = "model parameters", json = true)
    final private CoxPH parameters;
    @API(help="Input data info")
    DataInfo data_info;
    @API(help = "names of coefficients")
    String[] coef_names;
    @API(help = "coefficients")
    double[] coef;
    @API(help = "exp(coefficients)")
    double[] exp_coef;
    @API(help = "exp(-coefficients)")
    double[] exp_neg_coef;
    @API(help = "se(coefficients)")
    double[] se_coef;
    @API(help = "z-score")
    double[] z_coef;
    @API(help = "var(coefficients)")
    double[][] var_coef;
    @API(help = "null log-likelihood")
    double null_loglik;
    @API(help = "log-likelihood")
    double loglik;
    @API(help = "log-likelihood test stat")
    double loglik_test;
    @API(help = "Wald test stat")
    double wald_test;
    @API(help = "Score test stat")
    double score_test;
    @API(help = "R-square")
    double rsq;
    @API(help = "Maximum R-square")
    double maxrsq;
    @API(help = "gradient", json = false)
    double[] gradient;
    @API(help = "Hessian", json = false)
    double[][] hessian;
    @API(help = "log relative error")
    double lre;
    @API(help = "number of iterations")
    int iter;
    @API(help = "x weighted mean vector for categorical variables")
    double[] x_mean_cat;
    @API(help = "x weighted mean vector for numeric variables")
    double[] x_mean_num;
    @API(help = "unweighted mean vector for numeric offsets")
    double[] mean_offset;
    @API(help = "names of offsets")
    String[] offset_names;
    @API(help = "n")
    long n;
    @API(help = "number of rows with missing values")
    long n_missing;
    @API(help = "total events")
    long total_event;
    @API(help = "minimum time")
    long min_time;
    @API(help = "maximum time")
    long max_time;
    @API(help = "time")
    long[] time;
    @API(help = "number at risk")
    double[] n_risk;
    @API(help = "number of events")
    double[] n_event;
    @API(help = "number of censored obs")
    double[] n_censor;
    @API(help = "baseline cumulative hazard")
    double[] cumhaz_0;
    @API(help = "component of var(cumhaz)", json = false)
    double[] var_cumhaz_1;
    @API(help = "component of var(cumhaz)", json = false)
    double[][] var_cumhaz_2;

    /** Snapshots the job's parameters via clone so later Job mutation does not affect the model. */
    public CoxPHModel(CoxPH job, Key selfKey, Key dataKey, Frame fr, float[] priorClassDist) {
      super(selfKey, dataKey, fr, priorClassDist);
      parameters = (CoxPH) job.clone();
    }

    /** @return the (cloned) parameter set this model was trained with */
    @Override
    public final CoxPH get_params() { return parameters; }
    @Override
    public final Request2 job() { return get_params(); }
    /** Progress = completed Newton iterations over the configured maximum. */
    @Override
    public float progress() { return (float) iter / (float) get_params().iter_max; }

    // Following three overrides created for use in super.scoreImpl
    /** One "class" name per output column: cumhaz_t and se_cumhaz_t for each event time t. */
    @Override
    public String[] classNames() {
      final String[] names = new String[nclasses()];
      for (int i = 0; i < time.length; ++i) {
        final long t = time[i];
        names[i] = "cumhaz_" + t;
        names[i + time.length] = "se_cumhaz_" + t;
      }
      return names;
    }
    @Override
    public boolean isClassifier() { return false; }
    /** Two outputs per event time: the cumulative hazard and its standard error. */
    @Override
    public int nclasses() { return 2 * time.length; }

    /**
     * Scores one row: writes cumhaz at each event time into preds[1..n_time]
     * and its standard error into preds[n_time+1..2*n_time]; preds[0] is
     * unused and set to NaN.  Rows with any numeric NA — or a partial set of
     * categorical NAs — score as NaN; a row whose categoricals are ALL missing
     * has them imputed with the weighted categorical means (x_mean_cat).
     */
    @Override
    protected float[] score0(double[] data, float[] preds) {
      final int n_offsets = (parameters.offset_columns == null) ? 0 : parameters.offset_columns.length;
      final int n_time = time.length;
      final int n_coef = coef.length;
      final int n_cats = data_info._cats;
      final int n_nums = data_info._nums;
      final int n_data = n_cats + n_nums;
      final int n_full = n_coef + n_offsets;
      final int numStart = data_info.numStart();
      boolean catsAllNA = true;
      boolean catsHasNA = false;
      boolean numsHasNA = false;
      for (int j = 0; j < n_cats; ++j) {
        catsAllNA &= Double.isNaN(data[j]);
        catsHasNA |= Double.isNaN(data[j]);
      }
      for (int j = n_cats; j < n_data; ++j)
        numsHasNA |= Double.isNaN(data[j]);
      if (numsHasNA || (catsHasNA && !catsAllNA)) {
        for (int i = 1; i <= 2 * n_time; ++i)
          preds[i] = Float.NaN;
      } else {
        double[] full_data = MemoryManager.malloc8d(n_full);
        for (int j = 0; j < n_cats; ++j)
          if (Double.isNaN(data[j])) {
            // all-missing categoricals: impute with the weighted category means
            final int kst = data_info._catOffsets[j];
            final int klen = data_info._catOffsets[j+1] - kst;
            System.arraycopy(x_mean_cat, kst, full_data, kst, klen);
          } else if (data[j] != 0)
            // one-hot expansion; level 0 is the all-zero reference level
            full_data[data_info._catOffsets[j] + (int) (data[j] - 1)] = 1;
        for (int j = 0; j < n_nums; ++j)
          // numerics are de-meaned the same way as during training
          full_data[numStart + j] = data[n_cats + j] - data_info._normSub[j];
        double logRisk = 0;
        for (int j = 0; j < n_coef; ++j)
          logRisk += full_data[j] * coef[j];
        for (int j = n_coef; j < full_data.length; ++j)
          logRisk += full_data[j]; // offsets enter with an implicit coefficient of 1
        final double risk = Math.exp(logRisk);
        for (int t = 0; t < n_time; ++t)
          preds[t + 1] = (float) (risk * cumhaz_0[t]);
        for (int t = 0; t < n_time; ++t) {
          // quadratic form through var_coef gives the coefficient-uncertainty part of var(cumhaz)
          final double cumhaz_0_t = cumhaz_0[t];
          double var_cumhaz_2_t = 0;
          for (int j = 0; j < n_coef; ++j) {
            double sum = 0;
            for (int k = 0; k < n_coef; ++k)
              sum += var_coef[j][k] * (full_data[k] * cumhaz_0_t - var_cumhaz_2[t][k]);
            var_cumhaz_2_t += (full_data[j] * cumhaz_0_t - var_cumhaz_2[t][j]) * sum;
          }
          preds[t + 1 + n_time] = (float) (risk * Math.sqrt(var_cumhaz_1[t] + var_cumhaz_2_t));
        }
      }
      preds[0] = Float.NaN;
      return preds;
    }

    /**
     * Allocates and sizes every statistics field from the training frame and
     * its DataInfo.  Relies on {@link CoxPH#getSubframe()}'s column layout:
     * the last three columns of {@code source} are (start), stop, event.
     */
    protected void initStats(final Frame source, final DataInfo dinfo) {
      n = source.numRows();
      data_info = dinfo;
      final int n_offsets = (parameters.offset_columns == null) ? 0 : parameters.offset_columns.length;
      final int n_coef = data_info.fullN() - n_offsets;
      final String[] coefNames = data_info.coefNames();
      coef_names = new String[n_coef];
      System.arraycopy(coefNames, 0, coef_names, 0, n_coef);
      coef = MemoryManager.malloc8d(n_coef);
      exp_coef = MemoryManager.malloc8d(n_coef);
      exp_neg_coef = MemoryManager.malloc8d(n_coef);
      se_coef = MemoryManager.malloc8d(n_coef);
      z_coef = MemoryManager.malloc8d(n_coef);
      gradient = MemoryManager.malloc8d(n_coef);
      hessian = malloc2DArray(n_coef, n_coef);
      var_coef = malloc2DArray(n_coef, n_coef);
      x_mean_cat = MemoryManager.malloc8d(n_coef - (data_info._nums - n_offsets));
      x_mean_num = MemoryManager.malloc8d(data_info._nums - n_offsets);
      mean_offset = MemoryManager.malloc8d(n_offsets);
      offset_names = new String[n_offsets];
      System.arraycopy(coefNames, n_coef, offset_names, 0, n_offsets);
      final Vec start_column = source.vec(source.numCols() - 3);
      final Vec stop_column = source.vec(source.numCols() - 2);
      min_time = parameters.start_column == null ? (long) stop_column.min():
                                                   (long) start_column.min() + 1;
      max_time = (long) stop_column.max();
      // size per-time arrays by the number of DISTINCT stop times actually present
      final int n_time = new CollectDomain(stop_column).doAll(stop_column).domain().length;
      time = MemoryManager.malloc8(n_time);
      n_risk = MemoryManager.malloc8d(n_time);
      n_event = MemoryManager.malloc8d(n_time);
      n_censor = MemoryManager.malloc8d(n_time);
      cumhaz_0 = MemoryManager.malloc8d(n_time);
      var_cumhaz_1 = MemoryManager.malloc8d(n_time);
      var_cumhaz_2 = malloc2DArray(n_time, n_coef);
    }

    /**
     * Fills n, n_missing, the weighted predictor means and the per-event-time
     * counts (risk set / events / censored) from a completed CoxPHTask pass,
     * compacting empty time bins.  Without a start column the risk-set sizes
     * are accumulated right-to-left into cumulative counts.
     */
    protected void calcCounts(final CoxPHTask coxMR) {
      n_missing = n - coxMR.n;
      n = coxMR.n;
      for (int j = 0; j < x_mean_cat.length; j++)
        x_mean_cat[j] = coxMR.sumWeightedCatX[j] / coxMR.sumWeights;
      for (int j = 0; j < x_mean_num.length; j++)
        x_mean_num[j] = coxMR._dinfo._normSub[j] + coxMR.sumWeightedNumX[j] / coxMR.sumWeights;
      System.arraycopy(coxMR._dinfo._normSub, x_mean_num.length, mean_offset, 0, mean_offset.length);
      int nz = 0; // write cursor over non-empty time bins
      for (int t = 0; t < coxMR.countEvents.length; ++t) {
        total_event += coxMR.countEvents[t];
        if (coxMR.sizeEvents[t] > 0 || coxMR.sizeCensored[t] > 0) {
          time[nz] = min_time + t;
          n_risk[nz] = coxMR.sizeRiskSet[t];
          n_event[nz] = coxMR.sizeEvents[t];
          n_censor[nz] = coxMR.sizeCensored[t];
          nz++;
        }
      }
      if (parameters.start_column == null)
        for (int t = n_risk.length - 2; t >= 0; --t)
          n_risk[t] += n_risk[t + 1];
    }

    /**
     * Computes the partial log-likelihood at the task's coefficient vector
     * and, as side effects, (re)fills {@code gradient} and {@code hessian},
     * using Efron's or Breslow's approximation for tied event times.  The
     * Efron branch parallelizes over time bins with fork/join tasks and then
     * reduces the per-bin partial sums.
     * @return the new partial log-likelihood
     */
    protected double calcLoglik(final CoxPHTask coxMR) {
      final int n_coef = coef.length;
      final int n_time = coxMR.sizeEvents.length;
      double newLoglik = 0;
      for (int j = 0; j < n_coef; ++j)
        gradient[j] = 0;
      for (int j = 0; j < n_coef; ++j)
        for (int k = 0; k < n_coef; ++k)
          hessian[j][k] = 0;
      switch (parameters.ties) {
        case efron:
          // per-time-bin partials, filled in parallel and reduced afterwards
          final double[] newLoglik_t = MemoryManager.malloc8d(n_time);
          final double[][] gradient_t = malloc2DArray(n_time, n_coef);
          final double[][][] hessian_t = malloc3DArray(n_time, n_coef, n_coef);
          ForkJoinTask[] fjts = new ForkJoinTask[n_time];
          for (int t = n_time - 1; t >= 0; --t) {
            final int _t = t;
            fjts[t] = new RecursiveAction() {
              @Override protected void compute() {
                final double sizeEvents_t = coxMR.sizeEvents[_t];
                if (sizeEvents_t > 0) {
                  final long countEvents_t = coxMR.countEvents[_t];
                  final double sumLogRiskEvents_t = coxMR.sumLogRiskEvents[_t];
                  final double sumRiskEvents_t = coxMR.sumRiskEvents[_t];
                  final double rcumsumRisk_t = coxMR.rcumsumRisk[_t];
                  final double avgSize = sizeEvents_t / countEvents_t;
                  newLoglik_t[_t] = sumLogRiskEvents_t;
                  System.arraycopy(coxMR.sumXEvents[_t], 0, gradient_t[_t], 0, n_coef);
                  for (long e = 0; e < countEvents_t; ++e) {
                    // Efron's correction: subtract a growing fraction of the tied events' risk
                    final double frac = ((double) e) / ((double) countEvents_t);
                    final double term = rcumsumRisk_t - frac * sumRiskEvents_t;
                    newLoglik_t[_t] -= avgSize * Math.log(term);
                    for (int j = 0; j < n_coef; ++j) {
                      final double djTerm = coxMR.rcumsumXRisk[_t][j] - frac * coxMR.sumXRiskEvents[_t][j];
                      final double djLogTerm = djTerm / term;
                      gradient_t[_t][j] -= avgSize * djLogTerm;
                      for (int k = 0; k < n_coef; ++k) {
                        final double dkTerm = coxMR.rcumsumXRisk[_t][k] - frac * coxMR.sumXRiskEvents[_t][k];
                        final double djkTerm = coxMR.rcumsumXXRisk[_t][j][k] - frac * coxMR.sumXXRiskEvents[_t][j][k];
                        hessian_t[_t][j][k] -= avgSize * (djkTerm / term - (djLogTerm * (dkTerm / term)));
                      }
                    }
                  }
                }
              }
            };
          }
          ForkJoinTask.invokeAll(fjts);
          for (int t = 0; t < n_time; ++t)
            newLoglik += newLoglik_t[t];
          for (int t = 0; t < n_time; ++t)
            for (int j = 0; j < n_coef; ++j)
              gradient[j] += gradient_t[t][j];
          for (int t = 0; t < n_time; ++t)
            for (int j = 0; j < n_coef; ++j)
              for (int k = 0; k < n_coef; ++k)
                hessian[j][k] += hessian_t[t][j][k];
          break;
        case breslow:
          for (int t = n_time - 1; t >= 0; --t) {
            final double sizeEvents_t = coxMR.sizeEvents[t];
            if (sizeEvents_t > 0) {
              final double sumLogRiskEvents_t = coxMR.sumLogRiskEvents[t];
              final double rcumsumRisk_t = coxMR.rcumsumRisk[t];
              newLoglik += sumLogRiskEvents_t;
              newLoglik -= sizeEvents_t * Math.log(rcumsumRisk_t);
              for (int j = 0; j < n_coef; ++j) {
                final double dlogTerm = coxMR.rcumsumXRisk[t][j] / rcumsumRisk_t;
                gradient[j] += coxMR.sumXEvents[t][j];
                gradient[j] -= sizeEvents_t * dlogTerm;
                for (int k = 0; k < n_coef; ++k)
                  hessian[j][k] -= sizeEvents_t *
                    (((coxMR.rcumsumXXRisk[t][j][k] / rcumsumRisk_t) -
                      (dlogTerm * (coxMR.rcumsumXRisk[t][k] / rcumsumRisk_t))));
              }
            }
          }
          break;
        default:
          throw new IllegalArgumentException("ties method must be either efron or breslow");
      }
      return newLoglik;
    }

    /**
     * Updates coefficient-level statistics (variance as the negated inverse
     * Hessian, standard errors, z-scores) and the model-level fit statistics
     * at the accepted coefficients.  On the first iteration (iter == 0) also
     * records the null log-likelihood, maximum R-square and the score test.
     */
    protected void calcModelStats(final double[] newCoef, final double newLoglik) {
      final int n_coef = coef.length;
      final Matrix inv_hessian = new Matrix(hessian).inverse();
      for (int j = 0; j < n_coef; ++j) {
        for (int k = 0; k <= j; ++k) {
          // var(coef) = -H^-1, filled symmetrically
          final double elem = -inv_hessian.get(j, k);
          var_coef[j][k] = elem;
          var_coef[k][j] = elem;
        }
      }
      for (int j = 0; j < n_coef; ++j) {
        coef[j] = newCoef[j];
        exp_coef[j] = Math.exp(coef[j]);
        exp_neg_coef[j] = Math.exp(- coef[j]);
        se_coef[j] = Math.sqrt(var_coef[j][j]);
        z_coef[j] = coef[j] / se_coef[j];
      }
      if (iter == 0) {
        null_loglik = newLoglik;
        maxrsq = 1 - Math.exp(2 * null_loglik / n);
        score_test = 0;
        for (int j = 0; j < n_coef; ++j) {
          double sum = 0;
          for (int k = 0; k < n_coef; ++k)
            sum += var_coef[j][k] * gradient[k];
          score_test += gradient[j] * sum;
        }
      }
      loglik = newLoglik;
      loglik_test = - 2 * (null_loglik - loglik);
      rsq = 1 - Math.exp(- loglik_test / n);
      wald_test = 0;
      for (int j = 0; j < n_coef; ++j) {
        double sum = 0;
        for (int k = 0; k < n_coef; ++k)
          sum -= hessian[j][k] * (coef[k] - parameters.init);
        wald_test += (coef[j] - parameters.init) * sum;
      }
    }

    /**
     * Computes the baseline hazard increments and the two components of their
     * variance at each non-empty time bin — per the selected ties
     * approximation — then prefix-sums them over time into cumulative values.
     */
    protected void calcCumhaz_0(final CoxPHTask coxMR) {
      final int n_coef = coef.length;
      int nz = 0; // write cursor over non-empty time bins
      switch (parameters.ties) {
        case efron:
          for (int t = 0; t < coxMR.sizeEvents.length; ++t) {
            final double sizeEvents_t = coxMR.sizeEvents[t];
            final double sizeCensored_t = coxMR.sizeCensored[t];
            if (sizeEvents_t > 0 || sizeCensored_t > 0) {
              final long countEvents_t = coxMR.countEvents[t];
              final double sumRiskEvents_t = coxMR.sumRiskEvents[t];
              final double rcumsumRisk_t = coxMR.rcumsumRisk[t];
              final double avgSize = sizeEvents_t / countEvents_t;
              cumhaz_0[nz] = 0;
              var_cumhaz_1[nz] = 0;
              for (int j = 0; j < n_coef; ++j)
                var_cumhaz_2[nz][j] = 0;
              for (long e = 0; e < countEvents_t; ++e) {
                final double frac = ((double) e) / ((double) countEvents_t);
                final double haz = 1 / (rcumsumRisk_t - frac * sumRiskEvents_t);
                final double haz_sq = haz * haz;
                cumhaz_0[nz] += avgSize * haz;
                var_cumhaz_1[nz] += avgSize * haz_sq;
                for (int j = 0; j < n_coef; ++j)
                  var_cumhaz_2[nz][j] +=
                    avgSize * ((coxMR.rcumsumXRisk[t][j] - frac * coxMR.sumXRiskEvents[t][j]) * haz_sq);
              }
              nz++;
            }
          }
          break;
        case breslow:
          for (int t = 0; t < coxMR.sizeEvents.length; ++t) {
            final double sizeEvents_t = coxMR.sizeEvents[t];
            final double sizeCensored_t = coxMR.sizeCensored[t];
            if (sizeEvents_t > 0 || sizeCensored_t > 0) {
              final double rcumsumRisk_t = coxMR.rcumsumRisk[t];
              final double cumhaz_0_nz = sizeEvents_t / rcumsumRisk_t;
              cumhaz_0[nz] = cumhaz_0_nz;
              var_cumhaz_1[nz] = sizeEvents_t / (rcumsumRisk_t * rcumsumRisk_t);
              for (int j = 0; j < n_coef; ++j)
                var_cumhaz_2[nz][j] = (coxMR.rcumsumXRisk[t][j] / rcumsumRisk_t) * cumhaz_0_nz;
              nz++;
            }
          }
          break;
        default:
          throw new IllegalArgumentException("ties method must be either efron or breslow");
      }
      // turn per-time increments into cumulative sums
      for (int t = 1; t < cumhaz_0.length; ++t) {
        cumhaz_0[t] = cumhaz_0[t - 1] + cumhaz_0[t];
        var_cumhaz_1[t] = var_cumhaz_1[t - 1] + var_cumhaz_1[t];
        for (int j = 0; j < n_coef; ++j)
          var_cumhaz_2[t][j] = var_cumhaz_2[t - 1][j] + var_cumhaz_2[t][j];
      }
    }

    /**
     * Builds a survfit-style frame (time, cumhaz, se_cumhaz, surv) for a
     * single covariate value and publishes it to the DKV under {@code key}.
     * NOTE(review): j is hard-wired to 0, so only the first coefficient /
     * numeric column is used — see the FIXME; confirm the intended scope.
     */
    public Frame makeSurvfit(final Key key, double x_new) { // FIXME
      int j = 0;
      if (Double.isNaN(x_new))
        x_new = data_info._normSub[j];
      final int n_time = time.length;
      final Vec[] vecs = Vec.makeNewCons((long) n_time, 4, 0, null);
      final Vec timevec = vecs[0];
      final Vec cumhaz = vecs[1];
      final Vec se_cumhaz = vecs[2];
      final Vec surv = vecs[3];
      final double x_centered = x_new - data_info._normSub[j];
      final double risk = Math.exp(coef[j] * x_centered);
      for (int t = 0; t < n_time; ++t)
        timevec.set(t, time[t]);
      for (int t = 0; t < n_time; ++t) {
        final double cumhaz_1 = risk * cumhaz_0[t];
        cumhaz.set(t, cumhaz_1);
        surv.set(t, Math.exp(-cumhaz_1)); // survival S(t) = exp(-cumhaz)
      }
      for (int t = 0; t < n_time; ++t) {
        final double gamma = x_centered * cumhaz_0[t] - var_cumhaz_2[t][j];
        se_cumhaz.set(t, risk * Math.sqrt(var_cumhaz_1[t] + (gamma * var_coef[j][j] * gamma)));
      }
      final Frame fr = new Frame(key, new String[] {"time", "cumhaz", "se_cumhaz", "surv"}, vecs);
      final Futures fs = new Futures();
      DKV.put(key, fr, fs);
      fs.blockForPending();
      return fr;
    }

    /** Renders the data summary, coefficient table and fit statistics as HTML. */
    public void generateHTML(final String title, final StringBuilder sb) {
      DocGen.HTML.title(sb, title);
      sb.append("<h4>Data</h4>");
      sb.append("<table class='table table-striped table-bordered table-condensed'><col width=\"25%\"><col width=\"75%\">");
      sb.append("<tr><th>Number of Complete Cases</th><td>"); sb.append(n); sb.append("</td></tr>");
      sb.append("<tr><th>Number of Non Complete Cases</th><td>"); sb.append(n_missing); sb.append("</td></tr>");
      sb.append("<tr><th>Number of Events in Complete Cases</th><td>");sb.append(total_event);sb.append("</td></tr>");
      sb.append("</table>");
      sb.append("<h4>Coefficients</h4>");
      sb.append("<table class='table table-striped table-bordered table-condensed'>");
      sb.append("<tr><th></th><th>coef</th><th>exp(coef)</th><th>se(coef)</th><th>z</th></tr>");
      for (int j = 0; j < coef.length; ++j) {
        sb.append("<tr><th>");
        sb.append(coef_names[j]);sb.append("</th><td>");sb.append(coef[j]);   sb.append("</td><td>");
        sb.append(exp_coef[j]);  sb.append("</td><td>");sb.append(se_coef[j]);sb.append("</td><td>");
        sb.append(z_coef[j]);
        sb.append("</td></tr>");
      }
      sb.append("</table>");
      sb.append("<h4>Model Statistics</h4>");
      sb.append("<table class='table table-striped table-bordered table-condensed'><col width=\"15%\"><col width=\"85%\">");
      sb.append("<tr><th>Rsquare</th><td>");sb.append(String.format("%.3f", rsq));
      sb.append(" (max possible = ");       sb.append(String.format("%.3f", maxrsq));sb.append(")</td></tr>");
      sb.append("<tr><th>Likelihood ratio test</th><td>");sb.append(String.format("%.2f", loglik_test));
      sb.append(" on ");sb.append(coef.length);sb.append(" df</td></tr>");
      sb.append("<tr><th>Wald test            </th><td>");sb.append(String.format("%.2f", wald_test));
      sb.append(" on ");sb.append(coef.length);sb.append(" df</td></tr>");
      sb.append("<tr><th>Score (logrank) test </th><td>");sb.append(String.format("%.2f", score_test));
      sb.append(" on ");sb.append(coef.length);sb.append(" df</td></tr>");
      sb.append("</table>");
    }

    /** No Java POJO export is provided for this model. */
    public void toJavaHtml(StringBuilder sb) {
    }
  }
private CoxPHModel model;
  /**
   * Validates the column and parameter choices, bounds the number of distinct
   * event-time bins, and constructs the (empty) CoxPHModel plus its DataInfo
   * over the working subframe.  The last {@code n_resp} columns of the
   * subframe — weights/start (when present), stop, event, as arranged by
   * {@link #getSubframe()} — are treated as responses.
   * @throws IllegalArgumentException on invalid column types or parameters
   */
  @Override
  protected void init() {
    super.init();
    if ((start_column != null) && !start_column.isInt())
      throw new IllegalArgumentException("start time must be null or of type integer");
    if (!stop_column.isInt())
      throw new IllegalArgumentException("stop time must be of type integer");
    if (!event_column.isInt() && !event_column.isEnum())
      throw new IllegalArgumentException("event must be of type integer or factor");
    if (Double.isNaN(lre_min) || lre_min <= 0)
      throw new IllegalArgumentException("lre_min must be a positive number");
    if (iter_max < 1)
      throw new IllegalArgumentException("iter_max must be a positive integer");
    // Number of potential time bins; bounded so per-time accumulators stay tractable.
    final long min_time = (start_column == null) ? (long) stop_column.min() : (long) start_column.min() + 1;
    final int n_time = (int) (stop_column.max() - min_time + 1);
    if (n_time < 1)
      throw new IllegalArgumentException("start times must be strictly less than stop times");
    if (n_time > MAX_TIME_BINS)
      throw new IllegalArgumentException("number of distinct stop times is " + n_time +
                                         "; maximum number allowed is " + MAX_TIME_BINS);
    source = getSubframe();
    // Trailing response columns: stop + event, plus optional weights and start.
    int n_resp = 2;
    if (weights_column != null)
      n_resp++;
    if (start_column != null)
      n_resp++;
    final DataInfo dinfo = new DataInfo(source, n_resp, false, false, DataInfo.TransformType.DEMEAN);
    model = new CoxPHModel(this, dest(), source._key, source, null);
    model.initStats(source, dinfo);
  }
  /**
   * Newton-Raphson fit of the Cox PH coefficients with step-halving: each
   * iteration runs one CoxPHTask MapReduce pass at the current coefficients,
   * recomputes the partial log-likelihood, and either accepts the step
   * (updating model statistics and the baseline hazard) or halves the
   * previous step when the likelihood did not improve.  Stops when the log
   * relative error reaches lre_min or iter_max iterations have run; the
   * final model is published to the DKV under dest().
   */
  @Override
  protected void execImpl() {
    final DataInfo dinfo = model.data_info;
    final int n_offsets = (model.parameters.offset_columns == null) ? 0 : model.parameters.offset_columns.length;
    final int n_coef = dinfo.fullN() - n_offsets;
    final double[] step = MemoryManager.malloc8d(n_coef);
    final double[] oldCoef = MemoryManager.malloc8d(n_coef);
    final double[] newCoef = MemoryManager.malloc8d(n_coef);
    Arrays.fill(step, Double.NaN);
    Arrays.fill(oldCoef, Double.NaN);
    for (int j = 0; j < n_coef; ++j)
      newCoef[j] = init; // all coefficients start at the user-supplied init value
    double oldLoglik = - Double.MAX_VALUE;
    final int n_time = (int) (model.max_time - model.min_time + 1);
    final boolean has_start_column = (model.parameters.start_column != null);
    final boolean has_weights_column = (model.parameters.weights_column != null);
    for (int i = 0; i <= iter_max; ++i) {
      model.iter = i;
      final CoxPHTask coxMR = new CoxPHTask(self(), dinfo, newCoef, model.min_time, n_time, n_offsets,
                                            has_start_column, has_weights_column).doAll(dinfo._adaptedFrame);
      final double newLoglik = model.calcLoglik(coxMR);
      if (newLoglik > oldLoglik) {
        // Step accepted: refresh all model statistics at the new coefficients.
        if (i == 0)
          model.calcCounts(coxMR);
        model.calcModelStats(newCoef, newLoglik);
        model.calcCumhaz_0(coxMR);
        // Log relative error of the likelihood change, used as the convergence test.
        if (newLoglik == 0)
          model.lre = - Math.log10(Math.abs(oldLoglik - newLoglik));
        else
          model.lre = - Math.log10(Math.abs((oldLoglik - newLoglik) / newLoglik));
        if (model.lre >= lre_min)
          break;
        // Newton step accumulated from var_coef (which holds -H^-1) and the gradient.
        Arrays.fill(step, 0);
        for (int j = 0; j < n_coef; ++j)
          for (int k = 0; k < n_coef; ++k)
            step[j] -= model.var_coef[j][k] * model.gradient[k];
        // NOTE(review): this `break` exits only the j-scan below, not the
        // Newton iteration loop — confirm whether a NaN/Inf step was meant
        // to abort the outer loop instead.
        for (int j = 0; j < n_coef; ++j)
          if (Double.isNaN(step[j]) || Double.isInfinite(step[j]))
            break;
        oldLoglik = newLoglik;
        System.arraycopy(newCoef, 0, oldCoef, 0, oldCoef.length);
      } else {
        // Likelihood worsened: halve the previous step (step-halving line search).
        for (int j = 0; j < n_coef; ++j)
          step[j] /= 2;
      }
      for (int j = 0; j < n_coef; ++j)
        newCoef[j] = oldCoef[j] - step[j];
    }
    final Futures fs = new Futures();
    DKV.put(dest(), model, fs);
    fs.blockForPending();
  }
  /** Redirects the REST response to the CoxPH progress page for this job. */
  @Override
  protected Response redirect() {
    return CoxPHProgressPage.redirect(this, self(), dest());
  }
private Frame getSubframe() {
final boolean use_start_column = (start_column != null);
final boolean use_weights_column = (weights_column != null);
final int x_ncol = x_columns.length;
final int offset_ncol = offset_columns == null ? 0 : offset_columns.length;
int ncol = x_ncol + offset_ncol + 2;
if (use_weights_column)
ncol++;
if (use_start_column)
ncol++;
final String[] names = new String[ncol];
for (int j = 0; j < x_ncol; ++j)
names[j] = source.names()[x_columns[j]];
for (int j = 0; j < offset_ncol; ++j)
names[x_ncol + j] = source.names()[offset_columns[j]];
if (use_weights_column)
names[x_ncol + offset_ncol] = source.names()[source.find(weights_column)];
if (use_start_column)
names[ncol - 3] = source.names()[source.find(start_column)];
names[ncol - 2] = source.names()[source.find(stop_column)];
names[ncol - 1] = source.names()[source.find(event_column)];
return source.subframe(names);
}
/**
 * Distributed map-reduce pass over the training data that accumulates the
 * sufficient statistics needed by the Cox proportional hazards solver:
 * per-time-bucket event/censoring counts, risk-set sizes, and the weighted
 * first/second moments of the covariates (used for gradient and Hessian).
 * Time values are discretized into array indexes by subtracting _min_time.
 */
protected static class CoxPHTask extends FrameTask<CoxPHTask> {
    private final double[] _beta;              // current coefficient estimates
    private final int _n_time;                 // number of distinct time buckets
    private final long _min_time;              // smallest time; shifts times to 0-based indexes
    private final int _n_offsets;              // trailing numeric columns treated as fixed offsets (no coefficient)
    private final boolean _has_start_column;   // true when (start, stop] intervals are supplied
    private final boolean _has_weights_column; // true when a per-row observation weight is supplied

    // --- accumulators, combined across chunks/nodes in reduce() ---
    protected long n;                          // rows processed
    protected long n_missing;                  // rows skipped (missing values)
    protected double sumWeights;               // total observation weight
    protected double[] sumWeightedCatX;        // weighted counts per categorical level
    protected double[] sumWeightedNumX;        // weighted sums per numeric column
    protected double[] sizeRiskSet;            // weight at risk, per time bucket
    protected double[] sizeCensored;           // censored weight, per time bucket
    protected double[] sizeEvents;             // event weight, per time bucket
    protected long[] countEvents;              // event counts, per time bucket
    protected double[][] sumXEvents;           // sum of weighted x at event times
    protected double[] sumRiskEvents;          // sum of risk at event times
    protected double[][] sumXRiskEvents;       // sum of x * risk at event times
    protected double[][][] sumXXRiskEvents;    // sum of x * x' * risk at event times
    protected double[] sumLogRiskEvents;       // sum of weighted linear predictor at event times
    protected double[] rcumsumRisk;            // (reverse-cumulative) risk sums per bucket
    protected double[][] rcumsumXRisk;         // (reverse-cumulative) x * risk sums
    protected double[][][] rcumsumXXRisk;      // (reverse-cumulative) x * x' * risk sums

    CoxPHTask(Key jobKey, DataInfo dinfo, final double[] beta, final long min_time, final int n_time,
              final int n_offsets, final boolean has_start_column, final boolean has_weights_column) {
        super(jobKey, dinfo);
        _beta = beta;
        _n_time = n_time;
        _min_time = min_time;
        _n_offsets = n_offsets;
        _has_start_column = has_start_column;
        _has_weights_column = has_weights_column;
    }

    @Override
    protected void chunkInit(){
        // Allocate per-chunk accumulators sized by coefficient count and time buckets.
        final int n_coef = _beta.length;
        // Categorical part of the expanded vector: total coefficients minus the
        // non-offset numeric columns.
        sumWeightedCatX = MemoryManager.malloc8d(n_coef - (_dinfo._nums - _n_offsets));
        sumWeightedNumX = MemoryManager.malloc8d(_dinfo._nums);
        sizeRiskSet = MemoryManager.malloc8d(_n_time);
        sizeCensored = MemoryManager.malloc8d(_n_time);
        sizeEvents = MemoryManager.malloc8d(_n_time);
        countEvents = MemoryManager.malloc8(_n_time);
        sumRiskEvents = MemoryManager.malloc8d(_n_time);
        sumLogRiskEvents = MemoryManager.malloc8d(_n_time);
        rcumsumRisk = MemoryManager.malloc8d(_n_time);
        sumXEvents = malloc2DArray(_n_time, n_coef);
        sumXRiskEvents = malloc2DArray(_n_time, n_coef);
        rcumsumXRisk = malloc2DArray(_n_time, n_coef);
        sumXXRiskEvents = malloc3DArray(_n_time, n_coef, n_coef);
        rcumsumXXRisk = malloc3DArray(_n_time, n_coef, n_coef);
    }

    @Override
    protected void processRow(long gid, double [] nums, int ncats, int [] cats, double [] response) {
        n++;
        // Response layout: [weight?] ... [start?] stop event (weight first, event last).
        final double weight = _has_weights_column ? response[0] : 1.0;
        if (weight <= 0)
            throw new IllegalArgumentException("weights must be positive values");
        final long event = (long) response[response.length - 1];
        // t1/t2: 0-based time-bucket indexes; a start time contributes from the
        // bucket AFTER it (hence the +1).
        final int t1 = _has_start_column ? (int) (((long) response[response.length - 3] + 1) - _min_time) : -1;
        final int t2 = (int) (((long) response[response.length - 2]) - _min_time);
        if (t1 > t2)
            throw new IllegalArgumentException("start times must be strictly less than stop times");
        final int numStart = _dinfo.numStart();
        sumWeights += weight;
        for (int j = 0; j < ncats; ++j)
            sumWeightedCatX[cats[j]] += weight;
        for (int j = 0; j < nums.length; ++j)
            sumWeightedNumX[j] += weight * nums[j];
        // Linear predictor: categorical dummies + numeric terms + raw offsets.
        double logRisk = 0;
        for (int j = 0; j < ncats; ++j)
            logRisk += _beta[cats[j]];
        for (int j = 0; j < nums.length - _n_offsets; ++j)
            logRisk += nums[j] * _beta[numStart + j];
        for (int j = nums.length - _n_offsets; j < nums.length; ++j)
            logRisk += nums[j];
        final double risk = weight * Math.exp(logRisk);
        logRisk *= weight;
        if (event > 0) {
            countEvents[t2]++;
            sizeEvents[t2] += weight;
            sumLogRiskEvents[t2] += logRisk;
            sumRiskEvents[t2] += risk;
        } else
            sizeCensored[t2] += weight;
        if (_has_start_column) {
            // Interval data: the row is at risk in every bucket of (t1..t2].
            for (int t = t1; t <= t2; ++t)
                sizeRiskSet[t] += weight;
            for (int t = t1; t <= t2; ++t)
                rcumsumRisk[t] += risk;
        } else {
            // Right-censored data: only record at t2; postGlobal() turns these
            // per-bucket sums into reverse cumulative sums.
            sizeRiskSet[t2] += weight;
            rcumsumRisk[t2] += risk;
        }
        // Accumulate first (x * risk) and second (x * x' * risk) moments over
        // the expanded covariate vector; offsets carry no coefficients and are
        // excluded here.
        final int ntotal = ncats + (nums.length - _n_offsets);
        final int numStartIter = numStart - ncats;
        for (int jit = 0; jit < ntotal; ++jit) {
            final boolean jIsCat = jit < ncats;
            final int j = jIsCat ? cats[jit] : numStartIter + jit;
            final double x1 = jIsCat ? 1.0 : nums[jit - ncats];
            final double xRisk = x1 * risk;
            if (event > 0) {
                sumXEvents[t2][j] += weight * x1;
                sumXRiskEvents[t2][j] += xRisk;
            }
            if (_has_start_column) {
                for (int t = t1; t <= t2; ++t)
                    rcumsumXRisk[t][j] += xRisk;
            } else {
                rcumsumXRisk[t2][j] += xRisk;
            }
            for (int kit = 0; kit < ntotal; ++kit) {
                final boolean kIsCat = kit < ncats;
                final int k = kIsCat ? cats[kit] : numStartIter + kit;
                final double x2 = kIsCat ? 1.0 : nums[kit - ncats];
                final double xxRisk = x2 * xRisk;
                if (event > 0)
                    sumXXRiskEvents[t2][j][k] += xxRisk;
                if (_has_start_column) {
                    for (int t = t1; t <= t2; ++t)
                        rcumsumXXRisk[t][j][k] += xxRisk;
                } else {
                    rcumsumXXRisk[t2][j][k] += xxRisk;
                }
            }
        }
    }

    @Override
    public void reduce(CoxPHTask that) {
        // Element-wise merge of all accumulators from another chunk/node.
        // NOTE(review): n_missing is not merged here — confirm whether it is
        // maintained elsewhere or intentionally unused.
        n += that.n;
        sumWeights += that.sumWeights;
        Utils.add(sumWeightedCatX, that.sumWeightedCatX);
        Utils.add(sumWeightedNumX, that.sumWeightedNumX);
        Utils.add(sizeRiskSet, that.sizeRiskSet);
        Utils.add(sizeCensored, that.sizeCensored);
        Utils.add(sizeEvents, that.sizeEvents);
        Utils.add(countEvents, that.countEvents);
        Utils.add(sumXEvents, that.sumXEvents);
        Utils.add(sumRiskEvents, that.sumRiskEvents);
        Utils.add(sumXRiskEvents, that.sumXRiskEvents);
        Utils.add(sumXXRiskEvents, that.sumXXRiskEvents);
        Utils.add(sumLogRiskEvents, that.sumLogRiskEvents);
        Utils.add(rcumsumRisk, that.rcumsumRisk);
        Utils.add(rcumsumXRisk, that.rcumsumXRisk);
        Utils.add(rcumsumXXRisk, that.rcumsumXXRisk);
    }

    @Override
    protected void postGlobal() {
        // Without a start column the per-bucket risk sums were recorded only at
        // each row's stop time; convert them into reverse cumulative sums so
        // rcumsum*[t] covers all rows still at risk at time t.
        if (!_has_start_column) {
            for (int t = rcumsumRisk.length - 2; t >= 0; --t)
                rcumsumRisk[t] += rcumsumRisk[t + 1];
            for (int t = rcumsumXRisk.length - 2; t >= 0; --t)
                for (int j = 0; j < rcumsumXRisk[t].length; ++j)
                    rcumsumXRisk[t][j] += rcumsumXRisk[t + 1][j];
            for (int t = rcumsumXXRisk.length - 2; t >= 0; --t)
                for (int j = 0; j < rcumsumXXRisk[t].length; ++j)
                    for (int k = 0; k < rcumsumXXRisk[t][j].length; ++k)
                        rcumsumXXRisk[t][j][k] += rcumsumXXRisk[t + 1][j][k];
        }
    }
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/CreateFrame.java
|
package hex;
import water.*;
import water.api.DocGen;
import water.fvec.*;
import water.util.Log;
import water.util.RString;
import java.util.Random;
/**
* Create a Frame from scratch
* If randomize = true, then the frame is filled with Random values.
*
*/
/**
 * Request that creates a synthetic Frame. When {@code randomize} is true the
 * frame is filled with random values (real/integer/categorical mix controlled
 * by the fraction parameters); otherwise every cell holds {@code value}.
 */
public class CreateFrame extends Request2 {
    static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
    static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.

    @API(help = "Name (Key) of frame to be created", required = true, filter = Default.class, json=true)
    public String key;

    @API(help = "Number of rows", required = true, filter = Default.class, lmin = 1, json=true)
    public long rows = 10000;

    @API(help = "Number of data columns (in addition to the first response column)", required = true, filter = Default.class, lmin = 1, json=true)
    public int cols = 10;

    @API(help = "Random number seed", filter = Default.class, json=true)
    public long seed = new Random().nextLong();

    @API(help = "Whether frame should be randomized", filter = Default.class, json=true)
    public boolean randomize = true;

    @API(help = "Constant value (for randomize=false)", filter = Default.class, json=true)
    public long value = 0;

    @API(help = "Range for real variables (-range ... range)", filter = Default.class, json=true)
    public long real_range = 100;

    @API(help = "Fraction of categorical columns (for randomize=true)", filter = Default.class, dmin = 0, dmax = 1, json=true)
    public double categorical_fraction = 0.2;

    @API(help = "Factor levels for categorical variables", filter = Default.class, lmin = 2, json=true)
    public int factors = 100;

    @API(help = "Fraction of integer columns (for randomize=true)", filter = Default.class, dmin = 0, dmax = 1, json=true)
    public double integer_fraction = 0.2;

    @API(help = "Range for integer variables (-range ... range)", filter = Default.class, json=true)
    public long integer_range = 100;

    @API(help = "Fraction of missing values", filter = Default.class, dmin = 0, dmax = 1, json=true)
    public double missing_fraction = 0.01;

    @API(help = "Number of factor levels of the first column (1=real, 2=binomial, N=multinomial)", filter = Default.class, lmin = 1, json=true)
    public int response_factors = 2;

    public boolean positive_response; // only for response_factors=1

    /**
     * Validates the parameters, then builds the frame via FrameCreator.
     * Any validation or runtime failure is converted into an error Response.
     */
    @Override public Response serve() {
        try {
            // Parameter sanity checks; error messages mirror the checks.
            if (integer_fraction + categorical_fraction > 1) throw new IllegalArgumentException("Integer and categorical fractions must add up to <= 1.");
            if (Math.abs(missing_fraction) > 1) throw new IllegalArgumentException("Missing fraction must be between 0 and 1.");
            if (Math.abs(integer_fraction) > 1) throw new IllegalArgumentException("Integer fraction must be between 0 and 1.");
            if (Math.abs(categorical_fraction) > 1) throw new IllegalArgumentException("Categorical fraction must be between 0 and 1.");
            // FIX: the check rejects factors <= 1, so the message now says
            // "at least 2" instead of the previous (wrong) "larger than 2".
            if (categorical_fraction > 0 && factors <= 1) throw new IllegalArgumentException("Factors must be at least 2 for categorical data.");
            if (response_factors < 1) throw new IllegalArgumentException("Response factors must be either 1 (real-valued response), or >=2 (factor levels).");
            // FIX: the check rejects cols <= 0, so the message now says
            // "columns > 0" instead of the previous (inconsistent) "columns > 1".
            if (cols <= 0 || rows <= 0) throw new IllegalArgumentException("Must have number of rows > 0 and columns > 0.");
            if (key.length() == 0) throw new IllegalArgumentException("Output key must be provided.");
            if (!randomize) {
                if (integer_fraction != 0 || categorical_fraction != 0)
                    throw new IllegalArgumentException("Cannot have integer or categorical fractions > 0 unless randomize=true.");
            } else {
                if (value != 0)
                    throw new IllegalArgumentException("Cannot set data to a constant value if randomize=true.");
            }
            final FrameCreator fct = new FrameCreator(this);
            H2O.submitTask(fct);
            fct.join(); // block until the frame has been fully created
            Log.info("Created frame '" + key + "'.");
            return Response.done(this);
        } catch( Throwable t ) {
            return Response.error(t);
        }
    }

    /**
     * Renders a short HTML summary of the created frame, or returns false when
     * the frame does not (yet) exist in the KV store.
     */
    @Override public boolean toHTML( StringBuilder sb ) {
        Frame fr = UKV.get(Key.make(key));
        if (fr==null) {
            return false;
        }
        RString aft = new RString("<a href='Inspect2.html?src_key=%$key'>%key</a>");
        aft.replace("key", key);
        DocGen.HTML.section(sb, "Frame creation done.<br/>Frame '" + aft.toString()
                + "' now has " + fr.numRows() + " rows and " + (fr.numCols()-1)
                + " data columns, as well as a " + (response_factors == 1 ? "real-valued" : (response_factors == 2 ? "binomial" : "multi-nomial"))
                + " response variable as the first column.<br/>Number of chunks: " + fr.anyVec().nChunks() + ".");
        return true;
    }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/FrameExtractor.java
|
package hex;
import java.util.Arrays;
import jsr166y.CountedCompleter;
import water.*;
import water.H2O.H2OCountedCompleter;
import water.fvec.*;
import water.util.Utils;
/**
* Support class for extracting things from frame.
**/
/**
 * Abstract fork/join task that extracts several output frames from a source
 * frame. Subclasses supply the per-split worker (createNewWorker), the split
 * layout (computeEspcPerSplit) and the number of outputs (numOfOutputs).
 * The source frame is read-locked for the duration of the extraction and the
 * output frames are created locked, then unlocked (or deleted on failure)
 * in onCompletion.
 */
public abstract class FrameExtractor extends H2OCountedCompleter {
    /** Dataset to split */
    final Frame dataset;
    /** Destination keys for each output frame split. */
    final Key[] destKeys;
    /** Optional job key */
    final Key jobKey;
    /** Output frames for each output split part */
    private Frame[] splits;
    /** Temporary variable holding exceptions of workers */
    private Throwable[] workersExceptions;

    public FrameExtractor(Frame dataset, Key[] destKeys, Key jobKey) {
        this.dataset = dataset;
        this.jobKey = jobKey;
        // Generate default destination keys when the caller did not supply any.
        this.destKeys = destKeys!=null ? destKeys : generateDestKeys(dataset!=null?dataset._key:null, numOfOutputs());
    }

    /**
     * Main fork/join body: locks the input, creates locked output frames from
     * subclass-provided templates, then launches one worker per split.
     */
    @Override public void compute2() {
        // Lock all possible data
        dataset.read_lock(jobKey);
        // Create a template vector for each segment
        final Vec[][] templates = makeTemplates();
        final int nsplits = templates.length;
        assert templates.length == numOfOutputs() : "Number of outputs and number of created templates differ!";
        final Vec[] datasetVecs = dataset.vecs();
        // Create output frames
        splits = new Frame[nsplits];
        for (int s=0; s<nsplits; s++) {
            Frame split = new Frame(destKeys[s], dataset.names(), templates[s] );
            split.delete_and_lock(jobKey);
            splits[s] = split;
        }
        // Launch number of distributed FJ for each split part
        setPendingCount(1);
        H2O.submitTask(new H2OCountedCompleter(FrameExtractor.this) {
            @Override public void compute2() {
                setPendingCount(nsplits);
                for (int s=0; s<nsplits; s++) {
                    MRTask2 mrt = createNewWorker(new H2OCountedCompleter(this) { // Completer for this task
                        @Override public void compute2() { }
                        @Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) {
                            // Collect the worker's exception instead of propagating it,
                            // so all workers finish and cleanup runs exactly once.
                            synchronized( FrameExtractor.this ) { // synchronized on this since can be accessed from different workers
                                workersExceptions = workersExceptions!=null ? Arrays.copyOf(workersExceptions, workersExceptions.length+1) : new Throwable[1];
                                workersExceptions[workersExceptions.length-1] = ex;
                            }
                            tryComplete(); // we handle the exception so wait perform normal completion
                            return false;
                        }
                    }, datasetVecs, s);
                    assert mrt.getCompleter() != null : "The `createNewWorker` method violates API contract and forgets to setup given counted completer!";
                    mrt.asyncExec(splits[s]);
                }
                tryComplete(); // complete the computation of nsplits-tasks
            }
        });
        tryComplete(); // complete the computation of thrown tasks
    }

    /** Blocking call to obtain a result of computation.
     *  Rethrows the first worker failure (if any) wrapped in a RuntimeException. */
    public Frame[] getResult() {
        join();
        if (workersExceptions!=null) throw new RuntimeException(workersExceptions[0]);
        return splits;
    }

    /**
     * Cleanup: unlock the input and either publish (update+unlock) or delete
     * the output frames, depending on whether any worker failed.
     */
    @Override public void onCompletion(CountedCompleter caller) {
        boolean exceptional = workersExceptions!=null;
        dataset.unlock(jobKey);
        if (splits!=null) {
            for (Frame s : splits) {
                if (s!=null) {
                    if (!exceptional) {
                        s.update(jobKey);
                        s.unlock(jobKey);
                    } else { // Have to unlock and delete here
                        s.unlock(jobKey);
                        s.delete(jobKey, 3.14f); // delete all splits
                    }
                }
            }
        }
    }

    /** Create a new worker which has to setup given completer. */
    protected abstract MRTask2 createNewWorker(H2OCountedCompleter completer, Vec[] inputVecs, int split) ;

    /** Create a templates for vector composing output frame.
     *  One Vec[] per split, with chunk layout taken from computeEspcPerSplit. */
    protected Vec[][] makeTemplates() {
        Vec anyVec = dataset.anyVec();
        final long[][] espcPerSplit = computeEspcPerSplit(anyVec._espc, anyVec.length());
        final int num = dataset.numCols(); // number of columns in input frame
        final int nsplits = espcPerSplit.length; // number of splits
        final String[][] domains = dataset.domains(); // domains
        final boolean[] uuids = dataset.uuids();
        final byte[] times = dataset.times();
        Vec[][] t = new Vec[nsplits][/*num*/]; // resulting vectors for all
        for (int i=0; i<nsplits; i++) {
            // vectors for j-th split
            t[i] = new Vec(Vec.newKey(),espcPerSplit[i/*-th split*/]).makeZeros(num, domains, uuids, times);
        }
        return t;
    }

    /**
     * Compute espc for output vectors for each split.
     * @param espc input vector espc
     * @param nrows total number of rows in input vector
     * @return espc for each partition
     */
    protected abstract long[][] computeEspcPerSplit(long[] espc, long nrows) ;

    /**
     * Generates default names for destination keys.
     *
     * @param masterKey
     *          key for input dataset
     * @param numberOfKeys
     *          number of keys to generate
     * @return return an array of keys.
     */
    protected Key[] generateDestKeys(Key masterKey, int numberOfKeys) {
        return Utils.generateNumKeys(masterKey, numberOfKeys);
    }

    /** Return a number of resulting frame which this task produces. */
    protected abstract int numOfOutputs();
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/FrameSplitter.java
|
package hex;
import java.util.Arrays;
import jsr166y.CountedCompleter;
import water.*;
import water.H2O.H2OCountedCompleter;
import water.fvec.*;
import water.util.Utils;
/**
* Frame splitter function to divide given frame into
* multiple partitions based on given ratios.
*
* <p>The task creates <code>ratios.length+1</code> output frame each containing a
* demanded fraction of rows from source dataset</p>
*
* <p>The tasks internally extract data from source chunks and create output chunks in preserving order of parts.
* I.e., the 1st partition contains the first P1-rows, the 2nd partition contains following P2-rows, ...
* </p>
*
* <p>Assumptions and invariants</p>
* <ul>
* <li>number of demanding split parts is reasonable number, i.e., <10. The task is not designed to split into many small parts.</li>
* <li>the worker DOES NOT preserves distribution of new chunks over the cloud according to source dataset chunks.</li>
* <li>rows inside one output chunk are not shuffled, they are extracted deterministically in the same order as they appear in source chunk.</li>
* <li>workers can enforce data transfers if they need to obtain data from remote chunks.</li>
* </ul>
*
* <p>NOTE: the implementation is data-transfer expensive and in some cases it would be beneficial to use original
* implementation from <a href="https://github.com/0xdata/h2o/commits/9af3f4e">9af3f4e</a>.</p>.
*/
/**
 * Fork/join task that splits a frame into ratios.length+1 consecutive
 * partitions. The input frame is read-locked while workers copy row ranges
 * into freshly created output frames; outputs are published on success and
 * deleted on failure (see onCompletion).
 */
public class FrameSplitter extends H2OCountedCompleter {
    /** Dataset to split */
    final Frame dataset;
    /** Split ratios - resulting number of split is ratios.length+1 */
    final float[] ratios;
    /** Destination keys for each output frame split. */
    final Key[] destKeys;
    /** Optional job key */
    final Key jobKey;
    /** Output frames for each output split part */
    private Frame[] splits;
    /** Temporary variable holding exceptions of workers */
    private Throwable[] workersExceptions;

    public FrameSplitter(Frame dataset, float[] ratios) {
        this(dataset, ratios, null, null);
    }

    public FrameSplitter(Frame dataset, float[] ratios, Key[] destKeys, Key jobKey) {
        assert ratios.length > 0 : "No ratio specified!";
        assert ratios.length < 100 : "Too many frame splits demanded!";
        this.dataset = dataset;
        this.ratios = ratios;
        this.destKeys = destKeys!=null ? destKeys : Utils.generateNumKeys(dataset._key, ratios.length+1);
        assert this.destKeys.length == this.ratios.length+1 : "Unexpected number of destination keys.";
        this.jobKey = jobKey;
    }

    /**
     * Main fork/join body: locks the input, creates one locked output frame
     * per partition, then launches one FrameSplitTask per partition.
     */
    @Override public void compute2() {
        // Lock all possible data
        dataset.read_lock(jobKey);
        // Create a template vector for each segment
        final Vec[][] templates = makeTemplates(dataset, ratios);
        final int nsplits = templates.length;
        assert nsplits == ratios.length+1 : "Unexpected number of split templates!";
        // Launch number of distributed FJ for each split part
        final Vec[] datasetVecs = dataset.vecs();
        splits = new Frame[nsplits];
        for (int s=0; s<nsplits; s++) {
            Frame split = new Frame(destKeys[s], dataset.names(), templates[s] );
            split.delete_and_lock(jobKey);
            splits[s] = split;
        }
        setPendingCount(1);
        H2O.submitTask(new H2OCountedCompleter(FrameSplitter.this) {
            @Override public void compute2() {
                setPendingCount(nsplits);
                for (int s=0; s<nsplits; s++) {
                    new FrameSplitTask(new H2OCountedCompleter(this) { // Completer for this task
                        @Override public void compute2() { }
                        @Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) {
                            // Record worker failures instead of propagating them so all
                            // workers run to completion and cleanup happens exactly once.
                            synchronized( FrameSplitter.this ) { // synchronized on this since can be accessed from different workers
                                workersExceptions = workersExceptions!=null ? Arrays.copyOf(workersExceptions, workersExceptions.length+1) : new Throwable[1];
                                workersExceptions[workersExceptions.length-1] = ex;
                            }
                            tryComplete(); // we handle the exception so wait perform normal completion
                            return false;
                        }
                    }, datasetVecs, ratios, s).asyncExec(splits[s]);
                }
                tryComplete(); // complete the computation of nsplits-tasks
            }
        });
        tryComplete(); // complete the computation of thrown tasks
    }

    /** Blocking call to obtain a result of computation.
     *  Rethrows the first worker failure (if any) wrapped in a RuntimeException. */
    public Frame[] getResult() {
        join();
        if (workersExceptions!=null) throw new RuntimeException(workersExceptions[0]);
        return splits;
    }

    /**
     * Cleanup: unlock the input and either publish (update+unlock) or delete
     * the output frames, depending on whether any worker failed.
     */
    @Override public void onCompletion(CountedCompleter caller) {
        boolean exceptional = workersExceptions!=null;
        dataset.unlock(jobKey);
        if (splits!=null) {
            for (Frame s : splits) {
                if (s!=null) {
                    if (!exceptional) {
                        s.update(jobKey);
                        s.unlock(jobKey);
                    } else { // Have to unlock and delete here
                        s.unlock(jobKey);
                        s.delete(jobKey, 3.14f); // delete all splits
                    }
                }
            }
        }
    }

    // Make vector templates for all output frame vectors
    private Vec[][] makeTemplates(Frame dataset, float[] ratios) {
        Vec anyVec = dataset.anyVec();
        final long[][] espcPerSplit = computeEspcPerSplit(anyVec._espc, anyVec.length(), ratios);
        final int num = dataset.numCols(); // number of columns in input frame
        final int nsplits = espcPerSplit.length; // number of splits
        final String[][] domains = dataset.domains(); // domains
        final boolean[] uuids = dataset.uuids();
        final byte [] times = dataset.times();
        Vec[][] t = new Vec[nsplits][/*num*/]; // resulting vectors for all
        for (int i=0; i<nsplits; i++) {
            // vectors for j-th split
            t[i] = new Vec(Vec.newKey(),espcPerSplit[i/*-th split*/]).makeZeros(num, domains, uuids, times);
        }
        return t;
    }

    // The task computes ESPC per split.
    // FIX: removed the dead local `nrows`, which was assigned on every
    // iteration but never read.
    static long[/*nsplits*/][/*nchunks*/] computeEspcPerSplit(long[] espc, long len, float[] ratios) {
        assert espc.length>0 && espc[0] == 0;
        assert espc[espc.length-1] == len;
        long[] partSizes = Utils.partitione(len, ratios); // Split of whole vector
        int nparts = ratios.length+1;
        long[][] r = new long[nparts][espc.length]; // espc for each partition
        long start = 0;
        for (int p=0,c=0; p<nparts; p++) {
            int nc = 0; // number of chunks for this partition
            // Consume whole source chunks while they fit into this partition.
            for(;c<espc.length-1 && (espc[c+1]-start) <= partSizes[p];c++) r[p][++nc] = espc[c+1]-start;
            if (r[p][nc] < partSizes[p]) r[p][++nc] = partSizes[p]; // last item in espc contains number of rows
            r[p] = Arrays.copyOf(r[p], nc+1);
            start += partSizes[p];
        }
        return r;
    }

    /** MR task extract specified part of <code>_srcVecs</code>
     * into output chunk.*/
    private static class FrameSplitTask extends MRTask2<FrameSplitTask> {
        final Vec [] _srcVecs; // a source frame given by list of its columns
        final float[] _ratios; // split ratios
        final int _partIdx; // part index
        transient int _pcidx; // Start chunk index for this partition
        transient int _psrow; // Start row in chunk for this partition

        public FrameSplitTask(H2OCountedCompleter completer, Vec[] srcVecs, float[] ratios, int partIdx) {
            super(completer);
            _srcVecs = srcVecs;
            _ratios = ratios;
            _partIdx = partIdx;
        }

        @Override protected void setupLocal() {
            // Precompute the first input chunk index and start row inside that chunk for this partition
            Vec anyInVec = _srcVecs[0];
            long[] partSizes = Utils.partitione(anyInVec.length(), _ratios);
            long pnrows = 0;
            for (int p=0; p<_partIdx; p++) pnrows += partSizes[p];
            long[] espc = anyInVec._espc;
            while (_pcidx < espc.length-1 && (pnrows -= (espc[_pcidx+1]-espc[_pcidx])) > 0 ) _pcidx++;
            assert pnrows <= 0;
            _psrow = (int) (pnrows + espc[_pcidx+1]-espc[_pcidx]);
        }

        @Override public void map(Chunk[] cs) { // Output chunks
            int coutidx = cs[0].cidx(); // Index of output Chunk
            int cinidx = _pcidx + coutidx;
            int startRow = coutidx > 0 ? 0 : _psrow; // where to start extracting
            int nrows = cs[0]._len;
            // For each output chunk extract appropriate rows for partIdx-th part
            for (int i=0; i<cs.length; i++) {
                // WARNING: this implementation does not preserve co-location of chunks so we are forcing here network transfer!
                ChunkSplitter.extractChunkPart(_srcVecs[i].chunkForChunkIdx(cinidx), cs[i], startRow, nrows, _fs);
            }
        }
    }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/FrameTask.java
|
package hex;
import water.*;
import water.H2O.H2OCountedCompleter;
import water.Job.JobCancelledException;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.util.Log;
import water.util.Utils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Random;
public abstract class FrameTask<T extends FrameTask<T>> extends MRTask2<T>{
// Description of the adapted input data layout (columns, transforms, folds).
public final DataInfo _dinfo;
// Owning job's key; passed to super so the task can observe cancellation.
final protected Key _jobKey;
// double _ymu = Double.NaN; // mean of the response
// size of the expanded vector of parameters
// Fraction of rows each chunk actually processes (1.0 = all rows).
protected float _useFraction = 1.0f;
// Whether rows are visited in shuffled order within a chunk.
protected boolean _shuffle = false;
// Subclasses may override to process rows containing missing values.
protected boolean skipMissing() { return true; }
/** Convenience constructor without a completer. */
public FrameTask(Key jobKey, DataInfo dinfo) {
    this(jobKey,dinfo,null);
}

/**
 * @param jobKey key of the owning job (for cancellation), may be null
 * @param dinfo  layout description of the adapted input frame
 * @param cmp    optional fork/join completer passed to MRTask2
 */
public FrameTask(Key jobKey, DataInfo dinfo, H2OCountedCompleter cmp) {
    super(cmp);
    _jobKey = jobKey;
    _dinfo = dinfo;
}

/** Copy constructor used when cloning a task for another pass. */
protected FrameTask(FrameTask ft){
    _dinfo = ft._dinfo;
    _jobKey = ft._jobKey;
    _useFraction = ft._useFraction;
    _shuffle = ft._shuffle;
}
/** Multiplicative predictor normalization factors (may be null). */
public final double [] normMul(){return _dinfo._normMul;}
/** Subtractive predictor normalization offsets (may be null). */
public final double [] normSub(){return _dinfo._normSub;}
// FIX: the two response accessors below previously returned the PREDICTOR
// normalization arrays (_normMul/_normSub) — an apparent copy-paste bug.
// They now return the response-specific arrays, matching their names.
/** Multiplicative response normalization factors (may be null). */
public final double [] normRespMul(){return _dinfo._normRespMul;}
/** Subtractive response normalization offsets (may be null). */
public final double [] normRespSub(){return _dinfo._normRespSub;}
/**
* Method to process one row of the data for GLM functions.
* Numeric and categorical values are passed separately, as is response.
* Categoricals are passed as absolute indexes into the expanded beta vector, 0-levels are skipped
* (so the number of passed categoricals will not be the same for every row).
*
* Categorical expansion/indexing:
* Categoricals are placed in the beginning of the beta vector.
* Each cat variable with n levels is expanded into n-1 independent binary variables.
* Indexes in cats[] will point to the appropriate coefficient in the beta vector, so e.g.
* assume we have 2 categorical columns both with values A,B,C, then the following rows will have following indexes:
* A,A - ncats = 0, we do not pass any categorical here
* A,B - ncats = 1, indexes = [2]
* B,B - ncats = 2, indexes = [0,2]
* and so on
*
* @param gid - global id of this row, in [0,_adaptedFrame.numRows())
* @param nums - numeric values of this row
* @param ncats - number of passed (non-zero) categoricals
* @param cats - indexes of categoricals into the expanded beta-vector.
* @param response - numeric value for the response
*/
/**
 * Row callback invoked once per (non-skipped) input row; subclasses must
 * override the variant matching their output style.
 * UnsupportedOperationException is a RuntimeException subtype, so callers
 * catching the previous RuntimeException keep working; the message typo
 * ("overriden") is also fixed.
 *
 * @throws UnsupportedOperationException if the subclass did not override it
 */
protected void processRow(long gid, double [] nums, int ncats, int [] cats, double [] response){throw new UnsupportedOperationException("processRow should have been overridden!");}
/** Variant that additionally writes per-row results into output chunks. */
protected void processRow(long gid, double [] nums, int ncats, int [] cats, double [] response, NewChunk [] outputs){throw new UnsupportedOperationException("processRow should have been overridden!");}
public static class DataInfo extends Iced {
// Input frame adapted for the task (responses moved to the end, columns filtered).
public Frame _adaptedFrame;
public int _responses; // number of responses
// Transformations applied to predictor / response columns.
public enum TransformType { NONE, STANDARDIZE, NORMALIZE, DEMEAN, DESCALE };
public TransformType _predictor_transform;
public TransformType _response_transform;
// When false the first factor level of each categorical column is dropped
// (expanded into n-1 dummies); when true all n levels are kept.
public boolean _useAllFactorLevels;
public int _nums;              // number of numeric columns
public int _cats;              // number of categorical columns
// Offsets of each categorical column into the expanded coefficient vector;
// last entry marks where the numeric section starts.
public int [] _catOffsets;
public int [] _catMissing;     // per-categorical missing-value counters
// Normalization arrays for predictors (sub then mul), null when not used.
public double [] _normMul;
public double [] _normSub;
// Normalization arrays for responses, null when not used.
public double [] _normRespMul;
public double [] _normRespSub;
public int _foldId;            // cross-validation fold this view represents
public int _nfolds;            // total number of cross-validation folds
public Key _frameKey;
public boolean _hasIntercept;  // whether the model includes an intercept term
/**
 * Deep copy via H2O serialization: writes this object into an AutoBuffer and
 * reads it back into a fresh instance.
 */
public DataInfo deep_clone() {
    AutoBuffer ab = new AutoBuffer();
    this.write(ab);
    ab.flipForReading();
    return new DataInfo().read(ab);
}

// Private no-arg constructor used only by deserialization (deep_clone).
private DataInfo() {_catLvls = null; _hasIntercept = true;}

// Fold view: shares all layout arrays with the parent DataInfo, differing
// only in the fold bookkeeping fields.
private DataInfo(DataInfo dinfo, int foldId, int nfolds){
    assert dinfo._catLvls == null:"Should not be called with filtered levels (assuming the selected levels may change with fold id) ";
    _predictor_transform = dinfo._predictor_transform;
    _response_transform = dinfo._response_transform;
    _responses = dinfo._responses;
    _nums = dinfo._nums;
    _cats = dinfo._cats;
    _adaptedFrame = dinfo._adaptedFrame;
    _catOffsets = dinfo._catOffsets;
    _catMissing = dinfo._catMissing;
    _normMul = dinfo._normMul;
    _normSub = dinfo._normSub;
    _normRespMul = dinfo._normRespMul;
    _normRespSub = dinfo._normRespSub;
    _foldId = foldId;
    _nfolds = nfolds;
    _useAllFactorLevels = dinfo._useAllFactorLevels;
    _catLvls = null;
    _hasIntercept = dinfo._hasIntercept;
}
/**
 * Constructs a DataInfo with caller-supplied normalization arrays. Delegates
 * to the main constructor (which allocates the arrays) and then overwrites
 * the allocated arrays with the provided values.
 */
public DataInfo(Frame fr, int hasResponses, boolean hasIntercept, boolean useAllFactorLvls, double [] normSub, double [] normMul, TransformType predictor_transform, double [] normRespSub, double [] normRespMul){
    this(fr, hasResponses, hasIntercept, useAllFactorLvls,
         normMul != null && normSub != null ? predictor_transform : TransformType.NONE, //just allocate, doesn't matter whether standardize or normalize is used (will be overwritten below)
         normRespMul != null && normRespSub != null ? TransformType.STANDARDIZE : TransformType.NONE);
    // sub/mul arrays must be supplied (or omitted) together.
    assert (normSub == null) == (normMul == null);
    assert (normRespSub == null) == (normRespMul == null);
    if(normSub != null) {
        System.arraycopy(normSub, 0, _normSub, 0, normSub.length);
        System.arraycopy(normMul, 0, _normMul, 0, normMul.length);
    }
    if(normRespSub != null) {
        System.arraycopy(normRespSub, 0, _normRespSub, 0, normRespSub.length);
        System.arraycopy(normRespMul, 0, _normRespMul, 0, normRespMul.length);
    }
}
// Per-categorical-column subsets of factor levels actually used (null = all).
final int [][] _catLvls;

/**
 * Apply data transformation on the given column.
 *
 * @param c - index into the fully expanded vector
 * @param v - value of the column to be transformed
 * @return v transformed by the transformation (e.g. standardization) defined by this dataset for this column
 */
public double applyTransform(int c, double v){
    // Only numeric columns (those past the last categorical offset) are transformed.
    if(c >= _catOffsets[_catOffsets.length-1]) {
        // NOTE(review): this rebases the expanded index by _cats, which is only
        // the numeric offset when each categorical contributes one slot;
        // confirm against numStart()/_catOffsets semantics.
        c -= _cats;
        if (_normSub != null) v -= _normSub[c];
        if (_normMul != null) v *= _normMul[c];
    }
    return v;
}
/**
 * Prepare a Frame (with a single response) to be processed by the FrameTask
 * 1) Place response at the end
 * 2) (Optionally) Remove columns with constant values or with greater than 20% NaNs
 * 3) Possibly turn integer categoricals into enums
 *
 * @param source A frame to be expanded and sanity checked
 * @param response (should be part of source)
 * @param toEnum Whether or not to turn categoricals into enums
 * @param dropConstantCols Whether or not to drop constant columns
 * @param dropNACols Whether or not to drop columns with too many missing values
 * @return Frame to be used by FrameTask
 */
public static Frame prepareFrame(Frame source, Vec response, int[] ignored_cols, boolean toEnum, boolean dropConstantCols, boolean dropNACols) {
    // Single-response convenience overload: wrap the response in an array.
    return prepareFrame(source,response != null?new Vec[]{response}:null,ignored_cols,toEnum,dropConstantCols,dropNACols);
}
/**
 * Prepares a frame for FrameTask processing: removes ignored columns, moves
 * the response columns to the end (optionally converting them to enums), and
 * drops constant columns and columns with more than 20% missing values.
 * The source frame itself is not modified.
 */
public static Frame prepareFrame(Frame source, Vec [] response, int[] ignored_cols, boolean toEnum, boolean dropConstantCols, boolean dropNACols) {
    Frame fr = new Frame(Key.makeSystem(Key.make().toString()), source._names.clone(), source.vecs().clone());
    if(ignored_cols != null && !Utils.isSorted(ignored_cols))
        Arrays.sort(ignored_cols);
    // A response column must not also be marked as ignored.
    if(response != null && ignored_cols != null)
        for(Vec v:response){
            int id = source.find(v);
            if(Arrays.binarySearch(ignored_cols,id) >= 0)
                throw new IllegalArgumentException("Column can not be both ignored and used as a response.");
        }
    if (ignored_cols != null) fr.remove(ignored_cols);
    final Vec[] vecs = fr.vecs();
    // compute rollupstats (min/max/NA counts) in parallel
    Futures fs = new Futures();
    for (Vec v : vecs) v.rollupStats(fs);
    fs.blockForPending();
    // put response to the end (if not already)
    if (response != null) {
        for(Vec v:response){
            int id = fr.find(v);
            final String n = fr._names[id];
            if (toEnum && !vecs[id].isEnum()) fr.add(n, fr.remove(id).toEnum()); //convert int classes to enums
            else fr.add(n, fr.remove(id));
        }
    }
    ArrayList<Integer> constantOrNAs = new ArrayList<Integer>();
    {
        ArrayList<Integer> constantCols = new ArrayList<Integer>();
        ArrayList<Integer> NACols = new ArrayList<Integer>();
        // Skip the last column (the response) when scanning for droppable columns.
        for(int i = 0; i < vecs.length-1; ++i) {
            // remove constant cols and cols with too many NAs
            final boolean dropconstant = dropConstantCols && vecs[i].min() == vecs[i].max();
            // FIX: the threshold was `length()*1`, which can never be exceeded
            // (naCnt <= length), silently disabling dropNACols. Use the 20%
            // threshold documented above and used by the other overload.
            final boolean droptoomanyNAs = dropNACols && vecs[i].naCnt() > vecs[i].length()*0.2;
            if(dropconstant) {
                constantCols.add(i);
            } else if (droptoomanyNAs) {
                NACols.add(i);
            }
        }
        constantOrNAs.addAll(constantCols);
        constantOrNAs.addAll(NACols);
        // Report what is dropped
        String msg = "";
        if (constantCols.size() > 0) msg += "Dropping constant column(s): ";
        for (int i : constantCols) msg += fr._names[i] + " ";
        if (NACols.size() > 0) msg += "Dropping column(s) with too many missing values: ";
        for (int i : NACols) msg += fr._names[i] + " (" + String.format("%.2f", vecs[i].naCnt() * 100. / vecs[i].length()) + "%) ";
        for (String s : msg.split("\n")) Log.info(s);
    }
    if(!constantOrNAs.isEmpty()){
        int [] cols = new int[constantOrNAs.size()];
        for(int i = 0; i < cols.length; ++i)
            cols[i] = constantOrNAs.get(i);
        fr.remove(cols);
    }
    return fr;
}
/**
 * Prepares a response-less frame for FrameTask processing: removes ignored
 * columns and drops constant columns and columns with more than 20% missing
 * values. The source frame itself is not modified.
 */
public static Frame prepareFrame(Frame source, int[] ignored_cols, boolean dropConstantCols, boolean dropNACols) {
    // Work on a private copy of the column list so the source stays untouched.
    Frame fr = new Frame(Key.makeSystem(Key.make().toString()), source._names.clone(), source.vecs().clone());
    if (ignored_cols != null) fr.remove(ignored_cols);
    final Vec[] vecs = fr.vecs();
    // Force rollup statistics (min/max/NA counts) to be computed in parallel.
    Futures rollups = new Futures();
    for (Vec v : vecs) v.rollupStats(rollups);
    rollups.blockForPending();
    ArrayList<Integer> dropped = new ArrayList<Integer>();
    {
        ArrayList<Integer> constCols = new ArrayList<Integer>();
        ArrayList<Integer> naCols = new ArrayList<Integer>();
        for (int i = 0; i < vecs.length; ++i) {
            // Constant columns carry no information; >20% NAs is too sparse.
            if (dropConstantCols && vecs[i].min() == vecs[i].max())
                constCols.add(i);
            else if (dropNACols && vecs[i].naCnt() > vecs[i].length() * 0.2)
                naCols.add(i);
        }
        dropped.addAll(constCols);
        dropped.addAll(naCols);
        // Log what is being dropped (messages preserved verbatim).
        String msg = "";
        if (constCols.size() > 0) msg += "Dropping constant column(s): ";
        for (int i : constCols) msg += fr._names[i] + " ";
        if (naCols.size() > 0) msg += "Dropping column(s) with too many missing values: ";
        for (int i : naCols) msg += fr._names[i] + " (" + String.format("%.2f", vecs[i].naCnt() * 100. / vecs[i].length()) + "%) ";
        for (String s : msg.split("\n")) Log.info(s);
    }
    if (!dropped.isEmpty()) {
        int[] cols = new int[dropped.size()];
        for (int i = 0; i < cols.length; ++i)
            cols[i] = dropped.get(i);
        fr.remove(cols);
    }
    return fr;
}
  /** Convenience overload: prepare a frame without dropping high-NA columns. */
  public static Frame prepareFrame(Frame source, Vec response, int[] ignored_cols, boolean toEnum, boolean dropConstantCols) {
    return prepareFrame(source, response, ignored_cols, toEnum, dropConstantCols, false);
  }
  /** Convenience constructor: no response transform (TransformType.NONE). */
  public DataInfo(Frame fr, int nResponses, boolean hasIntercept, boolean useAllFactors, TransformType predictor_transform) {
    this(fr, nResponses, hasIntercept, useAllFactors, predictor_transform, TransformType.NONE);
  }
//new DataInfo(f,catLvls, _responses, _standardize, _response_transform);
private DataInfo(Frame fr, int[][] catLevels, int responses, boolean hasIntercept, TransformType predictor_transform, TransformType response_transform, int foldId, int nfolds){
_hasIntercept = hasIntercept;
_adaptedFrame = fr;
_catOffsets = MemoryManager.malloc4(catLevels.length+1);
_catMissing = new int[catLevels.length];
int s = 0;
// compute rollupstats in parallel
Futures fs = new Futures();
for (Vec v : fr.vecs()) v.rollupStats(fs);
fs.blockForPending();
for(int i = 0; i < catLevels.length; ++i){
_catOffsets[i] = s;
s += catLevels[i].length;
}
_catLvls = catLevels;
_catOffsets[_catOffsets.length-1] = s;
_responses = responses;
_cats = catLevels.length;
_nums = fr.numCols()-_cats - responses;
_predictor_transform = predictor_transform;
if(_nums > 0){
switch(_predictor_transform) {
case STANDARDIZE:
_normMul = MemoryManager.malloc8d(_nums);
_normSub = MemoryManager.malloc8d(_nums);
for (int i = 0; i < _nums; ++i) {
Vec v = fr.vec(catLevels.length+i);
_normMul[i] = (v.sigma() != 0)?1.0/v.sigma():1.0;
_normSub[i] = v.mean();
}
break;
case NORMALIZE:
_normMul = MemoryManager.malloc8d(_nums);
_normSub = MemoryManager.malloc8d(_nums);
for (int i = 0; i < _nums; ++i) {
Vec v = fr.vec(catLevels.length+i);
_normMul[i] = (v.max() - v.min() > 0)?1.0/(v.max() - v.min()):1.0;
_normSub[i] = v.mean();
}
break;
case DESCALE:
_normSub = null;
_normMul = MemoryManager.malloc8d(_nums);;
for (int i = 0; i < _nums; ++i) {
Vec v = fr.vec(catLevels.length+i);
_normMul[i] = (v.sigma() != 0)?1.0/v.sigma():1.0;
}
break;
case DEMEAN:
_normMul = null;
_normSub = MemoryManager.malloc8d(_nums);
for (int i = 0; i < _nums; ++i) {
Vec v = fr.vec(catLevels.length+i);
_normSub[i] = v.mean();
}
break;
case NONE:
_normMul = null;
_normSub = null;
break;
default:
throw H2O.unimpl();
}
}
_response_transform = response_transform;
if(responses > 0){
switch(_response_transform) {
case STANDARDIZE:
_normRespMul = MemoryManager.malloc8d(responses);
_normRespSub = MemoryManager.malloc8d(responses);
for (int i = 0; i < responses; ++i) {
Vec v = fr.vec(fr.numCols()-responses+i);
_normRespSub[i] = (v.sigma() != 0)?1.0/v.sigma():1.0;
_normRespSub[i] = v.mean();
}
break;
case NORMALIZE:
_normRespMul = MemoryManager.malloc8d(responses);
_normRespSub = MemoryManager.malloc8d(responses);
for (int i = 0; i < responses; ++i) {
Vec v = fr.vec(fr.numCols()-responses+i);
_normRespSub[i] = (v.max() - v.min() > 0)?1.0/(v.max() - v.min()):1.0;
_normRespSub[i] = v.mean();
}
break;
case DEMEAN:
_normRespMul = null;
_normRespSub = MemoryManager.malloc8d(responses);
for (int i = 0; i < responses; ++i) {
Vec v = fr.vec(fr.numCols()-responses+i);
_normRespSub[i] = v.mean();
}
break;
case NONE:
_normRespMul = null;
_normRespSub = null;
break;
default:
throw H2O.unimpl();
}
}
_useAllFactorLevels = false;
_adaptedFrame.reloadVecs();
_nfolds = nfolds;
_foldId = foldId;
}
public DataInfo(Frame fr, int nResponses, boolean hasIntercept, boolean useAllFactorLevels, TransformType predictor_transform, TransformType response_transform) {
_nfolds = _foldId = 0;
_predictor_transform = predictor_transform;
_response_transform = response_transform;
_responses = nResponses;
_useAllFactorLevels = useAllFactorLevels;
_catLvls = null;
_hasIntercept = hasIntercept;
final Vec [] vecs = fr.vecs();
// compute rollupstats in parallel
Futures fs = new Futures();
for (Vec v : vecs) v.rollupStats(fs);
fs.blockForPending();
final int n = vecs.length-_responses;
if (n < 1) throw new IllegalArgumentException("Training data must have at least one column.");
int [] nums = MemoryManager.malloc4(n);
int [] cats = MemoryManager.malloc4(n);
int nnums = 0, ncats = 0;
for(int i = 0; i < n; ++i){
if(vecs[i].isEnum())
cats[ncats++] = i;
else
nums[nnums++] = i;
}
_nums = nnums;
_cats = ncats;
// sort the cats in the decreasing order according to their size
for(int i = 0; i < ncats; ++i)
for(int j = i+1; j < ncats; ++j)
if(vecs[cats[i]].domain().length < vecs[cats[j]].domain().length){
int x = cats[i];
cats[i] = cats[j];
cats[j] = x;
}
Vec [] vecs2 = vecs.clone();
String [] names = fr._names.clone();
_catOffsets = MemoryManager.malloc4(ncats+1);
_catMissing = new int[ncats];
int len = _catOffsets[0] = 0;
for(int i = 0; i < ncats; ++i){
Vec v = (vecs2[i] = vecs[cats[i]]);
names[i] = fr._names[cats[i]];
_catMissing[i] = v.naCnt() > 0 ? 1 : 0; //needed for test time
_catOffsets[i+1] = (len += v.domain().length - (useAllFactorLevels?0:1) + (v.naCnt()>0?1:0)); //missing values turn into a new factor level
}
switch(predictor_transform) {
case STANDARDIZE:
case NORMALIZE:
_normSub = MemoryManager.malloc8d(nnums);
_normMul = MemoryManager.malloc8d(nnums); Arrays.fill(_normMul, 1);
break;
case DEMEAN:
_normSub = MemoryManager.malloc8d(nnums);
_normMul = null;
break;
case DESCALE:
_normMul = MemoryManager.malloc8d(nnums);
_normSub = null;
break;
case NONE:
_normSub = _normMul = null;
break;
default:
break;
}
for(int i = 0; i < nnums; ++i){
Vec v = (vecs2[i+ncats] = vecs[nums[i]]);
names[i+ncats] = fr._names[nums[i]];
switch(predictor_transform){
case STANDARDIZE:
_normSub[i] = v.mean();
_normMul[i] = v.sigma() != 0 ? 1.0/v.sigma() : 1.0;
break;
case NORMALIZE:
_normSub[i] = v.mean();
_normMul[i] = (v.max() - v.min() > 0)?1.0/(v.max() - v.min()):1.0;
break;
case DESCALE:
_normMul[i] = (v.sigma() != 0)?1.0/v.sigma():1.0;
break;
case DEMEAN:
_normSub[i] = v.mean();
break;
case NONE:
break;
default:
break;
}
}
if (_responses > 0) {
switch(response_transform){
case STANDARDIZE:
case NORMALIZE:
_normRespSub = MemoryManager.malloc8d(_responses);
_normRespMul = MemoryManager.malloc8d(_responses); Arrays.fill(_normRespMul, 1);
break;
case DEMEAN:
_normRespSub = MemoryManager.malloc8d(_responses);
_normRespMul = null;
break;
case NONE:
_normRespSub = _normRespMul = null;
break;
default:
throw H2O.unimpl();
}
for(int i = 0; i < _responses; ++i){
Vec v = (vecs2[nnums+ncats+i] = vecs[nnums+ncats+i]);
switch(response_transform){
case STANDARDIZE:
_normRespSub[i] = v.mean();
_normRespMul[i] = v.sigma() != 0 ? 1.0/v.sigma() : 1.0;
break;
case NORMALIZE:
_normRespSub[i] = v.mean();
_normRespMul[i] = (v.max() - v.min() > 0)?1.0/(v.max() - v.min()):1.0;
break;
case DEMEAN:
_normSub[i] = v.mean();
break;
case NONE:
break;
default:
throw H2O.unimpl();
}
}
}
_adaptedFrame = new Frame(names,vecs2);
_adaptedFrame.reloadVecs();
}
  /**
   * Build a new DataInfo restricted to the given EXPANDED column indices
   * (indices into the one-hot-expanded row, sorted ascending). Categorical
   * columns keep only the selected factor levels; categoricals/numerics with
   * no selected column are removed from the adapted frame.
   *
   * @param cols sorted expanded-column indices to keep; null means keep everything
   * @return a filtered DataInfo (or {@code this} when cols is null)
   */
  public DataInfo filterExpandedColumns(int [] cols){
    if(cols == null)return this;
    // i walks cols, j walks categorical columns, ignoredCnt counts dropped frame columns
    int i = 0, j = 0, ignoredCnt = 0;
    //public DataInfo(Frame fr, int hasResponses, boolean useAllFactorLvls, double [] normSub, double [] normMul, double [] normRespSub, double [] normRespMul){
    int [][] catLvls = new int[_cats][];
    int [] ignoredCols = MemoryManager.malloc4(_nums + _cats);
    // first do categoricals: translate expanded indices back into per-column level lists
    if(_catOffsets != null)
      while(i < cols.length && cols[i] < _catOffsets[_catOffsets.length-1]){
        int [] levels = MemoryManager.malloc4(_catOffsets[j+1] - _catOffsets[j]);
        int k = 0;
        while(i < cols.length && cols[i] < _catOffsets[j+1])
          levels[k++] = cols[i++]-_catOffsets[j];
        if(k > 0)
          catLvls[j] = Arrays.copyOf(levels, k);
        ++j;
      }
    // categoricals with no surviving level are dropped entirely
    for(int k =0; k < catLvls.length; ++k)
      if(catLvls[k] == null)ignoredCols[ignoredCnt++] = k;
    if(ignoredCnt > 0){
      // compact catLvls to only the surviving categoricals
      int [][] c = new int[_cats-ignoredCnt][];
      int y = 0;
      for (int[] catLvl : catLvls) if (catLvl != null) c[y++] = catLvl;
      assert y == c.length;
      catLvls = c;
    }
    // now numerics: any numeric whose expanded index is absent from cols is ignored
    int prev = j = 0;
    for(; i < cols.length; ++i){
      for(int k = prev; k < (cols[i]-numStart()); ++k ){
        ignoredCols[ignoredCnt++] = k+_cats;
        ++j;
      }
      prev = ++j;
    }
    // trailing numerics after the last kept column are ignored too
    for(int k = prev; k < _nums; ++k)
      ignoredCols[ignoredCnt++] = k+_cats;
    Frame f = new Frame(_adaptedFrame.names().clone(),_adaptedFrame.vecs().clone());
    if(ignoredCnt > 0) f.remove(Arrays.copyOf(ignoredCols,ignoredCnt));
    assert catLvls.length < f.numCols():"cats = " + catLvls.length + " numcols = " + f.numCols();
    return new DataInfo(f,catLvls, _responses, _hasIntercept, _predictor_transform, _response_transform, _foldId, _nfolds);
  }
  /** Intentionally empty string representation (avoids dumping the whole frame). */
  public String toString(){
    return "";
  }
  /** Create a view of this DataInfo restricted to cross-validation fold {@code foldId} of {@code nfolds}. */
  public DataInfo getFold(int foldId, int nfolds){
    return new DataInfo(this, foldId, nfolds);
  }
  /** Width of the fully expanded (one-hot) row: all categorical levels plus numerics. */
  public final int fullN(){return _nums + _catOffsets[_cats];}
  /** Expanded width of the largest categorical (they are sorted by decreasing size), or 0 if none. */
  public final int largestCat(){return _cats > 0?_catOffsets[1]:0;}
  /** Index of the first numeric column within the expanded row. */
  public final int numStart(){return _catOffsets[_cats];}
  /**
   * Names for the expanded coefficients: "col.level" per used factor level,
   * plus "col.missing(NA)" for categoricals containing NAs, followed by the
   * numeric column names. Length equals {@link #fullN()}.
   */
  public final String [] coefNames(){
    int k = 0;
    final int n = fullN();
    String [] res = new String[n];
    final Vec [] vecs = _adaptedFrame.vecs();
    for(int i = 0; i < _cats; ++i) {
      // level 0 is the dropped reference level unless all factor levels are used
      for (int j = _useAllFactorLevels ? 0 : 1; j < vecs[i]._domain.length; ++j)
        res[k++] = _adaptedFrame._names[i] + "." + vecs[i]._domain[j];
      if (vecs[i].naCnt() > 0) res[k++] = _adaptedFrame._names[i] + ".missing(NA)";
    }
    final int nums = n-k;
    System.arraycopy(_adaptedFrame._names, _cats, res, k, nums);
    return res;
  }
/**
* Normalize horizontalized categoricals to become probabilities per factor level.
* This is done with the SoftMax function.
* @param in input values
* @param out output values (can be the same as input)
*/
public final void softMaxCategoricals(float[] in, float[] out) {
if (_cats == 0) return;
if (!_useAllFactorLevels) throw new UnsupportedOperationException("All factor levels must be present for re-scaling with SoftMax.");
assert (in.length == out.length);
assert (in.length == fullN());
final Vec[] vecs = _adaptedFrame.vecs();
int k = 0;
for (int i = 0; i < _cats; ++i) {
final int factors = vecs[i]._domain.length;
final float max = Utils.maxValue(in, k, k + factors);
float scale = 0;
for (int j = 0; j < factors; ++j) {
out[k + j] = (float) Math.exp(in[k + j] - max);
scale += out[k + j];
}
for (int j = 0; j < factors; ++j)
out[k + j] /= scale;
k += factors;
}
assert(k == numStart());
}
/**
* Undo the standardization/normalization of numerical columns
* @param in input values
* @param out output values (can be the same as input)
*/
public final void unScaleNumericals(float[] in, float[] out) {
if (_nums == 0) return;
assert (in.length == out.length);
assert (in.length == fullN());
for (int k=numStart(); k < fullN(); ++k)
out[k] = in[k] / (float)_normMul[k-numStart()] + (float)_normSub[k-numStart()];
}
}
  /**
   * Fork this task. The frame must be the exact adapted frame held by _dinfo:
   * map() relies on its column ordering and normalization constants.
   */
  @Override
  public T dfork(Frame fr){
    assert fr == _dinfo._adaptedFrame;
    return super.dfork(fr);
  }
  /**
   * Override this to initialize at the beginning of chunk processing.
   * Default implementation does nothing.
   */
  protected void chunkInit(){}
  /**
   * Override this to do post-chunk processing work.
   * Default implementation does nothing.
   * @param n Number of processed rows
   */
  protected void chunkDone(long n){}
  /**
   * Extracts the values, applies standardization/normalization to numerics, adds appropriate offsets to categoricals,
   * and adapts response according to the CaseMode/CaseValue if set.
   *
   * Per surviving row it calls processRow() with the numeric values (normalized),
   * the active categorical indices (expanded-row offsets), and the (transformed)
   * response values. Rows can be repeated/subsampled via _useFraction, shuffled
   * within the chunk via _shuffle, and held out for the current fold via _dinfo.
   */
  @Override public final void map(Chunk [] chunks, NewChunk [] outputs){
    if(_jobKey != null && !Job.isRunning(_jobKey))throw new JobCancelledException();
    final int nrows = chunks[0]._len;
    final long offset = chunks[0]._start;
    chunkInit();
    // scratch buffers reused across rows
    double [] nums = MemoryManager.malloc8d(_dinfo._nums);
    int    [] cats = MemoryManager.malloc4(_dinfo._cats);
    double [] response = _dinfo._responses == 0 ? null : MemoryManager.malloc8d(_dinfo._responses);
    int start = 0;
    int end = nrows;
    Random skip_rng = null; //random generator for skipping rows
    //Example:
    // _useFraction = 0.8 -> 1 repeat with fraction = 0.8
    // _useFraction = 1.0 -> 1 repeat with fraction = 1.0
    // _useFraction = 1.1 -> 2 repeats with fraction = 0.55
    // _useFraction = 2.1 -> 3 repeats with fraction = 0.7
    // _useFraction = 3.0 -> 3 repeats with fraction = 1.0
    final int repeats = (int)Math.ceil(_useFraction);
    final float fraction = _useFraction / repeats;
    // NOTE(review): seeded from new Random() -> row sampling/shuffling is not reproducible across runs; confirm intent
    if (fraction < 1.0) skip_rng = water.util.Utils.getDeterRNG(new Random().nextLong());
    long[] shuf_map = null;
    if (_shuffle) {
      // shuffled index map over the local chunk rows
      shuf_map = new long[end-start];
      for (int i=0;i<shuf_map.length;++i)
        shuf_map[i] = start + i;
      Utils.shuffleArray(shuf_map, new Random().nextLong());
    }
    long num_processed_rows = 0;
    for(int rrr = 0; rrr < repeats; ++rrr) {
      OUTER:
      for(int rr = start; rr < end; ++rr){
        final int r = shuf_map != null ? (int)shuf_map[rr-start] : rr;
        final long lr = r + chunks[0]._start;
        // skip rows held out for this fold, and subsample when fraction < 1
        if ((_dinfo._nfolds > 0 && (lr % _dinfo._nfolds) == _dinfo._foldId)
                || (skip_rng != null && skip_rng.nextFloat() > fraction))continue;
        ++num_processed_rows; //count rows with missing values even if they are skipped
        for(Chunk c:chunks)if(skipMissing() && c.isNA0(r))continue OUTER; // skip rows with NAs!
        int i = 0, ncats = 0;
        for(; i < _dinfo._cats; ++i){
          int c;
          if (chunks[i].isNA0(r)) {
            cats[ncats++] = (_dinfo._catOffsets[i+1]-1); //missing value turns into extra (last) factor
          } else {
            c = (int) chunks[i].at80(r);
            if (_dinfo._catLvls != null) { // some levels are ignored?
              c = Arrays.binarySearch(_dinfo._catLvls[i], c);
              if (c >= 0)
                cats[ncats++] = c + _dinfo._catOffsets[i];
            } else if (_dinfo._useAllFactorLevels)
              cats[ncats++] = c + _dinfo._catOffsets[i];
            else if (c != 0)
              cats[ncats++] = c + _dinfo._catOffsets[i] - 1; // level 0 is the dropped reference level
          }
        }
        // numeric predictors: normalize with the precomputed sub/mul constants
        final int n = chunks.length- _dinfo._responses;
        for(;i < n;++i){
          double d = chunks[i].at0(r); //can be NA if skipMissing() == false
          if(_dinfo._normSub != null) d -= _dinfo._normSub[i- _dinfo._cats];
          if(_dinfo._normMul != null) d *= _dinfo._normMul[i- _dinfo._cats];
          nums[i- _dinfo._cats] = d;
        }
        // responses: transform, and drop rows with a missing response
        for(i = 0; i < _dinfo._responses; ++i) {
          response[i] = chunks[chunks.length- _dinfo._responses + i].at0(r);
          if (_dinfo._normRespSub != null) response[i] -= _dinfo._normRespSub[i];
          if (_dinfo._normRespMul != null) response[i] *= _dinfo._normRespMul[i];
          if(Double.isNaN(response[i]))continue OUTER; // skip rows without a valid response (no supervised training possible)
        }
        // per-row seed derived from global row position and repeat number
        long seed = offset + rrr*(end-start) + r;
        if (outputs != null && outputs.length > 0)
          processRow(seed, nums, ncats, cats, response, outputs);
        else
          processRow(seed, nums, ncats, cats, response);
      }
    }
    chunkDone(num_processed_rows);
  }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/GridSearch.java
|
package hex;
import hex.KMeans2.KMeans2Model;
import hex.KMeans2.KMeans2ModelView;
import hex.NeuralNet.NeuralNetModel;
import hex.drf.DRF.DRFModel;
import hex.gbm.GBM.GBMModel;
import hex.deeplearning.DeepLearningModel;
import hex.singlenoderf.SpeeDRFModel;
import hex.singlenoderf.SpeeDRFModelView;
import water.*;
import water.api.*;
import water.util.Utils;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
/**
 * Grid search job: runs a set of pre-built model-building jobs with bounded
 * parallelism (taken from jobs[0].gridParallelism()) and renders a progress/
 * results table sorted by prediction error.
 */
public class GridSearch extends Job {
  // Jobs to run; all are assumed to be the same algorithm with varied parameters.
  public Job[] jobs;
  public GridSearch(){
  }
  @Override protected void execImpl() {
    UKV.put(destination_key, this);
    int max = jobs[0].gridParallelism();
    // sliding window: keep at most 'max' jobs in flight, join them in submission order
    int head = 0, tail = 0;
    while( head < jobs.length && isRunning(self()) ) {
      if( tail - head < max && tail < jobs.length )
        jobs[tail++].fork();
      else {
        try {
          jobs[head++].get();
        } catch( Exception e ) {
          throw new RuntimeException(e);
        }
      }
    }
  }
  @Override protected void onCancelled() {
    // cancel all child jobs when the grid search itself is cancelled
    for( Job job : jobs )
      job.cancel();
  }
  @Override public float progress() {
    // NOTE(review): d starts at 0.1, not 0 — presumably to show non-zero progress immediately; confirm intent
    double d = 0.1;
    for( Job job : jobs )
      if(job.start_time > 0)
        d += job.progress();
    return Math.min(1f, (float) (d / jobs.length));
  }
  @Override public Response redirect() {
    String redirectName = new GridSearchProgress().href();
    return Response.redirect(this, redirectName, "job_key", job_key, "destination_key", destination_key);
  }
  /** Progress page: polls the child jobs and renders an HTML results table. */
  public static class GridSearchProgress extends Progress2 {
    static final int API_WEAVER = 1;
    static public DocGen.FieldDoc[] DOC_FIELDS;
    @API(help = "Jobs")
    public Job[] jobs;
    @API(help = "Prediction Errors")
    public double[] prediction_errors;
    @API(help = "State")
    public String[] job_state;
    @Override protected Response serve() {
      Response response = super.serve();
      if( destination_key != null ) {
        GridSearch grid = UKV.get(destination_key);
        if( grid != null )
          jobs = grid.jobs;
        updateErrors(null);
      }
      return response;
    }
    /**
     * Refresh prediction_errors/job_state from the current models.
     * @param infos when non-null, also collects one JobInfo per job for rendering
     */
    void updateErrors(ArrayList<JobInfo> infos) {
      if (jobs == null) return;
      prediction_errors = new double[jobs.length];
      job_state = new String[jobs.length];
      int i = 0;
      for( Job job : jobs ) {
        JobInfo info = new JobInfo();
        info._job = job;
        if(job.dest() != null){
          Object value = UKV.get(job.dest());
          info._model = value instanceof Model ? (Model) value : null;
          if( info._model != null ) {
            info._cm = info._model.cm();
            info._error = info._model.mse();
          }
        }
        // classifiers report the confusion-matrix error instead of MSE
        if( info._cm != null && (info._model == null || info._model.isClassifier()))
          info._error = info._cm.err();
        if (infos != null) infos.add(info);
        prediction_errors[i] = info._error;
        job_state[i] = info._job.state.toString();
        i++;
      }
    }
    @Override public boolean toHTML(StringBuilder sb) {
      if( jobs != null ) {
        DocGen.HTML.arrayHead(sb);
        sb.append("<tr class='warning'>");
        ArrayList<Argument> args = jobs[0].arguments();
        // Filter some keys to simplify UI
        args = (ArrayList<Argument>) args.clone();
        filter(args, "destination_key", "source", "cols", "ignored_cols", "ignored_cols_by_name", //
            "response", "classification", "validation");
        for (Argument arg : args) sb.append("<td><b>").append(arg._name).append("</b></td>");
        sb.append("<td><b>").append("run time").append("</b></td>");
        String perf = jobs[0].speedDescription();
        if( perf != null )
          sb.append("<td><b>").append(perf).append("</b></td>");
        sb.append("<td><b>").append("model key").append("</b></td>");
        sb.append("<td><b>").append("prediction error").append("</b></td>");
        sb.append("<td><b>").append("F1 score").append("</b></td>");
        sb.append("</tr>");
        ArrayList<JobInfo> infos = new ArrayList<JobInfo>();
        updateErrors(infos);
        // sort the table rows by ascending prediction error
        Collections.sort(infos, new Comparator<JobInfo>() {
          @Override public int compare(JobInfo a, JobInfo b) {
            return Double.compare(a._error, b._error);
          }
        });
        for( JobInfo info : infos ) {
          sb.append("<tr>");
          for( Argument a : args ) {
            try {
              Object value = a._field.get(info._job);
              String s;
              if( value instanceof int[] )
                s = Utils.sampleToString((int[]) value, 20);
              else if( value instanceof double[] )
                s = Utils.sampleToString((double[]) value, 20);
              else
                s = "" + value;
              sb.append("<td>").append(s).append("</td>");
            } catch( Exception e ) {
              throw new RuntimeException(e);
            }
          }
          String runTime = "Pending", speed = "";
          if( info._job.start_time != 0 ) {
            runTime = PrettyPrint.msecs(info._job.runTimeMs(), true);
            speed = perf != null ? PrettyPrint.msecs(info._job.speedValue(), true) : "";
          }
          sb.append("<td>").append(runTime).append("</td>");
          if( perf != null )
            sb.append("<td>").append(speed).append("</td>");
          String link = "";
          if( info._job.start_time != 0 && DKV.get(info._job.dest()) != null ) {
            link = info._job.dest().toString();
            if( info._model instanceof GBMModel )
              link = GBMModelView.link(link, info._job.dest());
            else if( info._model instanceof DRFModel )
              link = DRFModelView.link(link, info._job.dest());
            else if( info._model instanceof NeuralNetModel )
              link = NeuralNetModelView.link(link, info._job.dest());
            else if( info._model instanceof DeepLearningModel)
              link = DeepLearningModelView.link(link, info._job.dest());
            if( info._model instanceof KMeans2Model )
              link = KMeans2ModelView.link(link, info._job.dest());
            // NOTE(review): this 'else' binds to the SpeeDRFModel 'if' just below/above it, so for any
            // non-SpeeDRF model the link computed by the earlier chain is overwritten by Inspect2.link —
            // looks like missing braces around the fallback; confirm intended behavior before changing.
            if (info._model instanceof SpeeDRFModel)
              link = SpeeDRFModelView.link(link, info._job.dest());
            else
              link = Inspect2.link(link, info._job.dest());
          }
          sb.append("<td>").append(link).append("</td>");
          String err, f1 = "";
          if( info._cm != null && info._cm._arr != null) {
            err = String.format("%.2f", 100 * info._error) + "%";
            if (info._cm.isBinary()) f1 = String.format("%.4f", info._cm.F1());
          } else err = String.format("%.5f", info._error) ;
          sb.append("<td><b>").append(err).append("</b></td>");
          sb.append("<td><b>").append(f1).append("</b></td>");
          sb.append("</tr>");
        }
        DocGen.HTML.arrayTail(sb);
      }
      return true;
    }
    /** Per-job rendering record: job, its model (if any), confusion matrix and error. */
    static class JobInfo {
      Job _job;
      Model _model;
      ConfusionMatrix _cm;
      double _error = Double.POSITIVE_INFINITY;
    }
    /** Remove the named arguments from the UI table (in-place). */
    static void filter(ArrayList<Argument> args, String... names) {
      for( String name : names )
        for( int i = args.size() - 1; i >= 0; i-- )
          if( args.get(i)._name.equals(name) )
            args.remove(i);
    }
    @Override protected Response jobDone(final Key dst) {
      return Response.done(this);
    }
  }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/InsertMissingValues.java
|
package hex;
import water.Key;
import water.Request2;
import water.UKV;
import water.api.DocGen;
import water.fvec.Frame;
import water.fvec.FrameCreator;
import water.util.Log;
import water.util.RString;
import java.util.Random;
/**
 * Insert missing values into an existing frame (overwrite in-place).
 * Useful to test algorithm's ability to cope with missing values.
 */
public class InsertMissingValues extends Request2 {
  static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
  static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
  @API(help = "Key of frame to add missing values to", required = true, filter = Default.class, json=true)
  public Key key;
  @API(help = "Random number seed", filter = Default.class, json=true)
  public long seed = new Random().nextLong();
  @API(help = "Fraction of missing values", filter = Default.class, dmin = 1e-10, dmax = 1, json=true)
  public double missing_fraction = 0.01;
  /**
   * Validate the arguments and run the MissingInserter over the frame.
   * Fix: the old checks ({@code == 0} then {@code Math.abs(...) > 1}) let
   * negative fractions through; the range is now enforced as 0 < fraction <= 1.
   */
  @Override public Response serve() {
    try {
      if (missing_fraction <= 0 || missing_fraction > 1)
        throw new IllegalArgumentException("Missing fraction must be between 0 and 1.");
      if (key == null) throw new IllegalArgumentException("A valid key must be provided.");
      Frame fr = UKV.get(key);
      if (fr == null) throw new IllegalArgumentException("Frame " + key + " not found.");
      new FrameCreator.MissingInserter(seed, missing_fraction).doAll(fr);
      Log.info("Modified frame '" + key + "' : added " + missing_fraction * 100 + "% missing values.");
      return Response.done(this);
    } catch( Throwable t ) {
      return Response.error(t);
    }
  }
  /** Render a confirmation linking back to the modified frame's inspect page. */
  @Override public boolean toHTML( StringBuilder sb ) {
    Frame fr = UKV.get(key);
    if (fr==null) {
      return false;
    }
    RString aft = new RString("<a href='Inspect2.html?src_key=%$key'>%key</a>");
    aft.replace("key", key);
    DocGen.HTML.section(sb, "Inserted missing values into frame '" + aft.toString() + " done.");
    return true;
  }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/Interaction.java
|
package hex;
import water.*;
import water.api.DocGen;
import water.fvec.createInteractions;
import water.fvec.Frame;
import water.util.Log;
import water.util.RString;
import java.util.Arrays;
/**
 * Create new factors that represent interactions of the given factors
 * (either one higher-order interaction column, or all pairwise quadratic
 * interactions when 'pairwise' is set and 3+ factors are given).
 */
public class Interaction extends Job {
  static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
  static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
  @API(help = "Input data frame", required = true, filter = Default.class, json=true)
  public Frame source;
  @API(help = "Column indices (0-based) of factors for which interaction is to be computed", filter=colsNamesIdxFilter.class, displayName="Interaction columns")
  public int[] factors = new int[0];
  class colsNamesIdxFilter extends MultiVecSelect { public colsNamesIdxFilter() {super("source", MultiVecSelectType.NAMES_THEN_INDEXES); } }
  @API(help = "Whether to create pairwise quadratic interactions between factors (otherwise create one higher-order interaction). Only applicable if there are 3 or more factors.", required = false, filter = Default.class, json=true)
  public boolean pairwise = false;
  @API(help = "Max. number of factor levels in pair-wise interaction terms (if enforced, one extra catch-all factor will be made)", required = true, filter = Default.class, lmin = 1, lmax = Integer.MAX_VALUE, json=true)
  public int max_factors = 100;
  @API(help = "Min. occurrence threshold for factor levels in pair-wise interaction terms", required = true, filter = Default.class, lmin = 1, lmax = Integer.MAX_VALUE, json=true)
  public int min_occurrence = 1;
  // wall-clock duration of the interaction computation, for the report
  long _time;
  @Override public Response serve() {
    try {
      source.read_lock(self());
      // if (max_factors < 1) throw new IllegalArgumentException("max_factors must be >1.");
      if (factors.length == 0) throw new IllegalArgumentException("factors must be non-empty.");
      if (pairwise && factors.length < 3) Log.info("Ignoring the pairwise option, requires 3 or more factors.");
      // every selected column must be categorical
      for (int v: factors) {
        if (!source.vecs()[v].isEnum()) {
          throw new IllegalArgumentException("Column " + source.names()[v] + " is not a factor.");
        }
      }
      // default destination key encodes the source key and the factor indices
      if (destination_key == null) {
        String target = source._key.toString() + ".interaction.";
        target += "C" + factors[0];
        for (int i=1; i<factors.length; ++i) {
          target += "_C" + factors[i];
        }
        destination_key = Key.make(target);
      }
      Timer time = new Timer();
      final createInteractions in = new createInteractions(this);
      H2O.submitTask(in);
      in.join(); // block until the interaction frame is built
      _time = time.time();
      Log.info(report());
      return Response.done(this);
    } catch( Throwable t ) {
      return Response.error(t);
    } finally {
      source.unlock(self()); // always release the read lock
    }
  }
  /** Render a confirmation linking to the created interaction frame. */
  @Override public boolean toHTML( StringBuilder sb ) {
    Frame fr = UKV.get(dest());
    if (fr==null) {
      return false;
    }
    RString aft = new RString("<a href='Inspect2.html?src_key=%$key'>%key</a>");
    aft.replace("key", destination_key.toString());
    DocGen.HTML.section(sb, report() + "<br/>Frame '" + aft.toString() + "' contains the interaction feature(s).");
    return true;
  }
  /** One-line summary of what was created and how long it took. */
  private String report() {
    Frame res = UKV.get(dest());
    // NOTE(review): the " in" concatenation lacks a trailing space — presumably PrettyPrint.msecs
    // prepends one; confirm before changing the message text.
    if (!pairwise)
      return "Created interaction feature " + res.names()[0]
          + " (order: " + factors.length + ") with " + res.lastVec().domain().length + " factor levels"
          + " in" + PrettyPrint.msecs(_time, true);
    else
      return "Created " + res.numCols() + " pair-wise interaction features " + Arrays.deepToString(res.names())
          + " (order: 2) in" + PrettyPrint.msecs(_time, true);
  }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/KMeans2.java
|
package hex;
import water.*;
import water.Job.ColumnsJob;
import water.api.DocGen;
import water.api.Progress2;
import water.api.Request;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.util.Log;
import water.util.RString;
import water.util.Utils;
import java.util.ArrayList;
import java.util.Random;
import java.util.Arrays;
/**
* Scalable K-Means++ (KMeans||)<br>
* http://theory.stanford.edu/~sergei/papers/vldb12-kmpar.pdf<br>
* http://www.youtube.com/watch?v=cigXAxV3XcY
*/
public class KMeans2 extends ColumnsJob {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
static final String DOC_GET = "k-means";
public enum Initialization {
None, PlusPlus, Furthest
};
@API(help = "Cluster initialization: None - chooses initial centers at random; Plus Plus - choose first center at random, subsequent centers chosen from probability distribution weighted so that points further from first center are more likey to be selected; Furthest - chooses initial point at random, subsequent point taken as the point furthest from prior point.", filter = Default.class, json=true)
// public Initialization initialization = Initialization.None;
public Initialization initialization = Initialization.Furthest;
// default Initialization is Furthest. Better results for hard cases, especially with just one trial
// in the browser. PlusPlus can be biased, so Furthest can be best, again especially if just one trial
// Random should never be better than Furthest.
@API(help = "Number of clusters", required = true, filter = Default.class, lmin = 1, lmax = 100000, json=true)
public int k = 2;
@API(help = "Maximum number of iterations before stopping", required = true, filter = Default.class, lmin = 1, lmax = 100000, json=true)
public int max_iter = 100;
@API(help = "Whether data should be normalized", filter = Default.class, json=true)
public boolean normalize;
@API(help = "Seed for the random number generator", filter = Default.class, json=true)
public long seed = new Random().nextLong();
@API(help = "Drop columns with more than 20% missing values", filter = Default.class, json=true)
public boolean drop_na_cols = true;
// Number of categorical columns
private int _ncats;
// Number of reinitialization attempts for preventing empty clusters
transient private int reinit_attempts;
// Make a link that lands on this page
public static String link(Key k, String content) {
RString rs = new RString("<a href='KMeans2.query?source=%$key'>%content</a>");
rs.replace("key", k.toString());
rs.replace("content", content);
return rs.toString();
}
public KMeans2() {
description = "K-means";
}
// ----------------------
@Override public void execImpl() {
Frame fr;
KMeans2Model model = null;
try {
logStart();
source.read_lock(self());
if ( source.numRows() < k) throw new IllegalArgumentException("Cannot make " + k + " clusters out of " + source.numRows() + " rows.");
// Drop ignored cols and, if user asks for it, cols with too many NAs
fr = FrameTask.DataInfo.prepareFrame(source, ignored_cols, false, drop_na_cols);
// fr = source;
if (fr.numCols() == 0) throw new IllegalArgumentException("No columns left to work with.");
// Sort columns, so the categoricals are all up front. They use a
// different distance metric than numeric columns.
Vec vecs[] = fr.vecs();
final int N = vecs.length; // Feature count
int ncats=0, len=N;
while( ncats != len ) {
while( ncats < len && vecs[ncats].isEnum() ) ncats++;
while( len > 0 && !vecs[len-1].isEnum() ) len--;
if( ncats < len-1 ) fr.swap(ncats,len-1);
}
_ncats = ncats;
// The model to be built
model = new KMeans2Model(this, dest(), fr._key, fr.names(), fr.domains());
model.delete_and_lock(self());
// means are used to impute NAs
double[] means = new double[N];
for( int i = 0; i < N; i++ )
means[i] = vecs[i].mean();
// mults & means for normalization
double[] mults = null;
if( normalize ) {
mults = new double[N];
for( int i = 0; i < N; i++ ) {
double sigma = vecs[i].sigma();
mults[i] = normalize(sigma) ? 1.0 / sigma : 1.0;
}
}
// Initialize clusters
Random rand = Utils.getRNG(seed - 1);
double clusters[][]; // Normalized cluster centers
if( initialization == Initialization.None ) {
// Initialize all clusters to random rows. Get 3x the number needed
clusters = model.centers = new double[k*3][fr.numCols()];
for( double[] cluster : clusters )
randomRow(vecs, rand, cluster, means, mults);
// for( int i=0; i<model.centers.length; i++ ) {
// Log.info("random model.centers["+i+"]: "+Arrays.toString(model.centers[i]));
// }
// Recluster down to K normalized clusters.
clusters = recluster(clusters, rand);
} else {
clusters = new double[1][vecs.length];
// Initialize first cluster to random row
randomRow(vecs, rand, clusters[0], means, mults);
while( model.iterations < 5 ) {
// Sum squares distances to clusters
SumSqr sqr = new SumSqr(clusters,means,mults,_ncats).doAll(vecs);
// Log.info("iteration: "+model.iterations+" sqr: "+sqr._sqr);
// Sample with probability inverse to square distance
long randomSeed = (long) rand.nextDouble();
Sampler sampler = new Sampler(clusters, means, mults, _ncats, sqr._sqr, k * 3, randomSeed).doAll(vecs);
clusters = Utils.append(clusters,sampler._sampled);
// Fill in sample clusters into the model
if( !isRunning() ) return; // Stopped/cancelled
model.centers = denormalize(clusters, ncats, means, mults);
// see below. this is sum of squared error now
model.total_within_SS = sqr._sqr;
model.iterations++; // One iteration done
// Log.info("\nKMeans Centers during init models.iterations: "+model.iterations);
// for( int i=0; i<model.centers.length; i++ ) {
// Log.info("model.centers["+i+"]: "+Arrays.toString(model.centers[i]));
// }
// Log.info("model.total_within_SS: "+model.total_within_SS);
// Don't count these iterations as work for model building
model.update(self()); // Early version of model is visible
// Recluster down to K normalized clusters.
// makes more sense to recluster each iteration, since the weighted k*3 effect on sqr vs _sqr
// reflects the k effect on _sqr? ..if there are too many "centers" (samples) then _sqr (sum of all) is too
// big relative to sqr (possible new point, and we don't gather any more samples?
// (so the centers won't change during the init)
clusters = recluster(clusters, rand);
}
}
model.iterations = 0; // Reset iteration count
// ---
// Run the main KMeans Clustering loop
// Stop after enough iterations
boolean done;
LOOP:
for( ; model.iterations < max_iter; model.iterations++ ) {
if( !isRunning() ) return; // Stopped/cancelled
Lloyds task = new Lloyds(clusters,means,mults,_ncats, k).doAll(vecs);
// Pick the max categorical level for clusters' center
max_cats(task._cMeans,task._cats);
// Handle the case where some clusters go dry. Rescue only 1 cluster
// per iteration ('cause we only tracked the 1 worst row)
boolean badrow=false;
for( int clu=0; clu<k; clu++ ) {
if (task._rows[clu] == 0) {
// If we see 2 or more bad rows, just re-run Lloyds to get the
// next-worst row. We don't count this as an iteration, because
// we're not really adjusting the centers, we're trying to get
// some centers *at-all*.
if (badrow) {
Log.warn("KMeans: Re-running Lloyds to re-init another cluster");
model.iterations--; // Do not count against iterations
if (reinit_attempts++ < k) {
continue LOOP; // Rerun Lloyds, and assign points to centroids
} else {
reinit_attempts = 0;
break; //give up and accept empty cluster
}
}
long row = task._worst_row;
Log.warn("KMeans: Re-initializing cluster " + clu + " to row " + row);
data(clusters[clu] = task._cMeans[clu], vecs, row, means, mults);
task._rows[clu] = 1;
badrow = true;
}
}
// Fill in the model; denormalized centers
model.centers = denormalize(task._cMeans, ncats, means, mults);
model.size = task._rows;
model.within_cluster_variances = task._cSqr;
double ssq = 0; // sum squared error
for( int i=0; i<k; i++ ) {
ssq += model.within_cluster_variances[i]; // sum squared error all clusters
// model.within_cluster_variances[i] /= task._rows[i]; // mse per-cluster
}
// model.total_within_SS = ssq/fr.numRows(); // mse total
model.total_within_SS = ssq; //total within sum of squares
model.update(self()); // Update model in K/V store
reinit_attempts = 0;
// Compute change in clusters centers
double sum=0;
for( int clu=0; clu<k; clu++ )
sum += distance(clusters[clu],task._cMeans[clu],ncats);
sum /= N; // Average change per feature
Log.info("KMeans: Change in cluster centers="+sum);
done = ( sum < 1e-6 || model.iterations == max_iter-1);
if (done) {
Log.info("Writing clusters to key " + model._clustersKey);
Clusters cc = new Clusters();
cc._clusters = clusters;
cc._means = means;
cc._mults = mults;
cc.doAll(1, vecs);
Frame fr2 = cc.outputFrame(model._clustersKey, new String[]{"Cluster ID"}, new String[][]{Utils.toStringMap(0, cc._clusters.length - 1)});
fr2.delete_and_lock(self()).unlock(self());
break;
}
clusters = task._cMeans; // Update cluster centers
StringBuilder sb = new StringBuilder();
sb.append("KMeans: iter: ").append(model.iterations).append(", MSE=").append(model.total_within_SS);
for( int i=0; i<k; i++ )
sb.append(", ").append(task._cSqr[i]).append("/").append(task._rows[i]);
Log.info(sb);
}
} catch( Throwable t ) {
t.printStackTrace();
cancel(t);
} finally {
remove(); // Remove Job
if( model != null ) model.unlock(self());
source.unlock(self());
state = UKV.<Job>get(self()).state;
new TAtomic<KMeans2Model>() {
@Override
public KMeans2Model atomic(KMeans2Model m) {
if (m != null) m.get_params().state = state;
return m;
}
}.invoke(dest());
}
}
// While the job is running, send API callers to the progress page for this job.
@Override protected Response redirect() {
return KMeans2Progress.redirect(this, job_key, destination_key);
}
// Progress page shown while a KMeans2 job runs; once the job finishes it
// redirects to the model view page.
public static class KMeans2Progress extends Progress2 {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
// Called when the tracked job completes; forward to the model view.
@Override protected Response jobDone(Key dst) {
return KMeans2ModelView.redirect(this, destination_key);
}
public static Response redirect(Request req, Key job_key, Key destination_key) {
return Response.redirect(req, new KMeans2Progress().href(), JOB_KEY, job_key, DEST_KEY, destination_key);
}
}
// REST/HTML view of a fitted KMeans2 model: renders cluster centers, sizes,
// per-cluster within-SS, totals, and a link to the row-by-row assignments.
public static class KMeans2ModelView extends Request2 {
  static final int API_WEAVER = 1;
  static public DocGen.FieldDoc[] DOC_FIELDS;

  @API(help = "KMeans2 Model", json = true, filter = Default.class)
  public KMeans2Model model;

  @API(help="KMeans2 Model Key", required = true, filter = KMeans2Filter.class)
  Key _modelKey;
  class KMeans2Filter extends H2OKey { public KMeans2Filter() { super("",true); } }

  /** Build an HTML link to this view for the given model key. */
  public static String link(String txt, Key model) {
    return "<a href='" + new KMeans2ModelView().href() + ".html?_modelKey=" + model + "'>" + txt + "</a>";
  }

  public static Response redirect(Request req, Key model) {
    return Response.redirect(req, "/2/KMeans2ModelView", "_modelKey", model);
  }

  @Override protected Response serve() {
    // NOTE(review): NPEs if the key is missing from the DKV; left unchanged to
    // preserve the existing error behavior.
    model = DKV.get(_modelKey).get();
    return Response.done(this);
  }

  /** Render the model; returns false (nothing rendered) until results exist. */
  @Override public boolean toHTML(StringBuilder sb) {
    if( model != null && model.centers != null && model.within_cluster_variances != null) {
      model.parameters.makeJsonBox(sb);
      DocGen.HTML.section(sb, "Cluster Centers: ");
      table(sb, "Clusters", model._names, model.centers);
      // Per-cluster within-SS rendered as a one-column table.
      double[][] rows = new double[model.within_cluster_variances.length][1];
      for( int i = 0; i < rows.length; i++ )
        rows[i][0] = model.within_cluster_variances[i];
      columnHTMLlong(sb, "Cluster Size", model.size);
      DocGen.HTML.section(sb, "Cluster Variances: ");
      table(sb, "Clusters", new String[]{"Within Cluster Sum of Squares"}, rows);
      sb.append("<br />");
      DocGen.HTML.section(sb, "Overall Totals: ");
      double[] row = new double[]{model.total_within_SS};
      rowHTML(sb, new String[]{"Total Within Cluster Sum of Squares"}, row);
      DocGen.HTML.section(sb, "Cluster Assignments by Observation: ");
      RString rs = new RString("<a href='Inspect2.html?src_key=%$key'>%content</a>");
      rs.replace("key", model._key + "_clusters");
      rs.replace("content", "View the row-by-row cluster assignments");
      sb.append(rs.toString());
      return true;
    }
    return false;
  }

  /** One header row plus a single data row holding the given values. */
  private static void rowHTML(StringBuilder sb, String[] header, double[] ro) {
    sb.append("<span style='display: inline-block; '>");
    sb.append("<table class='table table-striped table-bordered'>");
    sb.append("<tr>");
    for (String aHeader : header) sb.append("<th>").append(aHeader).append("</th>");
    sb.append("</tr>");
    sb.append("<tr>");
    for (double row : ro) {
      sb.append("<td>").append(ElementBuilder.format(row)).append("</td>");
    }
    sb.append("</tr>");
    sb.append("</table></span>");
  }

  /** Single-column table of doubles with the given header. */
  private static void columnHTML(StringBuilder sb, String name, double[] rows) {
    sb.append("<span style='display: inline-block; '>");
    sb.append("<table class='table table-striped table-bordered'>");
    sb.append("<tr>");
    sb.append("<th>").append(name).append("</th>");
    sb.append("</tr>");
    // BUG FIX: the original opened an extra "<tr>" here that was never closed,
    // producing malformed nested rows; each value now gets exactly one row.
    for (double row : rows) {
      sb.append("<tr>");
      sb.append("<td>").append(ElementBuilder.format(row)).append("</td>");
      sb.append("</tr>");
    }
    sb.append("</table></span>");
  }

  /** Single-column table of longs (cluster sizes) with the given header. */
  private static void columnHTMLlong(StringBuilder sb, String name, long[] rows) {
    sb.append("<span style='display: inline-block; '>");
    sb.append("<table class='table table-striped table-bordered'>");
    sb.append("<tr>");
    sb.append("<th>").append(name).append("</th>");
    sb.append("</tr>");
    // BUG FIX: dropped the stray unclosed "<tr>" and iterate with a long loop
    // variable so counts are rendered as integers instead of being silently
    // widened to double and formatted as such.
    for (long row : rows) {
      sb.append("<tr>");
      sb.append("<td>").append(row).append("</td>");
      sb.append("</tr>");
    }
    sb.append("</table></span>");
  }

  /** Table with a leading row-index column and one column per name. */
  private static void table(StringBuilder sb, String title, String[] names, double[][] rows) {
    sb.append("<span style='display: inline-block;'>");
    sb.append("<table class='table table-striped table-bordered'>");
    sb.append("<tr>");
    sb.append("<th>").append(title).append("</th>");
    for( int i = 0; names != null && i < rows[0].length; i++ )
      sb.append("<th>").append(names[i]).append("</th>");
    sb.append("</tr>");
    for( int r = 0; r < rows.length; r++ ) {
      sb.append("<tr>");
      sb.append("<td>").append(r).append("</td>");
      for( int c = 0; c < rows[r].length; c++ )
        sb.append("<td>").append(ElementBuilder.format(rows[r][c])).append("</td>");
      sb.append("</tr>");
    }
    sb.append("</table></span>");
  }
}
// The fitted KMeans2 model: denormalized cluster centers plus fit statistics.
// Scoring (score0) returns the index of the nearest cluster center.
public static class KMeans2Model extends Model implements Progress {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
@API(help = "Model parameters")
private final KMeans2 parameters; // This is used purely for printing values out.
@API(help = "Cluster centers, always denormalized")
public double[][] centers;
@API(help = "Sum of within cluster sum of squares")
public double total_within_SS;
// @API(help = "Between cluster sum of square distances")
// public double between_cluster_SS;
// @API(help = "Total Sum of squares = total_within_SS + betwen_cluster_SS")
// public double total_SS;
@API(help = "Number of clusters")
public int k;
@API(help = "Numbers of observations in each cluster.")
public long[] size;
@API(help = "Whether data was normalized")
public boolean normalized;
@API(help = "Maximum number of iterations before stopping")
public int max_iter = 100;
@API(help = "Iterations the algorithm ran")
public int iterations;
@API(help = "Within cluster sum of squares per cluster")
public double[] within_cluster_variances; //Warning: See note below
//Note: The R wrapper interprets this as withinss (sum of squares), so that's what we compute here, and NOT the variances.
//FIXME: => wrong name, should be within_cluster_sum_of_squares, but leaving to be backward-compatible with REST API
// @API(help = "Between Cluster square distances per cluster")
// public double[] between_cluster_variances;
@API(help = "The row-by-row cluster assignments")
public final Key _clustersKey;
// Count of leading categorical columns; transient, so not serialized with the model.
private transient int _ncats;
public KMeans2Model(KMeans2 params, Key selfKey, Key dataKey, String names[], String domains[][]) {
super(selfKey, dataKey, names, domains, /* priorClassDistribution */ null, /* modelClassDistribution */ null);
_ncats = params._ncats;
parameters = params;
// only for backward-compatibility of JSON response
k = params.k;
normalized = params.normalize;
max_iter = params.max_iter;
_clustersKey = Key.make(selfKey.toString() + "_clusters");
}
@Override public final KMeans2 get_params() { return parameters; }
@Override public final Request2 job() { return get_params(); }
// KMeans has no residual MSE; report total within-cluster sum of squares instead.
@Override public double mse() { return total_within_SS; }
// Fraction of max_iter completed, capped at 1.
@Override public float progress() {
return Math.min(1f, iterations / (float) parameters.max_iter);
}
// Copy the row out of the chunks, then delegate to score0(double[],float[]).
@Override protected float[] score0(Chunk[] chunks, int rowInChunk, double[] tmp, float[] preds) {
assert chunks.length>=_names.length;
for( int i=0; i<_names.length; i++ )
tmp[i] = chunks[i].at0(rowInChunk);
return score0(tmp,preds);
}
// Prediction is the index of the nearest (denormalized) cluster center.
@Override protected float[] score0(double[] data, float[] preds) {
preds[0] = closest(centers,data,_ncats);
return preds;
}
@Override public int nfeatures() { return _names.length; }
@Override public boolean isSupervised() { return false; }
@Override public String responseName() { throw new IllegalArgumentException("KMeans doesn't have a response."); }
/** Remove any Model internal Keys */
@Override public Futures delete_impl(Futures fs) {
Lockable.delete(_clustersKey);
return fs;
}
}
// MRTask that writes, for every input row, the index of its nearest cluster
// into a new output column as a categorical level.  The IN fields must be
// filled by the caller before doAll().
public class Clusters extends MRTask2<Clusters> {
// IN
double[][] _clusters; // Cluster centers
double[] _means, _mults; // Normalization
// NOTE(review): _nnums appears unused in this class - confirm before removing.
int _ncats, _nnums;
@Override public void map(Chunk[] cs, NewChunk ncs) {
double[] values = new double[_clusters[0].length];
ClusterDist cd = new ClusterDist();
for (int row = 0; row < cs[0]._len; row++) {
// Impute/normalize the row exactly like during training, then assign it.
data(values, cs, row, _means, _mults);
closest(_clusters, values, _ncats, cd);
int clu = cd._cluster;
// Emit the assignment as an enum level, not a plain number.
ncs.addEnum(clu);
}
}
}
// -------------------------------------------------------------------------
// Total squared distance from every row to its nearest cluster center.
private static class SumSqr extends MRTask2<SumSqr> {
  // IN
  double[][] _clusters;          // normalized cluster centers
  double[] _means, _mults;       // per-column mean / scale for normalization
  final int _ncats;              // count of leading categorical columns
  // OUT
  double _sqr;                   // accumulated squared distance

  SumSqr( double[][] centers, double[] colMeans, double[] colMults, int nCategoricals ) {
    _clusters = centers;
    _means = colMeans;
    _mults = colMults;
    _ncats = nCategoricals;
  }

  @Override public void map(Chunk[] chks) {
    final double[] buf = new double[chks.length];
    final ClusterDist nearest = new ClusterDist();
    final int rows = chks[0].len();
    for( int r = 0; r < rows; r++ ) {
      data(buf, chks, r, _means, _mults);
      _sqr += minSqr(_clusters, buf, _ncats, nearest);
    }
    // Drop the inputs so they are not serialized back over the wire.
    _clusters = null;
    _means = _mults = null;
  }

  @Override public void reduce(SumSqr other) { _sqr += other._sqr; }
}
// -------------------------------------------------------------------------
// Sample rows with increasing probability the farther they are from any
// cluster.
private static class Sampler extends MRTask2<Sampler> {
// IN
double[][] _clusters;
double[] _means, _mults; // Normalization
final int _ncats;
final double _sqr; // Min-square-error
final double _probability; // Odds to select this point
final long _seed;
// OUT
double[][] _sampled; // New clusters
Sampler( double[][] clusters, double[] means, double[] mults, int ncats, double sqr, double prob, long seed ) {
_clusters = clusters;
_means = means;
_mults = mults;
_ncats = ncats;
_sqr = sqr;
_probability = prob;
_seed = seed;
}
@Override public void map(Chunk[] cs) {
double[] values = new double[cs.length];
ArrayList<double[]> list = new ArrayList<double[]>();
// Chunk-local RNG: offset the seed by the chunk's start row so results are
// reproducible and independent per chunk.
Random rand = Utils.getRNG(_seed + cs[0]._start);
ClusterDist cd = new ClusterDist();
for( int row = 0; row < cs[0].len(); row++ ) {
data(values, cs, row, _means, _mults);
double sqr = minSqr(_clusters, values, _ncats, cd);
// Accept a row with probability proportional to its squared distance
// relative to the overall total (_sqr), scaled by _probability (the
// caller passes k*3 to oversample candidate centers).
if( _probability * sqr > rand.nextDouble() * _sqr ) {
list.add(values.clone());
}
}
_sampled = new double[list.size()][];
list.toArray(_sampled);
// Drop the inputs so they are not serialized back to the caller.
_clusters = null;
_means = _mults = null;
}
@Override public void reduce(Sampler other){
_sampled = Utils.append(_sampled, other._sampled);
}
}
// One Lloyds iteration: assign every row to its nearest cluster in a single
// pass, accumulating per-cluster numeric-column means, categorical histograms,
// squared errors and row counts.  Also tracks the single worst-fitting row so
// an empty ("dry") cluster can later be re-seeded from it.
public static class Lloyds extends MRTask2<Lloyds> {
// IN
double[][] _clusters;
double[] _means, _mults; // Normalization
final int _ncats, _K;
// OUT
double[][] _cMeans; // Means for each cluster
long[/*K*/][/*ncats*/][] _cats; // Histogram of cat levels
double[] _cSqr; // Sum of squares for each cluster
long[] _rows; // Rows per cluster
long _worst_row; // Row with max err
double _worst_err; // Max-err-row's max-err
Lloyds( double[][] clusters, double[] means, double[] mults, int ncats, int K ) {
_clusters = clusters;
_means = means;
_mults = mults;
_ncats = ncats;
_K = K;
}
@Override public void map(Chunk[] cs) {
int N = cs.length;
assert _clusters[0].length==N;
_cMeans = new double[_K][N];
_cSqr = new double[_K];
_rows = new long[_K];
// Space for cat histograms; one counter per level per cat column per cluster.
_cats = new long[_K][_ncats][];
for( int clu=0; clu<_K; clu++ )
for( int col=0; col<_ncats; col++ )
_cats[clu][col] = new long[cs[col]._vec.cardinality()];
_worst_err = 0;
// Find closest cluster for each row
double[] values = new double[N];
ClusterDist cd = new ClusterDist();
for( int row = 0; row < cs[0].len(); row++ ) {
data(values, cs, row, _means, _mults);
closest(_clusters, values, _ncats, cd);
int clu = cd._cluster;
assert clu != -1; // No broken rows
_cSqr[clu] += cd._dist;
// Add values and increment counter for chosen cluster
for( int col = 0; col < _ncats; col++ )
_cats[clu][col][(int)values[col]]++; // Histogram the cats
for( int col = _ncats; col < N; col++ )
_cMeans[clu][col] += values[col];
_rows[clu]++;
// Track worst row (global row number = chunk start + local row)
if( cd._dist > _worst_err) { _worst_err = cd._dist; _worst_row = cs[0]._start+row; }
}
// Scale back down to local mean
for( int clu = 0; clu < _K; clu++ )
if( _rows[clu] != 0 ) Utils.div(_cMeans[clu],_rows[clu]);
// Drop the inputs so they are not serialized back to the caller.
_clusters = null;
_means = _mults = null;
}
@Override public void reduce(Lloyds mr) {
// Merge per-chunk partials: means combine via the row-count-weighted
// ("recursive") mean, so the merged result is exact for any chunk split.
for( int clu = 0; clu < _K; clu++ ) {
long ra = _rows[clu];
long rb = mr._rows[clu];
double[] ma = _cMeans[clu];
double[] mb = mr._cMeans[clu];
for( int c = 0; c < ma.length; c++ ) // Recursive mean
if( ra+rb > 0 ) ma[c] = (ma[c] * ra + mb[c] * rb) / (ra + rb);
}
Utils.add(_cats, mr._cats);
Utils.add(_cSqr, mr._cSqr);
Utils.add(_rows, mr._rows);
// track global worst-row
if( _worst_err < mr._worst_err) { _worst_err = mr._worst_err; _worst_row = mr._worst_row; }
}
}
// A pair result: nearest cluster index, and the square distance to it.
private static final class ClusterDist { int _cluster; double _dist; }
// Squared distance from 'point' to the nearest of all clusters.
private static double minSqr(double[][] clusters, double[] point, int ncats, ClusterDist cd) {
return closest(clusters, point, ncats, cd, clusters.length)._dist;
}
// Squared distance from 'point' to the nearest of the first 'count' clusters.
private static double minSqr(double[][] clusters, double[] point, int ncats, ClusterDist cd, int count) {
return closest(clusters,point,ncats,cd,count)._dist;
}
// Fill 'cd' with the nearest of all clusters; returns 'cd' for flow-coding.
private static ClusterDist closest(double[][] clusters, double[] point, int ncats, ClusterDist cd) {
return closest(clusters, point, ncats, cd, clusters.length);
}
/**
 * Squared distance between a cluster center and a data point, mixing
 * categorical and numeric columns.  The first {@code ncats} columns are
 * categorical and contribute 0/1 (equal/unequal); the remaining columns
 * contribute squared Euclidean distance.  NaN entries in {@code point} are
 * skipped, and the final sum is scaled up by {@code point.length / pts} so a
 * row with missing values is comparable to a fully-observed row ("as if" each
 * missing dimension carried the average error of the observed ones).
 * Package-private (like {@code closest(double[][],double[],int)}) so it can
 * be unit-tested.
 */
static double distance(double[] cluster, double[] point, int ncats) {
  double sqr = 0;            // Sum of dimensional distances
  int pts = point.length;    // Count of valid (non-NaN) points
  // Categorical columns first. Only equals/unequals matters (distance 0 or 1).
  for( int column = 0; column < ncats; column++ ) {
    double d = point[column];
    if( Double.isNaN(d) ) pts--;
    else if( d != cluster[column] )
      sqr += 1.0; // Manhatten distance
  }
  // Numeric column distance
  for( int column = ncats; column < cluster.length; column++ ) {
    double d = point[column];
    if( Double.isNaN(d) ) pts--; // Do not count
    else {
      double delta = d - cluster[column];
      sqr += delta * delta;
    }
  }
  // Scale distance by ratio of valid dimensions to all dimensions - since
  // we did not add any error term for the missing points, ratio up "as if"
  // each missing error term equals the average of the observed ones.
  // BUG FIX: the original computed 'point.length / pts' in *integer*
  // arithmetic, which truncates (e.g. 3/2 == 1) and under-scales rows with
  // missing values; divide in floating point instead.
  if( 0 < pts && pts < point.length )
    sqr *= (double) point.length / pts;
  return sqr;
}
/** Return both the nearest of the first {@code count} cluster centroids and
 *  the square-distance to it, recorded into {@code cd}. */
private static ClusterDist closest(double[][] clusters, double[] point, int ncats, ClusterDist cd, int count) {
  int best = -1;
  double bestSqr = Double.MAX_VALUE;
  for( int c = 0; c < count; c++ ) {
    final double sqr = distance(clusters[c], point, ncats);
    if( sqr < bestSqr ) {
      bestSqr = sqr;
      best = c;
    }
  }
  cd._cluster = best;  // Record nearest cluster
  cd._dist = bestSqr;  // Record square-distance
  return cd;           // Return for flow-coding
}
// For KMeansModel scoring; returns only the index of the closest cluster.
static int closest(double[][] clusters, double[] point, int ncats) {
  int best = -1;
  double bestSqr = Double.MAX_VALUE;
  for( int c = 0; c < clusters.length; c++ ) {
    double sqr = distance(clusters[c], point, ncats);
    if( sqr < bestSqr ) {
      best = c;
      bestSqr = sqr;
    }
  }
  return best;
}
// KMeans++ re-clustering
// Reduce an oversampled set of candidate centers down to exactly k, using the
// strategy selected by 'initialization'.  The first candidate is always kept.
private double[][] recluster(double[][] points, Random rand) {
double[][] res = new double[k][];
res[0] = points[0];
int count = 1;
ClusterDist cd = new ClusterDist();
switch( initialization ) {
case PlusPlus: { // k-means++
// Pick each next center with probability proportional to its squared
// distance from the centers chosen so far.
while( count < res.length ) {
double sum = 0;
for (double[] point1 : points) sum += minSqr(res, point1, _ncats, cd, count);
for (double[] point : points) {
if (minSqr(res, point, _ncats, cd, count) >= rand.nextDouble() * sum) {
res[count++] = point;
break;
}
}
}
break;
}
// if we oversampled for initialization=None, recluster using the Furthest criteria down to k
case None:
case Furthest: { // Takes cluster further from any already chosen ones
// Deterministic: always take the candidate farthest from the chosen set.
while( count < res.length ) {
double max = 0;
int index = 0;
for( int i = 0; i < points.length; i++ ) {
double sqr = minSqr(res, points[i], _ncats, cd, count);
if( sqr > max ) {
max = sqr;
index = i;
}
}
res[count++] = points[index];
}
break;
}
default: throw H2O.fail();
}
return res;
}
// Fill 'cluster' from a uniformly random row of the data, imputed and
// normalized the same way as every other point.
private void randomRow(Vec[] vecs, Random rand, double[] cluster, double[] means, double[] mults) {
long row = Math.max(0, (long) (rand.nextDouble() * vecs[0].length()) - 1);
data(cluster, vecs, row, means, mults);
}
// Whether a column with this standard deviation should be scaled; (near-)
// constant columns are skipped to avoid dividing by ~0.
private static boolean normalize(double sigma) {
// TODO unify handling of constant columns
return sigma > 1e-6;
}
// Pick most common cat level for each cluster_centers' cat columns
// (the "mean" of a categorical column is taken to be its modal level).
private static double[][] max_cats(double[][] clusters, long[][][] cats) {
int K = cats.length;
int ncats = cats[0].length;
for( int clu = 0; clu < K; clu++ )
for( int col = 0; col < ncats; col++ ) // Cats use max level for cluster center
clusters[clu][col] = Utils.maxIndex(cats[clu][col]);
return clusters;
}
// Undo normalization on a copy of the cluster centers; the input array is left
// untouched.  Categorical columns (the first 'ncats') were never normalized,
// so they pass through unchanged.  When mults is null no normalization was
// applied and the result is a plain deep copy.
private static double[][] denormalize(double[][] clusters, int ncats, double[] means, double[] mults) {
  final int K = clusters.length;
  final int N = clusters[0].length;
  double[][] out = new double[K][];
  for( int clu = 0; clu < K; clu++ ) {
    out[clu] = clusters[clu].clone();
    if( mults != null ) {      // Reverse normalization on numeric columns only
      for( int col = ncats; col < N; col++ )
        out[clu][col] = out[clu][col] / mults[col] + means[col];
    }
  }
  return out;
}
// Load one row from whole-column Vecs into 'values', imputing NAs and
// normalizing each column via data(double,...).
private static void data(double[] values, Vec[] vecs, long row, double[] means, double[] mults) {
for( int i = 0; i < values.length; i++ ) {
double d = vecs[i].at(row);
values[i] = data(d, i, means, mults, vecs[i].cardinality());
}
}
// Load one local-chunk row into 'values', imputing NAs and normalizing each
// column via data(double,...).
private static void data(double[] values, Chunk[] chks, int row, double[] means, double[] mults) {
for( int i = 0; i < values.length; i++ ) {
double d = chks[i].at0(row);
values[i] = data(d, i, means, mults, chks[i]._vec.cardinality());
}
}
/**
 * Takes mean if NaN, normalize if requested.
 * Numeric columns report cardinality -1: NaN is imputed with the column mean,
 * then the value is optionally centered/scaled.  Categorical columns: NaN is
 * imputed with the rounded mean, clamped to a valid level index; categoricals
 * are never normalized.
 */
private static double data(double d, int i, double[] means, double[] mults, int cardinality) {
if(cardinality == -1) {
if( Double.isNaN(d) )
d = means[i];
if( mults != null ) {
d -= means[i];
d *= mults[i];
}
} else {
// TODO: If NaN, then replace with majority class?
if(Double.isNaN(d))
d = Math.min(Math.round(means[i]), cardinality-1);
}
return d;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/LR2.java
|
package hex;
import water.*;
import water.api.DocGen;
import water.fvec.*;
import water.util.RString;
/**
 * Simple linear regression (y = beta0 + beta1*x) between two columns,
 * computed in three MapReduce passes.  Rows with an NA in either column are
 * skipped pairwise in every pass.
 */
public class LR2 extends Request2 {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
// This Request supports the HTML 'GET' command, and this is the help text
// for GET.
static final String DOC_GET = "Linear Regression between 2 columns";
@API(help="Data Frame", required=true, filter=Default.class)
Frame source;
@API(help="Column X", required=true, filter=LR2VecSelect.class)
Vec vec_x;
@API(help="Column Y", required=true, filter=LR2VecSelect.class)
Vec vec_y;
class LR2VecSelect extends VecSelect { LR2VecSelect() { super("source"); } }
@API(help="Pass 1 msec") long pass1time;
@API(help="Pass 2 msec") long pass2time;
@API(help="Pass 3 msec") long pass3time;
@API(help="nrows") long nrows;
@API(help="beta0") double beta0;
@API(help="beta1") double beta1;
@API(help="r-squared") double r2;
@API(help="SSTO") double ssto;
@API(help="SSE") double sse;
@API(help="SSR") double ssr;
@Override public Response serve() {
// Pass 1: compute sums & sums-of-squares
long start = System.currentTimeMillis();
CalcSumsTask lr1 = new CalcSumsTask().doAll(vec_x, vec_y);
long pass1 = System.currentTimeMillis();
pass1time = pass1 - start;
nrows = lr1._n;
// Pass 2: Compute squared errors
final double meanX = lr1._sumX/nrows;
final double meanY = lr1._sumY/nrows;
CalcSquareErrorsTasks lr2 = new CalcSquareErrorsTasks(meanX, meanY).doAll(vec_x, vec_y);
long pass2 = System.currentTimeMillis();
pass2time = pass2 - pass1;
ssto = lr2._YYbar;
// Compute the regression: slope/intercept from the normal equations.
beta1 = lr2._XYbar / lr2._XXbar;
beta0 = meanY - beta1 * meanX;
// Pass 3: residual and regression sums of squares for the fitted line.
CalcRegressionTask lr3 = new CalcRegressionTask(beta0, beta1, meanY).doAll(vec_x, vec_y);
long pass3 = System.currentTimeMillis();
pass3time = pass3 - pass2;
long df = nrows - 2;
// Coefficient of determination and standard errors of the coefficients.
r2 = lr3._ssr / lr2._YYbar;
double svar = lr3._rss / df;
double svar1 = svar / lr2._XXbar;
double svar0 = svar/nrows + meanX*meanX*svar1;
beta0stderr = Math.sqrt(svar0);
beta1stderr = Math.sqrt(svar1);
sse = lr3._rss;
ssr = lr3._ssr;
return Response.done(this);
}
// Pass 1: sums of X, Y and X^2 over rows where both values are present.
// NOTE(review): _sumX2 is accumulated but never read by serve(); appears
// vestigial - confirm before removing.
public static class CalcSumsTask extends MRTask2<CalcSumsTask> {
long _n; // Rows used
double _sumX,_sumY,_sumX2; // Sum of X's, Y's, X^2's
@Override public void map( Chunk xs, Chunk ys ) {
for( int i=0; i<xs._len; i++ ) {
double X = xs.at0(i);
double Y = ys.at0(i);
if( !Double.isNaN(X) && !Double.isNaN(Y)) {
_sumX += X;
_sumY += Y;
_sumX2+= X*X;
_n++;
}
}
}
@Override public void reduce( CalcSumsTask lr1 ) {
_sumX += lr1._sumX ;
_sumY += lr1._sumY ;
_sumX2+= lr1._sumX2;
_n += lr1._n;
}
}
// Pass 2: centered sums of squares and cross-products about the given means.
public static class CalcSquareErrorsTasks extends MRTask2<CalcSquareErrorsTasks> {
final double _meanX, _meanY;
double _XXbar, _YYbar, _XYbar;
CalcSquareErrorsTasks( double meanX, double meanY ) { _meanX = meanX; _meanY = meanY; }
@Override public void map( Chunk xs, Chunk ys ) {
for( int i=0; i<xs._len; i++ ) {
double Xa = xs.at0(i);
double Ya = ys.at0(i);
if(!Double.isNaN(Xa) && !Double.isNaN(Ya)) {
Xa -= _meanX;
Ya -= _meanY;
_XXbar += Xa*Xa;
_YYbar += Ya*Ya;
_XYbar += Xa*Ya;
}
}
}
@Override public void reduce( CalcSquareErrorsTasks lr2 ) {
_XXbar += lr2._XXbar;
_YYbar += lr2._YYbar;
_XYbar += lr2._XYbar;
}
}
// Pass 3: residual sum of squares (_rss = SSE) and regression sum of squares
// (_ssr = SSR) of the fitted line against the data.
public static class CalcRegressionTask extends MRTask2<CalcRegressionTask> {
final double _meanY;
final double _beta0, _beta1;
double _rss, _ssr;
CalcRegressionTask(double beta0, double beta1, double meanY) {_beta0=beta0; _beta1=beta1; _meanY=meanY;}
@Override public void map( Chunk xs, Chunk ys ) {
for( int i=0; i<xs._len; i++ ) {
double X = xs.at0(i); double Y = ys.at0(i);
if( !Double.isNaN(X) && !Double.isNaN(Y) ) {
double fit = _beta1*X + _beta0;
double rs = fit-Y;
_rss += rs*rs;
double sr = fit-_meanY;
_ssr += sr*sr;
}
}
}
@Override public void reduce( CalcRegressionTask lr3 ) {
_rss += lr3._rss;
_ssr += lr3._ssr;
}
}
/** Return the query link to this page */
public static String link(Key k, String content) {
RString rs = new RString("<a href='LR2.query?data_key=%$key'>%content</a>");
rs.replace("key", k.toString());
rs.replace("content", content);
return rs.toString();
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/Layer.java
|
package hex;
import water.*;
import water.api.DocGen;
import water.api.Request.API;
import water.fvec.Chunk;
import water.fvec.Vec;
import water.util.Utils;
import java.util.Random;
/**
* Neural network layer.
*
* @author cypof
*/
public abstract class Layer extends Iced {
static final int API_WEAVER = 1;
public static DocGen.FieldDoc[] DOC_FIELDS;
@API(help = "Number of neurons")
@ParamsSearch.Ignore
public int units;
public NeuralNet params;
// Layer state: activity, error
protected transient float[] _a, _e;
// Shared state: weights and biases (and their momenta)
protected transient float[] _w, _wm;
protected transient float[] _b, _bm;
// Previous and input layers
protected transient Layer _previous;
transient Input _input;
// Dropout (for input + hidden layers)
transient Dropout dropout;
/**
* Start of refactoring in specification & running data, for layers and trainers.
*/
/** Running training state; reports how many training samples were processed. */
static abstract class Training {
abstract long processed();
}
transient Training _training;
/**
* We need a way to encode a missing value in the neural net forward/back-propagation scheme.
* For simplicity and performance, we simply use the largest values to encode a missing value.
* If we run into exactly one of those values with regular neural net updates, then we're very
* likely also running into overflow problems, which will trigger a NaN somewhere, which will be
* caught and lead to automatic job cancellation.
*/
public static final int missing_int_value = Integer.MAX_VALUE; //encode missing label or target
public static final float missing_float_value = Float.MAX_VALUE; //encode missing input
/**
 * Helper class for dropout, only to be used from within a Layer
 */
public class Dropout {
private transient Random _rand;
private transient byte[] _bits; // one bit per unit: 1 = unit kept active
@Override
public String toString() {
String s = "Dropout: " + super.toString();
s += "\nRandom: " + _rand.toString();
s += "\nbits: ";
for (int i=0; i< _bits.length*8; ++i) s += unit_active(i) ? "1":"0";
s += "\n";
return s;
}
Dropout(int units) {
_bits = new byte[(units+7)/8]; // round up to whole bytes
_rand = new Random(0);
}
// for input layer
// Zero each activation independently with probability 'rate'.
public void randomlySparsifyActivation(float[] a, double rate, long seed) {
if (rate == 0) return;
setSeed(seed);
for( int i = 0; i < a.length; i++ )
if (_rand.nextFloat() < rate) a[i] = 0;
}
// for hidden layers
// Draw a fresh random bit mask (each bit kept with probability 1/2).
public void fillBytes(long seed) {
setSeed(seed);
_rand.nextBytes(_bits);
}
// Test bit 'o' of the mask: true when unit o stays active.
public boolean unit_active(int o) {
return (_bits[o / 8] & (1 << (o % 8))) != 0;
}
// Re-seed the shared RNG.  ORs fixed bit patterns into seeds whose high or
// low 32-bit half is tiny - presumably to avoid low-entropy seeds; TODO confirm.
private void setSeed(long seed) {
if ((seed >>> 32) < 0x0000ffffL) seed |= 0x5b93000000000000L;
if (((seed << 32) >>> 32) < 0x0000ffffL) seed |= 0xdb910000L;
_rand.setSeed(seed);
}
}
/** Clone the shared NeuralNet parameters into this layer, then finish wiring. */
public final void init(Layer[] ls, int index, NeuralNet p) {
params = (NeuralNet)p.clone();
init(ls, index, true);
}
// Wire this layer into the stack: allocate activations/errors and (optionally)
// weights, biases and their momentum buffers.
public void init(Layer[] ls, int index, boolean weights) {
// Per-layer learning rate: decayed by rate_decay for each layer past index 1.
params.rate *= Math.pow(params.rate_decay, index-1);
_a = new float[units];
// Only hidden layers carry an error buffer here; Input/Output are excluded.
if (!(this instanceof Output) && !(this instanceof Input)) {
_e = new float[units];
}
_previous = ls[index - 1];
_input = (Input) ls[0];
if (this instanceof MaxoutDropout || this instanceof TanhDropout || this instanceof RectifierDropout) {
dropout = new Dropout(units);
}
if( weights ) {
_w = new float[units * _previous.units];
_b = new float[units];
// Momentum buffers are only needed when momentum is enabled.
if( params.momentum_start != 0 || params.momentum_stable != 0 ) {
_wm = new float[_w.length];
_bm = new float[_b.length];
}
}
}
/**
 * Initializes the weight matrix from the configured distribution:
 * <ul>
 *   <li>UniformAdaptive: U(-r, r) with r = prefactor * sqrt(6 / (fan_in + fan_out)),
 *       cf. Glorot &amp; Bengio (AISTATS 2010),
 *       http://machinelearning.wustl.edu/mlpapers/paper_files/AISTATS2010_GlorotB10.pdf</li>
 *   <li>Uniform: U(-initial_weight_scale, initial_weight_scale)</li>
 *   <li>Normal: N(0, initial_weight_scale^2)</li>
 * </ul>
 * Biases are left untouched. No-op when this layer has no weights of its own.
 *
 * @param seed      random generator seed to use (deterministic initialization)
 * @param prefactor prefactor for adaptive initialization (typical value: 1.0)
 */
void randomize(long seed, double prefactor) {
  if (_w == null) return;
  final Random rng = water.util.Utils.getDeterRNG(seed);
  if (params.initial_weight_distribution == NeuralNet.InitialWeightDistribution.UniformAdaptive) {
    final double range = prefactor * Math.sqrt(6. / (_previous.units + units));
    for( int i = 0; i < _w.length; i++ )
      _w[i] = (float)uniformDist(rng, -range, range);
  }
  else {
    if (params.initial_weight_distribution == NeuralNet.InitialWeightDistribution.Uniform) {
      for (int i = 0; i < _w.length; i++) {
        _w[i] = (float)uniformDist(rng, -params.initial_weight_scale, params.initial_weight_scale);
      }
    } else if (params.initial_weight_distribution == NeuralNet.InitialWeightDistribution.Normal) {
      for (int i = 0; i < _w.length; i++) {
        // Zero-mean Gaussian scaled by the configured weight scale
        // (dropped the no-op "0 +" from the original).
        _w[i] = (float) (rng.nextGaussian() * params.initial_weight_scale);
      }
    }
  }
}
// TODO: Add "subset randomize" function
// int count = Math.min(15, _previous.units);
// double min = -.1f, max = +.1f;
// //double min = -1f, max = +1f;
// for( int o = 0; o < units; o++ ) {
// for( int n = 0; n < count; n++ ) {
// int i = rand.nextInt(_previous.units);
// int w = o * _previous.units + i;
// _w[w] = uniformDist(rand, min, max);
// }
// }
/** Releases per-layer resources; default is a no-op, subclasses override as needed. */
public void close() {
}
/**
 * Forward propagation: fill this layer's activations _a from _previous._a.
 * @param seed     seed for dropout decisions
 * @param training true during training (enables dropout), false when scoring
 */
protected abstract void fprop(long seed, boolean training);
/** Backward propagation: consume this layer's error and update weights/biases. */
protected abstract void bprop();
/**
 * Apply gradient g to unit u with rate r and momentum m.
 * Updates unit u's incoming weight row with L1/L2 regularization and optional
 * classical momentum, accumulates the back-propagated error into the previous
 * layer, enforces the max_w2 max-norm constraint, then updates the bias.
 *
 * @param u index of the unit in this layer
 * @param g gradient of the loss w.r.t. this unit's net input
 * @param r learning rate (caller already applied the (1 - momentum) correction)
 * @param m momentum coefficient
 */
final void bprop(int u, float g, float r, float m) {
  // only correct weights if the gradient is large enough
  if (params.fast_mode || (_w == null && params.l1 == 0.0 && params.l2 == 0.0)) {
    if (g == 0f) return;
  }
  final float l1 = (float)params.l1;
  final float l2 = (float)params.l2;
  double r2 = 0; // running squared L2 norm of unit u's incoming weights (for max_w2)
  final int off = u * _previous._a.length; // start of unit u's weight row
  for( int i = 0; i < _previous._a.length; i++ ) {
    int w = off + i;
    // Back-propagate the error into the previous layer, if it keeps an error buffer.
    if( _previous._e != null ) _previous._e[i] += g * _w[w];
    // fast_mode: skip weight updates for inactive (zero) inputs.
    if (params.fast_mode && _previous._a[i] == 0) continue;
    // Raw delta: gradient term minus L1 (sign) and L2 (shrinkage) penalties.
    float d = g * _previous._a[i] - Math.signum(_w[w]) * l1 - _w[w] * l2;
    // TODO finish per-weight acceleration, doesn't help for now
    // if( _wp != null && d != 0 ) {
    //   boolean sign = _wp[w] >= 0;
    //   double mult = Math.abs(_wp[w]);
    //   // If the gradient kept its sign, increase
    //   if( (d >= 0) == sign )
    //     mult += .05f;
    //   else {
    //     if( mult > 1 )
    //       mult *= .95f;
    //     else
    //       sign = !sign;
    //   }
    //   d *= mult;
    //   _wp[w] = sign ? mult : -mult;
    // }
    if( _wm != null ) { // classical momentum: d <- m * d_prev + d
      _wm[w] *= m;
      _wm[w] += d;
      d = _wm[w];
    }
    _w[w] += r * d;
    if (params.max_w2 != Double.POSITIVE_INFINITY) r2 += _w[w] * _w[w];
  }
  // Max-norm constraint: rescale unit u's incoming weight vector when its
  // squared norm exceeds max_w2.
  if( params.max_w2 != Double.POSITIVE_INFINITY && r2 > params.max_w2 ) { // C.f. Improving neural networks by preventing co-adaptation of feature detectors
    final float scale = Utils.approxSqrt((float)(params.max_w2 / r2));
    for( int i = 0; i < _previous._a.length; i++ ) _w[off + i] *= scale;
  }
  // Bias update: same momentum scheme, no regularization.
  float d = g;
  if( _bm != null ) {
    _bm[u] *= m;
    _bm[u] += d;
    d = _bm[u];
  }
  _b[u] += r * d;
}
/** Learning rate after annealing, as a function of the number of processed samples n. */
public float rate(long n) {
  final double annealed = params.rate / (1 + params.rate_annealing * n);
  return (float) annealed;
}
/**
 * Momentum coefficient after n processed samples: ramps linearly from
 * momentum_start to momentum_stable over momentum_ramp samples, then stays
 * at momentum_stable. Without a ramp, momentum_start is used throughout.
 */
public float momentum(long n) {
  if( params.momentum_ramp <= 0 )
    return (float) params.momentum_start;
  if( n >= params.momentum_ramp )
    return (float) params.momentum_stable;
  final double ramped = params.momentum_start
      + (params.momentum_stable - params.momentum_start) * n / params.momentum_ramp;
  return (float) ramped;
}
/**
 * Base class for input layers: keeps the row cursor (_pos) over the _len rows
 * of the data source and applies optional input dropout. Input layers have no
 * weights and nothing to back-propagate.
 */
public static abstract class Input extends Layer {
  @ParamsSearch.Ignore
  protected long _pos, _len; // current row cursor and total number of rows
  @Override public void init(Layer[] ls, int index, boolean weights) {
    _a = new float[units];
    dropout = new Dropout(units); // always created; only sparsifies if input_dropout_ratio > 0
  }
  /** Randomly zeroes input activations with probability params.input_dropout_ratio. */
  public void inputDropout(long seed) {
    double rate = params.input_dropout_ratio;
    seed += params.seed + 0x1337B4BE; // offset to decorrelate from hidden-layer dropout seeds
    dropout.randomlySparsifyActivation(_a, rate, seed);
  }
  @Override protected void bprop() {
    // There is nothing upstream of the input layer.
    throw new UnsupportedOperationException();
  }
  /** Advances the row cursor, wrapping to 0 after the last row; returns the new position. */
  public final long move() {
    return _pos = _pos == _len - 1 ? 0 : _pos + 1;
  }
}
/**
 * Input layer reading rows directly from a set of Vecs (whole-column access).
 * Categorical columns are expanded into multiple activations (see categories()),
 * numeric columns are standardized with means/variances computed on the
 * training set.
 */
public static class VecsInput extends Input {
  static final int API_WEAVER = 1;
  public static DocGen.FieldDoc[] DOC_FIELDS;
  public Vec[] vecs;
  @API(help = "Categorical classes identified on the training set")
  int[] categoricals_lens;
  @API(help = "Categorical minimums identified on the training set")
  int[] categoricals_mins;
  @API(help = "Normalisation stats used during training")
  double[] subs, muls; // per expanded column: value to subtract, factor to multiply
  transient Chunk[] _chunks; // per-vec chunk cache for the current row window
  @Override public Layer clone() {
    VecsInput o = (VecsInput) super.clone();
    // Give the clone its own (empty) chunk cache so clones don't share cursors.
    if( o._chunks != null )
      o._chunks = new Chunk[o._chunks.length];
    return o;
  }
  /**
   * @param vecs  columns to read from
   * @param train when non-null, reuse the categorical mapping and normalization
   *              stats computed on the training set (validation/scoring case)
   */
  public VecsInput(Vec[] vecs, VecsInput train) {
    Init(vecs, train);
  }
  public void Init(Vec[] vecs, VecsInput train) {
    // Number of expanded input units (categoricals contribute several each).
    units = train != null ? train.subs.length : expand(vecs);
    this.vecs = vecs;
    _len = vecs[0].length();
    if( train != null ) {
      int a = train.categoricals_lens.length;
      int b = vecs.length;
      assert a == b;
      categoricals_lens = train.categoricals_lens;
      categoricals_mins = train.categoricals_mins;
      assert train.subs.length == units;
      subs = train.subs;
      muls = train.muls;
    } else {
      categoricals_lens = new int[vecs.length];
      categoricals_mins = new int[vecs.length];
      for( int i = 0; i < vecs.length; i++ ) {
        categoricals_lens[i] = categories(vecs[i]);
        categoricals_mins[i] = (int) vecs[i].min();
      }
      subs = new double[units];
      muls = new double[units];
      stats(vecs);
    }
  }
  /** Expanded columns contributed by a vec: 1 for numeric, domain-1 for categorical. */
  static int categories(Vec vec) {
    String[] dom = vec.domain();
    return dom == null ? 1 : dom.length - 1; // first level is encoded implicitly (see ChunksInput.set)
  }
  /** Total number of expanded input columns over all vecs. */
  static int expand(Vec[] vecs) {
    int n = 0;
    for (Vec vec : vecs) n += categories(vec);
    return n;
  }
  /** Computes per-column mean (subs) and inverse stddev (muls) for standardization. */
  private void stats(Vec[] vecs) {
    Stats stats = new Stats();
    stats._units = units;
    stats._categoricals_lens = categoricals_lens;
    stats._categoricals_mins = categoricals_mins;
    stats.doAll(vecs);
    // NOTE(review): this loop runs to vecs.length, but subs/muls/_means are
    // sized to the expanded `units`; expanded categorical slots beyond index
    // vecs.length-1 appear to keep sub=0/mul=0 — confirm this is intended.
    for( int i = 0; i < vecs.length; i++ ) {
      subs[i] = stats._means[i];
      double sigma = Math.sqrt(stats._sigms[i] / (stats._rows - 1)); // sample stddev
      muls[i] = sigma > 1e-6 ? 1 / sigma : 1; // guard against (near-)constant columns
    }
  }
  @Override protected void fprop(long seed, boolean training) {
    if( _chunks == null )
      _chunks = new Chunk[vecs.length];
    // Refresh cached chunks whenever the row cursor leaves their row range.
    for( int i = 0; i < vecs.length; i++ ) {
      Chunk c = _chunks[i];
      if( c == null || c._vec != vecs[i] || _pos < c._start || _pos >= c._start + c._len )
        _chunks[i] = vecs[i].chunkForRow(_pos);
    }
    ChunksInput.set(_chunks, _a, (int) (_pos - _chunks[0]._start), subs, muls, categoricals_lens, categoricals_mins);
    if (training) inputDropout(seed);
  }
}
/**
 * Stats with expanded categoricals. Used to normalize the data in the input layer.
 * Computes, per expanded column, the mean and the sum of squared deviations,
 * merged across chunks with the parallel-variance combination formula.
 */
static class Stats extends MRTask2<Stats> {
  int _units; // number of expanded columns
  int[] _categoricals_lens, _categoricals_mins;
  double[] _means, _sigms; // per-column mean and sum of squared deviations
  long _rows;
  transient double[] _subs, _muls; // identity normalization (sub=0, mul=1) used during the stats pass
  @Override protected void setupLocal() {
    _subs = new double[_units];
    _muls = new double[_units];
    for( int i = 0; i < _muls.length; i++ )
      _muls[i] = 1;
  }
  @Override public void map(Chunk[] cs) {
    _means = new double[_units];
    _sigms = new double[_units];
    float[] a = new float[_means.length];
    // First pass: per-chunk column means of the expanded representation.
    for( int r = 0; r < cs[0]._len; r++ ) {
      ChunksInput.set(cs, a, r, _subs, _muls, _categoricals_lens, _categoricals_mins);
      for( int c = 0; c < a.length; c++ )
        _means[c] += a[c];
    }
    for( int c = 0; c < a.length; c++ )
      _means[c] /= cs[0]._len;
    // Second pass: per-chunk sum of squared deviations from the chunk mean.
    for( int r = 0; r < cs[0]._len; r++ ) {
      ChunksInput.set(cs, a, r, _subs, _muls, _categoricals_lens, _categoricals_mins);
      for( int c = 0; c < a.length; c++ )
        _sigms[c] += (a[c] - _means[c]) * (a[c] - _means[c]);
    }
    _rows += cs[0]._len;
  }
  @Override public void reduce(Stats rs) {
    reduce(_means, _sigms, _rows, rs._means, rs._sigms, rs._rows);
    _rows += rs._rows;
  }
  /**
   * Merges two partial (mean, sum-of-squared-deviations, count) triples in
   * place into (ma, sa, ra) using the pairwise/parallel variance update
   * (Chan et al.).
   */
  static void reduce(double[] ma, double[] sa, long ra, double[] mb, double[] sb, long rb) {
    for( int c = 0; c < ma.length; c++ ) {
      double delta = ma[c] - mb[c];
      ma[c] = (ma[c] * ra + mb[c] * rb) / (ra + rb); // count-weighted mean
      sa[c] = sa[c] + sb[c] + delta * delta * ra * rb / (ra + rb); // combined M2 term
    }
  }
  @Override public boolean logVerbose() {
    return !H2O.DEBUG;
  }
}
/**
 * A ChunksInput layer populates the activation values from a FVec chunk.
 * Missing values will lead to a 0 activation value in the input layer, which is equivalent to
 * setting it to the *average* column value before normalizing. In effect, missing column values are ignored.
 */
static class ChunksInput extends Input {
  transient Chunk[] _chunks;
  double[] _subs, _muls; // normalization stats borrowed from the training VecsInput
  int[] _categoricals_lens;
  int[] _categoricals_mins;
  public ChunksInput(Chunk[] chunks, VecsInput stats) {
    units = stats.subs.length;
    _chunks = chunks;
    _subs = stats.subs;
    _muls = stats.muls;
    _categoricals_lens = stats.categoricals_lens;
    _categoricals_mins = stats.categoricals_mins;
  }
  /**
   * forward propagation means filling the activation values with all the row's column values
   */
  @Override protected void fprop(long seed, boolean training) {
    set(_chunks, _a, (int) _pos, _subs, _muls, _categoricals_lens, _categoricals_mins);
    if (training) inputDropout(seed);
  }
  /**
   * Fills the activation array {@code a} from row {@code row} of {@code chunks}.
   * Numeric columns (catLens[i] == 1) are standardized: (x - sub) * mul.
   * Categorical columns are expanded over catLens[i] slots; the observed level
   * selects one slot via (level - catMins[i] - 1), so the lowest level sets no
   * slot (all-baseline encoding). Missing values always yield a 0 activation.
   * NOTE(review): the baseline slot value is -sub without the mul factor,
   * while the selected slot uses (1 - sub) * mul — confirm the asymmetry is
   * intended.
   */
  static void set(Chunk[] chunks, float[] a, int row, double[] subs, double[] muls, int[] catLens, int[] catMins) {
    int n = 0; // write cursor into the expanded activation array
    // loop over all columns
    for( int i = 0; i < catLens.length; i++ ) {
      final boolean missing = chunks[i].isNA0(row);
      double d = chunks[i].at0(row);
      if( catLens[i] == 1 ) {
        //numerical value: normalize
        d -= subs[n];
        d *= muls[n];
        a[n++] = missing ? 0f : (float)d;
      } else {
        // categorical values: use precomputed stats
        int cat = catLens[i];
        for( int c = 0; c < cat; c++ )
          a[n + c] = missing ? 0f : (float)-subs[n + c];
        int c = (int) d - catMins[i] - 1; // slot index; lowest level maps to -1 (no slot)
        if( c >= 0 )
          a[n + c] = missing ? 0f : (float)((1 - subs[n + c]) * muls[n + c]);
        n += cat;
      }
    }
    assert n == a.length;
  }
}
/**
 * Base class for output layers: exposes the input layer's current row cursor
 * so subclasses can look up the target value for that row.
 */
public static abstract class Output extends Layer {
  static final int API_WEAVER = 1;
  public static DocGen.FieldDoc[] DOC_FIELDS;
  /** Current row position of the input layer (global or chunk-local, depending on the Input used). */
  protected final long pos() {
    return _input._pos;
  }
}
/**
 * Softmax output layer is used for classification
 * Rows with missing values in the response column will be ignored
 **/
public static abstract class Softmax extends Output {
  /** @return class label for the current row, or missing_int_value when the response is NA. */
  protected abstract int target();
  @Override public void init(Layer[] ls, int index, boolean weights) {
    super.init(ls, index, weights);
    if( weights ) {
      // Larger prefactor (4.0) than the hidden layers' 1.0.
      randomize(params.seed + 0xBAD5EED + index, 4.0f);
    }
  }
  @Override protected void fprop(long seed, boolean training) {
    // Linear part: a = W * prev_a + b.
    for( int o = 0; o < _a.length; o++ ) {
      _a[o] = 0;
      for( int i = 0; i < _previous._a.length; i++ )
        _a[o] += _w[o * _previous._a.length + i] * _previous._a[i];
      _a[o] += _b[o];
    }
    // Softmax with max-subtraction for numerical stability of exp().
    final float max = Utils.maxValue(_a);
    float scale = 0;
    for( int o = 0; o < _a.length; o++ ) {
      _a[o] = (float)Math.exp(_a[o] - max);
      scale += _a[o];
    }
    for( int o = 0; o < _a.length; o++ )
      _a[o] /= scale;
  }
  @Override protected void bprop() {
    long processed = _training.processed();
    float m = momentum(processed);
    float r = rate(processed) * (1 - m);
    int label = target();
    if (label == missing_int_value) return; //ignore missing response values
    for( int u = 0; u < _a.length; u++ ) {
      final float targetval = (u == label ? 1f : 0f);
      float g = targetval - _a[u]; // softmax + cross-entropy gradient
      if (params.loss == NeuralNet.Loss.CrossEntropy) {
        //nothing else needed
      } else if (params.loss == NeuralNet.Loss.MeanSquare) {
        g *= (1 - _a[u]) * _a[u]; // extra softmax-derivative factor for squared loss
      }
      bprop(u, g, r, m);
    }
  }
}
/** Softmax output reading the class label from a whole Vec (global row index). */
public static class VecSoftmax extends Softmax {
  public Vec vec;
  private Vec _toClose; // enum-converted copy we own and must remove on close()
  VecSoftmax() {
  }
  public VecSoftmax(Vec vec, VecSoftmax stats) {
    // Waiting for Michal stuff, for now enum must start at 0
    // if( vec.domain() == null ) {
    //   vec = vec.toEnum();
    //   _toClose = vec;
    // }
    // Class count: from training stats, or inferred from the largest label value.
    this.units = stats != null ? stats.units : (int) (vec.max() + 1);
    this.vec = vec;
    params = stats != null ? (NeuralNet)stats.params.clone() : null;
  }
  @Override protected int target() {
    if( vec.isNA(_input._pos) )
      return missing_int_value; // missing response: row is skipped by bprop()
    return (int) vec.at8(_input._pos);
  }
  @Override public void close() {
    super.close();
    if( _toClose != null )
      UKV.remove(_toClose._key);
  }
}
/** Softmax output reading the class label from a single Chunk (chunk-local row index). */
static class ChunkSoftmax extends Softmax {
  transient Chunk _chunk;
  public ChunkSoftmax(Chunk chunk, VecSoftmax stats) {
    units = stats.units;
    _chunk = chunk;
    params = (NeuralNet)stats.params.clone();
  }
  @Override protected int target() {
    // _pos is local to this chunk here.
    final int row = (int) _input._pos;
    return _chunk.isNA0(row) ? missing_int_value : (int) _chunk.at80(row);
  }
}
/**
 * Linear output layer is used for regression
 * Rows with missing values in the response column will be ignored
 **/
public static abstract class Linear extends Output {
  /** @return regression target(s) for the current row; missing_float_value marks NAs. */
  abstract float[] target();
  @Override public void init(Layer[] ls, int index, boolean weights) {
    super.init(ls, index, weights);
    if( weights ) {
      randomize(params.seed + 0xBAD5EED + index, 1.0f);
    }
  }
  @Override protected void fprop(long seed, boolean training) {
    // Pure linear response: a = W * prev_a + b (no activation function).
    for( int o = 0; o < _a.length; o++ ) {
      _a[o] = 0;
      for( int i = 0; i < _previous._a.length; i++ )
        _a[o] += _w[o * _previous._a.length + i] * _previous._a[i];
      _a[o] += _b[o];
    }
  }
  @Override protected void bprop() {
    long processed = _training.processed();
    float m = momentum(processed);
    float r = rate(processed) * (1 - m);
    float[] v = target();
    // Only squared loss is supported for regression.
    assert(params.loss == NeuralNet.Loss.MeanSquare);
    for( int u = 0; u < _a.length; u++ ) {
      if (v[u] == missing_float_value) continue; //ignore missing regression targets
      float g = v[u] - _a[u]; // residual drives the squared-loss gradient
      bprop(u, g, r, m);
    }
  }
}
/** Regression output reading the target from a whole Vec (global row index). */
public static class VecLinear extends Linear {
  Vec _vec;
  transient float[] _values; // lazily-allocated single-element target buffer
  public VecLinear(Vec vec, VecLinear stats) {
    assert(stats == null || stats.units == 1);
    units = 1; //regression
    _vec = vec;
    params = stats != null ? (NeuralNet)stats.params.clone() : null;
  }
  @Override float[] target() {
    if( _values == null )
      _values = new float[units];
    long pos = _input._pos; //pos is a global index into the vector
    _values[0] = _vec.isNA(pos) ? missing_float_value : (float)_vec.at(pos);
    return _values;
  }
}
/** Regression output reading the target from a single Chunk (chunk-local row index). */
static class ChunkLinear extends Linear {
  transient Chunk _chunk;
  transient float[] _values; // lazily-allocated single-element target buffer
  public ChunkLinear(Chunk chunk, VecLinear stats) {
    assert(stats == null || stats.units == 1);
    units = 1;
    _chunk = chunk;
    params = (NeuralNet) (stats != null ? stats.params.clone() : null);
  }
  @Override float[] target() {
    if( _values == null )
      _values = new float[units];
    int pos = (int)_input._pos; //pos is a local index for this chunk
    _values[0] = _chunk.isNA0(pos) ? missing_float_value : (float)_chunk.at0(pos);
    return _values;
  }
}
/** Hidden layer with tanh activation. */
public static class Tanh extends Layer {
  public Tanh(int units) { this.units = units; }
  @Override public void init(Layer[] ls, int index, boolean weights) {
    super.init(ls, index, weights);
    if( weights ) {
      randomize(params.seed + 0xBAD5EED + index, 1.0f);
    }
  }
  @Override protected void fprop(long seed, boolean training) {
    for( int o = 0; o < _a.length; o++ ) {
      _a[o] = 0;
      // Skip units dropped for this pass (dropout is set up by subclasses).
      if( !training || dropout == null || dropout.unit_active(o) ) {
        for( int i = 0; i < _previous._a.length; i++ ) {
          _a[o] += _w[o * _previous._a.length + i] * _previous._a[i];
        }
        _a[o] += _b[o];
        _a[o] = 1f - 2f / (1f + (float)Math.exp(2*_a[o])); //evals faster than tanh(x), but is slightly less numerically stable - OK
      }
    }
  }
  @Override protected void bprop() {
    long processed = _training.processed();
    float m = momentum(processed);
    float r = rate(processed) * (1 - m);
    for( int u = 0; u < _a.length; u++ ) {
      // Gradient is error * derivative of hyperbolic tangent: (1 - x^2)
      float g = _e[u] * (1f - _a[u] * _a[u]);
      bprop(u, g, r, m);
    }
  }
}
/**
 * Tanh layer with hidden-unit dropout. During training a fresh random bit
 * mask is drawn before each forward pass; during scoring all units are active
 * and the activations are halved to compensate for the 50% keep rate.
 */
public static class TanhDropout extends Tanh {
  public TanhDropout(int units) { super(units); }
  @Override
  protected void fprop(long seed, boolean training) {
    if (!training) {
      // Scoring path: no mask, scale outputs by the keep probability (1/2).
      super.fprop(seed, false);
      Utils.div(_a, 2.f);
      return;
    }
    final long maskSeed = seed + params.seed + 0xDA7A6000;
    dropout.fillBytes(maskSeed);
    super.fprop(maskSeed, true);
  }
}
/**
 * Apply tanh to the weights' transpose. Used for auto-encoders.
 */
public static class TanhPrime extends Tanh {
  public TanhPrime(int units) {
    super(units);
  }
  @Override public void init(Layer[] ls, int index, boolean weights) {
    super.init(ls, index, weights);
    // Auto encoder has its own bias vector
    _b = new float[units];
  }
  @Override protected void fprop(long seed, boolean training) {
    // Decoder pass: transposed indexing into the (shared) weight matrix.
    for( int o = 0; o < _a.length; o++ ) {
      _a[o] = 0;
      for( int i = 0; i < _previous._a.length; i++ )
        _a[o] += _w[i * _a.length + o] * _previous._a[i];
      _a[o] += _b[o];
      _a[o] = (float)Math.tanh(_a[o]);
    }
  }
  @Override protected void bprop() {
    long processed = _training.processed();
    float m = momentum(processed);
    float r = rate(processed) * (1 - m);
    for( int o = 0; o < _a.length; o++ ) {
      // Reconstruction error against the encoder's input, two layers up.
      assert _previous._previous.units == units;
      float e = _previous._previous._a[o] - _a[o];
      float g = e; // * (1 - _a[o]) * _a[o]; // Square error
      for( int i = 0; i < _previous._a.length; i++ ) {
        int w = i * _a.length + o; // transposed indexing, matching fprop
        if( _previous._e != null )
          _previous._e[i] += g * _w[w];
        // Direct SGD step with L1/L2 penalties (no momentum buffer here).
        _w[w] += r * (g * _previous._a[i] - _w[w] * params.l2 - Math.signum(_w[w]) * params.l1);
      }
      _b[o] += r * g;
    }
  }
}
/**
 * Hidden layer with a maxout-style activation: each unit outputs the maximum
 * of its individual weighted inputs (not their sum), plus its bias.
 */
public static class Maxout extends Layer {
  public Maxout(int units) { this.units = units; }
  @Override public void init(Layer[] ls, int index, boolean weights) {
    super.init(ls, index, weights);
    if( weights ) {
      randomize(params.seed + 0xBAD5EED + index, 1.0f);
      // Positive bias init: 0.5 for the first hidden layer, 1 for deeper layers.
      for( int i = 0; i < _b.length; i++ )
        _b[i] = index == 1 ? 0.5f : 1f;
    }
  }
  @Override protected void fprop(long seed, boolean training) {
    float max = 0;
    for( int o = 0; o < _a.length; o++ ) {
      _a[o] = 0;
      if( !training || dropout == null || dropout.unit_active(o)) {
        final int off = o * _previous._a.length;
        _a[o] = Float.NEGATIVE_INFINITY;
        // Max over per-input contributions w_i * x_i.
        for( int i = 0; i < _previous._a.length; i++ )
          _a[o] = Math.max(_a[o], _w[off+i] * _previous._a[i]);
        _a[o] += _b[o];
        max = Math.max(_a[o], max);
      }
    }
    // NOTE(review): rescales all activations so the largest is <= 1 — confirm intended.
    if( max > 1 ) Utils.div(_a, max);
  }
  @Override protected void bprop() {
    long processed = _training.processed();
    float m = momentum(processed);
    float r = rate(processed) * (1 - m);
    for( int u = 0; u < _a.length; u++ ) {
      float g = _e[u];
      // if( _a[o] < 0 ) Not sure if we should be using maxout with a hard zero bottom
      // g = 0;
      bprop(u, g, r, m);
    }
  }
}
/**
 * Maxout layer with hidden-unit dropout: fresh random mask per training pass;
 * at scoring time all units are active and activations are halved to
 * compensate for the 50% keep rate.
 */
public static class MaxoutDropout extends Maxout {
  public MaxoutDropout(int units) { super(units); }
  @Override protected void fprop(long seed, boolean training) {
    if (!training) {
      // Scoring path: no mask, scale outputs by the keep probability (1/2).
      super.fprop(seed, false);
      Utils.div(_a, 2.f);
      return;
    }
    final long maskSeed = seed + params.seed + 0x51C8D00D;
    dropout.fillBytes(maskSeed);
    super.fprop(maskSeed, true);
  }
}
/** Hidden layer with rectified-linear (ReLU) activation: max(0, W*x + b). */
public static class Rectifier extends Layer {
  public Rectifier(int units) { this.units = units; }
  @Override public void init(Layer[] ls, int index, boolean weights) {
    super.init(ls, index, weights);
    if( weights ) {
      randomize(params.seed + 0xBAD5EED + index, 1.0f);
      // Positive bias init keeps units initially active (0.5 for the first hidden layer).
      for( int i = 0; i < _b.length; i++ )
        _b[i] = index == 1 ? 0.5f : 1f;
    }
  }
  @Override protected void fprop(long seed, boolean training) {
    for( int o = 0; o < _a.length; o++ ) {
      _a[o] = 0;
      // Skip units dropped for this pass (dropout set up by RectifierDropout).
      if( !training || dropout == null || dropout.unit_active(o) ) {
        for( int i = 0; i < _previous._a.length; i++ )
          _a[o] += _w[o * _previous._a.length + i] * _previous._a[i];
        _a[o] += _b[o];
        _a[o] = Math.max(_a[o], 0f);
      }
    }
  }
  @Override protected void bprop() {
    long processed = _training.processed();
    final float m = momentum(processed);
    final float r = rate(processed) * (1 - m);
    for( int u = 0; u < _a.length; u++ ) {
      //(d/dx)(max(0,x)) = 1 if x > 0, otherwise 0
      final float g = _a[u] > 0 ? _e[u] : 0; // * 1.0 (from derivative of rectifier)
      bprop(u, g, r, m);
      // otherwise g = _e[u] * 0.0 = 0 and we don't allow other contributions by (and to) weights and momenta
    }
  }
}
/**
 * Rectifier layer with hidden-unit dropout: fresh random mask per training
 * pass; at scoring time all units are active and activations are halved to
 * compensate for the 50% keep rate.
 */
public static class RectifierDropout extends Rectifier {
  public RectifierDropout(int units) { super(units); }
  @Override protected void fprop(long seed, boolean training) {
    if (!training) {
      // Scoring path: no mask, scale outputs by the keep probability (1/2).
      super.fprop(seed, false);
      Utils.div(_a, 2.f);
      return;
    }
    final long maskSeed = seed + params.seed + 0x3C71F1ED;
    dropout.fillBytes(maskSeed);
    super.fprop(maskSeed, true);
  }
}
/** Decoder counterpart of Rectifier for auto-encoders: ReLU over the weights' transpose. */
public static class RectifierPrime extends Rectifier {
  public RectifierPrime(int units) { super(units); }
  @Override public void init(Layer[] ls, int index, boolean weights) {
    super.init(ls, index, weights);
    // Auto encoder has its own bias vector
    _b = new float[units];
    for( int i = 0; i < _b.length; i++ )
      _b[i] = index == 1 ? 0.5f : 1f;
  }
  @Override protected void fprop(long seed, boolean training) {
    // Decoder pass: transposed indexing into the shared weight matrix.
    for( int o = 0; o < _a.length; o++ ) {
      _a[o] = 0;
      for( int i = 0; i < _previous._a.length; i++ )
        _a[o] += _w[i * _a.length + o] * _previous._a[i];
      _a[o] += _b[o];
      if( _a[o] < 0 )
        _a[o] = 0;
    }
  }
  @Override protected void bprop() {
    long processed = _training.processed();
    float m = momentum(processed);
    float r = rate(processed) * (1 - m);
    for( int u = 0; u < _a.length; u++ ) {
      // Reconstruction error against the encoder's input, two layers up.
      assert _previous._previous.units == units;
      float e = _previous._previous._a[u] - _a[u];
      float g = e;//* (1 - _a[o]) * _a[o];
      //float g = e * (1 - _a[o]) * _a[o]; // Square error
      double r2 = 0; // squared norm of unit u's (transposed) weight column, for max_w2
      for( int i = 0; i < _previous._a.length; i++ ) {
        int w = i * _a.length + u;
        if( _previous._e != null ) _previous._e[i] += g * _w[w];
        float d = g * _previous._a[i] - (float)(_w[w] * params.l2) - (float)(Math.signum(_w[w]) * params.l1);
        _w[w] += r * d;
        if (params.max_w2 != Double.POSITIVE_INFINITY) r2 += _w[w] * _w[w];
      }
      // Max-norm constraint, same idea as Layer.bprop(u,g,r,m).
      if( params.max_w2 != Double.POSITIVE_INFINITY && r2 > params.max_w2 ) { // C.f. Improving neural networks by preventing co-adaptation of feature detectors
        final double scale = Math.sqrt(params.max_w2 / r2);
        for( int i = 0; i < _previous._a.length; i++ ) _w[i * _a.length + u] *= scale;
      }
      _b[u] += r * g;
    }
  }
}
/**
 * Clones the layer; gives the copy its own fresh Dropout instance so RNG state
 * and bit masks are never shared between clones.
 */
@Override public Layer clone() {
  Layer l = (Layer) super.clone();
  if (dropout != null) l.dropout = new Dropout(units);
  return l;
}
/**
 * Makes dst share src's weight and momentum arrays (e.g. between training and
 * scoring layer stacks). Bias/bias-momentum arrays are shared only when dst
 * has none yet or the lengths match — auto-encoder ("Prime") layers keep a
 * bias vector of their own.
 */
public static void shareWeights(Layer src, Layer dst) {
  dst._w = src._w;
  if (dst._b == null || dst._b.length == src._b.length) dst._b = src._b;
  dst._wm = src._wm;
  if (dst._bm == null || dst._bm.length == src._bm.length) dst._bm = src._bm;
}
/** Shares weights for every non-input layer pair (index 0 is the input layer and has no weights). */
public static void shareWeights(Layer[] src, Layer[] dst) {
  for( int layer = 1; layer < src.length; layer++ )
    shareWeights(src[layer], dst[layer]);
}
/** Uniform draw over [min, max) driven by a single-precision uniform variate. */
private static double uniformDist(Random rand, double min, double max) {
  final double span = max - min;
  return min + span * rand.nextFloat();
}
/**
 * Serializes this layer as a JSON object of the form
 * {"type": "&lt;class name&gt;", ...fields...}, delegating the field payload
 * to writeJSONFields().
 */
@Override public AutoBuffer writeJSON(AutoBuffer bb) {
  bb.put1('{');
  bb.putJSONStr("type").put1(':').putJSONStr(getClass().getName());
  bb.put1(',');
  writeJSONFields(bb);
  bb.put1('}');
  return bb;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/NFoldFrameExtractor.java
|
/**
*
*/
package hex;
import java.util.Arrays;
import water.H2O.H2OCountedCompleter;
import water.*;
import water.fvec.*;
import water.util.Utils;
/**
*
*/
/**
 * Frame extractor producing the i-th of n folds and its complement (for n-fold
 * cross validation). Output 0 holds all rows outside the fold, output 1 holds
 * the fold itself; the fold occupies one contiguous row range.
 */
public class NFoldFrameExtractor extends FrameExtractor {
  /** Number of folds */
  final int nfolds;
  /** Active fold which will be extracted. */
  final int afold;
  public NFoldFrameExtractor(Frame dataset, int nfolds, int afold, Key[] destKeys, Key jobKey) {
    super(dataset, destKeys, jobKey);
    assert afold >= 0 && afold < nfolds : "afold parameter is out of bound <0,nfolds)";
    this.nfolds = nfolds;
    this.afold = afold;
  }
  /** Creates the extraction worker; split==1 extracts the fold, split==0 its complement. */
  @Override protected MRTask2 createNewWorker(H2OCountedCompleter completer, Vec[] inputVecs, int split) {
    assert split == 0 || split == 1;
    return new FoldExtractTask(completer, inputVecs, nfolds, afold, split==1);
  }
  /**
   * Computes the chunk layouts (espc = cumulative row counts per chunk) of the
   * two output frames from the input layout. Chunks straddling a fold boundary
   * are split, so each output can gain at most one extra chunk.
   *
   * @return r[0] = layout of the out-of-fold part, r[1] = layout of the fold
   */
  @Override protected long[][] computeEspcPerSplit(long[] espc, long nrows) {
    assert espc[espc.length-1] == nrows : "Total number of rows does not match!";
    long[] ith = Utils.nfold(nrows, nfolds, afold); // Compute desired fold position
    long startRow = ith[0], endRow = startRow + ith[1]; // fold spans [startRow, endRow)
    long[][] r = new long[2][espc.length+1]; // In the worst case we will introduce a new chunk
    int c1 = 0, c2 = 0; // Number of chunks in each partition
    long p1rows = 0, p2rows = 0;
    int c = 0; // Chunk idx
    // Extract the first section of the remaining part
    for (; c<espc.length-1 && espc[c+1] <= startRow; c++) p1rows = r[0][++c1] = espc[c+1]; // Find the chunk with the split
    // c is chunk which needs a split between remaining part and selected fold, but it can be split into 3 pieces as well!
    if (r[0][c1] < (p1rows += (startRow-espc[c]))) r[0][++c1] = p1rows; // Start for new chunk of part1
    // Now extract i-th fold
    for (; c<espc.length-1 && espc[c+1] <= endRow; c++ ) p2rows = r[1][++c2] = espc[c+1]-startRow;
    if (r[1][c2] < (p2rows += (endRow-Math.max(espc[c],startRow)))) r[1][++c2] = p2rows;
    assert p2rows == ith[1];
    // Extract rest
    for (; c<espc.length-1; c++) p1rows = r[0][++c1] = espc[c+1]-ith[1];
    r[0] = Arrays.copyOf(r[0], c1+1);
    r[1] = Arrays.copyOf(r[1], c2+1);
    // Post-conditions
    assert r[0][r[0].length-1]+r[1][r[1].length-1] == nrows;
    return r;
  }
  /** This extractor always produces two frames: the complement and the fold. */
  @Override protected int numOfOutputs() {
    return 2;
  }
  /** Copies chunk parts from the source vectors into either the fold or its complement. */
  private static class FoldExtractTask extends MRTask2<FoldExtractTask> {
    private final Vec [] _vecs; // source vectors
    private final int _nfolds;
    private final int _afold;
    private final boolean _inFold; // true: extract the fold; false: extract the complement
    transient int _precedingChks; // number of preceding chunks
    transient int _startFoldChkIdx; // idx of 1st chunk for the fold
    transient int _startRestChkIdx; // idx of 1st of remaining part
    transient int _startFoldRow; // fold start row inside the chunk _startFoldChkIdx
    transient int _startRestRow; // index of the 1st row inside chunk _startRestChkIdx begining remaining part of data
    /** Precomputes, per node, where the fold starts/ends in terms of source chunks. */
    @Override protected void setupLocal() {
      Vec anyInVec = _vecs[0];
      long[] folds = Utils.nfold(anyInVec.length(), _nfolds, _afold);
      long startRow = folds[0];
      long endRow = startRow+folds[1];
      long espc[] = anyInVec._espc;
      int c = 0;
      for (; c<espc.length-1 && espc[c+1] <= startRow; c++) ; // find the first chunk containing the fold
      _startFoldChkIdx = c;
      _startFoldRow = (int) (startRow-espc[c]);
      _precedingChks = _startFoldRow > 0 ? c+1 : c;
      for (; c<espc.length-1 && espc[c+1] <= endRow; c++) ; // find the first chunk after the fold
      _startRestChkIdx = c;
      _startRestRow = (int) (endRow-espc[c]);
    }
    public FoldExtractTask(H2OCountedCompleter completer, Vec[] srcVecs, int nfold, int afold, boolean inFold) {
      super(completer);
      _vecs = srcVecs;
      _nfolds = nfold;
      _afold = afold;
      _inFold = inFold;
    }
    @Override public void map(Chunk[] cs) {
      int coutidx = cs[0].cidx(); // output chunk where to extract
      int cinidx = getInChunkIdx(coutidx); // input chunk where to extract
      int startRow = getStartRow(coutidx); // start row for extraction
      int nrows = cs[0]._len; // number of rows to extract from the input chunk
      for (int i=0; i<cs.length; i++) {
        ChunkSplitter.extractChunkPart(_vecs[i].chunkForChunkIdx(cinidx), cs[i], startRow, nrows, _fs);
      }
    }
    /** Maps an output chunk index to the source chunk index it is copied from. */
    private int getInChunkIdx(int coutidx) {
      if (_inFold)
        return _startFoldChkIdx==_startRestChkIdx ? _startFoldChkIdx : coutidx + _startFoldChkIdx;
      else { // out fold part
        if (coutidx < _precedingChks)
          return coutidx;
        else
          return _startRestChkIdx + (coutidx-_precedingChks);
      }
    }
    /** Row offset inside the source chunk where copying starts for the given output chunk. */
    private int getStartRow(int coutidx) {
      if (_inFold)
        return coutidx == 0 ? _startFoldRow : 0;
      else { //out fold part
        return coutidx == _precedingChks ? _startRestRow : 0;
      }
    }
  }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.