| index (int64) | repo_id (string) | file_path (string) | content (string) |
|---|---|---|---|
0 | java-sources/ai/h2o/h2o-classic/2.8 | java-sources/ai/h2o/h2o-classic/2.8/hex/NeuralNet.java |
package hex;
import static hex.NeuralNet.ExecutionMode.*;
import hex.Layer.*;
import water.*;
import water.H2O.H2OCountedCompleter;
import water.Job.ValidatedJob;
import water.api.DocGen;
import water.api.NeuralNetProgressPage;
import water.api.RequestServer;
import water.fvec.*;
import water.util.*;
import java.util.Arrays;
import java.util.Random;
/**
* Neural network.
*
* @author cypof
*/
public class NeuralNet extends ValidatedJob {
static final int API_WEAVER = 1;
public static DocGen.FieldDoc[] DOC_FIELDS;
public static final String DOC_GET = "Neural Network";
@API(help = "Execution Mode", filter = Default.class, json = true)
public ExecutionMode mode = ExecutionMode.SingleNode;
@API(help = "Activation function", filter = Default.class, json = true)
public Activation activation = Activation.Tanh;
@API(help = "Input layer dropout ratio", filter = Default.class, dmin = 0, dmax = 1, json = true)
public double input_dropout_ratio = 0.0;
@API(help = "Hidden layer sizes, e.g. 1000, 1000. Grid search: (100, 100), (200, 200)", filter = Default.class, json = true)
public int[] hidden = new int[] { 200, 200 };
@API(help = "Learning rate (higher => less stable, lower => slower convergence)", filter = Default.class, dmin = 0, dmax = 1, json = true)
public double rate = .005;
@API(help = "Learning rate annealing: rate / (1 + rate_annealing * samples)", filter = Default.class, dmin = 0, dmax = 1, json = true)
public double rate_annealing = 1 / 1e6;
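// e.g., with the defaults (rate = 0.005, rate_annealing = 1e-6), the effective learning rate
// after 1e6 training samples is 0.005 / (1 + 1e-6 * 1e6) = 0.0025.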
@API(help = "L1 regularization, can add stability", filter = Default.class, dmin = 0, dmax = 1, json = true)
public double l1 = 0.0;
@API(help = "L2 regularization, can add stability", filter = Default.class, dmin = 0, dmax = 1, json = true)
public double l2 = 0.0;
@API(help = "Initial momentum at the beginning of training", filter = Default.class, dmin = 0, json = true)
public double momentum_start = .5;
@API(help = "Number of training samples for which momentum increases", filter = Default.class, lmin = 0, json = true)
public long momentum_ramp = 1000000;
@API(help = "Final momentum after the ramp is over", filter = Default.class, dmin = 0, json = true)
public double momentum_stable = 0.99;
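// e.g., with the defaults, momentum increases from 0.5 toward 0.99 over the first 1,000,000 training samples.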
@API(help = "How many times the dataset should be iterated (streamed), can be less than 1.0", filter = Default.class, dmin = 0, json = true)
public double epochs = 10;
@API(help = "Seed for random numbers (reproducible results for single-threaded only, cf. Hogwild)", filter = Default.class, json = true)
public long seed = new Random().nextLong();
@API(help = "Enable expert mode", filter = Default.class, json = true)
public boolean expert_mode = false;
@API(help = "Initial Weight Distribution", filter = Default.class, json = true)
public InitialWeightDistribution initial_weight_distribution = InitialWeightDistribution.UniformAdaptive;
@API(help = "Uniform: -value...value, Normal: stddev)", filter = Default.class, dmin = 0, json = true)
public double initial_weight_scale = 1.0;
@API(help = "Loss function", filter = Default.class, json = true)
public Loss loss = Loss.CrossEntropy;
@API(help = "Learning rate decay factor between layers (N-th layer: rate*alpha^(N-1))", filter = Default.class, dmin = 0, json = true)
public double rate_decay = 1.0;
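// e.g., rate_decay = 0.5 would give the 3rd layer a learning rate of rate * 0.5^2 = rate / 4.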
@API(help = "Constraint for squared sum of incoming weights per unit", filter = Default.class, json = true)
public double max_w2 = Double.POSITIVE_INFINITY;
@API(help = "Number of samples to train with non-distributed mode for improved stability", filter = Default.class, lmin = 0, json = true)
public long warmup_samples = 0L;
@API(help = "Number of training set samples for scoring (0 for all)", filter = Default.class, lmin = 0, json = true)
public long score_training = 1000L;
@API(help = "Number of validation set samples for scoring (0 for all)", filter = Default.class, lmin = 0, json = true)
public long score_validation = 0L;
@API(help = "Minimum interval (in seconds) between scoring", filter = Default.class, dmin = 0, json = true)
public double score_interval = 2;
@API(help = "Enable diagnostics for hidden layers", filter = Default.class, json = true)
public boolean diagnostics = true;
@API(help = "Enable fast mode (minor approximation in back-propagation)", filter = Default.class, json = true)
public boolean fast_mode = true;
@Override public boolean toHTML(StringBuilder sb) {
return makeJsonBox(sb);
}
@Override
protected void registered(RequestServer.API_VERSION ver) {
super.registered(ver);
for (Argument arg : _arguments) {
if ( arg._name.equals("activation") || arg._name.equals("initial_weight_distribution")
|| arg._name.equals("mode") || arg._name.equals("expert_mode")) {
arg.setRefreshOnChange();
}
}
}
@Override protected void queryArgumentValueSet(Argument arg, java.util.Properties inputArgs) {
super.queryArgumentValueSet(arg, inputArgs);
if (arg._name.equals("classification")) {
classification = true;
arg.disable("Regression is not currently supported.");
}
if (arg._name.equals("ignored_cols")) arg.disable("Not currently supported.");
if(arg._name.equals("initial_weight_scale") &&
(initial_weight_distribution == InitialWeightDistribution.UniformAdaptive)
) {
arg.disable("Using sqrt(6 / (# units + # units of previous layer)) for Uniform distribution.", inputArgs);
}
if( arg._name.equals("mode") ) {
if (H2O.CLOUD._memary.length > 1) {
//TODO: re-enable this
// arg.disable("Using MapReduce since cluster size > 1.", inputArgs);
// mode = ExecutionMode.MapReduce;
//Temporary solution
if (mode == ExecutionMode.MapReduce) {
arg.disable("Distributed MapReduce mode is not yet fully supported. Will run in single-node mode, wasting "
+ (H2O.CLOUD._memary.length - 1) + " cluster node(s).", inputArgs);
mode = ExecutionMode.SingleNode;
}
}
}
if( arg._name.equals("warmup_samples") && mode == MapReduce && H2O.CLOUD._memary.length > 1) {
arg.disable("Not yet implemented for distributed MapReduce execution modes, using a value of 0.");
warmup_samples = 0;
}
if(arg._name.equals("loss") && !classification) {
arg.disable("Using MeanSquare loss for regression.", inputArgs);
loss = Loss.MeanSquare;
}
if (arg._name.equals("score_validation") && validation == null) {
arg.disable("Only if a validation set is specified.");
}
if (arg._name.equals("loss") || arg._name.equals("max_w2") || arg._name.equals("warmup_samples")
|| arg._name.equals("score_training") || arg._name.equals("score_validation")
|| arg._name.equals("initial_weight_distribution") || arg._name.equals("initial_weight_scale")
|| arg._name.equals("score_interval") || arg._name.equals("diagnostics")
|| arg._name.equals("rate_decay")
) {
if (!expert_mode) arg.disable("Only in expert mode.");
}
}
public enum ExecutionMode {
SingleThread, SingleNode, MapReduce
}
public enum InitialWeightDistribution {
UniformAdaptive, Uniform, Normal
}
/**
* Activation functions
*/
public enum Activation {
Tanh, TanhWithDropout, Rectifier, RectifierWithDropout, Maxout, MaxoutWithDropout
}
/**
* Loss functions
* CrossEntropy is recommended
*/
public enum Loss {
MeanSquare, CrossEntropy
}
// Hack: used to stop the monitor thread
public static volatile boolean running = true;
public NeuralNet() {
description = DOC_GET;
}
@Override public final void execImpl() {
startTrain();
}
void startTrain() {
logStart();
running = true;
// Vec[] vecs = Utils.append(_train, response);
// reChunk(vecs);
// final Vec[] train = new Vec[vecs.length - 1];
// System.arraycopy(vecs, 0, train, 0, train.length);
// final Vec trainResp = classification ? vecs[vecs.length - 1].toEnum() : vecs[vecs.length - 1];
final Vec[] train = _train;
final Vec trainResp = classification ? response.toEnum() : response;
final Layer[] ls = new Layer[hidden.length + 2];
ls[0] = new VecsInput(train, null);
for( int i = 0; i < hidden.length; i++ ) {
switch( activation ) {
case Tanh:
ls[i + 1] = new Tanh(hidden[i]);
break;
case TanhWithDropout:
ls[i + 1] = new TanhDropout(hidden[i]);
break;
case Rectifier:
ls[i + 1] = new Rectifier(hidden[i]);
break;
case RectifierWithDropout:
ls[i + 1] = new RectifierDropout(hidden[i]);
break;
case Maxout:
ls[i + 1] = new Maxout(hidden[i]);
break;
case MaxoutWithDropout:
ls[i + 1] = new MaxoutDropout(hidden[i]);
break;
}
}
if( classification )
ls[ls.length - 1] = new VecSoftmax(trainResp, null);
else
ls[ls.length - 1] = new VecLinear(trainResp, null);
//copy parameters from NeuralNet, and set previous/input layer links
for( int i = 0; i < ls.length; i++ )
ls[i].init(ls, i, this);
final Key sourceKey = Key.make(input("source"));
final Frame frame = new Frame(_names, train);
frame.add(_responseName, trainResp);
final Errors[] trainErrors0 = new Errors[] { new Errors() };
final Errors[] validErrors0 = validation == null ? null : new Errors[] { new Errors() };
NeuralNetModel model = new NeuralNetModel(destination_key, sourceKey, frame, ls, this);
model.training_errors = trainErrors0;
model.validation_errors = validErrors0;
model.delete_and_lock(self());
final Frame[] adapted = validation == null ? null : model.adapt(validation, false);
final Trainer trainer;
final long num_rows = source.numRows();
if (mode == SingleThread) {
Log.info("Entering single-threaded execution mode");
trainer = new Trainer.Direct(ls, epochs, self());
} else {
// one node works on the first batch of points serially for improved stability
if (warmup_samples > 0) {
Log.info("Training the first " + warmup_samples + " samples in serial for improved stability.");
Trainer warmup = new Trainer.Direct(ls, (double)warmup_samples/num_rows, self());
warmup.start();
warmup.join();
//TODO: for MapReduce send weights from master VM to all other VMs
}
if (mode == SingleNode) {
Log.info("Entering single-node (multi-threaded Hogwild) execution mode.");
trainer = new Trainer.Threaded(ls, epochs, self(), -1);
} else if (mode == MapReduce) {
if (warmup_samples > 0 && mode == MapReduce) {
Log.info("Multi-threaded warmup with " + warmup_samples + " samples.");
Trainer warmup = new Trainer.Threaded(ls, (double)warmup_samples/num_rows, self(), -1);
warmup.start();
warmup.join();
//TODO: for MapReduce send weights from master VM to all other VMs
}
Log.info("Entering multi-node (MapReduce + multi-threaded Hogwild) execution mode.");
trainer = new Trainer.MapReduce(ls, epochs, self());
} else throw new RuntimeException("invalid execution mode.");
}
Log.info("Running for " + epochs + " epochs.");
final NeuralNet nn = this;
// Use a separate thread for monitoring (blocked most of the time)
Thread monitor = new Thread() {
Errors[] trainErrors = trainErrors0, validErrors = validErrors0;
@Override public void run() {
try {
Vec[] valid = null;
Vec validResp = null;
if( validation != null ) {
assert adapted != null;
final Vec[] vs = adapted[0].vecs();
valid = Arrays.copyOf(vs, vs.length - 1);
System.arraycopy(adapted[0].vecs(), 0, valid, 0, valid.length);
validResp = vs[vs.length - 1];
}
//score the model every 2 seconds (or less often, if it takes longer to score)
final long num_samples_total = (long)(Math.ceil(num_rows * epochs));
long num = -1, last_eval = runTimeMs();
do {
final long interval = (long)(score_interval * 1000); //time between evaluations
long time_taken = runTimeMs() - last_eval;
if (num >= 0 && time_taken < interval) {
Thread.sleep(interval - time_taken);
}
last_eval = runTimeMs();
num = eval(valid, validResp);
if (num >= num_samples_total) break;
if (mode != MapReduce) {
if (!isRunning(self()) || !running) break;
} else {
if (!running) break; //MapReduce calls cancel() early, we are waiting for running = false
}
} while (true);
// remove validation data
if( adapted != null && adapted[1] != null )
adapted[1].delete();
Log.info("Training finished.");
} catch( Exception ex ) {
cancel(ex);
}
}
private long eval(Vec[] valid, Vec validResp) {
long[][] cm = null;
if( classification ) {
int classes = ls[ls.length - 1].units;
cm = new long[classes][classes];
}
NeuralNetModel model = new NeuralNetModel(destination_key, sourceKey, frame, ls, nn);
// score model on training set
Errors e = eval(train, trainResp, score_training, valid == null ? cm : null);
e.score_training = score_training == 0 ? train[0].length() : score_training;
trainErrors = Utils.append(trainErrors, e);
model.unstable |= Double.isNaN(e.mean_square) || Double.isNaN(e.cross_entropy);
model.training_errors = trainErrors;
// score model on validation set
if( valid != null ) {
e = eval(valid, validResp, score_validation, cm);
e.score_validation = score_validation == 0 ? valid[0].length() : score_validation;
validErrors = Utils.append(validErrors, e);
model.unstable |= Double.isNaN(e.mean_square) || Double.isNaN(e.cross_entropy);
}
model.validation_errors = validErrors;
model.confusion_matrix = cm;
model.update(self());
// terminate model building if we detect that a model is unstable
if (model.unstable) NeuralNet.running = false;
return e.training_samples;
}
private Errors eval(Vec[] vecs, Vec resp, long n, long[][] cm) {
Errors e = NeuralNet.eval(ls, vecs, resp, n, cm);
e.training_samples = trainer.processed();
e.training_time_ms = runTimeMs();
return e;
}
};
trainer.start();
monitor.start();
trainer.join();
// Gracefully terminate the job submitted via H2O web API
if (mode != MapReduce) {
running = false; //tell the monitor thread to finish too
try {
monitor.join();
} catch (InterruptedException e) {
e.printStackTrace();
}
} else {
while (running) { //MapReduce will inform us that running = false
try {
Thread.sleep(1);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
// remove this job -> stop H2O interface from refreshing
H2OCountedCompleter task = _fjtask;
if( task != null )
task.tryComplete();
this.remove();
}
@Override public float progress() {
NeuralNetModel model = UKV.get(destination_key);
if( model != null && source != null) {
Errors e = model.training_errors[model.training_errors.length - 1];
return Math.min(1f, 0.1f + Math.min(1, e.training_samples / (float) (epochs * source.numRows())));
}
return 0;
}
public static Errors eval(Layer[] ls, Vec[] vecs, Vec resp, long n, long[][] cm) {
Output output = (Output) ls[ls.length - 1];
if( output instanceof VecSoftmax )
output = new VecSoftmax(resp, (VecSoftmax) output);
else
output = new VecLinear(resp, (VecLinear) output);
return eval(ls, new VecsInput(vecs, (VecsInput) ls[0]), output, n, cm);
}
private static Errors eval(Layer[] ls, Input input, Output output, long n, long[][] cm) {
Layer[] clones = new Layer[ls.length];
clones[0] = input;
for( int y = 1; y < clones.length - 1; y++ )
clones[y] = ls[y].clone();
clones[clones.length - 1] = output;
for( int y = 0; y < clones.length; y++ )
clones[y].init(clones, y, false);
Layer.shareWeights(ls, clones);
return eval(clones, n, cm);
}
public static Errors eval(Layer[] ls, long n, long[][] cm) {
Errors e = new Errors();
Input input = (Input) ls[0];
long len = input._len;
// TODO: choose random subset instead of first n points (do this once per run)
if( n != 0 )
len = Math.min(len, n);
// classification
if( ls[ls.length - 1] instanceof Softmax ) {
int correct = 0;
e.mean_square = 0;
e.cross_entropy = 0;
for( input._pos = 0; input._pos < len; input._pos++ ) {
if( ((Softmax) ls[ls.length - 1]).target() == Layer.missing_int_value ) //NA
continue;
if( correct(ls, e, cm) )
correct++;
}
e.classification = (len - (double) correct) / len;
e.mean_square /= len;
e.cross_entropy /= len; //want to report the averaged cross-entropy
}
// regression
else {
e.mean_square = 0;
for( input._pos = 0; input._pos < len; input._pos++ )
if( ls[ls.length - 1]._a[0] != Layer.missing_float_value )
error(ls, e);
e.classification = Double.POSITIVE_INFINITY;
e.mean_square /= len;
}
input._pos = 0;
return e;
}
// classification scoring
static boolean correct(Layer[] ls, Errors e, long[][] confusion) {
Softmax output = (Softmax) ls[ls.length - 1];
if( output.target() == -1 )
return false;
for (Layer l : ls) l.fprop(-1, false);
float[] out = ls[ls.length - 1]._a;
int target = output.target();
for( int o = 0; o < out.length; o++ ) {
final boolean hitpos = (o == target);
final double t = hitpos ? 1 : 0;
final double d = t - out[o];
e.mean_square += d * d;
e.cross_entropy += hitpos ? -Math.log(out[o]) : 0;
}
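// preds[0] is filled below with the predicted class from ModelUtils.getPrediction;
// preds[1..n] carry the output-layer activations (the per-class scores).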
float[] preds = new float[out.length+1];
for (int i=0;i<out.length;++i) preds[i+1] = out[i];
double[] data = new double[ls[0]._a.length];
preds[0] = ModelUtils.getPrediction(preds, data);
if( confusion != null ) {
if (output.target() != Layer.missing_int_value) confusion[output.target()][(int)preds[0]]++;
}
return preds[0] == output.target();
}
// regression scoring
static void error(Layer[] ls, Errors e) {
Linear linear = (Linear) ls[ls.length - 1];
for (Layer l : ls) l.fprop(-1, false);
float[] output = ls[ls.length - 1]._a;
float[] target = linear.target();
// accumulate into e.mean_square; the caller zeroes it once and divides by the row count afterwards
for( int o = 0; o < output.length; o++ ) {
final double d = target[o] - output[o];
e.mean_square += d * d;
}
}
@Override protected Response redirect() {
return NeuralNetProgressPage.redirect(this, self(), dest());
}
public static String link(Key k, String content) {
NeuralNet req = new NeuralNet();
RString rs = new RString("<a href='" + req.href() + ".query?%key_param=%$key'>%content</a>");
rs.replace("key_param", "source");
rs.replace("key", k.toString());
rs.replace("content", content);
return rs.toString();
}
@Override public String speedDescription() {
return "time/epoch";
}
@Override public long speedValue() {
Value value = DKV.get(dest());
NeuralNetModel m = value != null ? (NeuralNetModel) value.get() : null;
long sv = 0;
if( m != null ) {
Errors[] e = m.training_errors;
double epochsSoFar = e[e.length - 1].training_samples / (double) source.numRows();
sv = (epochsSoFar <= 0) ? 0 : (long) (e[e.length - 1].training_time_ms / epochsSoFar);
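// e.g., 2,000,000 processed samples on a 1,000,000-row training frame after 10,000 ms
// gives epochsSoFar = 2 and a reported speed of 5,000 ms per epoch.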
}
return sv;
}
public static class Errors extends Iced {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
@API(help = "How many rows the algorithm has processed")
public long training_samples;
@API(help = "How long the algorithm ran in ms")
public long training_time_ms;
@API(help = "Classification error")
public double classification = 1;
@API(help = "Mean square error")
public double mean_square = Double.POSITIVE_INFINITY;
@API(help = "Cross entropy")
public double cross_entropy = Double.POSITIVE_INFINITY;
@API(help = "Number of training set samples for scoring")
public long score_training;
@API(help = "Number of validation set samples for scoring")
public long score_validation;
@Override public String toString() {
return String.format("%.2f", (100 * classification))
+ "% (MSE:" + String.format("%.2e", mean_square)
+ ", MCE:" + String.format("%.2e", cross_entropy)
+ ")";
}
}
public static class NeuralNetModel extends Model {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
@API(help = "Model parameters")
public NeuralNet parameters;
//@API(help = "Layers")
public Layer[] layers;
//@API(help = "Layer weights")
public float[][] weights;
//@API(help = "Layer biases")
public float[][] biases;
@API(help = "Errors on the training set")
public Errors[] training_errors;
@API(help = "Errors on the validation set")
public Errors[] validation_errors;
@API(help = "Confusion matrix")
public long[][] confusion_matrix;
@API(help = "Mean bias")
public float[] mean_bias;
@API(help = "RMS bias")
public float[] rms_bias;
@API(help = "Mean weight")
public float[] mean_weight;
@API(help = "RMS weight")
public float[] rms_weight;
@API(help = "Unstable")
public boolean unstable = false;
NeuralNetModel(Key selfKey, Key dataKey, Frame fr, Layer[] ls, NeuralNet p) {
super(selfKey, dataKey, fr, /* priorClassDistribution */ null);
parameters = p;
layers = ls;
weights = new float[ls.length][];
biases = new float[ls.length][];
for( int y = 1; y < layers.length; y++ ) {
weights[y] = layers[y]._w;
biases[y] = layers[y]._b;
}
if (parameters.diagnostics) {
// compute stats on all nodes
mean_bias = new float[ls.length];
rms_bias = new float[ls.length];
mean_weight = new float[ls.length];
rms_weight = new float[ls.length];
for( int y = 1; y < layers.length; y++ ) {
final Layer l = layers[y];
final int len = l._a.length;
// compute mean values
mean_bias[y] = rms_bias[y] = 0;
mean_weight[y] = rms_weight[y] = 0;
for(int u = 0; u < len; u++) {
mean_bias[y] += biases[y][u];
for( int i = 0; i < l._previous._a.length; i++ ) {
int w = u * l._previous._a.length + i;
mean_weight[y] += weights[y][w];
}
}
mean_bias[y] /= len;
mean_weight[y] /= len * l._previous._a.length;
// compute rms values
for(int u = 0; u < len; ++u) {
final double db = biases[y][u] - mean_bias[y];
rms_bias[y] += db * db;
for( int i = 0; i < l._previous._a.length; i++ ) {
int w = u * l._previous._a.length + i;
final double dw = weights[y][w] - mean_weight[y];
rms_weight[y] += dw * dw;
}
}
rms_bias[y] = (float)Math.sqrt(rms_bias[y]/len);
rms_weight[y] = (float)Math.sqrt(rms_weight[y]/len/l._previous._a.length);
unstable |= Double.isNaN(mean_bias[y]) || Double.isNaN(rms_bias[y])
|| Double.isNaN(mean_weight[y]) || Double.isNaN(rms_weight[y]);
// Abort the run if weights or biases are unreasonably large (Note that all input values are normalized upfront)
// This can happen with Rectifier units when L1/L2/max_w2 are all set to 0, especially when using more than 1 hidden layer.
final double thresh = 1e10;
unstable |= mean_bias[y] > thresh || rms_bias[y] > thresh
|| mean_weight[y] > thresh || rms_weight[y] > thresh;
}
}
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (int i=0; i<weights.length; ++i)
sb.append("\nweights["+i+"][]="+Arrays.toString(weights[i]));
for (int i=0; i<biases.length; ++i)
sb.append("\nbiases["+i+"][]="+Arrays.toString(biases[i]));
sb.append("\n");
return sb.toString();
}
public void toJavaHtml(StringBuilder sb) {
//DocGen.HTML.title(sb, "The Java Neural Net model is not implemented yet.");
}
public boolean generateHTML(String title, StringBuilder sb) {
final String mse_format = "%2.6f";
final String cross_entropy_format = "%2.6f";
DocGen.HTML.title(sb, title);
DocGen.HTML.paragraph(sb, "Model Key: " + _key);
sb.append("<div class='alert'>Actions: " + water.api.Predict.link(_key, "Score on dataset") + ", "
+ NeuralNet.link(_dataKey, "Compute new model") + "</div>");
parameters.toHTML(sb);
// Plot training error
{
float[] train_err = new float[training_errors.length];
float[] train_samples = new float[training_errors.length];
for (int i=0; i<train_err.length; ++i) {
train_err[i] = (float)training_errors[i].classification;
train_samples[i] = training_errors[i].training_samples;
}
new D3Plot(train_samples, train_err, "training samples", "classification error",
"Classification Error on Training Set").generate(sb);
}
// Plot validation error
if (validation_errors != null) {
float[] valid_err = new float[validation_errors.length];
float[] valid_samples = new float[validation_errors.length];
for (int i=0; i<valid_err.length; ++i) {
valid_err[i] = (float)validation_errors[i].classification;
valid_samples[i] = validation_errors[i].training_samples;
}
new D3Plot(valid_samples, valid_err, "training samples", "classification error",
"Classification Error on Validation Set").generate(sb);
}
final boolean classification = isClassifier();
final String cmTitle = "Confusion Matrix" + (validation_errors == null ? " (Training Data)" : "");
// stats for training and validation
final Errors train = training_errors[training_errors.length - 1];
final Errors valid = validation_errors != null ? validation_errors[validation_errors.length - 1] : null;
if (classification) {
DocGen.HTML.section(sb, "Training classification error: " + formatPct(train.classification));
}
DocGen.HTML.section(sb, "Training mean square error: " + String.format(mse_format, train.mean_square));
if (classification) {
DocGen.HTML.section(sb, "Training cross entropy: " + String.format(cross_entropy_format, train.cross_entropy));
if( valid != null ) {
DocGen.HTML.section(sb, "Validation classification error: " + formatPct(valid.classification));
}
}
if( validation_errors != null ) {
assert valid != null;
DocGen.HTML.section(sb, "Validation mean square error: " + String.format(mse_format, valid.mean_square));
if (classification) {
DocGen.HTML.section(sb, "Validation mean cross entropy: " + String.format(cross_entropy_format, valid.cross_entropy));
}
if (valid.training_time_ms > 0)
DocGen.HTML.section(sb, "Training speed: " + valid.training_samples * 1000 / valid.training_time_ms + " samples/s");
}
else {
if (train.training_time_ms > 0)
DocGen.HTML.section(sb, "Training speed: " + train.training_samples * 1000 / train.training_time_ms + " samples/s");
}
if (parameters != null && parameters.diagnostics) {
DocGen.HTML.section(sb, "Status of Hidden and Output Layers");
sb.append("<table class='table table-striped table-bordered table-condensed'>");
sb.append("<tr>");
sb.append("<th>").append("#").append("</th>");
sb.append("<th>").append("Units").append("</th>");
sb.append("<th>").append("Activation").append("</th>");
sb.append("<th>").append("Rate").append("</th>");
sb.append("<th>").append("L1").append("</th>");
sb.append("<th>").append("L2").append("</th>");
sb.append("<th>").append("Momentum").append("</th>");
sb.append("<th>").append("Weight (Mean, RMS)").append("</th>");
sb.append("<th>").append("Bias (Mean, RMS)").append("</th>");
sb.append("</tr>");
for (int i=1; i<layers.length; ++i) {
sb.append("<tr>");
sb.append("<td>").append("<b>").append(i).append("</b>").append("</td>");
sb.append("<td>").append("<b>").append(layers[i].units).append("</b>").append("</td>");
sb.append("<td>").append(layers[i].getClass().getSimpleName().replace("Vec","").replace("Chunk", "")).append("</td>");
sb.append("<td>").append(String.format("%.5g", layers[i].rate(train.training_samples))).append("</td>");
sb.append("<td>").append(layers[i].params.l1).append("</td>");
sb.append("<td>").append(layers[i].params.l2).append("</td>");
final String format = "%g";
sb.append("<td>").append(layers[i].momentum(train.training_samples)).append("</td>");
sb.append("<td>(").append(String.format(format, mean_weight[i])).
append(", ").append(String.format(format, rms_weight[i])).append(")</td>");
sb.append("<td>(").append(String.format(format, mean_bias[i])).
append(", ").append(String.format(format, rms_bias[i])).append(")</td>");
sb.append("</tr>");
}
sb.append("</table>");
}
if (unstable) {
final String msg = "Job was aborted due to observed numerical instability (exponential growth)."
+ " Try a bounded activation function or regularization with L1, L2 or max_w2 and/or use a smaller learning rate or faster annealing.";
DocGen.HTML.section(sb, "=======================================================================================");
DocGen.HTML.section(sb, msg);
DocGen.HTML.section(sb, "=======================================================================================");
}
if( confusion_matrix != null && confusion_matrix.length < 100 ) {
assert(classification);
String[] classes = classNames();
NeuralNetScore.confusion(sb, cmTitle, classes, confusion_matrix);
}
sb.append("<h3>" + "Progress" + "</h3>");
String training = "Number of training set samples for scoring: " + train.score_training;
if (train.score_training > 0) {
if (train.score_training < 1000) training += " (low, scoring might be inaccurate -> consider increasing this number in the expert mode)";
if (train.score_training > 10000) training += " (large, scoring can be slow -> consider reducing this number in the expert mode or scoring manually)";
}
DocGen.HTML.section(sb, training);
if (valid != null) {
String validation = "Number of validation set samples for scoring: " + valid.score_validation;
if (valid.score_validation > 0) {
if (valid.score_validation < 1000) validation += " (low, scoring might be inaccurate -> consider increasing this number in the expert mode)";
if (valid.score_validation > 10000) validation += " (large, scoring can be slow -> consider reducing this number in the expert mode or scoring manually)";
}
DocGen.HTML.section(sb, validation);
}
sb.append("<table class='table table-striped table-bordered table-condensed'>");
sb.append("<tr>");
sb.append("<th>Training Time</th>");
sb.append("<th>Training Samples</th>");
sb.append("<th>Training MSE</th>");
if (classification) {
sb.append("<th>Training MCE</th>");
sb.append("<th>Training Classification Error</th>");
}
sb.append("<th>Validation MSE</th>");
if (classification) {
sb.append("<th>Validation MCE</th>");
sb.append("<th>Validation Classification Error</th>");
}
sb.append("</tr>");
for( int i = training_errors.length - 1; i >= 0; i-- ) {
sb.append("<tr>");
sb.append("<td>" + PrettyPrint.msecs(training_errors[i].training_time_ms, true) + "</td>");
if( validation_errors != null ) {
sb.append("<td>" + String.format("%,d", validation_errors[i].training_samples) + "</td>");
} else {
sb.append("<td>" + String.format("%,d", training_errors[i].training_samples) + "</td>");
}
sb.append("<td>" + String.format(mse_format, training_errors[i].mean_square) + "</td>");
if (classification) {
sb.append("<td>" + String.format(cross_entropy_format, training_errors[i].cross_entropy) + "</td>");
sb.append("<td>" + formatPct(training_errors[i].classification) + "</td>");
}
if( validation_errors != null ) {
sb.append("<td>" + String.format(mse_format, validation_errors[i].mean_square) + "</td>");
if (classification) {
sb.append("<td>" + String.format(cross_entropy_format, validation_errors[i].cross_entropy) + "</td>");
sb.append("<td>" + formatPct(validation_errors[i].classification) + "</td>");
}
} else
sb.append("<td></td><td></td><td></td>");
sb.append("</tr>");
}
sb.append("</table>");
return true;
}
private static String formatPct(double pct) {
String s = "N/A";
if( !Double.isNaN(pct) )
s = String.format("%5.2f %%", 100 * pct);
return s;
}
@Override protected float[] score0(Chunk[] chunks, int rowInChunk, double[] tmp, float[] preds) {
Layer[] clones = new Layer[layers.length];
clones[0] = new ChunksInput(Utils.remove(chunks, chunks.length - 1), (VecsInput) layers[0]);
for( int y = 1; y < layers.length - 1; y++ )
clones[y] = layers[y].clone();
Layer output = layers[layers.length - 1];
if( output instanceof VecSoftmax )
clones[clones.length - 1] = new ChunkSoftmax(chunks[chunks.length - 1], (VecSoftmax) output);
else
clones[clones.length - 1] = new ChunkLinear(chunks[chunks.length - 1], (VecLinear) output);
for( int y = 0; y < clones.length; y++ ) {
clones[y]._w = weights[y];
clones[y]._b = biases[y];
clones[y].init(clones, y, false);
}
((Input) clones[0])._pos = rowInChunk;
for (Layer clone : clones) clone.fprop(-1, false);
float[] out = clones[clones.length - 1]._a;
assert out.length == preds.length;
for (int i=0; i<out.length; ++i) preds[i+1] = out[i];
double[] data = new double[out.length];
for (int i=0; i<out.length; ++i) data[i] = out[i];
preds[0] = ModelUtils.getPrediction(preds, data);
return preds;
}
@Override protected float[] score0(double[] data, float[] preds) {
throw new UnsupportedOperationException();
}
@Override public ConfusionMatrix cm() {
long[][] cm = confusion_matrix;
if( cm != null )
return new ConfusionMatrix(cm);
return null;
}
}
public static class NeuralNetScore extends ModelJob {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
static final String DOC_GET = "Neural network scoring";
@API(help = "Model", required = true, filter = Default.class)
public NeuralNetModel model;
@API(help = "Rows to consider for scoring, 0 (default) means the whole frame", filter = Default.class)
public long max_rows;
@API(help = "Classification error")
public double classification_error;
@API(help = "Mean square error")
public double mean_square_error;
@API(help = "Cross entropy")
public double cross_entropy;
@API(help = "Confusion matrix")
public long[][] confusion_matrix;
public NeuralNetScore() {
description = DOC_GET;
}
@Override protected Response serve() {
init();
Frame[] frs = model.adapt(source, false);
int classes = model.layers[model.layers.length - 1].units;
confusion_matrix = new long[classes][classes];
Layer[] clones = new Layer[model.layers.length];
for( int y = 0; y < model.layers.length; y++ ) {
clones[y] = model.layers[y].clone();
clones[y]._w = model.weights[y];
clones[y]._b = model.biases[y];
}
Vec[] vecs = frs[0].vecs();
Vec[] data = Utils.remove(vecs, vecs.length - 1);
Vec resp = vecs[vecs.length - 1];
Errors e = eval(clones, data, resp, max_rows, confusion_matrix);
classification_error = e.classification;
mean_square_error = e.mean_square;
cross_entropy = e.cross_entropy;
if( frs[1] != null )
frs[1].delete();
return Response.done(this);
}
@Override public boolean toHTML(StringBuilder sb) {
final boolean classification = model.isClassifier();
if (classification) {
DocGen.HTML.section(sb, "Classification error: " + String.format("%5.2f %%", 100 * classification_error));
}
DocGen.HTML.section(sb, "Mean square error: " + mean_square_error);
if (classification) {
DocGen.HTML.section(sb, "Mean cross entropy: " + cross_entropy);
String[] domain = null;
if (response.domain() != null) {
domain = response.domain();
} else {
// find the names for the categories from the model's domains, after finding the correct column
int idx = source.find(response);
if( idx == -1 ) {
Vec vm = response.masterVec();
if( vm != null ) idx = source.find(vm);
}
if (idx != -1) domain = model._domains[idx];
}
confusion(sb, "Confusion Matrix", domain, confusion_matrix);
}
return true;
}
static void confusion(StringBuilder sb, String title, String[] classes, long[][] confusionMatrix) {
//sb.append("<h3>" + title + "</h3>");
sb.append("<table class='table table-striped table-bordered table-condensed'>");
sb.append("<tr><th>Actual \\ Predicted</th>");
if( classes == null ) {
classes = new String[confusionMatrix.length];
for( int i = 0; i < classes.length; i++ )
classes[i] = "" + i;
}
for( String c : classes )
sb.append("<th>" + c + "</th>");
sb.append("<th>Error</th></tr>");
long[] totals = new long[classes.length];
long sumTotal = 0;
long sumError = 0;
for( int crow = 0; crow < classes.length; ++crow ) {
long total = 0;
long error = 0;
sb.append("<tr><th>" + classes[crow] + "</th>");
for( int ccol = 0; ccol < classes.length; ++ccol ) {
long num = confusionMatrix[crow][ccol];
total += num;
totals[ccol] += num;
if( ccol == crow ) {
sb.append("<td style='background-color:LightGreen'>");
} else {
sb.append("<td>");
error += num;
}
sb.append(num);
sb.append("</td>");
}
sb.append("<td>");
sb.append(String.format("%5.3f = %d / %d", (double) error / total, error, total));
sb.append("</td></tr>");
sumTotal += total;
sumError += error;
}
sb.append("<tr><th>Totals</th>");
for (long total : totals) sb.append("<td>" + total + "</td>");
sb.append("<td><b>");
sb.append(String.format("%5.3f = %d / %d", (double) sumError / sumTotal, sumError, sumTotal));
sb.append("</b></td></tr>");
sb.append("</table>");
}
}
static int cores() {
int cores = 0;
for( H2ONode node : H2O.CLOUD._memary )
cores += node._heartbeat._num_cpus;
return cores;
}
/**
* Makes sure small datasets are spread over enough chunks to parallelize training.
*/
public static void reChunk(Vec[] vecs) {
final int splits = cores() * 2; // More in case of unbalance
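// e.g., a 2-node cloud with 8 cores per node yields cores() = 16 and splits = 32 target chunks.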
if( vecs[0].nChunks() < splits ) {
// A new random VectorGroup
Key keys[] = new Vec.VectorGroup().addVecs(vecs.length);
for( int v = 0; v < vecs.length; v++ ) {
AppendableVec vec = new AppendableVec(keys[v]);
long rows = vecs[0].length();
Chunk cache = null;
for( int split = 0; split < splits; split++ ) {
long off = rows * split / splits;
long lim = rows * (split + 1) / splits;
NewChunk chunk = new NewChunk(vec, split);
for( long r = off; r < lim; r++ ) {
if( cache == null || r < cache._start || r >= cache._start + cache._len )
cache = vecs[v].chunkForRow(r);
if( !cache.isNA(r) ) {
if( vecs[v]._domain != null )
chunk.addEnum((int) cache.at8(r));
else if( vecs[v].isInt() )
chunk.addNum(cache.at8(r), 0);
else
chunk.addNum(cache.at(r));
} else {
if( vecs[v].isInt() )
chunk.addNA();
else {
// Don't use addNA() for doubles, as NewChunk uses separate array
chunk.addNum(Double.NaN);
}
}
}
chunk.close(split, null);
}
Vec t = vec.close(null);
t._domain = vecs[v]._domain;
vecs[v] = t;
}
}
}
}
|
0 | java-sources/ai/h2o/h2o-classic/2.8 | java-sources/ai/h2o/h2o-classic/2.8/hex/OneHot.java |
package hex;
import water.MRTask2;
import water.Key;
import water.fvec.*;
import java.util.Arrays;
import java.util.ArrayList;
public class OneHot extends MRTask2<OneHot>{
int[] _offsets;
public static Frame expandDataset(Frame fr, Key destkey) {//, int[] ignored) {
ArrayList<Vec> nvecs = new ArrayList<Vec>();
ArrayList<Vec> evecs = new ArrayList<Vec>();
ArrayList<String> eNames = new ArrayList<String>();
ArrayList<String> nNames = new ArrayList<String>();
int[] offsets = new int[fr.numCols()+1];
Vec[] vecs = fr.vecs();
int c = 0;
// int ip = 0; //ignored pointer
for (int i = 0; i < fr.numCols(); i++) {
if( vecs[i].isEnum() ) {//&& i != ignored[ip]) {//!fr._names {//_names[i]. { //equals(ignored)) {
offsets[evecs.size()] = c;
evecs.add(vecs[i]);
String name = fr._names[i];
c += vecs[i]._domain.length;
for(String s: vecs[i]._domain) eNames.add(name+"."+s);
} else {
//if(i == ignored[ip] && ip < ignored.length - 1) ip++;
nvecs.add(vecs[i]);
nNames.add(fr._names[i]);
}
}
offsets[evecs.size()] = c;
if (evecs.isEmpty()) return fr;
offsets = Arrays.copyOf(offsets, evecs.size() + 1);
OneHot ss = new OneHot();
ss._offsets = offsets;
int l = offsets[evecs.size()];
ss.doAll(l,evecs.toArray(new Vec[evecs.size()]));
Frame fr2 = ss.outputFrame(destkey,eNames.toArray(new String[eNames.size()]),new String[l][]);
fr2.add(new Frame(nNames.toArray(new String[nNames.size()]), nvecs.toArray(new Vec[nvecs.size()])),false);
return fr2;
}
@Override public void map(Chunk[] inputs, NewChunk[] outputs) {
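// For each row, emit a 1 in the output column matching the enum level index and 0 elsewhere,
// e.g. level index 2 of a 3-level factor becomes the indicator triple (0, 0, 1).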
for(int i=0; i <inputs[0]._len; i++ ) {
for(int j=0; j<inputs.length; j++) {
int idx = (int)inputs[j].at0(i);
for(int k = 0; k <( _offsets[j+1] - _offsets[j]); k++) {
outputs[k+_offsets[j]].addNum(k==idx ? 1 : 0, 0);
}
}
}
}
}
|
0 | java-sources/ai/h2o/h2o-classic/2.8 | java-sources/ai/h2o/h2o-classic/2.8/hex/ParamsSearch.java |
package hex;
import hex.rng.MersenneTwisterRNG;
import java.lang.annotation.*;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.*;
import water.util.Log;
/**
* Looks for parameters on a set of objects and performs a random search.
*/
public class ParamsSearch {
@Retention(RetentionPolicy.RUNTIME)
public @interface Info {
/**
* Parameter search will move the value relative to origin.
*/
double origin() default 0;
double min() default Double.NaN;
double max() default Double.NaN;
}
@Retention(RetentionPolicy.RUNTIME)
public @interface Ignore {
}
Param[] _params;
Random _rand = new MersenneTwisterRNG(new Random().nextLong());
double _rate = .1;
class Param {
int _objectIndex;
Field _field;
Info _info;
double _initial, _best, _last;
@Info
public double defaults;
void modify(Object o) throws Exception {
if( _field.getType() == boolean.class ) {
if( _rand.nextDouble() < _rate ) {
_last = _best == 0 ? 1 : 0;
_field.set(o, _last == 1);
}
} else {
if( _info == null )
_info = Param.class.getField("defaults").getAnnotation(Info.class);
double delta = (_best - _info.origin()) * _rate;
double min = _best - delta, max = _best + delta;
_last = min + _rand.nextDouble() * (max - min);
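// e.g., with _best = 0.005, origin() = 0 and _rate = 0.1, delta = 0.0005 and the new value
// is drawn uniformly from [0.0045, 0.0055].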
if( _field.getType() == float.class )
_field.set(o, (float) _last);
else if( _field.getType() == int.class )
_field.set(o, (int) _last);
}
String change = _best + " -> " + _last;
Log.info(this + ": " + change);
}
void write() {
Log.info(this + ": " + _best);
}
String objectName() {
return _field.getDeclaringClass().getName() + " " + _objectIndex;
}
@Override public String toString() {
return objectName() + "." + _field.getName();
}
}
public void run(Object... os) {
try {
ArrayList<Object> expanded = new ArrayList<Object>();
for( Object o : os ) {
if( o instanceof Object[] )
expanded.addAll(Arrays.asList((Object[]) o));
else if( o instanceof Collection )
expanded.addAll((Collection) o);
else
expanded.add(o);
}
if( _params == null ) {
ArrayList<Param> params = new ArrayList<Param>();
for( int i = 0; i < expanded.size(); i++ ) {
Class c = expanded.get(i).getClass();
ArrayList<Field> fields = new ArrayList<Field>();
getAllFields(fields, c);
for( Field f : fields ) {
f.setAccessible(true);
if( (f.getModifiers() & Modifier.STATIC) == 0 && !ignore(f) ) {
Object v = f.get(expanded.get(i));
if( v instanceof Number || v instanceof Boolean ) {
Param param = new Param();
for( Annotation a : f.getAnnotations() )
if( a.annotationType() == Info.class )
param._info = (Info) a;
param._objectIndex = i;
param._field = f;
if( v instanceof Boolean )
param._initial = ((Boolean) v).booleanValue() ? 1 : 0;
else
param._initial = ((Number) v).doubleValue();
param._last = param._best = param._initial;
params.add(param);
param.write();
}
}
}
}
_params = params.toArray(new Param[0]);
Log.info(toString());
} else {
for( int i = 0; i < _params.length; i++ )
modify(expanded, i);
}
} catch( Exception ex ) {
throw new RuntimeException(ex);
}
}
private static boolean ignore(Field f) {
for( Annotation a : f.getAnnotations() )
if( a.annotationType() == Ignore.class )
return true;
return false;
}
private static void getAllFields(List<Field> fields, Class<?> type) {
for( Field field : type.getDeclaredFields() )
fields.add(field);
if( type.getSuperclass() != null )
getAllFields(fields, type.getSuperclass());
}
private void modify(ArrayList<Object> expanded, int i) throws Exception {
Object o = expanded.get(_params[i]._objectIndex);
_params[i].modify(o);
}
public void save() {
for( int i = 0; i < _params.length; i++ )
_params[i]._best = _params[i]._last;
}
// @Override public String toString() {
// StringBuilder sb = new StringBuilder();
// int objectIndex = -1;
// for( Param param : _params ) {
// if( objectIndex != param._objectIndex ) {
// objectIndex = param._objectIndex;
// sb.append(param._field.getDeclaringClass().getName() + " " + objectIndex + '\n');
// }
// sb.append(" " + param._field.getName() + ": " + param._best + '\n');
// }
// return sb.toString();
// }
}
|
0 | java-sources/ai/h2o/h2o-classic/2.8 | java-sources/ai/h2o/h2o-classic/2.8/hex/Quantiles.java |
package hex;
import water.*;
import water.api.*;
import water.api.Request.API;
import water.fvec.*;
import water.util.Utils;
import water.util.Log;
/**
* Quantile of a column.
*/
// R doesn't like NAs in a column
// Error in quantile.default(nah[, 1], c(1)) :
// missing values and NaN's not allowed if 'na.rm' is FALSE
// suppose we have to tolerate empty columns and all NA cols, and single value col
public class Quantiles extends Iced {
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
// This Request supports the HTML 'GET' command, and this is the help text
// for GET.
static final String DOC_GET = "Returns a quantile of a fluid-vec frame";
public static final int MAX_ENUM_SIZE = H2O.DATA_MAX_FACTOR_LEVELS;
public long _totalRows; // non-empty rows per group
// FIX! not sure if I need to save these here from vec
// why were these 'transient' ? doesn't make sense if hcnt2 stuff wasn't transient
// they're not very big. are they serialized in the map/reduce?
final double _max;
final double _min;
final boolean _isInt;
final boolean _isEnum;
final String[] _domain;
// used to feed the next iteration for multipass?
// used in exactQuantilesMultiPass only
final double _valStart;
final double _valEnd;
final long _valMaxBinCnt;
// just for info on current pass?
public double _valRange;
public double _valBinSize;
public double _newValStart;
public double _newValEnd;
public double[] _pctile;
public boolean _interpolated = false; // FIX! do I need this?
public boolean _done = false; // FIX! do I need this?
// OUTPUTS
// Basic info
@API(help="name" ) public String colname; // FIX! currently not set. Need at least one for class loading
public long[] hcnt2; // finer histogram. not visible
public double[] hcnt2_min; // min actual for each bin
public double[] hcnt2_max; // max actual for each bin
public long hcnt2_low; // count below current binning
public long hcnt2_high; // count above current binning
public double hcnt2_high_min; // min above current binning
public static class BinTask2 extends MRTask2<BinTask2> {
private final int _max_qbins;
private final double _valStart;
private final double _valEnd;
public Quantiles _qbins[];
public BinTask2 (int max_qbins, double valStart, double valEnd) {
_max_qbins = max_qbins;
_valStart = valStart;
_valEnd = valEnd;
}
@Override public void map(Chunk[] cs) {
_qbins = new Quantiles[cs.length];
for (int i = 0; i < cs.length; i++)
_qbins[i] = new Quantiles(_fr.vecs()[i], _max_qbins, _valStart, _valEnd).add(cs[i]);
}
@Override public void reduce(BinTask2 other) {
for (int i = 0; i < _qbins.length; i++)
_qbins[i].add(other._qbins[i]);
// will all the map memory get reclaimed now, since the reduce has gathered it?
// we want to keep 1st iteration object around in for lists of thresholds to do
// so hopefully this means just the reduce histogram will stay around.
// FIX! Maybe unnecessary/implied or better way?
other = null;
}
}
// FIX! currently only take one quantile at a time here..ability to do a list though
public void finishUp(Vec vec, double[] quantiles_to_do, int interpolation_type, boolean multiPass) {
assert quantiles_to_do.length == 1 : "currently one quantile at a time. caller can reuse qbin for now.";
// below, we force it to ignore length and only do [0]
// need to figure out if we need to do a list and how that's returned
_pctile = new double[quantiles_to_do.length];
if ( _isEnum ) {
_done = false;
}
else {
if ( multiPass ) {
_done = exactQuantilesMultiPass(_pctile, quantiles_to_do, interpolation_type);
}
else {
_done = approxQuantilesOnePass(_pctile, quantiles_to_do, interpolation_type);
}
}
}
public Quantiles(Vec vec, int max_qbins, double valStart, double valEnd) {
_isEnum = vec.isEnum();
_isInt = vec.isInt();
_domain = vec.isEnum() ? vec.domain() : null;
_max = vec.max();
_min = vec.min();
_totalRows = 0;
_valStart = valStart;
_valEnd = valEnd;
_valRange = valEnd - valStart;
assert max_qbins > 0 && max_qbins <= 1000000 : "max_qbins must be >0 and <= 1000000";
int desiredBinCnt = max_qbins;
int maxBinCnt = desiredBinCnt + 1;
_valBinSize = _valRange / (desiredBinCnt + 0.0);
_valMaxBinCnt = maxBinCnt;
if( vec.isEnum() && _domain.length < MAX_ENUM_SIZE ) {
hcnt2 = new long[_domain.length];
hcnt2_min = new double[_domain.length];
hcnt2_max = new double[_domain.length];
}
else if ( !Double.isNaN(_min) ) {
assert maxBinCnt > 0;
// Log.debug("Q_ Multiple pass histogram starts at "+_valStart);
// Log.debug("Q_ _min "+_min+" _max "+_max);
hcnt2 = new long[maxBinCnt];
hcnt2_min = new double[maxBinCnt];
hcnt2_max = new double[maxBinCnt];
}
else { // vec does not contain finite numbers
// okay this one entry hcnt2 stuff is making the algo die ( I guess the min was nan above)
// for now, just make it length 2
hcnt2 = new long[2];
hcnt2_min = new double[2];
hcnt2_max = new double[2];
}
hcnt2_low = 0;
hcnt2_high = 0;
hcnt2_high_min = 0;
// hcnt2 implicitly zeroed on new
}
public Quantiles(Vec vec) {
// default to 1000 bin
// still would need to call the finishUp you want, to get a result,
// and do multipass iteration/finishUp, if desired
this(vec, 1000, vec.min(), vec.max());
}
public Quantiles add(Chunk chk) {
for (int i = 0; i < chk._len; i++)
add(chk.at0(i));
return this;
}
public void add(double val) {
if ( Double.isNaN(val) ) return;
// can get infinity due to bad enum parse to real
// histogram is sized ok, but the index calc below will be too big
// just drop them. not sure if something better to do?
if( val==Double.POSITIVE_INFINITY ) return;
if( val==Double.NEGATIVE_INFINITY ) return;
if ( _isEnum ) return;
_totalRows++;
long maxBinCnt = _valMaxBinCnt;
// multi pass exact. Should be able to do this for both, if the valStart param is correct
long binIdx2;
// Need to count the stuff outside the bin-gathering,
// since threshold compare is based on total row compare
double valOffset = val - _valStart;
// FIX! do we really need this special case? Not hurting.
if (hcnt2.length==1) {
binIdx2 = 0;
}
else {
binIdx2 = (int) Math.floor(valOffset / _valBinSize);
}
int binIdx2Int = (int) binIdx2;
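// e.g., with _valStart = 0, _valEnd = 100 and 1000 desired bins, _valBinSize = 0.1,
// so a value of 42.37 lands in bin floor(42.37 / 0.1) = 423.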
// we always need the start condition in the bins?
// maybe some redundancy in two compares
if ( valOffset < 0 || binIdx2Int<0 ) {
++hcnt2_low;
}
// we always need the end condition in the bins?
// would using valOffset here be less accurate? maybe some redundancy in two compares
// can't use maxBinCnt-1, because the extra bin is used for one value (the bounds)
else if ( val > _valEnd || binIdx2>=maxBinCnt ) {
if ( (hcnt2_high==0) || (val < hcnt2_high_min) ) hcnt2_high_min = val;
++hcnt2_high;
}
else {
assert (binIdx2Int >= 0 && binIdx2Int < hcnt2.length) :
"binIdx2Int too big for hcnt2 "+binIdx2Int+" "+hcnt2.length;
// Log.debug("Q_ val: "+val+" valOffset: "+valOffset+" _valBinSize: "+_valBinSize);
assert (binIdx2Int>=0) && (binIdx2Int<=maxBinCnt) : "binIdx2Int "+binIdx2Int+" out of range";
if ( hcnt2[binIdx2Int]==0 || (val < hcnt2_min[binIdx2Int]) ) hcnt2_min[binIdx2Int] = val;
if ( hcnt2[binIdx2Int]==0 || (val > hcnt2_max[binIdx2Int]) ) hcnt2_max[binIdx2Int] = val;
++hcnt2[binIdx2Int];
// For debug/info, can report when it goes into extra bin.
// is it ever due to fp arith? Or just the max value?
// not an error! should be protected by newValEnd below, and nextK
// estimates should go into the extra bin if interpolation is needed
if ( false && (binIdx2 == (maxBinCnt-1)) ) {
Log.debug("\nQ_ FP! val went into the extra maxBinCnt bin:"+
binIdx2+" "+hcnt2_high_min+" "+valOffset+" "+
val+" "+_valStart+" "+hcnt2_high+" "+val+" "+_valEnd,"\n");
}
}
}
public Quantiles add(Quantiles other) {
if ( _isEnum ) return this;
assert !Double.isNaN(other._totalRows) : "NaN in other._totalRows merging";
assert !Double.isNaN(_totalRows) : "NaN in _totalRows merging";
_totalRows += other._totalRows;
// merge hcnt2 per-bin mins
// other must be the same length, but use its length for safety
// could add assert on lengths?
for (int k = 0; k < other.hcnt2_min.length; k++) {
// Shouldn't get any
assert !Double.isNaN(other.hcnt2_min[k]) : "NaN in other.hcnt2_min merging";
assert !Double.isNaN(other.hcnt2[k]) : "NaN in other.hcnt2 merging";
assert !Double.isNaN(hcnt2_min[k]) : "NaN in hcnt2_min merging";
assert !Double.isNaN(hcnt2[k]) : "NaN in hcnt2 merging";
// cover the initial case (relying on initial min = 0 to work is wrong)
// Only take the new min if its hcnt2 is non-zero, like a valid bit
// can hcnt2 ever be null here?
if (other.hcnt2[k] > 0) {
if ( hcnt2[k]==0 || ( other.hcnt2_min[k] < hcnt2_min[k] )) {
hcnt2_min[k] = other.hcnt2_min[k];
}
}
}
// merge hcnt2 per-bin maxs
// other must be the same length, but use its length for safety
for (int k = 0; k < other.hcnt2_max.length; k++) {
// shouldn't get any
assert !Double.isNaN(other.hcnt2_max[k]) : "NaN in other.hcnt2_max merging";
assert !Double.isNaN(other.hcnt2[k]) : "NaN in other.hcnt2 merging";
assert !Double.isNaN(hcnt2_max[k]) : "NaN in hcnt2_max merging";
assert !Double.isNaN(hcnt2[k]) : "NaN in hcnt2 merging";
// cover the initial case (relying on initial min = 0 to work is wrong)
// Only take the new max if its hcnt2 is non-zero, like a valid bit
// can hcnt2 ever be null here?
if (other.hcnt2[k] > 0) {
if ( hcnt2[k]==0 || ( other.hcnt2_max[k] > hcnt2_max[k] )) {
hcnt2_max[k] = other.hcnt2_max[k];
}
}
}
// 3 new things to merge for multipass histograms (counts above/below the bins, and the min above the bins)
assert !Double.isNaN(other.hcnt2_high) : "NaN in other.hcnt2_high merging";
assert !Double.isNaN(other.hcnt2_low) : "NaN in other.hcnt2_low merging";
assert !Double.isNaN(hcnt2_high) : "NaN in hcnt2_high merging";
assert !Double.isNaN(hcnt2_low) : "NaN in hcnt2_low merging";
assert other.hcnt2_high==0 || !Double.isNaN(other.hcnt2_high_min) : "0 or NaN in hcnt2_high_min merging";
// these are count merges
hcnt2_low = hcnt2_low + other.hcnt2_low;
hcnt2_high = hcnt2_high + other.hcnt2_high;
// hcnt2_high_min validity is hcnt2_high!=0 (count)
if (other.hcnt2_high > 0) {
if ( hcnt2_high==0 || ( other.hcnt2_high_min < hcnt2_high_min )) {
hcnt2_high_min = other.hcnt2_high_min;
}
}
// can hcnt2 ever be null here?. Inc last, so the zero case is detected above
// seems like everything would fail if hcnt2 doesn't exist here
assert hcnt2 != null;
Utils.add(hcnt2, other.hcnt2);
return this;
}
// need to count >4B rows
private long htot2(long low, long high) {
long cnt = 0;
for (int i = 0; i < hcnt2.length; i++) cnt+=hcnt2[i];
// add the stuff outside the bins, 0,0 for single pass
cnt = cnt + low + high;
return cnt;
}
private boolean exactQuantilesMultiPass(double[] qtiles, double[] quantiles_to_do, int interpolation_type) {
// looked at outside this method. setup for all NA or empty case
// done could be the return value, really should make these 3 available differently
// qtiles is an array just in case we support iterating on quantiles_to_do
// but that would only work for approx, since we won't redo bins here.
boolean done = false;
boolean interpolated = false;
qtiles[0] = Double.NaN;
if( hcnt2.length < 2 ) return false;
assert !_isEnum;
if ( _totalRows==0 ) return false;
assert _totalRows >=0 : _totalRows;
double newValStart = Double.NaN;
double newValEnd = Double.NaN;
double newValRange = Double.NaN;
double newValBinSize = Double.NaN;
boolean forceBestApprox = interpolation_type==-1;
long newValLowCnt;
long maxBinCnt = _valMaxBinCnt;
assert maxBinCnt>1;
long desiredBinCnt = maxBinCnt - 1;
double threshold = quantiles_to_do[0];
assert !Double.isNaN(_valEnd) : _valEnd;
assert !Double.isNaN(_valStart) : _valStart;
assert !Double.isNaN(_valBinSize) : _valBinSize;
if ( _valStart==_valEnd ) Log.debug("exactQuantilesMultiPass: start/end are equal. "+_valStart+" "+_valEnd);
else assert (_valBinSize!=0 && !Double.isNaN(_valBinSize)) : _valBinSize;
// everything should either be in low, the bins, or high
long totalBinnedRows = htot2(hcnt2_low, hcnt2_high);
Log.debug("Q_ totalRows check: "+_totalRows+" "+totalBinnedRows+" "+hcnt2_low+" "+hcnt2_high+" "+_valStart+" "+_valEnd);
assert _totalRows==totalBinnedRows : _totalRows+" "+totalBinnedRows+" "+hcnt2_low+" "+hcnt2_high;
// Find the row count we want to hit, within some bin.
long currentCnt = hcnt2_low;
double targetCntFull = threshold * (_totalRows-1); // zero based indexing
long targetCntInt = (long) Math.floor(targetCntFull);
double targetCntFract = targetCntFull - (double) targetCntInt;
assert (targetCntFract>=0) && (targetCntFract<=1);
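// e.g., threshold = 0.5 over _totalRows = 11 targets zero-based row index 0.5 * 10 = 5.0,
// so targetCntInt = 5 and targetCntFract = 0 (the quantile falls exactly on a row).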
Log.debug("Q_ targetCntInt: "+targetCntInt+" targetCntFract: "+targetCntFract);
// walk thru and find out what bin to look inside
int k = 0;
while(k!=maxBinCnt && ((currentCnt + hcnt2[k]) <= targetCntInt)) {
// Log.debug("Q_ Looping for k: "+threshold+" "+k+" "+maxBinCnt+" "+currentCnt+" "+targetCntInt+
// " "+hcnt2[k]+" "+hcnt2_min[k]+" "+hcnt2_max[k]);
currentCnt += hcnt2[k];
++k;
// Note the loop condition covers the breakout condition:
// (currentCnt==targetCntInt && (hcnt2[k]!=0)
// also: don't go past array bounds
}
Log.debug("Q_ Found k: "+threshold+" "+k+" "+currentCnt+" "+targetCntInt+
" "+_totalRows+" "+hcnt2[k]+" "+hcnt2_min[k]+" "+hcnt2_max[k]);
assert (currentCnt + hcnt2[k]) > targetCntInt : targetCntInt+" "+currentCnt+" "+k+" "+" "+maxBinCnt;
assert hcnt2[k]!=1 || hcnt2_min[k]==hcnt2_max[k];
// Do mean and linear interpolation, if we don't land on a row
// WATCH OUT when comparing results if linear interpolation...it's dependent on
// the number of rows in the dataset, not just adjacent values. So if you skipped a row
// for some reason (header guess?) in a comparison tool, you can get small errors
// both type 2 and type 7 give exact answers that match alternate tools
// (if they do type 2 and 7). scikit-learn doesn't do type 2 but does do type 7
// (but not by default in mquantiles())
// the linear interpolation for k between row a (vala) and row b (valb) is
// pctDiff = (k-a)/(b-a)
// dDiff = pctDiff * (valb - vala)
// result = vala + dDiff
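// e.g., for k = 2.4 between row a = 2 (vala = 10.0) and row b = 3 (valb = 14.0):
// pctDiff = 0.4, dDiff = 0.4 * 4.0 = 1.6, result = 11.6.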
double guess = Double.NaN;
double pctDiff, dDiff;
// -1 is for single pass approximation
assert (interpolation_type==2) || (interpolation_type==7) || (interpolation_type==-1): "Unsupported type "+interpolation_type;
// special cases. If the desired row is the last of equal values in this bin (2 or more)
// we will need to interpolate with a nextK out-of-bin value
// we can't iterate, since it won't improve things and the bin-size will be zero!
// trying to resolve case of binsize=0 for next pass, after this, is flawed thinking.
// implies the values are not the same..end of bin interpolate to next
boolean atStartOfBin = hcnt2[k]>=1 && (currentCnt == targetCntInt);
boolean atEndOfBin = !atStartOfBin && (hcnt2[k]>=2 && ((currentCnt + hcnt2[k] - 1) == targetCntInt));
boolean inMidOfBin = !atStartOfBin && !atEndOfBin && (hcnt2[k]>=3) && (hcnt2_min[k]==hcnt2_max[k]);
boolean interpolateEndNeeded = false;
if ( atEndOfBin ) {
if ( targetCntFract != 0 ) {
interpolateEndNeeded = true;
}
else {
guess = hcnt2_max[k];
done = true;
Log.debug("Q_ Guess M "+guess);
}
}
else if ( inMidOfBin ) {
// if we know there is something before and after us with the same value,
// we never need to interpolate (only allowed when min==max)
guess = hcnt2_min[k];
done = true;
Log.debug("Q_ Guess N "+guess);
}
if ( !done && atStartOfBin ) {
// no interpolation needed
if ( hcnt2[k]>2 && (hcnt2_min[k]==hcnt2_max[k]) ) {
guess = hcnt2_min[k];
done = true;
Log.debug("Q_ Guess A "+guess);
}
// min/max can be equal or not equal here
else if ( hcnt2[k]==2 ) { // interpolate between min/max for the two value bin
if ( interpolation_type==2 ) { // type 2 (mean)
guess = (hcnt2_max[k] + hcnt2_min[k]) / 2.0;
}
else { // default to type 7 (linear interpolation)
// Unlike mean, which just depends on two adjacent values, this adjustment
// adds possible errors related to the arithmetic on the total # of rows.
dDiff = hcnt2_max[k] - hcnt2_min[k]; // two adjacent..as if sorted!
// targetCntFract is fraction of total rows
guess = hcnt2_min[k] + (targetCntFract * dDiff);
}
done = true;
interpolated = true;
Log.debug("Q_ Guess B "+guess+" with type "+interpolation_type+" targetCntFract: "+targetCntFract);
}
// no interpolation needed
else if ( (hcnt2[k]==1) && (targetCntFract==0) ) {
assert hcnt2_min[k]==hcnt2_max[k];
guess = hcnt2_min[k];
done = true;
Log.debug("Q_ Guess C "+guess);
}
}
// interpolate into a nextK value
// all the qualification is so we don't set done when we're not, for multipass
// interpolate from single bin, end of two entry bin, or for approx
boolean stillCanGetIt = atStartOfBin && hcnt2[k]==1 && targetCntFract!=0;
if ( !done && (stillCanGetIt || interpolateEndNeeded || forceBestApprox)) {
if ( hcnt2[k]==1 ) {
assert hcnt2_min[k]==hcnt2_max[k];
Log.debug("Q_ Single value in this bin, but fractional means we need to interpolate to next non-zero");
}
if ( interpolateEndNeeded ) {
Log.debug("Q_ Interpolating off the end of a bin!");
}
int nextK;
if ( k<maxBinCnt ) nextK = k + 1; // could put it over maxBinCnt
else nextK = k;
// definitely see stuff going into the extra bin, so search that too!
while ( (nextK<maxBinCnt) && (hcnt2[nextK]==0) ) ++nextK;
assert nextK > k : k+" "+nextK;
// have the "extra bin" for this
double nextVal;
if ( nextK >= maxBinCnt ) {
// assume we didn't set hcnt2_high_min on first pass, because tighter start/end bounds
if ( forceBestApprox ) {
Log.debug("Q_ Using _valEnd for approx interpolate: "+_valEnd);
nextVal = _valEnd;
}
else {
assert hcnt2_high!=0;
Log.debug("Q_ Using hcnt2_high_min for interpolate: "+hcnt2_high_min);
nextVal = hcnt2_high_min;
}
}
else {
Log.debug("Q_ Using nextK for interpolate: "+nextK);
assert hcnt2[nextK]!=0;
nextVal = hcnt2_min[nextK];
}
Log.debug("Q_ k hcnt2_max[k] nextVal");
Log.debug("Q_ "+k+" "+hcnt2_max[k]+" "+nextVal);
Log.debug("Q_ \nInterpolating result using nextK: "+nextK+ " nextVal: "+nextVal);
// type 7 (linear interpolation) ||
// single pass approx..with unresolved bin
if ( (forceBestApprox && stillCanGetIt) || interpolation_type==7) {
dDiff = nextVal - hcnt2_max[k]; // two adjacent, as if sorted!
// targetCntFract is fraction of total rows
guess = hcnt2_max[k] + (targetCntFract * dDiff);
}
else if ( forceBestApprox ) { // single pass approx..with unresolved bin
// use the max within this bin instead of nextVal here, to stay
// within the guaranteed worst-case error bounds
dDiff = (hcnt2_max[k] - hcnt2_min[k]) / hcnt2[k];
guess = hcnt2_min[k] + (targetCntFull-currentCnt) * dDiff;
}
else { // type 2 (mean)
guess = (hcnt2_max[k] + nextVal) / 2.0;
}
interpolated = true;
done = true; // has to be one above us when needed. (or we're at end)
Log.debug("Q_ Guess D "+guess+" with type "+interpolation_type+
" targetCntFull: "+targetCntFull+" targetCntFract: "+targetCntFract+
" _totalRows: " + _totalRows+" "+stillCanGetIt+" "+forceBestApprox);
}
if ( !done && !forceBestApprox) { // don't need for 1 pass approx
// Possible bin leakage at start/end edges due to fp arith.
// bin index arithmetic may resolve OVER the boundary used by the hcnt2_high compare.
// Presumably just one value should land in the desiredBinCnt+1 bin: the end value?
// To cover possible fp issues:
// See if there's a non-zero bin below (min) or above (max) you, to avoid shrinking wrong.
// Just need to check the one bin below and above k, if they exist.
// They might have zero entries, but then it's okay to ignore them.
// update: use the closest edge in the next bin. better forward progress for small bin counts
// This code may make the practical min bin count around 4 or so (not 2).
// what has a length-1 hcnt2 that makes this fail? Enums? We shouldn't get here for those.
newValStart = hcnt2_min[k];
if ( k > 0 ) {
if ( hcnt2[k-1]>0 && (hcnt2_max[k-1]<hcnt2_min[k]) ) {
newValStart = hcnt2_max[k-1];
}
}
// subtle. we do sometimes put stuff in the extra end bin (see above)
// k might be pointing to one less than that (like k=0 for 1 bin case)
newValEnd = hcnt2_max[k];
if ( k < (maxBinCnt-1) ) {
assert k+1 < hcnt2.length : k+" "+hcnt2.length+" "+_valMaxBinCnt+" "+_isEnum+" "+_isInt;
if ( hcnt2[k+1]>0 && (hcnt2_min[k+1]>hcnt2_max[k]) ) {
newValEnd = hcnt2_min[k+1];
}
}
newValRange = newValEnd - newValStart;
// maxBinCnt is always binCount + 1, since we might cover over due to rounding/fp issues?
newValBinSize = newValRange / (desiredBinCnt + 0.0);
newValLowCnt = currentCnt - 1; // is this right? don't use for anything (debug?)
// Since we always may need an interpolation, this seems bad if we get this with !done
if ( newValBinSize==0 ) {
Log.debug("Q_ Assuming done because newValBinSize is 0.");
Log.debug("Q_ newValRange: "+newValRange+
" hcnt2[k]: "+hcnt2[k]+
" hcnt2_min[k]: "+hcnt2_min[k]+
" hcnt2_max[k]: "+hcnt2_max[k]);
guess = newValStart;
Log.debug("Q_ Guess G "+guess);
// This case is unexpected (could be made a hard "assert false" to flush it out), but we
// fall back to newValStart rather than aborting.
done = true;
}
}
Log.debug("Q_ guess: "+guess+" done: "+done+" hcnt2[k]: "+hcnt2[k]);
Log.debug("Q_ currentCnt: "+currentCnt+" targetCntInt: "+targetCntInt+" hcnt2_low: "+hcnt2_low+" hcnt2_high: "+hcnt2_high);
Log.debug("Q_ was "+_valStart+" "+_valEnd+" "+_valRange+" "+_valBinSize);
Log.debug("Q_ next "+newValStart+" "+newValEnd+" "+newValRange+" "+newValBinSize);
qtiles[0] = guess;
// We want to leave them now! we reuse in exec for multi-thresholds
// hcnt2 = null;
// hcnt2_min = null;
// hcnt2_max = null;
_newValStart = newValStart;
_newValEnd = newValEnd;
_interpolated = interpolated;
return done;
}
// This won't be used within a multipass iteration of qbins, so it always has to return a best guess.
// It also needs to interpolate for bins whose differing values aren't resolved by min/max,
// so we give it a special interpolation type (-1) that we decode and use above.
private boolean approxQuantilesOnePass(double[] qtiles, double[] quantiles_to_do, int interpolation_type) {
exactQuantilesMultiPass(qtiles, quantiles_to_do, -1);
return true;
}
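  // Illustrative reference only (not part of the original file): a direct type-7 quantile on a
  // small, already-sorted array, handy for sanity-checking the histogram-based result above.
  // The method name and signature are hypothetical additions for this sketch.
  private static double type7QuantileOnSorted(double[] sorted, double threshold) {
    double target = threshold * (sorted.length - 1); // zero-based fractional row index
    int lo = (int) Math.floor(target);
    double fract = target - lo;
    if (fract == 0 || lo + 1 >= sorted.length) return sorted[lo];
    return sorted[lo] + fract * (sorted[lo + 1] - sorted[lo]); // linear interpolation
  }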
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/ReBalance.java
|
package hex;
import water.H2O;
import water.Key;
import water.Request2;
import water.UKV;
import water.api.DocGen;
import water.api.Request;
import water.api.RequestBuilders;
import water.fvec.Frame;
import water.fvec.RebalanceDataSet;
import water.util.RString;
/**
* Rebalance a Frame
*/
public class ReBalance extends Request2 {
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@Request.API(help = "Frame to rebalance", required = true, filter = Request.Default.class, json=true)
public Frame source;
@Request.API(help = "Key for rebalanced frame", filter = Request.Default.class, json=true)
public Key after = source != null ? Key.make(source._key.toString() + ".balanced") : null;
@Request.API(help = "Number of chunks", filter = Request.Default.class, json=true)
public int chunks = H2O.CLOUD.size() * H2O.NUMCPUS * 4;
@Override public RequestBuilders.Response serve() {
if( source==null ) throw new IllegalArgumentException("Missing frame to rebalance!");
try {
if (chunks > source.numRows()) throw new IllegalArgumentException("Cannot create more than " + source.numRows() + " chunks.");
if( after==null ) after = Key.make(source._key.toString() + ".balanced");
RebalanceDataSet rb = new RebalanceDataSet(source, after, chunks);
H2O.submitTask(rb);
rb.join();
return RequestBuilders.Response.done(this);
} catch( Throwable t ) {
return RequestBuilders.Response.error(t);
}
}
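  // Illustrative usage sketch (not part of the original file), assuming a Frame 'fr' is already
  // parsed into the K/V store on a running cloud:
  //   ReBalance rb = new ReBalance();
  //   rb.source = fr;
  //   rb.chunks = 32;   // desired chunk count; must not exceed fr.numRows()
  //   rb.serve();       // blocks until the rebalanced copy is written under 'rb.after'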
@Override public boolean toHTML( StringBuilder sb ) {
if (UKV.get(after)==null) {
return false;
}
RString aft = new RString("<a href='Inspect2.html?src_key=%$key'>%key</a>");
aft.replace("key", after);
DocGen.HTML.section(sb, "Rebalancing done. Frame '" + aft.toString()
+ "' now has " + ((Frame)UKV.get(after)).anyVec().nChunks()
+ " chunks (source: " + source.anyVec().nChunks() + ").");
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/ShuffleTask.java
|
/**
*
*/
package hex;
import java.util.Random;
import water.MRTask2;
import water.fvec.*;
import water.util.Utils;
/** Simple shuffle task based on Fisher/Yates algo.
*
* WARNING: It shuffles data only inside the chunk.
*/
public class ShuffleTask extends MRTask2<ShuffleTask> {
@Override public void map(Chunk ic, Chunk oc) {
if (ic._len==0) return;
// Each vector is shuffled in the same way
Random rng = Utils.getRNG(seed(ic.cidx()));
oc.set0(0,ic.at0(0));
for (int row=1; row<ic._len; row++) {
int j = rng.nextInt(row+1); // inclusive upper bound <0,row>
// Arghhh: expand the vector into double
if (j!=row) oc.set0(row, oc.at0(j));
oc.set0(j, ic.at0(row));
}
}
public static long seed(int cidx) { return (0xe031e74f321f7e29L + ((long) cidx << 32)); } // widen before shifting so the chunk index lands in the high bits
public static Vec shuffle(Vec ivec) {
Vec ovec = ivec.makeZero();
new ShuffleTask().doAll(ivec, ovec);
return ovec;
}
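  // Illustrative usage sketch (not part of the original file), assuming 'v' is a parsed Vec:
  //   Vec shuffled = ShuffleTask.shuffle(v);  // per-chunk Fisher-Yates shuffle
  //   // rows stay within their original chunk, so this is not a global permutation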
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/Summary2.java
|
package hex;
import water.*;
import water.api.*;
import water.api.Request.API;
import water.fvec.*;
import water.exec.Flow;
import water.util.Utils;
import water.util.Log;
import java.util.Arrays;
/**
* Summary of a column.
*/
public class Summary2 extends Iced {
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
// This Request supports the HTML 'GET' command, and this is the help text
// for GET.
static final String DOC_GET = "Returns a summary of a fluid-vec frame";
public static final int MAX_HIST_SZ = H2O.DATA_MAX_FACTOR_LEVELS;
public static final int NMAX = 5;
// Boundaries updated to 0.1%, 1%, ..., 99%, 99.9% so the R code didn't have to change.
// Ideally we'd extend the array here and just update the R extraction of the 25/50/75 percentiles.
// Note: python tests (junit?) may look at this result.
public static final double DEFAULT_PERCENTILES[] = {0.001,0.01,0.10,0.25,0.33,0.50,0.66,0.75,0.90,0.99,0.999};
private static final int T_REAL = 0;
private static final int T_INT = 1;
private static final int T_ENUM = 2;
public BasicStat _stat0; /* Basic Vec stats collected by PrePass. */
public final int _type; // 0 - real; 1 - int; 2 - enum
public double[] _mins;
public double[] _maxs;
long _gprows; // non-empty rows per group
final transient String[] _domain;
final transient double _start;
final transient double _start2;
final transient double _binsz;
final transient double _binsz2; // 2nd finer grained histogram used for quantile estimates for numerics
transient int _len1; /* Size of filled elements in a chunk. */
transient double[] _pctile;
static abstract class Stats extends Iced {
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
@API(help="stats type" ) public String type;
Stats(String type) { this.type = type; }
}
// An internal JSON-output-only class
@SuppressWarnings("unused")
static class EnumStats extends Stats {
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
public EnumStats( int card ) {
super("Enum");
this.cardinality = card;
}
@API(help="cardinality" ) public final int cardinality;
}
static class NumStats extends Stats {
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
public NumStats( double mean, double sigma, long zeros, double[] mins, double[] maxs, double[] pctile) {
super("Numeric");
this.mean = mean;
this.sd = sigma;
this.zeros = zeros;
this.mins = mins;
this.maxs = maxs;
this.pctile = pctile;
this.pct = DEFAULT_PERCENTILES;
}
@API(help="mean" ) public final double mean;
@API(help="sd" ) public final double sd;
@API(help="#zeros" ) public final long zeros;
@API(help="min elements") public final double[] mins; // min N elements
@API(help="max elements") public final double[] maxs; // max N elements
@API(help="percentile thresholds" ) public final double[] pct;
@API(help="percentiles" ) public final double[] pctile;
}
// OUTPUTS
// Basic info
@API(help="name" ) public String colname;
@API(help="type" ) public String type;
// Basic stats
@API(help="NAs" ) public long nacnt;
@API(help="Base Stats" ) public Stats stats;
@API(help="histogram start") public double hstart;
@API(help="histogram bin step") public double hstep;
@API(help="histogram headers" ) public String[] hbrk;
@API(help="histogram bin values") public long[] hcnt;
public long[] hcnt2; // finer histogram. not visible
public double[] hcnt2_min; // min actual for each bin
public double[] hcnt2_max; // max actual for each bin
public static class BasicStat extends Iced {
public long _len; /* length of vec */
public long _nas; /* number of NA's */
public long _nans; /* number of NaN's */
public long _pinfs; /* number of positive infinities */
public long _ninfs; /* number of negative infinities */
public long _zeros; /* number of zeros */
public double _min1; /* if there's -Inf, then -Inf, o/w min2 */
public double _max1; /* if there's Inf, then Inf, o/w max2 */
public double _min2; /* min of the finite numbers. NaN if there's none. */
public double _max2; /* max of the finite numbers. NaN if there's none. */
public BasicStat( ) {
_len = 0;
_nas = 0;
_nans = 0;
_pinfs = 0;
_ninfs = 0;
_zeros = 0;
_min1 = Double.NaN;
_max1 = Double.NaN;
_min2 = Double.NaN;
_max2 = Double.NaN;
}
public BasicStat add(Chunk chk) {
_len = chk._len;
for(int i = 0; i < chk._len; i++) {
double val;
if (chk.isNA0(i)) { _nas++; continue; }
if( chk._vec.isUUID() ) continue;
if (Double.isNaN(val = chk.at0(i))) { _nans++; continue; }
if (val == Double.POSITIVE_INFINITY) _pinfs++;
else if (val == Double.NEGATIVE_INFINITY) _ninfs++;
else {
_min2 = Double.isNaN(_min2)? val : Math.min(_min2,val);
_max2 = Double.isNaN(_max2)? val : Math.max(_max2,val);
if (val == .0) _zeros++;
}
}
return this;
}
public BasicStat add(BasicStat other) {
_len += other._len;
_nas += other._nas;
_nans += other._nans;
_pinfs += other._pinfs;
_ninfs += other._ninfs;
_zeros += other._zeros;
if (Double.isNaN(_min2)) _min2 = other._min2;
else if (!Double.isNaN(other._min2)) _min2 = Math.min(_min2, other._min2);
if (Double.isNaN(_max2)) _max2 = other._max2;
else if (!Double.isNaN(other._max2)) _max2 = Math.max(_max2, other._max2);
return this;
}
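// finishUp() resolves the reported min/max after all chunks are merged: infinities win over
// finite values, finite values win over NaN, and a column of only NAs/NaNs reports NaN.
// Example: a column holding {-Inf, 3, 7} ends up with _min1=-Inf and _max1=7.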
public BasicStat finishUp() {
_min1 = _ninfs>0? Double.NEGATIVE_INFINITY /* there's -Inf */
: !Double.isNaN(_min2)? _min2 /* min is finite */
: _pinfs>0? Double.POSITIVE_INFINITY /* Only Infs exist */
: Double.NaN; /* All NaN's or NAs */
_max1 = _pinfs>0? Double.POSITIVE_INFINITY /* there's Inf */
: !Double.isNaN(_max2)? _max2 /* max is finite */
: _ninfs>0? Double.NEGATIVE_INFINITY /* Only -Infs exist */
: Double.NaN; /* All NaN's or NAs */
return this;
}
}
public static class PrePass extends MRTask2<PrePass> {
public BasicStat _basicStats[];
@Override public void map(Chunk[] cs) {
_basicStats = new BasicStat[cs.length];
for (int c=0; c < cs.length; c++)
_basicStats[c] = new BasicStat().add(cs[c]);
}
@Override public void reduce(PrePass other){
for (int c = 0; c < _basicStats.length; c++)
_basicStats[c].add(other._basicStats[c]);
}
public PrePass finishUp() {
for (BasicStat stat : _basicStats) stat.finishUp();
return this;
}
}
public static class SummaryTask2 extends MRTask2<SummaryTask2> {
private BasicStat[] _basics;
private int _max_qbins;
public Summary2 _summaries[];
public SummaryTask2 (BasicStat[] basicStats, int max_qbins) { _basics = basicStats; _max_qbins = max_qbins; }
@Override public void map(Chunk[] cs) {
_summaries = new Summary2[cs.length];
for (int i = 0; i < cs.length; i++)
_summaries[i] = new Summary2(_fr.vecs()[i], _fr.names()[i], _basics[i], _max_qbins).add(cs[i]);
}
@Override public void reduce(SummaryTask2 other) {
for (int i = 0; i < _summaries.length; i++)
_summaries[i].add(other._summaries[i]);
}
}
// Entry point for the Flow passes, to allow easy percentiles on filtered GroupBy
public static class SummaryPerRow extends Flow.PerRow<SummaryPerRow> {
public final Frame _fr;
public final Summary2 _summaries[];
public SummaryPerRow( Frame fr ) { this(fr,null); }
private SummaryPerRow( Frame fr, Summary2[] sums ) { _fr = fr; _summaries = sums; }
@Override public void mapreduce( double ds[] ) {
for( int i=0; i<ds.length; i++ )
_summaries[i].add(ds[i]);
}
@Override public void reduce( SummaryPerRow that ) {
for (int i = 0; i < _summaries.length; i++)
_summaries[i].add(that._summaries[i]);
}
@Override public SummaryPerRow make() {
Vec[] vecs = _fr.vecs();
Summary2 sums[] = new Summary2[vecs.length];
BasicStat basics[] = new PrePass().doAll(_fr).finishUp()._basicStats;
for( int i=0; i<vecs.length; i++ )
sums[i] = new Summary2(vecs[i], _fr._names[i], basics[i]);
return new SummaryPerRow(_fr,sums);
}
@Override public String toString() {
String s = "";
for( int i=0; i<_summaries.length; i++ )
s += _fr._names[i]+" "+_summaries[i]+"\n";
return s;
}
public void finishUp() {
Vec[] vecs = _fr.vecs();
for (int i = 0; i < vecs.length; i++)
_summaries[i].finishUp(vecs[i]);
}
}
@Override public String toString() {
String s = "";
if( stats instanceof NumStats ) {
double pct [] = ((NumStats)stats).pct ;
double pctile[] = ((NumStats)stats).pctile;
for( int i=0; i<pct.length; i++ )
s += ""+(pct[i]*100)+"%="+pctile[i]+", ";
} else {
s += "cardinality="+((EnumStats)stats).cardinality;
}
return s;
}
public void finishUp(Vec vec) {
nacnt = _stat0._nas;
if (_type == T_ENUM) {
// Compute majority items for enum data
computeMajorities();
} else {
_pctile = new double[DEFAULT_PERCENTILES.length];
approxQuantiles(_pctile, DEFAULT_PERCENTILES, _stat0._min2, _stat0._max2);
}
// remove the trailing NaNs
for (int i = 0; i < _mins.length; i++) {
if (Double.isNaN(_mins[i])) {
_mins = Arrays.copyOf(_mins, i);
break;
}
}
for (int i = 0; i < _maxs.length; i++) {
if (Double.isNaN(_maxs[i])) {
_maxs = Arrays.copyOf(_maxs, i);
break;
}
}
for (int i = 0; i < _maxs.length>>>1; i++) {
double t = _maxs[i];
_maxs[i] = _maxs[_maxs.length-1-i];
_maxs[_maxs.length-1-i] = t;
}
this.stats = _type==T_ENUM ?
new EnumStats(vec.domain().length) :
new NumStats(vec.mean(), vec.sigma(), _stat0._zeros, _mins, _maxs, _pctile);
if (_type == T_ENUM) {
this.hstart = 0;
this.hstep = 1;
this.hbrk = _domain;
} else {
this.hstart = _start;
this.hstep = _binsz;
this.hbrk = new String[hcnt.length];
for (int i = 0; i < hbrk.length; i++)
hbrk[i] = Utils.p2d(i==0 ? _start : binValue(i));
}
}
public Summary2(Vec vec, String name, BasicStat stat0, int max_qbins) {
colname = name;
_stat0 = stat0;
_type = vec.isEnum()?T_ENUM:vec.isInt()?T_INT:T_REAL;
_domain = vec.isEnum() ? vec.domain() : null;
_gprows = 0;
double sigma = Double.isNaN(vec.sigma()) ? 0 : vec.sigma();
if ( _type != T_ENUM ) {
_mins = MemoryManager.malloc8d((int)Math.min(vec.length(),NMAX));
_maxs = MemoryManager.malloc8d((int)Math.min(vec.length(),NMAX));
Arrays.fill(_mins, Double.NaN);
Arrays.fill(_maxs, Double.NaN);
} else {
_mins = MemoryManager.malloc8d(Math.min(_domain.length,NMAX));
_maxs = MemoryManager.malloc8d(Math.min(_domain.length,NMAX));
}
if( vec.isEnum() && _domain.length < MAX_HIST_SZ ) {
_start = 0;
_start2 = 0;
_binsz = 1;
_binsz2 = 1;
// hack for now. if there are no enum values, keep these length 1, for consistency
// in asserts below
int dlength = _domain.length==0 ? 1 : _domain.length;
hcnt = new long[dlength];
hcnt2 = new long[dlength];
hcnt2_min = new double[dlength];
hcnt2_max = new double[dlength];
}
else if ( !(Double.isNaN(stat0._min2) || Double.isNaN(stat0._max2)) ) {
// guard against improper parse (date type) or zero c._sigma
long N = _stat0._len - stat0._nas - stat0._nans - stat0._pinfs - stat0._ninfs;
double b = Math.max(1e-4,3.5 * sigma/ Math.cbrt(N));
double d = Math.pow(10, Math.floor(Math.log10(b)));
if (b > 20*d/3)
d *= 10;
else if (b > 5*d/3)
d *= 5;
// tweak for integers
if (d < 1. && vec.isInt()) d = 1.;
// Result from the dynamic bin sizing equations
double startSuggest = d * Math.floor(stat0._min2 / d);
double binszSuggest = d;
int nbinSuggest = (int) Math.ceil((stat0._max2 - startSuggest)/d) + 1;
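// Worked example (illustrative numbers): sigma=10, N=1e6 gives b = 3.5*10/cbrt(1e6) = 0.35;
// d = 10^floor(log10(0.35)) = 0.1, and since 0.35 > 5*0.1/3 but not > 20*0.1/3, d becomes 0.5.
// With min2=1.2 and max2=9.7 on a real column: startSuggest = 0.5*floor(1.2/0.5) = 1.0 and
// nbinSuggest = ceil((9.7-1.0)/0.5) + 1 = 19.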
// Protect against massive binning; the browser doesn't need more than this many bins.
int BROWSER_BIN_TARGET = 100;
// _binsz/_start is used in the histogramming; nbin is used in the array declaration and must
// be big enough. The resulting nbin could be a really large number, so we need to cap it,
// and we should also check that it's not 0 and force it to 1.
// Since nbin is implied by _binsz, we rescale _binsz and recompute nbin.
int binCase = 0; // keep track in case we assert
double start;
if ( stat0._max2==stat0._min2) {
binszSuggest = 0; // fixed next with other 0 cases.
start = stat0._min2;
binCase = 1;
}
// minimum 2 if min/max different
else if ( stat0._max2!=stat0._min2 && nbinSuggest<2 ) {
binszSuggest = (stat0._max2 - stat0._min2) / 2.0;
start = stat0._min2;
binCase = 2;
}
else if (nbinSuggest<1 || nbinSuggest>BROWSER_BIN_TARGET ) {
// switch to a static equation with a fixed bin count, and recompute binszSuggest
// (one more bin than strictly necessary for the range; 99 exact bins causes one extra)
binszSuggest = (stat0._max2 - stat0._min2) / (BROWSER_BIN_TARGET - 1.0);
start = binszSuggest * Math.floor(stat0._min2 / binszSuggest);
binCase = 3;
}
else {
// align to binszSuggest boundary. (this is for reals)
start = binszSuggest * Math.floor(stat0._min2 / binszSuggest);
binCase = 4;
}
// _binsz = 0 means min/max are equal (for reals?); just make it a tiny number.
// This won't show up in the browser display, since bins are labelled by start value.
// Now that we know the best bin size that will fit, floor _binsz for integer columns so the
// visible histogram looks good for integers. This is our final best bin size.
double binsz = (binszSuggest!=0) ? binszSuggest : (vec.isInt() ? 1 : 1e-13d);
_binsz = vec.isInt() ? Math.floor(binsz) : binsz;
// make integers start on an integer too!
_start = vec.isInt() ? Math.floor(start) : start;
// This equation creates the possibility of some of the first bins being empty.
// Also: with this _binsz, many _binsz2 bins could be empty at the start if we reused _start there.
// FIX! is this okay if the dynamic range is > 2**32
// align to bin size?
int nbin = (int) Math.ceil((stat0._max2 - _start)/_binsz) + 1;
double impliedBinEnd = _start + (nbin * _binsz);
String assertMsg = _start+" "+_stat0._min2+" "+_stat0._max2+
" "+impliedBinEnd+" "+_binsz+" "+nbin+" "+startSuggest+" "+nbinSuggest+" "+binCase;
// Log.debug("Summary2 bin1. "+assertMsg);
assert _start <= _stat0._min2 : assertMsg;
// just in case, make sure it's big enough
assert nbin > 0: assertMsg;
// just double checking we're okay (nothing outside the bin range)
assert impliedBinEnd>=_stat0._max2 : assertMsg;
// create a 2nd, finer-grained histogram for quantile estimates.
// okay if it is approx. 1000 bins (+-1)
// update: we allow api to change max_qbins. default 1000. larger = more accuracy
assert max_qbins > 0 && max_qbins <= 10000000 : "max_qbins must be >0 and <= 10000000";
// okay if 1 more than max_qbins gets created
double d2 = (stat0._max2 - stat0._min2) / max_qbins;
// _binsz2 = 0 means min/max are equal (for reals?); just make it a tiny number.
// This won't show up in the browser display, since bins are labelled by start value.
_binsz2 = (d2!=0) ? d2 : (vec.isInt() ? 1 : 1e-13d);
_start2 = stat0._min2;
int nbin2 = (int) Math.ceil((stat0._max2 - _start2)/_binsz2) + 1;
double impliedBinEnd2 = _start2 + (nbin2 * _binsz2);
assertMsg = _start2+" "+_stat0._min2+" "+_stat0._max2+
" "+impliedBinEnd2+" "+_binsz2+" "+nbin2;
// Log.debug("Summary2 bin2. "+assertMsg);
assert _start2 <= stat0._min2 : assertMsg;
assert nbin2 > 0 : assertMsg;
// can't make any assertion about _start2 vs _start (either can be smaller due to fp issues)
assert impliedBinEnd2>=_stat0._max2 : assertMsg;
hcnt = new long[nbin];
hcnt2 = new long[nbin2];
hcnt2_min = new double[nbin2];
hcnt2_max = new double[nbin2];
// Log.debug("Finer histogram has "+nbin2+" bins. Visible histogram has "+nbin);
// Log.debug("Finer histogram starts at "+_start2+" Visible histogram starts at "+_start);
// Log.debug("stat0._min2 "+stat0._min2+" stat0._max2 "+stat0._max2);
}
else { // vec does not contain finite numbers
Log.debug("Summary2: NaN in stat0._min2: "+stat0._min2+" or stat0._max2: "+stat0._max2);
// vec.min() wouldn't be any better here. It could be NaN? 4/13/14
// _start = vec.min();
// _start2 = vec.min();
// _binsz = Double.POSITIVE_INFINITY;
// _binsz2 = Double.POSITIVE_INFINITY;
_start = Double.NaN;
_start2 = Double.NaN;
_binsz = Double.NaN;
_binsz2 = Double.NaN;
hcnt = new long[1];
hcnt2 = new long[1];
hcnt2_min = new double[1];
hcnt2_max = new double[1];
}
}
public Summary2(Vec vec, String name, BasicStat stat0) {
this(vec, name, stat0, 1000);
}
public Summary2 add(Chunk chk) {
if( chk._vec.isUUID() ) {
// Log.info("Summary2: isUUID() in add");
return this;
}
for (int i = 0; i < chk._len; i++)
add(chk.at0(i));
return this;
}
public void add(double val) {
if( Double.isNaN(val) ) return;
// can get infinity due to bad enum parse to real
// histogram is sized ok, but the index calc below will be too big
// just drop them. not sure if something better to do?
if( val==Double.POSITIVE_INFINITY ) return;
if( val==Double.NEGATIVE_INFINITY ) return;
_len1++; _gprows++;
if ( _type != T_ENUM ) {
int index;
// update min/max
if (val < _mins[_mins.length-1] || Double.isNaN(_mins[_mins.length-1])) {
index = Arrays.binarySearch(_mins, val);
if (index < 0) {
index = -(index + 1);
for (int j = _mins.length -1; j > index; j--)
_mins[j] = _mins[j-1];
_mins[index] = val;
}
}
boolean hasNan = Double.isNaN(_maxs[_maxs.length-1]);
if (val > _maxs[0] || hasNan) {
index = Arrays.binarySearch(_maxs, val);
if (index < 0) {
index = -(index + 1);
if (hasNan) {
for (int j = _maxs.length -1; j > index; j--)
_maxs[j] = _maxs[j-1];
_maxs[index] = val;
} else {
for (int j = 0; j < index-1; j++)
_maxs[j] = _maxs[j+1];
_maxs[index-1] = val;
}
}
}
// update the finer histogram (used for quantile estimates on numerics)
long binIdx2;
if (hcnt2.length==1) {
binIdx2 = 0; // not used
}
else {
binIdx2 = (int) Math.floor((val - _start2) / _binsz2);
}
int binIdx2Int = (int) binIdx2;
assert (_start2 <= val) : "Why is val < _start2? val:"+val+" _start2:"+_start2;
assert (binIdx2Int >= 0 && binIdx2Int < hcnt2.length) :
"binIdx2Int too big for hcnt2 "+binIdx2Int+" "+hcnt2.length+" "+val+" "+_start2+" "+_binsz2;
if (hcnt2[binIdx2Int] == 0) {
// Log.debug("New init: "+val+" for index "+binIdx2Int);
hcnt2_min[binIdx2Int] = val;
hcnt2_max[binIdx2Int] = val;
}
else {
if (val < hcnt2_min[binIdx2Int]) {
// Log.debug("New min: "+val+" for index "+binIdx2Int);
hcnt2_min[binIdx2Int] = val;
}
if (val > hcnt2_max[binIdx2Int]) {
// if ( binIdx2Int == 500 ) Log.debug("New max: "+val+" for index "+binIdx2Int);
hcnt2_max[binIdx2Int] = val;
}
}
++hcnt2[binIdx2Int];
}
// update the histogram the browser/json uses
long binIdx;
if (hcnt.length == 1) {
binIdx = 0;
}
// interesting. do we really track Infs in the histogram?
else if (val == Double.NEGATIVE_INFINITY) {
binIdx = 0;
}
else if (val == Double.POSITIVE_INFINITY) {
binIdx = hcnt.length-1;
}
else {
binIdx = (int) Math.floor((val - _start) / _binsz);
}
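// Worked example (illustrative numbers): with _start=1.0 and _binsz=0.5, a value of 3.2 lands in
// bin floor((3.2-1.0)/0.5) = 4, i.e. the bin covering [3.0, 3.5).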
int binIdxInt = (int) binIdx;
assert (_start <= val) : "Why is val < _start? val:"+val+" _start:"+_start;
assert (binIdxInt >= 0 && binIdxInt < hcnt.length) :
"binIdxInt bad for hcnt. binIdxInt:"+binIdxInt+" hcnt.length:"+hcnt.length+" val:"+val+" _start:"+_start+" _binsz:"+_binsz;
++hcnt[binIdxInt];
}
public Summary2 add(Summary2 other) {
// merge hcnt and hcnt just by adding
if (hcnt != null)
Utils.add(hcnt, other.hcnt);
_gprows += other._gprows;
if (_type == T_ENUM) return this;
// merge hcnt2 per-bin mins
// other must be the same length, but use its length for safety
// could add assert on lengths?
for (int k = 0; k < other.hcnt2_min.length; k++) {
// for now..die on NaNs
assert !Double.isNaN(other.hcnt2_min[k]) : "NaN in other.hcnt2_min merging";
assert !Double.isNaN(other.hcnt2[k]) : "NaN in other.hcnt2 merging";
assert !Double.isNaN(hcnt2_min[k]) : "NaN in hcnt2_min merging";
assert !Double.isNaN(hcnt2[k]) : "NaN in hcnt2 merging";
// cover the initial case (relying on the initial min being 0 would be wrong)
// Only take the new min if its hcnt2 count is non-zero; it acts like a valid bit.
// can hcnt2 ever be null here?
if (other.hcnt2[k] > 0) {
if ( hcnt2[k]==0 || ( other.hcnt2_min[k] < hcnt2_min[k] )) {
hcnt2_min[k] = other.hcnt2_min[k];
}
}
}
// merge hcnt2 per-bin maxs
// other must be the same length, but use its length for safety
for (int k = 0; k < other.hcnt2_max.length; k++) {
// for now..die on NaNs
assert !Double.isNaN(other.hcnt2_max[k]) : "NaN in other.hcnt2_max merging";
assert !Double.isNaN(other.hcnt2[k]) : "NaN in other.hcnt2 merging";
assert !Double.isNaN(hcnt2_max[k]) : "NaN in hcnt2_max merging";
assert !Double.isNaN(hcnt2[k]) : "NaN in hcnt2 merging";
// cover the initial case (relying on the initial max being 0 would be wrong)
// Only take the new max if its hcnt2 count is non-zero; it acts like a valid bit.
// can hcnt2 ever be null here?
if (other.hcnt2[k] > 0) {
if ( hcnt2[k]==0 || ( other.hcnt2_max[k] > hcnt2_max[k] )) {
hcnt2_max[k] = other.hcnt2_max[k];
}
}
}
// can hcnt2 ever be null here?. Inc last, so the zero case is detected above
// seems like everything would fail if hcnt2 doesn't exist here
if (hcnt2 != null)
Utils.add(hcnt2, other.hcnt2);
// merge hcnt mins
double[] ds = MemoryManager.malloc8d(_mins.length);
int i = 0, j = 0;
for (int k = 0; k < ds.length; k++)
if (_mins[i] < other._mins[j])
ds[k] = _mins[i++];
else if (Double.isNaN(other._mins[j]))
ds[k] = _mins[i++];
else { // _min[i] >= other._min[j]
if (_mins[i] == other._mins[j]) i++;
ds[k] = other._mins[j++];
}
System.arraycopy(ds,0,_mins,0,ds.length);
for (i = _maxs.length - 1; Double.isNaN(_maxs[i]); i--) if (i == 0) {i--; break;}
for (j = _maxs.length - 1; Double.isNaN(other._maxs[j]); j--) if (j == 0) {j--; break;}
ds = MemoryManager.malloc8d(i + j + 2);
// merge hcnt maxs, also deduplicating against mins?
int k = 0, ii = 0, jj = 0;
while (ii <= i && jj <= j) {
if (_maxs[ii] < other._maxs[jj])
ds[k] = _maxs[ii++];
else if (_maxs[ii] > other._maxs[jj])
ds[k] = other._maxs[jj++];
else { // _maxs[ii] == other.maxs[jj]
ds[k] = _maxs[ii++];
jj++;
}
k++;
}
while (ii <= i) ds[k++] = _maxs[ii++];
while (jj <= j) ds[k++] = other._maxs[jj++];
System.arraycopy(ds,Math.max(0, k - _maxs.length),_maxs,0,Math.min(k,_maxs.length));
for (int t = k; t < _maxs.length; t++) _maxs[t] = Double.NaN;
return this;
}
// _start of each hcnt bin
public double binValue(int b) { return _start + b*_binsz; }
// can we assert against something here?
// assert _gprows==htot2(0, 0) : "_gprows: "+_gprows+" htot2(): "+htot2(0, 0);
// need to count >4B rows
private long htot2(long low, long high) {
long cnt = 0;
for (int i = 0; i < hcnt2.length; i++) cnt+=hcnt2[i];
// add the stuff outside the bins, 0,0 for single pass
cnt = cnt + low + high;
return cnt;
}
//******************************************************************************
// NOTE: only works on a backfilled hcnt2, unlike Quantiles; that eliminates the nextK search.
// The backfill is not done here, so it happens only once even though we're called once per threshold.
private double approxLikeInQuantiles(double threshold, double valStart, double valEnd) {
// Code is lifted from Quantiles.java, with only a little jiggering
// on the branches around forceBestApprox/interpolation type, and use of globals
// that have different names. Need to merge sometime.
// the 'intent' is to be the same as the single pass Quantiles approx, interpolation_type==-1
// max_qbins was the goal for sizing.
// nbins2 was what was used for size, after various calcs
// just assume hcnt2 is the right length!
// Don't need at least two bins..since we'll always have 'some' answer
// are we being called on constant 0?
int maxBinCnt = hcnt2.length;
// Find the row count we want to hit, within some bin.
long currentCnt = 0;
double targetCntFull = threshold * (_gprows-1); // zero based indexing
long targetCntInt = (long) Math.floor(targetCntFull);
double targetCntFract = targetCntFull - (double) targetCntInt;
assert (targetCntFract>=0) && (targetCntFract<=1);
// Log.debug("QS_ targetCntInt: "+targetCntInt+" targetCntFract: "+targetCntFract);
// walk thru and find out what bin to look inside
int k = 0;
while(k!=maxBinCnt && ((currentCnt + hcnt2[k]) <= targetCntInt)) {
// Log.debug("Q_ Looping for k: "+threshold+" "+k+" "+maxBinCnt+" "+currentCnt+" "+targetCntInt+
// " "+hcnt2[k]+" "+hcnt2_min[k]+" "+hcnt2_max[k]);
currentCnt += hcnt2[k];
++k;
// Note the loop condition covers the breakout condition
// (currentCnt==targetCntInt && hcnt2[k]!=0)
// and also keeps us from walking past the array bounds.
}
assert hcnt2[k]!=0;
// Log.debug("QS_ Found k (approx): "+threshold+" "+k+" "+currentCnt+" "+targetCntInt+
// " "+_gprows+" "+hcnt2[k]+" "+hcnt2_min[k]+" "+hcnt2_max[k]);
assert (currentCnt + hcnt2[k]) > targetCntInt : targetCntInt+" "+currentCnt+" "+k+" "+" "+maxBinCnt;
assert hcnt2[k]!=1 || hcnt2_min[k]==hcnt2_max[k];
boolean done = false;
double guess = Double.NaN;
boolean interpolated = false;
double dDiff;
// Special cases: if the desired row is the last of 2 or more equal values in this bin,
// we will need to interpolate with a nextK out-of-bin value.
// We can't iterate further, since it won't improve things and the bin size would be zero;
// trying to resolve the binsize=0 case on a later pass is flawed thinking.
// A fractional target at the end of the bin implies the values are not all the same,
// so we interpolate toward the next bin.
boolean atStartOfBin = hcnt2[k]>=1 && (currentCnt == targetCntInt);
boolean atEndOfBin = !atStartOfBin && (hcnt2[k]>=2 && ((currentCnt + hcnt2[k] - 1) == targetCntInt));
boolean inMidOfBin = !atStartOfBin && !atEndOfBin && (hcnt2[k]>=3) && (hcnt2_min[k]==hcnt2_max[k]);
boolean interpolateEndNeeded = false;
if ( atEndOfBin ) {
if ( targetCntFract != 0 ) {
interpolateEndNeeded = true;
}
else {
guess = hcnt2_max[k];
done = true;
// Log.debug("QS_ Guess M "+guess);
}
}
else if ( inMidOfBin ) {
// if we know there is something before and after us with the same value,
// we never need to interpolate (only allowed when min==max)
guess = hcnt2_min[k];
done = true;
// Log.debug("QS_ Guess N "+guess);
}
if ( !done && atStartOfBin ) {
// no interpolation needed
if ( hcnt2[k]>2 && (hcnt2_min[k]==hcnt2_max[k]) ) {
guess = hcnt2_min[k];
done = true;
// Log.debug("QS_ Guess A "+guess);
}
// min/max can be equal or not equal here
else if ( hcnt2[k]==2 ) { // interpolate between min/max for the two value bin
// type 7 (linear interpolation)
// Unlike mean, which just depends on two adjacent values, this adjustment
// adds possible errors related to the arithmetic on the total # of rows.
dDiff = hcnt2_max[k] - hcnt2_min[k]; // two adjacent..as if sorted!
// targetCntFract is fraction of total rows
guess = hcnt2_min[k] + (targetCntFract * dDiff);
done = true;
interpolated = true;
// Log.debug("QS_ Guess B "+guess+" targetCntFract: "+targetCntFract);
}
// no interpolation needed
else if ( (hcnt2[k]==1) && (targetCntFract==0) ) {
assert hcnt2_min[k]==hcnt2_max[k];
guess = hcnt2_min[k];
done = true;
// Log.debug("QS_ Guess C "+guess);
}
}
// interpolate into a nextK value
// all the qualification is so we don't set done when we're not, for multipass
// interpolate from single bin, end of two entry bin, or for approx
boolean stillCanGetIt = atStartOfBin && hcnt2[k]==1 && targetCntFract!=0;
if ( !done ) {
if ( hcnt2[k]==1 ) {
assert hcnt2_min[k]==hcnt2_max[k];
// Log.debug("QS_ Single value in this bin, but fractional means we need to interpolate to next non-zero");
}
if ( interpolateEndNeeded ) {
// Log.debug("QS_ Interpolating off the end of a bin!");
}
double nextVal;
int nextK;
// if we're at the end
assert k < maxBinCnt : k+" "+maxBinCnt;
if ( (k+1)==maxBinCnt) {
// Log.debug("QS_ Using valEnd for approx interpolate: "+valEnd);
nextVal = valEnd; // just in case the binning didn't max in a bin before the last
}
else {
nextK = k + 1;
nextVal = hcnt2_min[nextK];
// Log.debug("QS_ Using nextK for interpolate: "+nextK+" "+hcnt2_min[nextK]);
// hcnt2[nextK] may be zero here if we backfilled
}
// can still get an exact interpolation when hcnt2[k]==1 and we're at the start of the bin
if ( stillCanGetIt ) {
dDiff = nextVal - hcnt2_max[k]; // two adjacent, as if sorted!
// targetCntFract is fraction of total rows
guess = hcnt2_max[k] + (targetCntFract * dDiff);
interpolated = true;
done = true; // has to be one above us when needed. (or we're at end)
// Log.debug("QS_ Guess D "+guess+" "+nextVal+" "+hcnt2_min[k]+" "+hcnt2_max[k]+" "+hcnt2[k]+" "+nextVal+
// " targetCntFull: "+targetCntFull+" targetCntFract: "+targetCntFract+
// " _gprows: " + _gprows+" "+stillCanGetIt);
}
else { // single pass approx..with unresolved bin
assert hcnt2[k]!=0 : hcnt2[k]+" "+k;
// use max within this bin, to stay within the guaranteed error bounds
dDiff = (hcnt2_max[k] - hcnt2_min[k]) / hcnt2[k];
guess = hcnt2_min[k] + (targetCntFull-currentCnt) * dDiff;
interpolated = true;
done = true; // has to be one above us when needed. (or we're at end)
// Log.debug("QS_ Guess E "+guess+" "+nextVal+" "+hcnt2_min[k]+" "+hcnt2_max[k]+" "+hcnt2[k]+" "+nextVal+
// " targetCntFull: "+targetCntFull+" targetCntFract: "+targetCntFract+
// " _gprows: " + _gprows);
}
}
assert !Double.isNaN(guess); // covers positive/negative inf also (if we divide by 0)
return guess;
}
//******************************************************************************
private void approxQuantiles(double[] qtiles, double[] thres, double valStart, double valEnd){
// not called for enums
assert _type != T_ENUM;
// hcnt2 may have been sized differently than the max_qbins goal
int maxBinCnt = hcnt2.length;
if ( maxBinCnt==0 ) return;
// this would imply we didn't get anything correctly. Maybe real col with all NA?
if ( (maxBinCnt==1) && (hcnt2[0]==0) ) return;
// Perf hack that is currently different than Quantiles.java
// back fill hcnt2_min where it's zero, so we can avoid the nextK search
// when we need to interpolate. Keep hcnt2[k]=0 so we know not to use it
// other than for getting nextK without searching. This is powerful
// because if we're getting 10 quantiles from a histogram, we don't
// do searches to the end (potentially) for every nextK find. This
// makes the Quantiles.java algo work well when reused for multiple quantiles
// here in Summary2
// The use of nextK, rather than just our bin, improves accuracy for various cases.
// (mirroring what Quantiles does for perfect answers)
// start at the end. don't need to fill the 0 case ever, but should for consistency
double backfill = valEnd;
for (int b=(maxBinCnt-1); b>=0; --b) {
if ( hcnt2[b] == 0 ) {
hcnt2_min[b] = backfill;
// Log.debug("QS_ backfilling "+b+" "+backfill);
}
else {
backfill = hcnt2_min[b];
}
}
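// Worked example (illustrative numbers): for hcnt2 = {3, 0, 0, 2} with hcnt2_min = {1.0, ?, ?, 7.5}
// and valEnd = 9.0, the right-to-left backfill leaves hcnt2_min = {1.0, 7.5, 7.5, 7.5}, so any
// nextK lookup lands directly on the next populated bin's minimum without searching.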
for(int j = 0; j < thres.length; ++j) {
// 0 okay for threshold?
assert 0 <= thres[j] && thres[j] <= 1;
qtiles[j] = approxLikeInQuantiles(thres[j], valStart, valEnd);
}
}
//******************************************************************************
// Compute majority categories for enums only
public void computeMajorities() {
if ( _type != T_ENUM ) return;
for (int i = 0; i < _mins.length; i++) _mins[i] = i;
for (int i = 0; i < _maxs.length; i++) _maxs[i] = i;
int mini = 0, maxi = 0;
for( int i = 0; i < hcnt.length; i++ ) {
if (hcnt[i] < hcnt[(int)_mins[mini]]) {
_mins[mini] = i;
for (int j = 0; j < _mins.length; j++)
if (hcnt[(int)_mins[j]] > hcnt[(int)_mins[mini]]) mini = j;
}
if (hcnt[i] > hcnt[(int)_maxs[maxi]]) {
_maxs[maxi] = i;
for (int j = 0; j < _maxs.length; j++)
if (hcnt[(int)_maxs[j]] < hcnt[(int)_maxs[maxi]]) maxi = j;
}
}
for (int i = 0; i < _mins.length - 1; i++)
for (int j = 0; j < i; j++) {
if (hcnt[(int)_mins[j]] > hcnt[(int)_mins[j+1]]) {
double t = _mins[j]; _mins[j] = _mins[j+1]; _mins[j+1] = t;
}
}
for (int i = 0; i < _maxs.length - 1; i++)
for (int j = 0; j < i; j++)
if (hcnt[(int)_maxs[j]] < hcnt[(int)_maxs[j+1]]) {
double t = _maxs[j]; _maxs[j] = _maxs[j+1]; _maxs[j+1] = t;
}
}
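// Note on the method above: for enum columns the _mins/_maxs arrays are reused to hold the
// category indices of the least- and most-frequent levels rather than numeric minima/maxima.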
public double percentileValue(int idx) {
if( _type == T_ENUM ) return Double.NaN;
return _pctile[idx];
}
public void toHTML( Vec vec, String cname, StringBuilder sb ) {
// should be a better way/place to decode this back to string.
String typeStr;
if ( _type == T_REAL) typeStr = "Real";
else if ( _type == T_INT) typeStr = "Int";
else if ( _type == T_ENUM) typeStr = "Enum";
else typeStr = "Undefined";
sb.append("<div class='table' id='col_" + cname + "' style='width:90%;heigth:90%;border-top-style:solid;'>" +
"<div class='alert-success'><h4>Column: " + cname + " (type: " + typeStr + ")</h4></div>\n");
if ( _stat0._len == _stat0._nas ) {
sb.append("<div class='alert'>Empty column, no summary!</div></div>\n");
return;
}
// Base stats
if( _type != T_ENUM ) {
NumStats stats = (NumStats)this.stats;
sb.append("<div style='width:100%;'><table class='table-bordered'>");
sb.append("<tr><th colspan='"+20+"' style='text-align:center;'>Base Stats</th></tr>");
sb.append("<tr>");
sb.append("<th>NAs</th> <td>" + nacnt + "</td>");
sb.append("<th>mean</th><td>" + Utils.p2d(stats.mean)+"</td>");
sb.append("<th>sd</th><td>" + Utils.p2d(stats.sd) + "</td>");
sb.append("<th>zeros</th><td>" + stats.zeros + "</td>");
sb.append("<tr>");
sb.append("<th>min[" + stats.mins.length + "]</th>");
for( double min : stats.mins ) {
sb.append("<td>" + Utils.p2d(min) + "</td>");
}
sb.append("<tr>");
sb.append("<th>max[" + stats.maxs.length + "]</th>");
for( double max : stats.maxs ) {
sb.append("<td>" + Utils.p2d(max) + "</td>");
}
// End of base stats
sb.append("</tr> </table>");
sb.append("</div>");
} else { // Enums
sb.append("<div style='width:100%'><table class='table-bordered'>");
sb.append("<tr><th colspan='" + 4 + "' style='text-align:center;'>Base Stats</th></tr>");
sb.append("<tr><th>NAs</th> <td>" + nacnt + "</td>");
sb.append("<th>cardinality</th> <td>" + vec.domain().length + "</td></tr>");
sb.append("</table></div>");
}
// Histogram
final int MAX_HISTO_BINS_DISPLAYED = 1000;
int len = Math.min(hcnt.length,MAX_HISTO_BINS_DISPLAYED);
sb.append("<div style='width:100%;overflow-x:auto;'><table class='table-bordered'>");
sb.append("<tr> <th colspan="+len+" style='text-align:center'>Histogram</th></tr>");
sb.append("<tr>");
if ( _type == T_ENUM )
for( int i=0; i<len; i++ ) sb.append("<th>" + vec.domain(i) + "</th>");
else
for( int i=0; i<len; i++ ) sb.append("<th>" + Utils.p2d(i==0?_start:binValue(i)) + "</th>");
sb.append("</tr>");
sb.append("<tr>");
for( int i=0; i<len; i++ ) sb.append("<td>" + hcnt[i] + "</td>");
sb.append("</tr>");
sb.append("<tr>");
for( int i=0; i<len; i++ )
sb.append(String.format("<td>%.1f%%</td>",(100.0*hcnt[i]/_stat0._len)));
sb.append("</tr>");
if( hcnt.length >= MAX_HISTO_BINS_DISPLAYED )
sb.append("<div class='alert'>Histogram for this column was too big and was truncated to 1000 values!</div>");
sb.append("</table></div>");
if (_type != T_ENUM) {
NumStats stats = (NumStats)this.stats;
// Percentiles
sb.append("<div style='width:100%;overflow-x:auto;'><table class='table-bordered'>");
sb.append("<tr> <th colspan='" + stats.pct.length + "' " +
"style='text-align:center' " +
">Percentiles</th></tr>");
sb.append("<tr><th>Threshold(%)</th>");
for (double pc : stats.pct)
sb.append("<td>" + Utils.p2d(pc * 100.0) + "</td>");
// sb.append("<td>" + (int) Math.round(pc * 100) + "</td>");
sb.append("</tr>");
sb.append("<tr><th>Value</th>");
for (double pv : stats.pctile)
sb.append("<td>" + pv + "</td>");
sb.append("</tr>");
sb.append("</table>");
sb.append("</div>");
}
sb.append("</div>\n");
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/Trainer.java
|
package hex;
import com.jogamp.opencl.*;
import com.jogamp.opencl.CLMemory.Mem;
import hex.Layer.*;
import jsr166y.CountedCompleter;
import water.*;
import water.H2O.H2OCountedCompleter;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.Log;
import water.util.Utils;
import java.io.IOException;
import java.nio.FloatBuffer;
import java.util.Arrays;
import java.util.Map.Entry;
import java.util.Random;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicIntegerArray;
import java.util.concurrent.atomic.AtomicLong;
/**
* Trains a neural network.
*
* @author cypof
*/
public abstract class Trainer {
Trainer() {
}
public abstract Layer[] layers();
public abstract void start();
public abstract void join();
public long processed() {
throw new UnsupportedOperationException();
}
public static class Base extends Trainer {
final Layer[] _ls;
public Base(Layer[] ls) {
_ls = ls;
}
@Override public Layer[] layers() {
return _ls;
}
@Override public void start() {
throw new UnsupportedOperationException();
}
@Override public void join() {
throw new UnsupportedOperationException();
}
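// One training step for the given seed: forward-propagate all layers, clear the hidden
// layers' error accumulators, then back-propagate from the output layer down.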
final void step(long seed) {
// Log.info("step with seed " + seed);
fprop(seed);
for( int i = 1; i < _ls.length - 1; i++ )
Arrays.fill(_ls[i]._e, 0);
bprop();
}
final void fprop(long seed) {
for (Layer _l : _ls) _l.fprop(seed, true);
}
final void bprop() {
for( int i = _ls.length - 1; i > 0; i-- )
_ls[i].bprop();
}
}
/**
* Trains NN on current thread.
*/
public static class Direct extends Base {
long _processed, _limit;
Thread _thread;
Key _job;
public Direct(Layer[] ls, double epochs, Key job) {
super(ls);
_limit = (long) Math.ceil(epochs * ((Input) ls[0])._len);
_job = job;
}
@Override public Layer[] layers() {
return _ls;
}
public void run() {
Training training = new Training() {
@Override long processed() {
return _processed;
}
};
for (Layer _l : _ls) _l._training = training;
Input input = (Input) _ls[0];
for( ; _limit == 0 || _processed < _limit; _processed++ ) {
step(_processed);
input.move();
if( _job != null && (!Job.isRunning(_job) || !NeuralNet.running ) )
break;
}
}
@Override public long processed() {
return _processed;
}
@Override public void start() {
_thread = new Thread() {
@Override public void run() {
Direct.this.run();
}
};
_thread.start();
}
@Override public void join() {
try {
_thread.join();
} catch( InterruptedException e ) {
throw new RuntimeException(e);
}
}
}
/**
* Runs several trainers in parallel on the same weights, using threads. Only works on one node.
*/
public static class Threaded extends Trainer {
final Base[] _trainers;
final Thread[] _threads;
final long _stepsPerThread;
final AtomicLong _processed = new AtomicLong();
public Threaded(Layer[] ls, double epochs, final Key job, int threads) {
int num_threads = threads > 0 ? threads : Runtime.getRuntime().availableProcessors();
_trainers = new Base[num_threads];
_threads = new Thread[num_threads];
_stepsPerThread = (long) (epochs * ((Input) ls[0])._len / num_threads);
Log.info("Starting " + num_threads + " threads.");
for( int t = 0; t < num_threads; t++ ) {
Layer[] clones = new Layer[ls.length];
for( int y = 0; y < clones.length; y++ )
clones[y] = ls[y].clone();
for( int y = 0; y < clones.length; y++ ) {
clones[y].init(clones, y, false);
clones[y]._training = new Training() {
@Override long processed() {
return _processed.get();
}
};
}
final Input input = (Input) clones[0];
input._pos = input._len * t / num_threads;
_trainers[t] = new Base(clones);
final Base trainer = _trainers[t];
final int thread_num = t;
_threads[t] = new Thread("H2O Trainer " + t) {
@Override public void run() {
for( long i = 0; _stepsPerThread == 0 || i < _stepsPerThread; i++ ) {
if( job != null && (!Job.isRunning(job) || !NeuralNet.running ) )
break;
try {
// long seed = thread_num * _stepsPerThread + input._pos; //BAD
long seed = new Random().nextLong(); //GOOD
// long seed = thread_num * _stepsPerThread + _processed.get(); //TRY
trainer.step(seed);
input.move();
_processed.incrementAndGet();
} catch (Exception e) {
e.printStackTrace();
}
}
}
};
}
}
@Override public Layer[] layers() {
return _trainers[0].layers();
}
@Override public long processed() {
return _processed.get();
}
@Override public void start() {
for (Thread _thread : _threads) _thread.start();
}
@Override public void join() {
for (Thread _thread : _threads) {
try {
_thread.join();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
public void run() {
start();
join();
}
}
/**
* Distributed trainer. All tasks on a node update the same weights, like Threaded. Updates
* between nodes are synchronized at regular intervals by exchanging messages between the
* initiating machine and others. Requires input to be Frame.
*/
public static class MapReduce extends Trainer {
static final ConcurrentHashMap<Key, MapReduce> _instances = new ConcurrentHashMap<Key, MapReduce>();
Layer[] _ls;
double _epochs;
Key _job;
AtomicIntegerArray _counts;
transient Key _key;
transient Descent _task;
public MapReduce(Layer[] ls, double epochs, Key job) {
_ls = ls;
_epochs = epochs;
_job = job;
_key = Key.make((byte) 1, Key.DFJ_INTERNAL_USER, H2O.SELF);
_instances.put(_key, this);
DKV.put(_key, new Value(_key, new byte[0]));
Vec[] vecs = ((VecsInput) ls[0]).vecs;
assert ls[0]._a.length == VecsInput.expand(vecs);
//assert vecs[0].nChunks() >= NeuralNet.cores() : "Not enough chunks, c.f. NeuralNet.reChunk";
_counts = new AtomicIntegerArray(vecs[0].nChunks());
}
@Override public Layer[] layers() {
return _ls;
}
@Override public long processed() {
Vec[] vecs = ((VecsInput) _ls[0]).vecs;
long n = 0;
for( int i = 0; i < _counts.length(); i++ )
n += _counts.get(i) * vecs[0].chunkLen(i);
return n;
}
@Override public void start() {
// TODO? Chunk weights over all nodes
// _keys = new Key[H2O.CLOUD._memary.length];
// Weights[] weights = new Weights[_keys.length];
_task = new Descent();
_task._job = _job;
_task._ls = _ls;
_task._key = _key;
_task._epochs = _epochs;
_task._ws = new float[_ls.length][];
_task._bs = new float[_ls.length][];
for( int y = 1; y < _ls.length; y++ ) {
_task._ws[y] = _ls[y]._w;
_task._bs[y] = _ls[y]._b;
}
Vec[] vecs = ((VecsInput) _ls[0]).vecs;
Layer out = _ls[_ls.length - 1];
Vec response = out instanceof VecSoftmax ? ((VecSoftmax) out).vec : ((VecLinear) out)._vec;
_task.dfork(new Frame(null, Utils.append(vecs, response)));
}
@Override public void join() {
_task.join();
}
public void run() {
start();
join();
while (NeuralNet.running) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
void done() {
NeuralNet.running = false;
_instances.remove(_key);
UKV.remove(_key);
if( _job != null ) {
Job job = Job.findJob(_job);
if( job != null ) {
H2OCountedCompleter task = job._fjtask;
if( task != null )
task.tryComplete();
job.remove();
}
}
}
}
static class Descent extends MRTask2<Descent> {
Key _job;
Layer[] _ls;
float[][] _ws;
float[][] _bs;
Key _key;
double _epochs;
transient NodeDescent _node;
transient volatile boolean _done;
@Override protected void setupLocal() {
_node = new NodeDescent(_job, _ls, _ws, _bs, _key);
// Separate thread for more regular latency
final boolean home = _key.home();
Thread thread = new Thread() {
@Override public void run() {
while( _job == null || Job.isRunning(_job) ) {
if( !home )
_node.sync();
else {
_node._total = _node._trainer.processed();
try {
Thread.sleep(1);
} catch( InterruptedException ex ) {
}
}
}
}
};
thread.setDaemon(true);
thread.start();
}
@Override protected void closeLocal() {
// Launch actual computation in order, otherwise passes
// between chunks diverge quickly
DescentEpoch epoch = new DescentEpoch();
epoch._node = _node;
epoch._count = _epochs == 0. ? -1 : (int)Math.ceil(_epochs);
H2O.submitTask(epoch);
_ls = null;
_ws = null;
_bs = null;
_key = null;
}
@Override public void map(Chunk[] cs) {
_node._chunks.add(cs);
}
}
private static abstract class NodeTask extends H2OCountedCompleter {
NodeDescent _node;
@Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) {
String error = Utils.getStackAsString(ex);
Log.info(error);
if( _node._job != null )
Job.findJob(_node._job).cancel(error);
return super.onExceptionalCompletion(ex, caller);
}
}
private static class DescentEpoch extends NodeTask {
int _count;
@Override public void compute2() {
if( (_count < 0 || --_count >= 0) && (_node._job == null || Job.isRunning(_node._job)) ) {
for( Chunk[] cs : _node._chunks ) {
DescentChunk task = new DescentChunk();
task._node = _node;
task._cs = cs;
H2O.submitTask(task);
}
reinitialize();
H2O.submitTask(this);
} else {
if( _node._key.home() )
_node._trainer.done();
}
}
}
static class DescentChunk extends NodeTask {
Chunk[] _cs;
@Override public void compute2() {
if( _node._job == null || (Job.isRunning(_node._job) && NeuralNet.running)) {
Layer[] clones = new Layer[_node._ls.length];
ChunksInput input = new ChunksInput(Utils.remove(_cs, _cs.length - 1), (VecsInput) _node._ls[0]);
clones[0] = input;
for( int y = 1; y < _node._ls.length - 1; y++ )
clones[y] = _node._ls[y].clone();
Layer output = _node._ls[_node._ls.length - 1];
if( output instanceof VecSoftmax )
clones[clones.length - 1] = new ChunkSoftmax(_cs[_cs.length - 1], (VecSoftmax) output);
else
clones[clones.length - 1] = new ChunkLinear(_cs[_cs.length - 1], (VecLinear) output);
// create new _a and _e, but link to weights/bias from _node (Hogwild)
for( int y = 0; y < clones.length; y++ ) {
clones[y].init(clones, y, false);
clones[y]._w = _node._ws[y];
clones[y]._b = _node._bs[y];
clones[y]._wm = _node._wm[y];
clones[y]._bm = _node._bm[y];
clones[y]._training = new Training() {
@Override long processed() {
return _node._total;
}
};
}
Base base = new Base(clones);
for( input._pos = 0; input._pos < _cs[0]._len; input._pos++ )
base.step(new Random().nextLong()); //warning: no reproducible seeding
int chunk = _cs[0].cidx();
_node.stepped(chunk);
}
tryComplete();
}
}
static class NodeDescent {
ConcurrentLinkedQueue<Chunk[]> _chunks = new ConcurrentLinkedQueue<Chunk[]>();
Key _job;
Layer[] _ls;
float[][] _ws; // Current weights
float[][] _bs; // Current bias
float[][] _wi; // Initial weights, for synchronization
float[][] _bi; // Initial biases, for synchronization
float[][] _wm; // Weight momentums
float[][] _bm; // Bias momentums
Key _key;
ConcurrentHashMap<Integer, Integer> _counters;
MapReduce _trainer;
long _total;
NodeDescent(Key job, Layer[] ls, float[][] ws, float[][] bs, Key key) {
_job = job;
_ls = ls;
_key = key;
_ws = ws;
_bs = bs;
_wi = new float[ws.length][];
_bi = new float[bs.length][];
_wm = new float[ws.length][];
_bm = new float[bs.length][];
for( int y = 1; y < _ws.length; y++ ) {
_wi[y] = ws[y].clone();
_bi[y] = bs[y].clone();
if( ls[y].params.momentum_start != 0 || ls[y].params.momentum_stable != 0 ) {
_wm[y] = new float[ws[y].length];
_bm[y] = new float[bs[y].length];
}
}
_trainer = MapReduce._instances.get(_key);
assert (_trainer != null) == _key.home();
if( _trainer == null )
_counters = new ConcurrentHashMap<Integer, Integer>();
}
void stepped(int chunk) {
assert (_trainer != null) == _key.home();
if( _trainer != null )
_trainer._counts.incrementAndGet(chunk);
else {
for( ;; ) {
Integer n = _counters.get(chunk);
if( n == null ) {
if( _counters.putIfAbsent(chunk, 1) == null )
break;
} else {
if( _counters.replace(chunk, n, n + 1) )
break;
}
}
}
}
boolean sync() {
assert !_key.home();
int[] counts = new int[10];
int n = 0;
for( Entry<Integer, Integer> entry : _counters.entrySet() ) {
if( n == counts.length ) {
int[] t = new int[counts.length * 2];
System.arraycopy(counts, 0, t, 0, counts.length);
counts = t;
}
counts[n++] = entry.getKey();
counts[n++] = _counters.remove(entry.getKey());
}
if( n > counts.length ) {
int[] t = new int[n];
System.arraycopy(counts, 0, t, 0, t.length);
counts = t;
}
if( n > 0 ) {
Shuttle s = new Shuttle();
s._w = new float[_ws.length][];
s._b = new float[_bs.length][];
for( int y = 1; y < _ws.length; y++ ) {
s._w[y] = new float[_ws[y].length];
for( int i = 0; i < _ws[y].length; i++ ) {
s._w[y][i] = _ws[y][i] - _wi[y][i];
_wi[y][i] = _ws[y][i];
}
s._b[y] = new float[_bs[y].length];
for( int i = 0; i < _bs[y].length; i++ ) {
s._b[y][i] = _bs[y][i] - _bi[y][i];
_bi[y][i] = _bs[y][i];
}
}
s._counts = counts;
s.invoke(_key);
_total = s._processed;
for( int y = 1; y < _ws.length; y++ ) {
for( int i = 0; i < _ws[y].length; i++ ) {
float d = _ws[y][i] - _wi[y][i];
_wi[y][i] = s._w[y][i];
_ws[y][i] = s._w[y][i] + d;
}
for( int i = 0; i < _bs[y].length; i++ ) {
float d = _bs[y][i] - _bi[y][i];
_bi[y][i] = s._b[y][i];
_bs[y][i] = s._b[y][i] + d;
}
}
return true;
}
return false;
}
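// Editorial note: Shuttle is the home-node side of the exchange. As an Atomic task it
// adds the incoming deltas into the trainer's current weights/biases, hands the updated
// values back to the caller, and merges the per-chunk step counts.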
static class Shuttle extends Atomic {
float[][] _w; // Deltas in, values out
float[][] _b; // Deltas in, values out
int[] _counts;
long _processed;
@Override public Value atomic(Value value) {
assert _key.home();
MapReduce trainer = MapReduce._instances.get(_key);
if( trainer != null ) {
for( int y = 1; y < trainer._ls.length; y++ ) {
for( int i = 0; i < _w[y].length; i++ )
trainer._ls[y]._w[i] += _w[y][i];
for( int i = 0; i < _b[y].length; i++ )
trainer._ls[y]._b[i] += _b[y][i];
}
for( int y = 1; y < trainer._ls.length; y++ ) {
_w[y] = trainer._ls[y]._w;
_b[y] = trainer._ls[y]._b;
}
for( int i = 0; i < _counts.length; i += 2 )
trainer._counts.addAndGet(_counts[i], _counts[i + 1]);
_counts = null;
_processed = trainer.processed();
}
return null;
}
}
}
/**
* GPU based trainer. Alpha code!
*/
public static class OpenCL extends Trainer {
final Layer[] _ls;
public OpenCL(Layer[] ls) {
_ls = ls;
}
@Override public Layer[] layers() {
return _ls;
}
@Override public void start() {
CLContext context = CLContext.create();
Log.debug("Created " + context);
try {
CLDevice device = context.getMaxFlopsDevice();
Log.debug("Using " + device);
CLCommandQueue queue = device.createCommandQueue();
CLProgram program = context.createProgram(Boot._init.getResource2("/kernels.cl")).build();
CLKernel[] fprops = new CLKernel[_ls.length];
CLKernel[] bprops = new CLKernel[_ls.length];
CLKernel[] resets = new CLKernel[_ls.length];
CLBuffer<FloatBuffer>[] w = new CLBuffer[_ls.length];
CLBuffer<FloatBuffer>[] b = new CLBuffer[_ls.length];
CLBuffer<FloatBuffer>[] a = new CLBuffer[_ls.length];
CLBuffer<FloatBuffer>[] e = new CLBuffer[_ls.length];
for( int y = 0; y < _ls.length; y++ ) {
a[y] = context.createFloatBuffer(_ls[y]._a.length, Mem.READ_WRITE);
if( y > 0 ) {
w[y] = context.createFloatBuffer(_ls[y]._w.length, Mem.READ_ONLY);
b[y] = context.createFloatBuffer(_ls[y]._b.length, Mem.READ_ONLY);
e[y] = context.createFloatBuffer(_ls[y]._e.length, Mem.READ_ONLY);
queue.putWriteBuffer(w[y], false);
queue.putWriteBuffer(b[y], false);
fprops[y] = program.createCLKernel(_ls[y].getClass().getSimpleName() + "_fprop");
fprops[y].putArg(_ls[y - 1]._a.length);
fprops[y].putArgs(a[y - 1], w[y], b[y], a[y]);
bprops[y] = program.createCLKernel(_ls[y].getClass().getSimpleName() + "_bprop");
bprops[y].putArg(_ls[y - 1]._a.length);
bprops[y].putArgs(a[y - 1], w[y], b[y], a[y], e[y]);
// bprops[y].putArg(_ls[y]._r);
if( e[y - 1] != null )
bprops[y].putArg(e[y - 1]);
resets[y] = program.createCLKernel("reset_error");
resets[y].putArg(e[y]);
}
}
int group = device.getMaxWorkGroupSize();
Input input = (Input) _ls[0];
while (true) {
input.fprop(new Random().nextLong(), true);
for( int i = 0; i < input._a.length; i++ )
a[0].getBuffer().put(i, input._a[i]);
queue.putWriteBuffer(a[0], false);
for( int y = 1; y < fprops.length; y++ )
queue.put1DRangeKernel(fprops[y], 0, _ls[y]._a.length, group);
queue.putReadBuffer(a[_ls.length - 1], true);
for( int y = 1; y < fprops.length - 1; y++ )
queue.put1DRangeKernel(resets[y], 0, _ls[y]._a.length, group);
// softmax(input, a[a.length - 1].getBuffer(), e[e.length - 1].getBuffer());
queue.putWriteBuffer(a[_ls.length - 1], false);
queue.putWriteBuffer(e[_ls.length - 1], false);
for( int y = _ls.length - 1; y > 0; y-- )
queue.put1DRangeKernel(bprops[y], 0, _ls[y]._a.length, group);
input.move();
}
} catch( IOException ex ) {
throw new RuntimeException(ex);
} finally {
context.release();
}
}
@Override public void join() {
throw new UnsupportedOperationException();
}
// static void softmax(Input input, FloatBuffer a, FloatBuffer e) {
// float max = Float.NEGATIVE_INFINITY;
// for( int o = 0; o < a.capacity(); o++ )
// if( max < a.get(o) )
// max = a.get(o);
// float scale = 0;
// for( int o = 0; o < a.capacity(); o++ ) {
// a.put(o, (float) Math.exp(a.get(o) - max));
// scale += a.get(o);
// }
// for( int o = 0; o < a.capacity(); o++ ) {
// a.put(o, a.get(o) / scale);
// e.put(o, (o == input.label() ? 1 : 0) - a.get(o));
// }
// }
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/hex/VarImp.java
|
package hex;
import water.Iced;
import water.Model;
import water.api.DocGen;
import water.api.Request.API;
import water.util.UIUtils;
import water.util.Utils;
import java.util.Arrays;
import java.util.Comparator;
public class VarImp extends Iced {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
/** Variable importance measurement method. */
enum VarImpMethod {
PERMUTATION_IMPORTANCE("Mean decrease accuracy"),
RELATIVE_IMPORTANCE("Relative importance");
private final String title;
VarImpMethod(String title) { this.title = title; }
@Override public String toString() { return title; }
}
@API(help="Variable importance of individual variables.")
public float[] varimp;
@API(help="Names of variables.")
protected String[] variables;
@API(help="Variable importance measurement method.")
public final VarImpMethod method;
@API(help="Max. number of variables to show.")
public final int max_var = 100;
@API(help="Scaled measurements.")
public final boolean scaled() { return false; }
public VarImp(float[] varimp) { this(varimp, null, VarImpMethod.RELATIVE_IMPORTANCE); }
public VarImp(float[] varimp, String[] variables) { this(varimp, variables, VarImpMethod.RELATIVE_IMPORTANCE); }
protected VarImp(float[] varimp, String[] variables, VarImpMethod method) {
this.varimp = varimp;
this.variables = variables;
this.method = method;
}
public String[] getVariables() { return variables; }
public void setVariables(String[] variables) { this.variables = variables; }
/** Generate variable importance HTML code. */
public final <T extends Model> StringBuilder toHTML(T model, StringBuilder sb) {
DocGen.HTML.section(sb,"Variable importance of input variables: " + method);
sb.append("<div class=\"alert\">");
sb.append(UIUtils.builderModelLink(model.getClass(), model._dataKey, model.responseName(), "Build a new model using selected variables", "redirectWithCols(this,'vi_chkb')"));
sb.append("</div>");
DocGen.HTML.arrayHead(sb);
// Create a sort order
Integer[] sortOrder = getSortOrder();
// Generate variable labels and raw scores
if (variables != null) DocGen.HTML.tableLine(sb, "Variable", variables, sortOrder, Math.min(max_var, variables.length), true, "vi_chkb");
if (varimp != null) DocGen.HTML.tableLine(sb, method.toString(), varimp, sortOrder, Math.min(max_var, variables.length));
// Print a specific information
toHTMLAppendMoreTableLines(sb, sortOrder);
DocGen.HTML.arrayTail(sb);
// Generate nice graph ;-)
toHTMLGraph(sb, sortOrder);
// And return the result
return sb;
}
protected StringBuilder toHTMLAppendMoreTableLines(StringBuilder sb, Integer[] sortOrder) {
return sb;
}
protected StringBuilder toHTMLGraph(StringBuilder sb, Integer[] sortOrder) {
return toHTMLGraph(sb, variables, varimp, sortOrder, max_var);
}
static final StringBuilder toHTMLGraph(StringBuilder sb, String[] names, float[] vals, Integer[] sortOrder, int max) {
Integer[] so = vals.length > max ? sortOrder : null;
// Generate a graph
DocGen.HTML.graph(sb, "graphvarimp", "g_varimp",
DocGen.HTML.toJSArray(new StringBuilder(), names, so, Math.min(max, vals.length)),
DocGen.HTML.toJSArray(new StringBuilder(), vals , so, Math.min(max, vals.length))
);
sb.append("<button id=\"sortBars\" class=\"btn btn-primary\">Sort</button>\n");
return sb;
}
/** By default provides a sort order according to raw scores stored in <code>varimp</code>. */
protected Integer[] getSortOrder() {
Integer[] sortOrder = new Integer[varimp.length];
for(int i=0; i<sortOrder.length; i++) sortOrder[i] = i;
Arrays.sort(sortOrder, new Comparator<Integer>() {
@Override public int compare(Integer o1, Integer o2) { float f = varimp[o1]-varimp[o2]; return f<0 ? 1 : (f>0 ? -1 : 0); }
});
return sortOrder;
}
/** Variable importance measured as relative influence.
* It provides raw values, scaled values, and summary.
* Motivated by R's GBM package. */
public static class VarImpRI extends VarImp {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
public VarImpRI(float[] varimp) {
super(varimp);
}
@API(help = "Scaled values of raw scores with respect to maximal value (GBM call - relative.influnce(model, scale=T)).")
public float[] scaled_values() {
float[] scaled = new float[varimp.length];
int maxVar = 0;
for (int i=0; i<varimp.length; i++)
if (varimp[i] > varimp[maxVar]) maxVar = i;
float maxVal = varimp[maxVar];
for (int var=0; var<varimp.length; var++)
scaled[var] = varimp[var] / maxVal;
return scaled;
}
@API(help = "Summary of values in percent (the same as produced by summary.gbm).")
public float[] summary() {
float[] summary = new float[varimp.length];
float sum = Utils.sum(varimp);
for (int var=0; var<varimp.length; var++)
summary[var] = 100*varimp[var] / sum;
return summary;
}
@Override protected StringBuilder toHTMLAppendMoreTableLines(StringBuilder sb, Integer[] sortOrder ) {
StringBuilder ssb = super.toHTMLAppendMoreTableLines(sb, sortOrder);
DocGen.HTML.tableLine(sb, "Scaled values", scaled_values(), sortOrder, Math.min(max_var, varimp.length));
DocGen.HTML.tableLine(sb, "Influence in %", summary(), sortOrder, Math.min(max_var, varimp.length));
return ssb;
}
@Override protected StringBuilder toHTMLGraph(StringBuilder sb, Integer[] sortOrder) {
return toHTMLGraph(sb, variables, scaled_values(), sortOrder, max_var );
}
}
/** Variable importance measured as mean decrease in accuracy.
* It provides raw variable importance measures, SD and z-scores. */
public static class VarImpMDA extends VarImp {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="Variable importance SD for individual variables.")
public final float[] varimpSD;
/** Number of trees participating for producing variable importance measurements */
private final int ntrees;
public VarImpMDA(float[] varimp, float[] varimpSD, int ntrees) {
super(varimp,null,VarImpMethod.PERMUTATION_IMPORTANCE);
this.varimpSD = varimpSD;
this.ntrees = ntrees;
}
@API(help = "Z-score for individual variables")
public float[] z_score() {
float[] zscores = new float[varimp.length];
double rnt = Math.sqrt(ntrees);
for(int v = 0; v < varimp.length ; v++) zscores[v] = (float) (varimp[v] / (varimpSD[v] / rnt));
return zscores;
}
@Override protected StringBuilder toHTMLAppendMoreTableLines(StringBuilder sb, Integer[] sortOrder ) {
StringBuilder ssb = super.toHTMLAppendMoreTableLines(sb, sortOrder);
if (varimpSD!=null) {
DocGen.HTML.tableLine(sb, "SD", varimpSD, sortOrder, Math.min(max_var, varimp.length));
float[] zscores = z_score();
DocGen.HTML.tableLine(sb, "Z-scores", zscores, sortOrder, Math.min(max_var, varimp.length));
}
return ssb;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/anomaly/Anomaly.java
|
package hex.anomaly;
import hex.deeplearning.DeepLearningModel;
import water.*;
import water.api.*;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.Log;
import java.util.HashSet;
/**
* Deep Learning Based Anomaly Detection
*/
public class Anomaly extends Job.FrameJob {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
public static DocGen.FieldDoc[] DOC_FIELDS;
public static final String DOC_GET = "Anomaly Detection via Deep Learning";
@API(help = "Deep Learning Auto-Encoder Model ", required=true, filter= Default.class, json = true)
public Key dl_autoencoder_model;
@API(help = "(Optional) Threshold of reconstruction error for rows to be displayed in logs (default: 10x training MSE)", filter= Default.class, json = true)
public double thresh = -1;
@Override
protected final void execImpl() {
if (dl_autoencoder_model == null) throw new IllegalArgumentException("Deep Learning Model must be specified.");
DeepLearningModel dlm = UKV.get(dl_autoencoder_model);
if (dlm == null) throw new IllegalArgumentException("Deep Learning Model not found.");
if (!dlm.get_params().autoencoder) throw new IllegalArgumentException("Deep Learning Model must be built with autoencoder = true.");
if (thresh == -1) {
Log.info("Mean reconstruction error (MSE) of model on training data: " + dlm.mse());
thresh = 10*dlm.mse();
Log.info("Setting MSE threshold for anomaly to: " + thresh + ".");
}
StringBuilder sb = new StringBuilder();
sb.append("\nFinding outliers in frame " + source._key.toString() + ".\n");
Frame mse = dlm.scoreAutoEncoder(source);
sb.append("Storing the reconstruction error (MSE) for all rows under: " + dest() + ".\n");
Frame output = new Frame(dest(), new String[]{"Reconstruction.MSE"}, new Vec[]{mse.vecs()[0]});
output.delete_and_lock(null);
output.unlock(null);
final Vec mse_test = mse.anyVec();
sb.append("Mean reconstruction error (MSE): " + mse_test.mean() + ".\n");
// print stats and potential outliers
sb.append("The following data points have a reconstruction error greater than " + thresh + ":\n");
HashSet<Long> outliers = new HashSet<Long>();
for( long i=0; i<mse_test.length(); i++ ) {
if (mse_test.at(i) > thresh) {
outliers.add(i);
sb.append(String.format("row %d : MSE = %5f\n", i, mse_test.at(i)));
}
}
Log.info(sb);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/deepfeatures/DeepFeatures.java
|
package hex.deepfeatures;
import hex.deeplearning.DeepLearningModel;
import water.Job;
import water.Key;
import water.UKV;
import water.api.DocGen;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.Log;
import java.util.HashSet;
/**
* Deep Learning Based Feature Extractor
* For each row in the input frame (source), make predictions with the Auto-Encoder model
* and extract the last hidden layer's neuron activation values as new features.
*/
public class DeepFeatures extends Job.FrameJob {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
public static DocGen.FieldDoc[] DOC_FIELDS;
public static final String DOC_GET = "Deep Learning Feature Extractor";
@API(help = "Deep Learning Model", required=true, filter= Default.class, json = true)
public Key dl_model;
@API(help = "Which hidden layer (index) to extract (default: -1 -> last hidden layer)", required=true, lmin=-1, filter= Default.class, json = true)
public int layer = -1;
@Override
protected final void execImpl() {
if (dl_model == null) throw new IllegalArgumentException("Deep Learning Model must be specified.");
DeepLearningModel dlm = UKV.get(dl_model);
if (dlm == null) throw new IllegalArgumentException("Deep Learning Model not found.");
StringBuilder sb = new StringBuilder();
if (layer < -1 || layer > dlm.get_params().hidden.length-1) throw new IllegalArgumentException("Layer must be either -1 or between 0 and " + (dlm.get_params().hidden.length-1));
if (layer == -1) layer = dlm.get_params().hidden.length-1;
int features = dlm.get_params().hidden[layer];
sb.append("\nTransforming frame '" + source._key.toString() + "' with " + source.numCols() + " into " + features + " features with model '" + dl_model + "'\n");
Frame df = dlm.scoreDeepFeatures(source, layer);
sb.append("Storing the new features under: " + dest() + ".\n");
Frame output = new Frame(dest(), df.names(), df.vecs());
output.delete_and_lock(null);
output.unlock(null);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/deeplearning/DeepLearning.java
|
package hex.deeplearning;
import hex.*;
import water.*;
import water.util.*;
import static water.util.MRUtils.sampleFrame;
import static water.util.MRUtils.sampleFrameStratified;
import hex.FrameTask.DataInfo;
import water.api.*;
import water.fvec.Frame;
import water.fvec.RebalanceDataSet;
import water.fvec.Vec;
import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.Random;
/**
* Deep Learning Neural Net implementation based on MRTask2
*/
public class DeepLearning extends Job.ValidatedJob {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
public static DocGen.FieldDoc[] DOC_FIELDS;
public static final String DOC_GET = "Deep Learning";
/**
* A model key associated with a previously trained Deep Learning
* model. This option allows users to build a new model as a
* continuation of a previously generated model (e.g., by a grid search).
*/
@API(help = "Model checkpoint to resume training with", filter= Default.class, json = true)
public Key checkpoint;
/**
* If enabled, store the best model under the destination key of this model at the end of training.
* Only applicable if training is not cancelled.
*/
@API(help = "If enabled, override the final model with the best model found during training", filter= Default.class, json = true)
public boolean override_with_best_model = true;
/**
* Unlock expert mode parameters that can affect model building speed,
* predictive accuracy and scoring. Leaving expert mode parameters at default
* values is fine for many problems, but best results on complex datasets are often
* only attainable via expert mode options.
*/
@API(help = "Enable expert mode (to access all options from GUI)", filter = Default.class, json = true)
public boolean expert_mode = false;
@API(help = "Auto-Encoder (Experimental)", filter= Default.class, json = true)
public boolean autoencoder = false;
@API(help="Use all factor levels of categorical variables. Otherwise, the first factor level is omitted (without loss of accuracy). Useful for variable importances and auto-enabled for autoencoder.",filter=Default.class, json=true, importance = ParamImportance.SECONDARY)
public boolean use_all_factor_levels = true;
/*Neural Net Topology*/
/**
* The activation function (non-linearity) to be used by the neurons in the hidden layers.
* Tanh: Hyperbolic tangent function (same as scaled and shifted sigmoid).
* Rectifier: Chooses the maximum of (0, x) where x is the input value.
* Maxout: Choose the maximum coordinate of the input vector.
* With Dropout: Zero out a random user-given fraction of the
* incoming weights to each hidden layer during training, for each
* training row. This effectively trains exponentially many models at
* once, and can improve generalization.
*/
@API(help = "Activation function", filter = Default.class, json = true, importance = ParamImportance.CRITICAL)
public Activation activation = Activation.Rectifier;
/**
* The number and size of each hidden layer in the model.
* For example, if a user specifies "100,200,100" a model with 3 hidden
* layers will be produced, and the middle hidden layer will have 200
* neurons. To specify a grid search, add parentheses around each
* model's specification: "(100,100), (50,50,50), (20,20,20,20)".
*/
@API(help = "Hidden layer sizes (e.g. 100,100). Grid search: (10,10), (20,20,20)", filter = Default.class, json = true, importance = ParamImportance.CRITICAL)
public int[] hidden = new int[] { 200, 200 };
/**
* The number of passes over the training dataset to be carried out.
* It is recommended to start with lower values for initial grid searches.
* This value can be modified during checkpoint restarts and allows continuation
* of selected models.
*/
@API(help = "How many times the dataset should be iterated (streamed), can be fractional", filter = Default.class, dmin = 1e-3, json = true, importance = ParamImportance.CRITICAL)
public double epochs = 10;
/**
* The number of training data rows to be processed per iteration. Note that
* independent of this parameter, each row is used immediately to update the model
* with (online) stochastic gradient descent. This parameter controls the
* synchronization period between nodes in a distributed environment and the
* frequency at which scoring and model cancellation can happen. For example, if
* it is set to 10,000 on H2O running on 4 nodes, then each node will
* process 2,500 rows per iteration, sampling randomly from their local data.
* Then, model averaging between the nodes takes place, and scoring can happen
* (dependent on scoring interval and duty factor). Special values are 0 for
* one epoch per iteration, -1 for processing the maximum amount of data
* per iteration (if **replicate training data** is enabled, N epochs
* will be trained per iteration on N nodes, otherwise one epoch). Special value
* of -2 turns on automatic mode (auto-tuning).
*/
@API(help = "Number of training samples (globally) per MapReduce iteration. Special values are 0: one epoch, -1: all available data (e.g., replicated training data), -2: automatic", filter = Default.class, lmin = -2, json = true, importance = ParamImportance.SECONDARY)
public long train_samples_per_iteration = -2;
// @API(help = "Target ratio of communication overhead to computation. Only for multi-node operation and train_samples_per_iteration=-2 (auto-tuning)", filter = Default.class, dmin = 1e-3, dmax=0.999, json = true, importance = ParamImportance.SECONDARY)
public double target_ratio_comm_to_comp = 0.02;
/**
* The random seed controls sampling and initialization. Reproducible
* results are only expected with single-threaded operation (i.e.,
* when running on one node, turning off load balancing and providing
* a small dataset that fits in one chunk). In general, the
* multi-threaded asynchronous updates to the model parameters will
* result in (intentional) race conditions and non-reproducible
* results. Note that deterministic sampling and initialization might
* still lead to some weak sense of determinism in the model.
*/
@API(help = "Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded", filter = Default.class, json = true)
public long seed = new Random().nextLong();
/*Adaptive Learning Rate*/
/**
* The implemented adaptive learning rate algorithm (ADADELTA) automatically
* combines the benefits of learning rate annealing and momentum
* training to avoid slow convergence. Specification of only two
* parameters (rho and epsilon) simplifies hyper parameter search.
* In some cases, manually controlled (non-adaptive) learning rate and
* momentum specifications can lead to better results, but require the
* specification (and hyper parameter search) of up to 7 parameters.
* If the model is built on a topology with many local minima or
* long plateaus, it is possible for a constant learning rate to produce
* sub-optimal results. Learning rate annealing allows digging deeper into
* local minima, while rate decay allows specification of different
* learning rates per layer. When the gradient is being estimated in
* a long valley in the optimization landscape, a large learning rate
* can cause the gradient to oscillate and move in the wrong
* direction. When the gradient is computed on a relatively flat
* surface with small learning rates, the model can converge far
* slower than necessary.
*/
@API(help = "Adaptive learning rate (ADADELTA)", filter = Default.class, json = true, importance = ParamImportance.SECONDARY)
public boolean adaptive_rate = true;
/**
* The first of two hyper parameters for adaptive learning rate (ADADELTA).
* It is similar to momentum and relates to the memory of prior weight updates.
* Typical values are between 0.9 and 0.999.
* This parameter is only active if adaptive learning rate is enabled.
*/
@API(help = "Adaptive learning rate time decay factor (similarity to prior updates)", filter = Default.class, dmin = 0.01, dmax = 1, json = true, importance = ParamImportance.SECONDARY)
public double rho = 0.99;
/**
* The second of two hyper parameters for adaptive learning rate (ADADELTA).
* It is similar to learning rate annealing during initial training
* and momentum at later stages where it allows forward progress.
* Typical values are between 1e-10 and 1e-4.
* This parameter is only active if adaptive learning rate is enabled.
*/
@API(help = "Adaptive learning rate smoothing factor (to avoid divisions by zero and allow progress)", filter = Default.class, dmin = 1e-15, dmax = 1, json = true, importance = ParamImportance.SECONDARY)
public double epsilon = 1e-8;
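// Editorial sketch (not part of the original source): the standard ADADELTA recurrences
// that rho and epsilon parameterize, for a single weight. Names are hypothetical and the
// actual H2O update code lives in the model/neuron classes and may differ in detail.
private static float adaDeltaStepSketch(float g, float[] avgSqGrad, float[] avgSqUpdate, double rho, double eps) {
avgSqGrad[0] = (float) (rho * avgSqGrad[0] + (1 - rho) * g * g); // running average of squared gradients
float dx = (float) (-Math.sqrt(avgSqUpdate[0] + eps) / Math.sqrt(avgSqGrad[0] + eps) * g); // scaled step
avgSqUpdate[0] = (float) (rho * avgSqUpdate[0] + (1 - rho) * dx * dx); // running average of squared updates
return dx; // weight delta to apply
}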
/*Learning Rate*/
/**
* When adaptive learning rate is disabled, the magnitude of the weight
* updates is determined by the user-specified learning rate
* (potentially annealed), and are a function of the difference
* between the predicted value and the target value. That difference,
* generally called delta, is only available at the output layer. To
* correct the output at each hidden layer, back propagation is
* used. Momentum modifies back propagation by allowing prior
* iterations to influence the current update. Using the momentum
* parameter can aid in avoiding local minima and the associated
* instability. Too much momentum can lead to instability, which is
* why it is best ramped up slowly.
* This parameter is only active if adaptive learning rate is disabled.
*/
@API(help = "Learning rate (higher => less stable, lower => slower convergence)", filter = Default.class, dmin = 1e-10, dmax = 1, json = true, importance = ParamImportance.SECONDARY)
public double rate = .005;
/**
* Learning rate annealing reduces the learning rate to "freeze" into
* local minima in the optimization landscape. The annealing rate is the
* inverse of the number of training samples it takes to cut the learning rate in half
* (e.g., 1e-6 means that it takes 1e6 training samples to halve the learning rate).
* This parameter is only active if adaptive learning rate is disabled.
*/
@API(help = "Learning rate annealing: rate / (1 + rate_annealing * samples)", filter = Default.class, dmin = 0, dmax = 1, json = true, importance = ParamImportance.SECONDARY)
public double rate_annealing = 1e-6;
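// Editorial sketch (matches the help text above): the annealed learning rate after a given
// number of processed training samples; e.g. with rate_annealing = 1e-6 the rate is halved
// after 1e6 samples.
private static double annealedRateSketch(double rate, double rate_annealing, double samples) {
return rate / (1 + rate_annealing * samples);
}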
/**
* The learning rate decay parameter controls the change of learning rate across layers.
* For example, assume the rate parameter is set to 0.01, and the rate_decay parameter is set to 0.5.
* Then the learning rate for the weights connecting the input and first hidden layer will be 0.01,
* the learning rate for the weights connecting the first and the second hidden layer will be 0.005,
* and the learning rate for the weights connecting the second and third hidden layer will be 0.0025, etc.
* This parameter is only active if adaptive learning rate is disabled.
*/
@API(help = "Learning rate decay factor between layers (N-th layer: rate*alpha^(N-1))", filter = Default.class, dmin = 0, json = true, importance = ParamImportance.EXPERT)
public double rate_decay = 1.0;
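// Editorial sketch (follows the example above): per-layer learning rate, rate * rate_decay^N
// for the (N+1)-th hidden layer, so rate = 0.01 and rate_decay = 0.5 give 0.01, 0.005, 0.0025, ...
private static double perLayerRateSketch(double rate, double rate_decay, int hiddenLayerIndex) {
return rate * Math.pow(rate_decay, hiddenLayerIndex);
}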
/*Momentum*/
/**
* The momentum_start parameter controls the amount of momentum at the beginning of training.
* This parameter is only active if adaptive learning rate is disabled.
*/
@API(help = "Initial momentum at the beginning of training (try 0.5)", filter = Default.class, dmin = 0, dmax = 0.9999999999, json = true, importance = ParamImportance.SECONDARY)
public double momentum_start = 0;
/**
* The momentum_ramp parameter controls the amount of learning for which momentum increases
* (assuming momentum_stable is larger than momentum_start). The ramp is measured in the number
* of training samples.
* This parameter is only active if adaptive learning rate is disabled.
*/
@API(help = "Number of training samples for which momentum increases", filter = Default.class, dmin = 1, json = true, importance = ParamImportance.SECONDARY)
public double momentum_ramp = 1e6;
/**
* The momentum_stable parameter controls the final momentum value reached after momentum_ramp training samples.
* The momentum used for training will remain the same for training beyond reaching that point.
* This parameter is only active if adaptive learning rate is disabled.
*/
@API(help = "Final momentum after the ramp is over (try 0.99)", filter = Default.class, dmin = 0, dmax = 0.9999999999, json = true, importance = ParamImportance.SECONDARY)
public double momentum_stable = 0;
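// Editorial sketch (assumption, simplified): the momentum schedule implied by the three
// parameters above - a linear ramp from momentum_start to momentum_stable over
// momentum_ramp training samples, constant afterwards.
private static double momentumSketch(double momentum_start, double momentum_stable, double momentum_ramp, double samples) {
if (momentum_ramp <= 0 || samples >= momentum_ramp) return momentum_stable;
return momentum_start + (momentum_stable - momentum_start) * samples / momentum_ramp;
}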
/**
* The Nesterov accelerated gradient descent method is a modification to
* traditional gradient descent for convex functions. The method relies on
* gradient information at various points to build a polynomial approximation that
* minimizes the residuals in fewer iterations of the descent.
* This parameter is only active if adaptive learning rate is disabled.
*/
@API(help = "Use Nesterov accelerated gradient (recommended)", filter = Default.class, json = true, importance = ParamImportance.SECONDARY)
public boolean nesterov_accelerated_gradient = true;
/*Regularization*/
/**
* A fraction of the features for each training row to be omitted from training in order
* to improve generalization (dimension sampling).
*/
@API(help = "Input layer dropout ratio (can improve generalization, try 0.1 or 0.2)", filter = Default.class, dmin = 0, dmax = 1, json = true, importance = ParamImportance.SECONDARY)
public double input_dropout_ratio = 0.0;
/**
* A fraction of the inputs for each hidden layer to be omitted from training in order
* to improve generalization. Defaults to 0.5 for each hidden layer if omitted.
*/
@API(help = "Hidden layer dropout ratios (can improve generalization), specify one value per hidden layer, defaults to 0.5", filter = Default.class, dmin = 0, dmax = 1, json = true, importance = ParamImportance.SECONDARY)
public double[] hidden_dropout_ratios;
/**
* A regularization method that constrains the absolute value of the weights and
* has the net effect of dropping some weights (setting them to zero) from a model
* to reduce complexity and avoid overfitting.
*/
@API(help = "L1 regularization (can add stability and improve generalization, causes many weights to become 0)", filter = Default.class, dmin = 0, dmax = 1, json = true, importance = ParamImportance.SECONDARY)
public double l1 = 0.0;
/**
* A regularization method that constrains the sum of the squared
* weights. This method introduces bias into parameter estimates, but
* frequently produces substantial gains in modeling as estimate variance is
* reduced.
*/
@API(help = "L2 regularization (can add stability and improve generalization, causes many weights to be small", filter = Default.class, dmin = 0, dmax = 1, json = true, importance = ParamImportance.SECONDARY)
public double l2 = 0.0;
/**
* A maximum on the sum of the squared incoming weights into
* any one neuron. This tuning parameter is especially useful for unbounded
* activation functions such as Maxout or Rectifier.
*/
@API(help = "Constraint for squared sum of incoming weights per unit (e.g. for Rectifier)", filter = Default.class, dmin = 1e-10, json = true, importance = ParamImportance.EXPERT)
public float max_w2 = Float.POSITIVE_INFINITY;
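// Editorial sketch (assumption): the usual way a squared-norm constraint like max_w2 is
// enforced - rescale a unit's incoming weights whenever their squared sum exceeds the limit.
private static void maxW2Sketch(float[] incomingWeights, float max_w2) {
float ss = 0;
for (float w : incomingWeights) ss += w * w;
if (ss > max_w2) {
float scale = (float) Math.sqrt(max_w2 / ss);
for (int i = 0; i < incomingWeights.length; i++) incomingWeights[i] *= scale;
}
}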
/*Initialization*/
/**
* The distribution from which initial weights are to be drawn. The default
* option is an optimized initialization that considers the size of the network.
* The "uniform" option uses a uniform distribution with a mean of 0 and a given
* interval. The "normal" option draws weights from the standard normal
* distribution with a mean of 0 and given standard deviation.
*/
@API(help = "Initial Weight Distribution", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public InitialWeightDistribution initial_weight_distribution = InitialWeightDistribution.UniformAdaptive;
/**
* The scale of the distribution function for Uniform or Normal distributions.
* For Uniform, the values are drawn uniformly from -initial_weight_scale...initial_weight_scale.
* For Normal, the values are drawn from a Normal distribution with a standard deviation of initial_weight_scale.
*/
@API(help = "Uniform: -value...value, Normal: stddev)", filter = Default.class, dmin = 0, json = true, importance = ParamImportance.EXPERT)
public double initial_weight_scale = 1.0;
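// Editorial sketch (based on the UniformAdaptive note in queryArgumentValueSet below):
// the adaptive option ignores initial_weight_scale and draws weights uniformly from
// +/- sqrt(6 / (units of this layer + units of the previous layer)).
private static double uniformAdaptiveScaleSketch(int unitsPrevLayer, int unitsThisLayer) {
return Math.sqrt(6.0 / (unitsPrevLayer + unitsThisLayer));
}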
/**
* The loss (error) function to be minimized by the model.
* Cross Entropy loss is used when the model output consists of independent
* hypotheses, and the outputs can be interpreted as the probability that each
* hypothesis is true. Cross entropy is the recommended loss function when the
* target values are class labels, and especially for imbalanced data.
* It strongly penalizes error in the prediction of the actual class label.
* Mean Square loss is used when the model outputs are continuous real values, but can
* be used for classification as well (where it emphasizes the error on all
* output classes, not just for the actual class).
*/
@API(help = "Loss function", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public Loss loss = Loss.Automatic;
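// Editorial sketch (assumption, simplified): the two non-automatic loss options for one
// classification row, given predicted class probabilities p and the actual class label y.
private static double lossSketch(Loss loss, double[] p, int y) {
if (loss == Loss.CrossEntropy) return -Math.log(Math.max(p[y], 1e-15)); // penalizes the predicted probability of the true class
double mse = 0; // MeanSquare: squared error against the one-hot target, averaged over outputs
for (int o = 0; o < p.length; o++) {
double d = (o == y ? 1 : 0) - p[o];
mse += d * d;
}
return mse / p.length;
}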
/*Scoring*/
/**
* The minimum time (in seconds) to elapse between model scoring. The actual
* interval is determined by the number of training samples per iteration and the scoring duty cycle.
*/
@API(help = "Shortest time interval (in secs) between model scoring", filter = Default.class, dmin = 0, json = true, importance = ParamImportance.SECONDARY)
public double score_interval = 5;
/**
* The number of training dataset points to be used for scoring. Will be
* randomly sampled. Use 0 for selecting the entire training dataset.
*/
@API(help = "Number of training set samples for scoring (0 for all)", filter = Default.class, lmin = 0, json = true, importance = ParamImportance.EXPERT)
public long score_training_samples = 10000L;
/**
* The number of validation dataset points to be used for scoring. Can be
* randomly sampled or stratified (if "balance classes" is set and "score
* validation sampling" is set to stratify). Use 0 for selecting the entire
* validation dataset.
*/
@API(help = "Number of validation set samples for scoring (0 for all)", filter = Default.class, lmin = 0, json = true, importance = ParamImportance.EXPERT)
public long score_validation_samples = 0L;
/**
* Maximum fraction of wall clock time spent on model scoring on training and validation samples,
* and on diagnostics such as computation of feature importances (i.e., not on training).
*/
@API(help = "Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring).", filter = Default.class, dmin = 0, dmax = 1, json = true, importance = ParamImportance.EXPERT)
public double score_duty_cycle = 0.1;
/**
* The stopping criterion in terms of classification error (1-accuracy) on the
* training data scoring dataset. When the error is at or below this threshold,
* training stops.
*/
@API(help = "Stopping criterion for classification error fraction on training data (-1 to disable)", filter = Default.class, dmin=-1, dmax=1, json = true, importance = ParamImportance.EXPERT)
public double classification_stop = 0;
/**
* The stopping criterion in terms of regression error (MSE) on the training
* data scoring dataset. When the error is at or below this threshold, training
* stops.
*/
@API(help = "Stopping criterion for regression error (MSE) on training data (-1 to disable)", filter = Default.class, dmin=-1, json = true, importance = ParamImportance.EXPERT)
public double regression_stop = 1e-6;
/**
* Enable quiet mode for less output to standard output.
*/
@API(help = "Enable quiet mode for less output to standard output", filter = Default.class, json = true)
public boolean quiet_mode = false;
/**
* For classification models, the maximum size (in terms of classes) of the
* confusion matrix for it to be printed. This option is meant to avoid printing
* extremely large confusion matrices.
*/
@API(help = "Max. size (number of classes) for confusion matrices to be shown", filter = Default.class, json = true)
public int max_confusion_matrix_size = 20;
/**
* The maximum number (top K) of predictions to use for hit ratio computation (for multi-class only, 0 to disable)
*/
@API(help = "Max. number (top K) of predictions to use for hit ratio computation (for multi-class only, 0 to disable)", filter = Default.class, lmin=0, json = true, importance = ParamImportance.EXPERT)
public int max_hit_ratio_k = 10;
/*Imbalanced Classes*/
/**
* For imbalanced data, balance training data class counts via
* over/under-sampling. This can result in improved predictive accuracy.
*/
@API(help = "Balance training data class counts via over/under-sampling (for imbalanced data)", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public boolean balance_classes = false;
/**
* Desired over/under-sampling ratios per class (lexicographic order). Only when balance_classes is enabled. If not specified, they will be automatically computed to obtain class balance during training.
*/
@API(help = "Desired over/under-sampling ratios per class (lexicographic order).", filter = Default.class, dmin = 0, json = true, importance = ParamImportance.SECONDARY)
public float[] class_sampling_factors;
/**
* When classes are balanced, limit the resulting dataset size to the
* specified multiple of the original dataset size.
*/
@API(help = "Maximum relative size of the training data after balancing class counts (can be less than 1.0)", filter = Default.class, json = true, dmin=1e-3, importance = ParamImportance.EXPERT)
public float max_after_balance_size = 5.0f;
/**
* Method used to sample the validation dataset for scoring, see Score Validation Samples above.
*/
@API(help = "Method used to sample validation dataset for scoring", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public ClassSamplingMethod score_validation_sampling = ClassSamplingMethod.Uniform;
/*Misc*/
/**
* Gather diagnostics for hidden layers, such as mean and RMS values of learning
* rate, momentum, weights and biases.
*/
@API(help = "Enable diagnostics for hidden layers", filter = Default.class, json = true)
public boolean diagnostics = true;
/**
* Whether to compute variable importances for input features.
* The implemented method (by Gedeon) considers the weights connecting the
* input features to the first two hidden layers.
*/
@API(help = "Compute variable importances for input features (Gedeon method) - can be slow for large networks", filter = Default.class, json = true)
public boolean variable_importances = false;
/**
* Enable fast mode (minor approximation in back-propagation), should not affect results significantly.
*/
@API(help = "Enable fast mode (minor approximation in back-propagation)", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public boolean fast_mode = true;
/**
* Ignore constant training columns (no information can be gained anyway).
*/
@API(help = "Ignore constant training columns (no information can be gained anyway)", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public boolean ignore_const_cols = true;
/**
* Increase training speed on small datasets by splitting it into many chunks
* to allow utilization of all cores.
*/
@API(help = "Force extra load balancing to increase training speed for small datasets (to keep all cores busy)", filter = Default.class, json = true)
public boolean force_load_balance = true;
/**
* Replicate the entire training dataset onto every node for faster training on small datasets.
*/
@API(help = "Replicate the entire training dataset onto every node for faster training on small datasets", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public boolean replicate_training_data = true;
/**
* Run on a single node for fine-tuning of model parameters. Can be useful for
* checkpoint resumes after training on multiple nodes for fast initial
* convergence.
*/
@API(help = "Run on a single node for fine-tuning of model parameters", filter = Default.class, json = true)
public boolean single_node_mode = false;
/**
* Enable shuffling of training data (on each node). This option is
* recommended if training data is replicated on N nodes, and the number of training samples per iteration
* is close to N times the dataset size, where all nodes train with (almost) all
* the data. It is automatically enabled if the number of training samples per iteration is set to -1 (or to N
* times the dataset size or larger).
*/
@API(help = "Enable shuffling of training data (recommended if training data is replicated and train_samples_per_iteration is close to #nodes x #rows)", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public boolean shuffle_training_data = false;
// @API(help = "Handling of missing values. Either Skip or MeanImputation.", filter= Default.class, json = true)
public MissingValuesHandling missing_values_handling = MissingValuesHandling.MeanImputation;
@API(help = "Sparse data handling (Experimental).", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public boolean sparse = false;
@API(help = "Use a column major weight matrix for input layer. Can speed up forward propagation, but might slow down backpropagation (Experimental).", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public boolean col_major = false;
@API(help = "Average activation for sparse auto-encoder (Experimental)", filter= Default.class, json = true)
public double average_activation = 0;
@API(help = "Sparsity regularization (Experimental)", filter= Default.class, json = true)
public double sparsity_beta = 0;
@API(help = "Max. number of categorical features, enforced via hashing (Experimental).", filter= Default.class, lmin = 1, json = true)
public int max_categorical_features = Integer.MAX_VALUE;
@API(help = "Force reproducibility on small data (will be slow - only uses 1 thread)", filter= Default.class, json = true)
public boolean reproducible = false;
public enum MissingValuesHandling {
Skip, MeanImputation
}
public enum ClassSamplingMethod {
Uniform, Stratified
}
public enum InitialWeightDistribution {
UniformAdaptive, Uniform, Normal
}
/**
* Activation functions
*/
public enum Activation {
Tanh, TanhWithDropout, Rectifier, RectifierWithDropout, Maxout, MaxoutWithDropout
}
/**
* Loss functions
* CrossEntropy is recommended
*/
public enum Loss {
Automatic, MeanSquare, CrossEntropy
}
// the following parameters can only be specified in expert mode
transient final String [] expert_options = new String[] {
"use_all_factor_levels",
"loss",
"max_w2",
"score_training_samples",
"score_validation_samples",
"initial_weight_distribution",
"initial_weight_scale",
"diagnostics",
"rate_decay",
"score_duty_cycle",
"variable_importances",
"fast_mode",
"score_validation_sampling",
"ignore_const_cols",
"force_load_balance",
"replicate_training_data",
"shuffle_training_data",
"nesterov_accelerated_gradient",
"classification_stop",
"regression_stop",
"quiet_mode",
"max_confusion_matrix_size",
"max_hit_ratio_k",
"hidden_dropout_ratios",
"single_node_mode",
"sparse",
"col_major",
"autoencoder",
"average_activation",
"sparsity_beta",
"max_categorical_features",
};
// the following parameters can be modified when restarting from a checkpoint
transient final String [] cp_modifiable = new String[] {
"expert_mode",
"seed",
"epochs",
"score_interval",
"train_samples_per_iteration",
"target_ratio_comm_to_comp",
"score_duty_cycle",
"classification_stop",
"regression_stop",
"quiet_mode",
"max_confusion_matrix_size",
"max_hit_ratio_k",
"diagnostics",
"variable_importances",
"force_load_balance",
"replicate_training_data",
"shuffle_training_data",
"single_node_mode",
"sparse",
"col_major",
// Allow modification of the regularization parameters after a checkpoint restart
"l1",
"l2",
"max_w2",
};
/**
* Helper to specify which arguments trigger a refresh on change
* @param ver
*/
@Override
protected void registered(RequestServer.API_VERSION ver) {
super.registered(ver);
for (Argument arg : _arguments) {
if ( arg._name.equals("activation") || arg._name.equals("initial_weight_distribution")
|| arg._name.equals("expert_mode") || arg._name.equals("adaptive_rate")
|| arg._name.equals("replicate_training_data")
|| arg._name.equals("balance_classes")
|| arg._name.equals("n_folds")
|| arg._name.equals("autoencoder")
|| arg._name.equals("checkpoint")) {
arg.setRefreshOnChange();
}
}
}
/**
* Helper to handle arguments based on existing input values
* @param arg
* @param inputArgs
*/
@Override protected void queryArgumentValueSet(Argument arg, java.util.Properties inputArgs) {
super.queryArgumentValueSet(arg, inputArgs);
if (!arg._name.equals("checkpoint") && !Utils.contains(cp_modifiable, arg._name)) {
if (checkpoint != null) {
arg.disable("Taken from model checkpoint.");
final DeepLearningModel cp_model = UKV.get(checkpoint);
if (cp_model == null) {
throw new IllegalArgumentException("Checkpointed model was not found.");
}
if (cp_model.model_info().unstable()) {
throw new IllegalArgumentException("Checkpointed model was unstable. Not restarting.");
}
return;
}
}
if(arg._name.equals("initial_weight_scale") &&
(initial_weight_distribution == InitialWeightDistribution.UniformAdaptive)
) {
arg.disable("Using sqrt(6 / (# units + # units of previous layer)) for Uniform distribution.", inputArgs);
}
if (classification) {
if(arg._name.equals("regression_stop")) {
arg.disable("Only for regression.", inputArgs);
}
if((arg._name.equals("max_after_balance_size") || arg._name.equals("class_sampling_factors")) && !balance_classes) {
arg.disable("Requires balance_classes.", inputArgs);
}
}
else {
if(arg._name.equals("classification_stop")
|| arg._name.equals("max_confusion_matrix_size")
|| arg._name.equals("max_hit_ratio_k")
|| arg._name.equals("max_after_balance_size")
|| arg._name.equals("balance_classes")
|| arg._name.equals("class_sampling_factors")
) {
arg.disable("Only for classification.", inputArgs);
}
if (validation != null && arg._name.equals("score_validation_sampling")) {
score_validation_sampling = ClassSamplingMethod.Uniform;
arg.disable("Using uniform sampling for validation scoring dataset.", inputArgs);
}
}
if ((arg._name.equals("score_validation_samples") || arg._name.equals("score_validation_sampling")) && validation == null) {
arg.disable("Requires a validation data set.", inputArgs);
}
if (Utils.contains(expert_options, arg._name) && !expert_mode) {
arg.disable("Only in expert mode.", inputArgs);
}
if (!adaptive_rate) {
if (arg._name.equals("rho") || arg._name.equals("epsilon")) {
arg.disable("Only for adaptive learning rate.", inputArgs);
rho = 0;
epsilon = 0;
}
} else {
if (arg._name.equals("rate") || arg._name.equals("rate_annealing") || arg._name.equals("rate_decay") || arg._name.equals("nesterov_accelerated_gradient")
|| arg._name.equals("momentum_start") || arg._name.equals("momentum_ramp") || arg._name.equals("momentum_stable") ) {
arg.disable("Only for non-adaptive learning rate.", inputArgs);
momentum_start = 0;
momentum_stable = 0;
}
}
if (arg._name.equals("hidden_dropout_ratios")) {
if (activation != Activation.TanhWithDropout && activation != Activation.MaxoutWithDropout && activation != Activation.RectifierWithDropout) {
arg.disable("Only for activation functions with dropout.", inputArgs);
}
}
if (arg._name.equals("replicate_training_data") && (H2O.CLOUD.size() == 1)) {
arg.disable("Only for multi-node operation.");
replicate_training_data = false;
}
if (arg._name.equals("single_node_mode") && (H2O.CLOUD.size() == 1 || !replicate_training_data)) {
arg.disable("Only for multi-node operation with replication.");
single_node_mode = false;
}
if (arg._name.equals("use_all_factor_levels") && autoencoder ) {
arg.disable("Automatically enabled for auto-encoders.");
use_all_factor_levels = true;
}
if(arg._name.equals("override_with_best_model") && n_folds != 0) {
arg.disable("Only without n-fold cross-validation.", inputArgs);
override_with_best_model = false;
}
if(arg._name.equals("average_activation") && !autoencoder) {
arg.disable("Only for autoencoder.", inputArgs);
}
if(arg._name.equals("sparsity_beta") && !autoencoder) {
arg.disable("Only for autoencoder.", inputArgs);
}
}
/** Print model parameters as JSON */
@Override public boolean toHTML(StringBuilder sb) {
try {
return makeJsonBox(sb);
} catch (Throwable t) {
return false;
}
}
/**
* Return a query link to this page
* @param k Model Key
* @param content Link text
* @return HTML Link
*/
public static String link(Key k, String content) {
return link(k, content, null, null, null);
}
/**
* Return a query link to this page
* @param k Model Key
* @param content Link text
* @param cp Key to checkpoint to continue training with (optional)
* @param response Response
* @param val Validation data set key
* @return HTML Link
*/
public static String link(Key k, String content, Key cp, String response, Key val) {
DeepLearning req = new DeepLearning();
RString rs = new RString("<a href='" + req.href() + ".query?source=%$key" +
(cp == null ? "" : "&checkpoint=%$cp") +
(response == null ? "" : "&response=%$resp") +
(val == null ? "" : "&validation=%$valkey") +
"'>%content</a>");
rs.replace("key", k.toString());
rs.replace("content", content);
if (cp != null) rs.replace("cp", cp.toString());
if (response != null) rs.replace("resp", response);
if (val != null) rs.replace("valkey", val);
return rs.toString();
}
/**
* Report the relative progress of building a Deep Learning model (measured by how many epochs are done)
* @return floating point number between 0 and 1
*/
@Override public float progress(){
if(UKV.get(dest()) == null)return 0;
DeepLearningModel m = UKV.get(dest());
if (m != null && m.model_info()!=null ) {
final float p = (float) Math.min(1, (m.epoch_counter / m.model_info().get_params().epochs));
return cv_progress(p);
}
return 0;
}
@Override
protected final void execImpl() {
try {
buildModel();
if (n_folds > 0) CrossValUtils.crossValidate(this);
} finally {
delete();
state = UKV.<Job>get(self()).state;
new TAtomic<DeepLearningModel>() {
@Override
public DeepLearningModel atomic(DeepLearningModel m) {
if (m != null) m.get_params().state = state;
return m;
}
}.invoke(dest());
}
}
/**
* Train a Deep Learning model, assumes that all members are populated
* If checkpoint == null, then start training a new model, otherwise continue from a checkpoint
*/
private void buildModel() {
DeepLearningModel cp = null;
if (checkpoint == null) {
cp = initModel();
cp.start_training(null);
} else {
final DeepLearningModel previous = UKV.get(checkpoint);
if (previous == null) throw new IllegalArgumentException("Checkpoint not found.");
Log.info("Resuming from checkpoint.");
if (n_folds != 0) {
throw new UnsupportedOperationException("n_folds must be 0: Cross-validation is not supported during checkpoint restarts.");
}
else {
((ValidatedJob)previous.job()).xval_models = null; //remove existing cross-validation keys after checkpoint restart
}
if (source == null || (previous.model_info().get_params().source != null && !Arrays.equals(source._key._kb, previous.model_info().get_params().source._key._kb))) {
throw new IllegalArgumentException("source must be the same as for the checkpointed model.");
}
autoencoder = previous.model_info().get_params().autoencoder;
if (!autoencoder && (response == null || !source.names()[source.find(response)].equals(previous.responseName()))) {
throw new IllegalArgumentException("response must be the same as for the checkpointed model.");
}
// if (!autoencoder && (response == null || !Arrays.equals(response._key._kb, previous.model_info().get_params().response._key._kb))) {
// throw new IllegalArgumentException("response must be the same as for the checkpointed model.");
// }
if (Utils.difference(ignored_cols, previous.model_info().get_params().ignored_cols).length != 0
|| Utils.difference(previous.model_info().get_params().ignored_cols, ignored_cols).length != 0) {
ignored_cols = previous.model_info().get_params().ignored_cols;
Log.warn("Automatically re-using ignored_cols from the checkpointed model.");
}
if ((validation == null) == (previous._validationKey != null)
|| (validation != null && validation._key != null && previous._validationKey != null
&& !Arrays.equals(validation._key._kb, previous._validationKey._kb))) {
throw new IllegalArgumentException("validation must be the same as for the checkpointed model.");
}
if (classification != previous.model_info().get_params().classification) {
Log.warn("Automatically switching to " + ((classification=!classification) ? "classification" : "regression") + " (same as the checkpointed model).");
}
epochs += previous.epoch_counter; //add new epochs to existing model
Log.info("Adding " + String.format("%.3f", previous.epoch_counter) + " epochs from the checkpointed model.");
try {
final DataInfo dataInfo = prepareDataInfo();
cp = new DeepLearningModel(previous, destination_key, job_key, dataInfo);
cp.write_lock(self());
cp.start_training(previous);
assert(state==JobState.RUNNING);
final DeepLearning A = cp.model_info().get_params();
Object B = this;
for (Field fA : A.getClass().getDeclaredFields()) {
if (Utils.contains(cp_modifiable, fA.getName())) {
if (!expert_mode && Utils.contains(expert_options, fA.getName())) continue;
for (Field fB : B.getClass().getDeclaredFields()) {
if (fA.equals(fB)) {
try {
if (fB.get(B) == null || fA.get(A) == null || !fA.get(A).toString().equals(fB.get(B).toString())) { // if either of the two parameters is null, skip the toString()
if (fA.get(A) == null && fB.get(B) == null) continue; //if both parameters are null, we don't need to do anything
Log.info("Applying user-requested modification of '" + fA.getName() + "': " + fA.get(A) + " -> " + fB.get(B));
fA.set(A, fB.get(B));
}
} catch (IllegalAccessException e) {
e.printStackTrace();
}
}
}
}
}
if (A.n_folds != 0) {
Log.warn("Disabling cross-validation: Not supported when resuming training from a checkpoint.");
A.n_folds = 0;
}
cp.update(self());
} finally {
if (cp != null) cp.unlock(self());
}
}
trainModel(cp);
cp.stop_training();
}
/**
* Redirect to the model page for that model that is trained by this job
* @return Response
*/
@Override protected Response redirect() {
return DeepLearningProgressPage.redirect(this, self(), dest());
}
private boolean _fakejob;
//Sanity check for Deep Learning job parameters
private void checkParams() {
if (source.numCols() <= 1)
throw new IllegalArgumentException("Training data must have at least 2 features (incl. response).");
if (hidden == null) throw new IllegalArgumentException("There must be at least one hidden layer.");
for (int i=0;i<hidden.length;++i) {
if (hidden[i]==0)
throw new IllegalArgumentException("Hidden layer size must be >0.");
}
//Auto-fill defaults
if (hidden_dropout_ratios == null) {
if (activation == Activation.TanhWithDropout || activation == Activation.MaxoutWithDropout || activation == Activation.RectifierWithDropout) {
hidden_dropout_ratios = new double[hidden.length];
if (!quiet_mode) Log.info("Automatically setting all hidden dropout ratios to 0.5.");
Arrays.fill(hidden_dropout_ratios, 0.5);
}
}
else if (hidden_dropout_ratios.length != hidden.length) throw new IllegalArgumentException("Must have " + hidden.length + " hidden layer dropout ratios.");
else if (activation != Activation.TanhWithDropout && activation != Activation.MaxoutWithDropout && activation != Activation.RectifierWithDropout) {
if (!quiet_mode) Log.info("Ignoring hidden_dropout_ratios because a non-Dropout activation function was specified.");
}
if (input_dropout_ratio < 0 || input_dropout_ratio >= 1) {
throw new IllegalArgumentException("Input dropout must be in [0,1).");
}
if (class_sampling_factors != null || !balance_classes) {
if (!quiet_mode) Log.info("Ignoring class_sampling_factors since balance_classes is not enabled.");
}
if (!quiet_mode) {
if (adaptive_rate) {
Log.info("Using automatic learning rate. Ignoring the following input parameters:");
Log.info(" rate, rate_decay, rate_annealing, momentum_start, momentum_ramp, momentum_stable, nesterov_accelerated_gradient.");
} else {
Log.info("Using manual learning rate. Ignoring the following input parameters:");
Log.info(" rho, epsilon.");
}
if (initial_weight_distribution == InitialWeightDistribution.UniformAdaptive) {
Log.info("Ignoring initial_weight_scale for UniformAdaptive weight distribution.");
}
if (n_folds != 0) {
if (override_with_best_model) {
Log.info("Automatically setting override_with_best_model to false, since the final model is the only scored model with n-fold cross-validation.");
override_with_best_model = false;
}
}
}
if(loss == Loss.Automatic) {
if (!classification) {
if (!quiet_mode) Log.info("Automatically setting loss to MeanSquare for regression.");
loss = Loss.MeanSquare;
}
else if (autoencoder) {
if (!quiet_mode) Log.info("Automatically setting loss to MeanSquare for auto-encoder.");
loss = Loss.MeanSquare;
}
else {
if (!quiet_mode) Log.info("Automatically setting loss to Cross-Entropy for classification.");
loss = Loss.CrossEntropy;
}
}
if(autoencoder && sparsity_beta > 0) {
if (activation == Activation.Tanh || activation == Activation.TanhWithDropout) {
if (average_activation >= 1 || average_activation <= -1)
throw new IllegalArgumentException("Tanh average activation must be in (-1,1).");
}
else if (activation == Activation.Rectifier || activation == Activation.RectifierWithDropout) {
if (average_activation <= 0)
throw new IllegalArgumentException("Rectifier average activation must be positive.");
}
}
if (!classification && loss == Loss.CrossEntropy) throw new IllegalArgumentException("Cannot use CrossEntropy loss function for regression.");
if (autoencoder && loss != Loss.MeanSquare) throw new IllegalArgumentException("Must use MeanSquare loss function for auto-encoder.");
if (autoencoder && classification) { classification = false; Log.info("Using regression mode for auto-encoder.");}
// reason for the error message below is that validation might not have the same horizontalized features as the training data (or different order)
if (autoencoder && validation != null) throw new UnsupportedOperationException("Cannot specify a validation dataset for auto-encoder.");
if (autoencoder && activation == Activation.Maxout) throw new UnsupportedOperationException("Maxout activation is not supported for auto-encoder.");
if (max_categorical_features < 1) throw new IllegalArgumentException("max_categorical_features must be at least " + 1);
// make default job_key and destination_key in case they are missing
if (dest() == null) {
destination_key = Key.make();
}
if (self() == null) {
job_key = Key.make();
}
if (UKV.get(self()) == null) {
start_time = System.currentTimeMillis();
state = JobState.RUNNING;
UKV.put(self(), this);
_fakejob = true;
}
if (!sparse && col_major) {
if (!quiet_mode) throw new IllegalArgumentException("Cannot use column major storage for non-sparse data handling.");
}
if (reproducible) {
if (!quiet_mode)
Log.info("Automatically enabling force_load_balancing, disabling single_node_mode and replicate_training_data\nand setting train_samples_per_iteration to -1 to enforce reproducibility.");
force_load_balance = true;
single_node_mode = false;
train_samples_per_iteration = -1;
replicate_training_data = false; //there's no benefit from having multiple nodes compute the exact same thing and then average it back to the same result
// replicate_training_data = true; //doesn't hurt, but replicates identical work
}
}
/**
* Helper to create a DataInfo object from the source and response
* @return DataInfo object
*/
private DataInfo prepareDataInfo() {
final boolean del_enum_resp = classification && !response.isEnum();
final Frame train = FrameTask.DataInfo.prepareFrame(source, autoencoder ? null : response, ignored_cols, classification, ignore_const_cols, true /*drop >20% NA cols*/);
final DataInfo dinfo = new FrameTask.DataInfo(train, autoencoder ? 0 : 1, true, autoencoder || use_all_factor_levels, //use all FactorLevels for auto-encoder
autoencoder ? DataInfo.TransformType.NORMALIZE : DataInfo.TransformType.STANDARDIZE, //transform predictors
classification ? DataInfo.TransformType.NONE : DataInfo.TransformType.STANDARDIZE); //transform response
if (!autoencoder) {
final Vec resp = dinfo._adaptedFrame.lastVec(); //convention from DataInfo: response is the last Vec
assert (!classification ^ resp.isEnum()) : "Must have enum response for classification!"; //either regression or enum response
if (del_enum_resp) ltrash(resp);
}
return dinfo;
}
/**
* Create an initial Deep Learning model, typically to be trained by trainModel(model)
* @return Randomly initialized model
*/
public final DeepLearningModel initModel() {
try {
lock_data();
checkParams();
final DataInfo dinfo = prepareDataInfo();
final Vec resp = dinfo._adaptedFrame.lastVec(); //convention from DataInfo: response is the last Vec
float[] priorDist = classification ? new MRUtils.ClassDist(resp).doAll(resp).rel_dist() : null;
final DeepLearningModel model = new DeepLearningModel(dest(), self(), source._key, dinfo, (DeepLearning)this.clone(), priorDist);
model.model_info().initializeMembers();
return model;
}
finally {
unlock_data();
}
}
/**
* Helper to update a Frame while adding the new Frame to the local trash at the same time
* @param target Frame reference, to be overwritten
* @param src Newly made frame, to be deleted via local trash
* @return src
*/
Frame updateFrame(Frame target, Frame src) {
if (src != target) ltrash(src);
return src;
}
/**
* Train a Deep Learning neural net model
* @param model Input model (e.g., from initModel(), or from a previous training run)
* @return Trained model
*/
public final DeepLearningModel trainModel(DeepLearningModel model) {
Frame validScoreFrame = null;
Frame train, trainScoreFrame;
try {
lock_data();
if (checkpoint == null && !quiet_mode) logStart(); //if checkpoint is given, some Job's params might be uninitialized (but the restarted model's parameters are correct)
if (model == null) {
model = UKV.get(dest());
}
model.write_lock(self());
final DeepLearning mp = model.model_info().get_params(); //use the model's parameters for everything below - NOT the job's parameters (can be different after checkpoint restart)
prepareValidationWithModel(model);
final long model_size = model.model_info().size();
if (!quiet_mode) Log.info("Number of model parameters (weights/biases): " + String.format("%,d", model_size));
train = model.model_info().data_info()._adaptedFrame;
if (mp.force_load_balance) train = updateFrame(train, reBalance(train, mp.replicate_training_data));
if (mp.classification && mp.balance_classes) {
float[] trainSamplingFactors = new float[train.lastVec().domain().length]; //leave initialized to 0 -> will be filled up below
if (class_sampling_factors != null) {
if (class_sampling_factors.length != train.lastVec().domain().length)
throw new IllegalArgumentException("class_sampling_factors must have " + train.lastVec().domain().length + " elements");
trainSamplingFactors = class_sampling_factors.clone(); //clone: don't modify the original
}
train = updateFrame(train, sampleFrameStratified(
train, train.lastVec(), trainSamplingFactors, (long)(mp.max_after_balance_size*train.numRows()), mp.seed, true, false));
model.setModelClassDistribution(new MRUtils.ClassDist(train.lastVec()).doAll(train.lastVec()).rel_dist());
}
model.training_rows = train.numRows();
trainScoreFrame = updateFrame(train, sampleFrame(train, mp.score_training_samples, mp.seed)); //training scoring dataset is always sampled uniformly from the training dataset
if (!quiet_mode) Log.info("Number of chunks of the training data: " + train.anyVec().nChunks());
if (validation != null) {
model.validation_rows = validation.numRows();
Frame adaptedValid = getValidation();
if (getValidAdaptor().needsAdaptation2CM()) {
adaptedValid.add(getValidAdaptor().adaptedValidationResponse(_responseName), getValidAdaptor().getAdaptedValidationResponse2CM());
}
// validation scoring dataset can be sampled in multiple ways from the given validation dataset
if (mp.classification && mp.balance_classes && mp.score_validation_sampling == ClassSamplingMethod.Stratified) {
validScoreFrame = updateFrame(adaptedValid, sampleFrameStratified(adaptedValid, adaptedValid.lastVec(), null,
mp.score_validation_samples > 0 ? mp.score_validation_samples : adaptedValid.numRows(), mp.seed+1, false /* no oversampling */, false));
} else {
validScoreFrame = updateFrame(adaptedValid, sampleFrame(adaptedValid, mp.score_validation_samples, mp.seed+1));
}
if (mp.force_load_balance) validScoreFrame = updateFrame(validScoreFrame, reBalance(validScoreFrame, false /*always split up globally since scoring should be distributed*/));
if (!quiet_mode) Log.info("Number of chunks of the validation data: " + validScoreFrame.anyVec().nChunks());
}
// Set train_samples_per_iteration size (cannot be done earlier since this depends on whether stratified sampling is done)
model.actual_train_samples_per_iteration = computeTrainSamplesPerIteration(mp, train.numRows(), model);
// Determine whether shuffling is enforced
if(mp.replicate_training_data && (model.actual_train_samples_per_iteration == train.numRows()*(mp.single_node_mode?1:H2O.CLOUD.size())) && !mp.shuffle_training_data && H2O.CLOUD.size() > 1 && !mp.reproducible) {
Log.warn("Enabling training data shuffling, because all nodes train on the full dataset (replicated training data).");
mp.shuffle_training_data = true;
}
model._timeLastScoreEnter = System.currentTimeMillis(); //to keep track of time per iteration, must be called before first call to doScoring
if (!mp.quiet_mode) Log.info("Initial model:\n" + model.model_info());
if (autoencoder) model.doScoring(train, trainScoreFrame, validScoreFrame, self(), getValidAdaptor()); //get the null model reconstruction error
// put the initial version of the model into DKV
model.update(self());
Log.info("Starting to train the Deep Learning model.");
//main loop
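// Each pass of the loop below trains on a fraction of the rows (rowFraction) and takes one of three
// execution paths: replicated data + single-node mode (DeepLearningTask2 invoked on one node),
// replicated data + multi-node mode (DeepLearningTask2 invoked on all nodes), or distributed data
// (DeepLearningTask map/reduce over the training frame). doScoring() then reports progress and
// returns whether training should continue.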
do model.set_model_info(H2O.CLOUD.size() > 1 && mp.replicate_training_data ? ( mp.single_node_mode ?
new DeepLearningTask2(train, model.model_info(), rowFraction(train, mp, model)).invoke(Key.make()).model_info() : //replicated data + single node mode
new DeepLearningTask2(train, model.model_info(), rowFraction(train, mp, model)).invokeOnAllNodes().model_info() ) : //replicated data + multi-node mode
new DeepLearningTask(model.model_info(), rowFraction(train, mp, model)).doAll(train).model_info()); //distributed data (always in multi-node mode)
while (model.doScoring(train, trainScoreFrame, validScoreFrame, self(), getValidAdaptor()));
// replace the model with the best model so far (if it's better)
if (!isCancelledOrCrashed() && override_with_best_model && model.actual_best_model_key != null && n_folds == 0) {
DeepLearningModel best_model = UKV.get(model.actual_best_model_key);
if (best_model != null && best_model.error() < model.error() && Arrays.equals(best_model.model_info().units, model.model_info().units)) {
Log.info("Setting the model to be the best model so far (based on scoring history).");
DeepLearningModel.DeepLearningModelInfo mi = best_model.model_info().deep_clone();
// Don't cheat - count the full number of training samples, since that's how much training it took to get here (without finding anything better)
mi.set_processed_global(model.model_info().get_processed_global());
mi.set_processed_local(model.model_info().get_processed_local());
model.set_model_info(mi);
model.update(self());
model.doScoring(train, trainScoreFrame, validScoreFrame, self(), getValidAdaptor());
assert(best_model.error() == model.error());
}
}
Log.info(model);
Log.info("Finished training the Deep Learning model.");
return model;
}
catch(JobCancelledException ex) {
model = UKV.get(dest());
state = JobState.CANCELLED; //for JSON REST response
model.get_params().state = state; //for parameter JSON on the HTML page
Log.info("Deep Learning model building was cancelled.");
return model;
}
catch(Throwable t) {
t.printStackTrace();
model = UKV.get(dest());
state = JobState.FAILED; //for JSON REST response
if (model != null) {
model.get_params().state = state; //for parameter JSON on the HTML page
Log.info("Deep Learning model building failed.");
}
return model;
}
finally {
if (model != null && DKV.get(model._key) != null) model.unlock(self());
unlock_data();
}
}
/**
* Lock the input datasets against deletes
*/
private void lock_data() {
source.read_lock(self());
if( validation != null && source._key != null && validation._key !=null && !source._key.equals(validation._key) )
validation.read_lock(self());
}
/**
* Release the lock for the input datasets
*/
private void unlock_data() {
source.unlock(self());
if( validation != null && source._key != null && validation._key != null && !source._key.equals(validation._key) )
validation.unlock(self());
}
/**
* Delete job related keys
*/
public void delete() {
cleanup();
if (_fakejob) UKV.remove(job_key);
remove();
}
/**
* Rebalance a frame for load balancing
* @param fr Input frame
* @param local whether to create only enough chunks to max out all cores on a single node
* @return Frame that has potentially more chunks
*/
private Frame reBalance(final Frame fr, boolean local) {
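// Illustrative sizing (assumed hardware): with 8 CPUs per node and a 4-node cloud, a non-local
// rebalance targets min(4*8*4, numRows) = 128 chunks, so every core has work during map/reduce.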
int chunks = (int)Math.min( 4 * H2O.NUMCPUS * (local ? 1 : H2O.CLOUD.size()), fr.numRows());
if (fr.anyVec().nChunks() > chunks && !reproducible) {
Log.info("Dataset already contains " + fr.anyVec().nChunks() + " chunks. No need to rebalance.");
return fr;
} else if (reproducible) {
Log.warn("Reproducibility enforced - using only 1 thread - can be slow.");
chunks = 1;
}
if (!quiet_mode) Log.info("ReBalancing dataset into (at least) " + chunks + " chunks.");
// return MRUtils.shuffleAndBalance(fr, chunks, seed, local, shuffle_training_data);
String snewKey = fr._key != null ? (fr._key.toString() + ".balanced") : Key.rand();
Key newKey = Key.makeSystem(snewKey);
RebalanceDataSet rb = new RebalanceDataSet(fr, newKey, chunks);
H2O.submitTask(rb);
rb.join();
return UKV.get(newKey);
}
/**
* Compute the actual train_samples_per_iteration size from the user-given parameter
* @param mp Model parameter (DeepLearning object)
* @param numRows number of training rows
* @param model DL Model
* @return The total number of training rows to be processed per iteration (summed over all nodes)
*/
private static long computeTrainSamplesPerIteration(final DeepLearning mp, final long numRows, DeepLearningModel model) {
long tspi = mp.train_samples_per_iteration;
assert(tspi == 0 || tspi == -1 || tspi == -2 || tspi >= 1);
if (tspi == 0 || (!mp.replicate_training_data && tspi == -1) ) {
tspi = numRows;
if (!mp.quiet_mode) Log.info("Setting train_samples_per_iteration (" + mp.train_samples_per_iteration + ") to one epoch: #rows (" + tspi + ").");
}
else if (tspi == -1) {
tspi = (mp.single_node_mode ? 1 : H2O.CLOUD.size()) * numRows;
if (!mp.quiet_mode) Log.info("Setting train_samples_per_iteration (" + mp.train_samples_per_iteration + ") to #nodes x #rows (" + tspi + ").");
} else if (tspi == -2) {
// automatic tuning based on CPU speed, network speed and model size
// measure cpu speed
double total_gflops = 0;
for (H2ONode h2o : H2O.CLOUD._memary) {
HeartBeat hb = h2o._heartbeat;
total_gflops += hb._gflops;
}
if (mp.single_node_mode) total_gflops /= H2O.CLOUD.size();
if (total_gflops == 0) {
total_gflops = Linpack.run(H2O.SELF._heartbeat._cpus_allowed) * (mp.single_node_mode ? 1 : H2O.CLOUD.size());
}
final long model_size = model.model_info().size();
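// One collective message carries the whole model: model_size floats = 4*model_size bytes;
// if that would overflow an int, fall back to Integer.MAX_VALUE as the test message size.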
int[] msg_sizes = new int[]{ (int)(model_size*4) == (model_size*4) ? (int)(model_size*4) : Integer.MAX_VALUE };
double[] microseconds_collective = new double[msg_sizes.length];
NetworkTest.NetworkTester nt = new NetworkTest.NetworkTester(msg_sizes,null,microseconds_collective,model_size>1e6 ? 1 : 5 /*repeats*/,false,true /*only collectives*/);
nt.compute2();
//length of the network traffic queue based on log-tree rollup (2 log(nodes))
int network_queue_length = mp.single_node_mode || H2O.CLOUD.size() == 1? 1 : 2*(int)Math.floor(Math.log(H2O.CLOUD.size())/Math.log(2));
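// e.g. (illustrative): an 8-node cloud gives 2*floor(log2(8)) = 6 messages in flight per rollup.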
// heuristics
double flops_overhead_per_row = 30;
if (mp.activation == Activation.Maxout || mp.activation == Activation.MaxoutWithDropout) {
flops_overhead_per_row *= 8;
} else if (mp.activation == Activation.Tanh || mp.activation == Activation.TanhWithDropout) {
flops_overhead_per_row *= 5;
}
// target fraction of comm vs cpu time: 5%
double fraction = mp.single_node_mode || H2O.CLOUD.size() == 1 ? 1e-3 : 0.05; //in single node mode there's no model averaging effect, so there is less need to shorten the M/R iteration
// estimate the time for communication (network) and training (compute)
model.time_for_communication_us = (H2O.CLOUD.size() == 1 ? 1e4 /* add 10ms for single-node */ : 0) + network_queue_length * microseconds_collective[0];
double time_per_row_us = flops_overhead_per_row * model_size / (total_gflops * 1e9) / H2O.SELF._heartbeat._cpus_allowed * 1e6;
// compute the optimal number of training rows per iteration
// fraction := time_comm_us / (time_comm_us + tspi * time_per_row_us) ==> tspi = (time_comm_us/fraction - time_comm_us)/time_per_row_us
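// Worked example (illustrative numbers only): with time_comm_us = 20,000, fraction = 0.05 and
// time_per_row_us = 10, tspi = (20,000/0.05 - 20,000)/10 = 38,000 training rows per iteration.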
tspi = (long)((model.time_for_communication_us / fraction - model.time_for_communication_us)/ time_per_row_us);
tspi = Math.min(tspi, (mp.single_node_mode ? 1 : H2O.CLOUD.size()) * numRows * 10); //not more than 10x of what train_samples_per_iteration=-1 would do
// If the number is within ~20% of a whole multiple of the training set size (i.e., whole epochs), round down to that -> prettier scoring
if (tspi > numRows && Math.abs(tspi % numRows)/(double)numRows < 0.2) tspi = tspi - tspi % numRows;
tspi = Math.min(tspi, (long)(mp.epochs * numRows / 10)); //limit to number of epochs desired, but at least 10 iterations total
tspi = Math.max(1, tspi); //at least 1 point
if (!mp.quiet_mode) {
Log.info("Auto-tuning parameter 'train_samples_per_iteration':");
Log.info("Estimated compute power : " + (int)total_gflops + " GFlops");
Log.info("Estimated time for comm : " + PrettyPrint.usecs((long)model.time_for_communication_us));
Log.info("Estimated time per row : " + ((long)time_per_row_us > 0 ? PrettyPrint.usecs((long)time_per_row_us) : time_per_row_us + " usecs"));
Log.info("Estimated training speed: " + (int)(1e6/time_per_row_us) + " rows/sec");
Log.info("Setting train_samples_per_iteration (" + mp.train_samples_per_iteration + ") to auto-tuned value: " + tspi);
}
} else {
// limit user-given value to number of epochs desired
tspi = Math.min(tspi, (long)(mp.epochs * numRows));
}
assert(tspi != 0 && tspi != -1 && tspi != -2 && tspi >= 1);
return tspi;
}
/**
* Compute the fraction of rows that need to be used for training during one iteration
* @param numRows number of training rows
* @param train_samples_per_iteration number of training rows to be processed per iteration
* @param replicate_training_data whether or not the training data is replicated on each node
* @return fraction of rows to be used for training during one iteration
*/
private static float computeRowUsageFraction(final long numRows, final long train_samples_per_iteration, final boolean replicate_training_data) {
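// Illustrative example: numRows = 1,000,000 and train_samples_per_iteration = 2,000,000 give a raw
// fraction of 2.0; with replicated data on a 4-node cloud this becomes 0.5, i.e. each node trains
// on half of its local copy per iteration.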
float rowUsageFraction = (float)train_samples_per_iteration / numRows;
if (replicate_training_data) rowUsageFraction /= H2O.CLOUD.size();
assert(rowUsageFraction > 0);
return rowUsageFraction;
}
private static float rowFraction(Frame train, DeepLearning p, DeepLearningModel m) {
return computeRowUsageFraction(train.numRows(), m.actual_train_samples_per_iteration, p.replicate_training_data);
}
/**
* Cross-Validate a DeepLearning model by building new models on N train/test holdout splits
* @param splits Frames containing train/test splits
* @param cv_preds Array of Frames to store the predictions for each cross-validation run
* @param offsets Array to store the offsets of starting row indices for each cross-validation run
* @param i Which fold of cross-validation to perform
*/
@Override public void crossValidate(Frame[] splits, Frame[] cv_preds, long[] offsets, int i) {
// Train a clone with slightly modified parameters (to account for cross-validation)
final DeepLearning cv = (DeepLearning) this.clone();
cv.genericCrossValidation(splits, offsets, i);
cv_preds[i] = ((DeepLearningModel) UKV.get(cv.dest())).score(cv.validation);
new TAtomic<DeepLearningModel>() {
@Override public DeepLearningModel atomic(DeepLearningModel m) {
if (!keep_cross_validation_splits && /*paranoid*/cv.dest().toString().contains("xval")) {
m.get_params().source = null;
m.get_params().validation=null;
m.get_params().response=null;
}
return m;
}
}.invoke(cv.dest());
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/deeplearning/DeepLearningModel.java
|
package hex.deeplearning;
import static java.lang.Double.isNaN;
import hex.FrameTask.DataInfo;
import hex.VarImp;
import water.*;
import water.api.*;
import water.api.Request.API;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.*;
import java.util.Arrays;
import java.util.Random;
/**
* The Deep Learning model
* It contains a DeepLearningModelInfo with the most up-to-date model,
* a scoring history, as well as some helpers to indicate the progress
*/
public class DeepLearningModel extends Model implements Comparable<DeepLearningModel> {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="Model info", json = true)
private volatile DeepLearningModelInfo model_info;
void set_model_info(DeepLearningModelInfo mi) { model_info = mi; }
final public DeepLearningModelInfo model_info() { return model_info; }
@API(help="Job that built the model", json = true)
final private Key jobKey;
@API(help="Validation dataset used for model building", json = true)
public final Key _validationKey;
@API(help="Time to build the model", json = true)
private long run_time;
final private long start_time;
public long actual_train_samples_per_iteration;
public double time_for_communication_us; //helper for auto-tuning: time in microseconds for collective bcast/reduce of the model
@API(help="Number of training epochs", json = true)
public double epoch_counter;
@API(help="Number of rows in training data", json = true)
public long training_rows;
@API(help="Number of rows in validation data", json = true)
public long validation_rows;
@API(help = "Scoring during model building")
private Errors[] errors;
public Errors[] scoring_history() { return errors; }
// Keep the best model so far, based on a single criterion (overall class. error or MSE)
private float _bestError = Float.MAX_VALUE;
@API(help = "Key to the best model so far (based on overall error on scoring data set)")
public Key actual_best_model_key;
// return the most up-to-date model metrics
Errors last_scored() { return errors == null ? null : errors[errors.length-1]; }
@Override public final DeepLearning get_params() { return model_info.get_params(); }
@Override public final Request2 job() { return model_info.get_job(); }
@Override protected double missingColumnsType() { return get_params().sparse ? 0 : Double.NaN; }
public float error() { return (float) (isClassifier() ? cm().err() : mse()); }
@Override public boolean isClassifier() { return super.isClassifier() && !model_info.get_params().autoencoder; }
@Override public int nfeatures() { return model_info.get_params().autoencoder ? _names.length : _names.length - 1; }
public int compareTo(DeepLearningModel o) {
if (o.isClassifier() != isClassifier()) throw new UnsupportedOperationException("Cannot compare classifier against regressor.");
if (o.nclasses() != nclasses()) throw new UnsupportedOperationException("Cannot compare models with different number of classes.");
return (error() < o.error() ? -1 : error() > o.error() ? 1 : 0);
}
public static class Errors extends Iced {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
@API(help = "How many epochs the algorithm has processed")
public double epoch_counter;
@API(help = "How many rows the algorithm has processed")
public long training_samples;
@API(help = "How long the algorithm ran in ms")
public long training_time_ms;
//training/validation sets
@API(help = "Whether a validation set was provided")
boolean validation;
@API(help = "Number of folds for cross-validation (for validation=false)")
int num_folds;
@API(help = "Number of training set samples for scoring")
public long score_training_samples;
@API(help = "Number of validation set samples for scoring")
public long score_validation_samples;
@API(help="Do classification or regression")
public boolean classification;
@API(help = "Variable importances")
VarImp variable_importances;
// classification
@API(help = "Confusion matrix on training data")
public water.api.ConfusionMatrix train_confusion_matrix;
@API(help = "Confusion matrix on validation data")
public water.api.ConfusionMatrix valid_confusion_matrix;
@API(help = "Classification error on training data")
public double train_err = 1;
@API(help = "Classification error on validation data")
public double valid_err = 1;
@API(help = "AUC on training data")
public AUCData trainAUC;
@API(help = "AUC on validation data")
public AUCData validAUC;
@API(help = "Hit ratio on training data")
public water.api.HitRatio train_hitratio;
@API(help = "Hit ratio on validation data")
public water.api.HitRatio valid_hitratio;
// regression
@API(help = "Training MSE")
public double train_mse = Double.POSITIVE_INFINITY;
@API(help = "Validation MSE")
public double valid_mse = Double.POSITIVE_INFINITY;
@API(help = "Time taken for scoring")
public long scoring_time;
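// deep_clone: round-trip through an AutoBuffer (write, then read back) to obtain a fully
// independent copy of this Errors object.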
Errors deep_clone() {
AutoBuffer ab = new AutoBuffer();
this.write(ab);
ab.flipForReading();
return new Errors().read(ab);
}
@Override public String toString() {
StringBuilder sb = new StringBuilder();
if (classification) {
sb.append("Error on training data (misclassification)"
+ (trainAUC != null ? " [using threshold for " + trainAUC.threshold_criterion.toString().replace("_"," ") +"]: ": ": ")
+ String.format("%.2f", 100*train_err) + "%");
if (trainAUC != null) sb.append(", AUC on training data: " + String.format("%.4f", 100*trainAUC.AUC) + "%");
if (validation || num_folds>0)
sb.append("\nError on " + (num_folds>0 ? num_folds + "-fold cross-":"")+ "validation data (misclassification)"
+ (validAUC != null ? " [using threshold for " + validAUC.threshold_criterion.toString().replace("_"," ") +"]: ": ": ")
+ String.format("%.2f", (100*valid_err)) + "%");
if (validAUC != null) sb.append(", AUC on validation data: " + String.format("%.4f", 100*validAUC.AUC) + "%");
} else if (!Double.isInfinite(train_mse)) {
sb.append("Error on training data (MSE): " + train_mse);
if (validation || num_folds>0)
sb.append("\nError on "+ (num_folds>0 ? num_folds + "-fold cross-":"")+ "validation data (MSE): " + valid_mse);
}
return sb.toString();
}
}
final private static class ConfMat extends hex.ConfusionMatrix {
final private double _err;
final private double _f1;
public ConfMat(double err, double f1) {
super(null);
_err=err;
_f1=f1;
}
@Override public double err() { return _err; }
@Override public double F1() { return _f1; }
@Override public double[] classErr() { return null; }
}
/** for grid search error reporting */
@Override
public hex.ConfusionMatrix cm() {
final Errors lasterror = last_scored();
if (lasterror == null) return null;
water.api.ConfusionMatrix cm = lasterror.validation || lasterror.num_folds > 0 ?
lasterror.valid_confusion_matrix :
lasterror.train_confusion_matrix;
if (cm == null || cm.cm == null) {
if (lasterror.validation || lasterror.num_folds > 0) {
return new ConfMat(lasterror.valid_err, lasterror.validAUC != null ? lasterror.validAUC.F1() : 0);
} else {
return new ConfMat(lasterror.train_err, lasterror.trainAUC != null ? lasterror.trainAUC.F1() : 0);
}
}
// cm.cm has NaN padding, reduce it to N-1 size
return new hex.ConfusionMatrix(cm.cm, cm.cm.length-1);
}
@Override
public double mse() {
if (errors == null) return super.mse();
return last_scored().validation || last_scored().num_folds > 0 ? last_scored().valid_mse : last_scored().train_mse;
}
@Override
public VarImp varimp() {
if (errors == null) return null;
return last_scored().variable_importances;
}
// This describes the model, together with the parameters
// This will be shared: one per node
public static class DeepLearningModelInfo extends Iced {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="Input data info")
private DataInfo data_info;
public DataInfo data_info() { return data_info; }
// model is described by parameters and the following arrays
private Neurons.DenseRowMatrix[] dense_row_weights; //one 2D weight matrix per layer (stored as a 1D array each)
private Neurons.DenseColMatrix[] dense_col_weights; //one 2D weight matrix per layer (stored as a 1D array each)
private Neurons.DenseVector[] biases; //one 1D bias array per layer
private Neurons.DenseVector[] avg_activations; //one 1D array per hidden layer
// helpers for storing previous step deltas
// Note: These two arrays *could* be made transient and then initialized freshly in makeNeurons() and in DeepLearningTask.initLocal()
// But then, after each reduction, the momenta would be lost and training would have to rebuild them afresh -> not *exactly* right, but close...
private Neurons.DenseRowMatrix[] dense_row_weights_momenta;
private Neurons.DenseColMatrix[] dense_col_weights_momenta;
private Neurons.DenseVector[] biases_momenta;
// helpers for AdaDelta
private Neurons.DenseRowMatrix[] dense_row_ada_dx_g;
private Neurons.DenseColMatrix[] dense_col_ada_dx_g;
private Neurons.DenseVector[] biases_ada_dx_g;
// compute model size (number of model parameters required for making predictions)
// momenta are not counted here, but they are needed for model building
public long size() {
long siz = 0;
for (Neurons.Matrix w : dense_row_weights) if (w != null) siz += w.size();
for (Neurons.Matrix w : dense_col_weights) if (w != null) siz += w.size();
for (Neurons.Vector b : biases) siz += b.size();
return siz;
}
// accessors to (shared) weights and biases - those will be updated racily (c.f. Hogwild!)
boolean has_momenta() { return get_params().momentum_start != 0 || get_params().momentum_stable != 0; }
boolean adaDelta() { return get_params().adaptive_rate; }
public final Neurons.Matrix get_weights(int i) { return dense_row_weights[i] == null ? dense_col_weights[i] : dense_row_weights[i]; }
public final Neurons.DenseVector get_biases(int i) { return biases[i]; }
public final Neurons.Matrix get_weights_momenta(int i) { return dense_row_weights_momenta[i] == null ? dense_col_weights_momenta[i] : dense_row_weights_momenta[i]; }
public final Neurons.DenseVector get_biases_momenta(int i) { return biases_momenta[i]; }
public final Neurons.Matrix get_ada_dx_g(int i) { return dense_row_ada_dx_g[i] == null ? dense_col_ada_dx_g[i] : dense_row_ada_dx_g[i]; }
public final Neurons.DenseVector get_biases_ada_dx_g(int i) { return biases_ada_dx_g[i]; }
//accessor to shared parameter defining avg activations
public final Neurons.DenseVector get_avg_activations(int i) { return avg_activations[i]; }
@API(help = "Model parameters", json = true)
private Request2 job;
public final DeepLearning get_params() { return (DeepLearning)job; }
public final Request2 get_job() { return job; }
@API(help = "Mean rate", json = true)
private float[] mean_rate;
@API(help = "RMS rate", json = true)
private float[] rms_rate;
@API(help = "Mean bias", json = true)
private float[] mean_bias;
@API(help = "RMS bias", json = true)
private float[] rms_bias;
@API(help = "Mean weight", json = true)
private float[] mean_weight;
@API(help = "RMS weight", json = true)
public float[] rms_weight;
@API(help = "Mean Activation", json = true)
public float[] mean_a;
@API(help = "Unstable", json = true)
private volatile boolean unstable = false;
public boolean unstable() { return unstable; }
public void set_unstable() { if (!unstable) computeStats(); unstable = true; }
@API(help = "Processed samples", json = true)
private long processed_global;
public synchronized long get_processed_global() { return processed_global; }
public synchronized void set_processed_global(long p) { processed_global = p; }
public synchronized void add_processed_global(long p) { processed_global += p; }
private long processed_local;
public synchronized long get_processed_local() { return processed_local; }
public synchronized void set_processed_local(long p) { processed_local = p; }
public synchronized void add_processed_local(long p) { processed_local += p; }
public synchronized long get_processed_total() { return processed_global + processed_local; }
// package local helpers
int[] units; //number of neurons per layer, extracted from parameters and from datainfo
public DeepLearningModelInfo() {}
public DeepLearningModelInfo(final Job job, final DataInfo dinfo) {
this.job = job;
data_info = dinfo;
final int num_input = dinfo.fullN();
final int num_output = get_params().autoencoder ? num_input : get_params().classification ? dinfo._adaptedFrame.domains()[dinfo._adaptedFrame.domains().length-1].length : 1;
assert(num_input > 0);
assert(num_output > 0);
if (has_momenta() && adaDelta()) throw new IllegalArgumentException("Cannot have non-zero momentum and adaptive rate at the same time.");
final int layers=get_params().hidden.length;
// units (# neurons for each layer)
units = new int[layers+2];
if (get_params().max_categorical_features <= Integer.MAX_VALUE - dinfo._nums)
units[0] = Math.min(dinfo._nums + get_params().max_categorical_features, num_input);
else
units[0] = num_input;
System.arraycopy(get_params().hidden, 0, units, 1, layers);
units[layers+1] = num_output;
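// Resulting layout: units = { #inputs, hidden[0], ..., hidden[last], #outputs }; e.g. (illustrative)
// 10 input columns, hidden = {200, 200} and 3 classes give units = {10, 200, 200, 3}, assuming
// max_categorical_features does not cap the input layer.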
// weights (to connect layers)
dense_row_weights = new Neurons.DenseRowMatrix[layers+1];
dense_col_weights = new Neurons.DenseColMatrix[layers+1];
// decide format of weight matrices row-major or col-major
if (get_params().col_major) dense_col_weights[0] = new Neurons.DenseColMatrix(units[1], units[0]);
else dense_row_weights[0] = new Neurons.DenseRowMatrix(units[1], units[0]);
for (int i = 1; i <= layers; ++i)
dense_row_weights[i] = new Neurons.DenseRowMatrix(units[i + 1] /*rows*/, units[i] /*cols*/);
// biases (only for hidden layers and output layer)
biases = new Neurons.DenseVector[layers+1];
for (int i=0; i<=layers; ++i) biases[i] = new Neurons.DenseVector(units[i+1]);
// average activation (only for hidden layers)
if (get_params().autoencoder && get_params().sparsity_beta > 0) {
avg_activations = new Neurons.DenseVector[layers];
mean_a = new float[layers];
for (int i = 0; i < layers; ++i) avg_activations[i] = new Neurons.DenseVector(units[i + 1]);
}
fillHelpers();
// for diagnostics
mean_rate = new float[units.length];
rms_rate = new float[units.length];
mean_bias = new float[units.length];
rms_bias = new float[units.length];
mean_weight = new float[units.length];
rms_weight = new float[units.length];
}
// deep clone all weights/biases
DeepLearningModelInfo deep_clone() {
AutoBuffer ab = new AutoBuffer();
this.write(ab);
ab.flipForReading();
return new DeepLearningModelInfo().read(ab);
}
void fillHelpers() {
if (has_momenta()) {
dense_row_weights_momenta = new Neurons.DenseRowMatrix[dense_row_weights.length];
dense_col_weights_momenta = new Neurons.DenseColMatrix[dense_col_weights.length];
if (dense_row_weights[0] != null)
dense_row_weights_momenta[0] = new Neurons.DenseRowMatrix(units[1], units[0]);
else
dense_col_weights_momenta[0] = new Neurons.DenseColMatrix(units[1], units[0]);
for (int i=1; i<dense_row_weights_momenta.length; ++i) dense_row_weights_momenta[i] = new Neurons.DenseRowMatrix(units[i+1], units[i]);
biases_momenta = new Neurons.DenseVector[biases.length];
for (int i=0; i<biases_momenta.length; ++i) biases_momenta[i] = new Neurons.DenseVector(units[i+1]);
}
else if (adaDelta()) {
dense_row_ada_dx_g = new Neurons.DenseRowMatrix[dense_row_weights.length];
dense_col_ada_dx_g = new Neurons.DenseColMatrix[dense_col_weights.length];
//ADADELTA accumulators: 2 entries per weight (squared gradient and squared update)
if (dense_row_weights[0] != null) {
dense_row_ada_dx_g[0] = new Neurons.DenseRowMatrix(units[1], 2*units[0]);
} else {
dense_col_ada_dx_g[0] = new Neurons.DenseColMatrix(2*units[1], units[0]);
}
for (int i=1; i<dense_row_ada_dx_g.length; ++i) {
dense_row_ada_dx_g[i] = new Neurons.DenseRowMatrix(units[i+1], 2*units[i]);
}
biases_ada_dx_g = new Neurons.DenseVector[biases.length];
for (int i=0; i<biases_ada_dx_g.length; ++i) {
biases_ada_dx_g[i] = new Neurons.DenseVector(2*units[i+1]);
}
}
}
@Override public String toString() {
StringBuilder sb = new StringBuilder();
if (get_params().diagnostics && !get_params().quiet_mode) {
Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(this);
sb.append("Number of hidden layers is " + get_params().hidden.length + " \n");
if (get_params().sparsity_beta > 0) {
for (int k = 0; k < get_params().hidden.length; k++)
sb.append("Average activation in hidden layer " + k + " is " + mean_a[k] + " \n");
}
sb.append("Status of Neuron Layers:\n");
sb.append("# Units Type Dropout L1 L2 " + (get_params().adaptive_rate ? " Rate (Mean,RMS) " : " Rate Momentum") + " Weight (Mean, RMS) Bias (Mean,RMS)\n");
final String format = "%7g";
for (int i=0; i<neurons.length; ++i) {
sb.append((i+1) + " " + String.format("%6d", neurons[i].units)
+ " " + String.format("%16s", neurons[i].getClass().getSimpleName()));
if (i == 0) {
sb.append(" " + Utils.formatPct(neurons[i].params.input_dropout_ratio) + " \n");
continue;
}
else if (i < neurons.length-1) {
if (neurons[i].params.hidden_dropout_ratios == null)
sb.append(" " + Utils.formatPct(0) + " ");
else
sb.append(" " + Utils.formatPct(neurons[i].params.hidden_dropout_ratios[i - 1]) + " ");
} else {
sb.append(" ");
}
sb.append(
" " + String.format("%5f", neurons[i].params.l1)
+ " " + String.format("%5f", neurons[i].params.l2)
+ " " + (get_params().adaptive_rate ? (" (" + String.format(format, mean_rate[i]) + ", " + String.format(format, rms_rate[i]) + ")" )
: (String.format("%10g", neurons[i].rate(get_processed_total())) + " " + String.format("%5f", neurons[i].momentum(get_processed_total()))))
+ " (" + String.format(format, mean_weight[i])
+ ", " + String.format(format, rms_weight[i]) + ")"
+ " (" + String.format(format, mean_bias[i])
+ ", " + String.format(format, rms_bias[i]) + ")\n");
if (get_params().sparsity_beta > 0) {
// sb.append(" " + String.format(format, mean_a[i]) + " \n");
}
}
}
return sb.toString();
}
// DEBUGGING
public String toStringAll() {
StringBuilder sb = new StringBuilder();
sb.append(toString());
for (int i=0; i<units.length-1; ++i)
sb.append("\nweights["+i+"][]="+Arrays.toString(get_weights(i).raw()));
for (int i=0; i<units.length-1; ++i)
sb.append("\nbiases["+i+"][]="+Arrays.toString(get_biases(i).raw()));
if (has_momenta()) {
for (int i=0; i<units.length-1; ++i)
sb.append("\nweights_momenta["+i+"][]="+Arrays.toString(get_weights_momenta(i).raw()));
}
if (biases_momenta != null) {
for (int i=0; i<units.length-1; ++i)
sb.append("\nbiases_momenta["+i+"][]="+Arrays.toString(biases_momenta[i].raw()));
}
sb.append("\nunits[]="+Arrays.toString(units));
sb.append("\nprocessed global: "+get_processed_global());
sb.append("\nprocessed local: "+get_processed_local());
sb.append("\nprocessed total: " + get_processed_total());
sb.append("\n");
return sb.toString();
}
void initializeMembers() {
randomizeWeights();
//TODO: determine good/optimal/best initialization scheme for biases
// hidden layers
for (int i=0; i<get_params().hidden.length; ++i) {
if (get_params().activation == DeepLearning.Activation.Rectifier
|| get_params().activation == DeepLearning.Activation.RectifierWithDropout
|| get_params().activation == DeepLearning.Activation.Maxout
|| get_params().activation == DeepLearning.Activation.MaxoutWithDropout
) {
// Arrays.fill(biases[i], 1.); //old behavior
Arrays.fill(biases[i].raw(), i == 0 ? 0.5f : 1f); //new behavior, might be slightly better
}
else if (get_params().activation == DeepLearning.Activation.Tanh || get_params().activation == DeepLearning.Activation.TanhWithDropout) {
Arrays.fill(biases[i].raw(), 0f);
}
}
Arrays.fill(biases[biases.length-1].raw(), 0f); //output layer
}
public void add(DeepLearningModelInfo other) {
for (int i=0;i<dense_row_weights.length;++i)
Utils.add(get_weights(i).raw(), other.get_weights(i).raw());
for (int i=0;i<biases.length;++i) Utils.add(biases[i].raw(), other.biases[i].raw());
if (avg_activations != null)
for (int i=0;i<avg_activations.length;++i)
Utils.add(avg_activations[i].raw(), other.avg_activations[i].raw());
if (has_momenta()) {
assert(other.has_momenta());
for (int i=0;i<dense_row_weights_momenta.length;++i)
Utils.add(get_weights_momenta(i).raw(), other.get_weights_momenta(i).raw());
for (int i=0;i<biases_momenta.length;++i)
Utils.add(biases_momenta[i].raw(), other.biases_momenta[i].raw());
}
if (adaDelta()) {
assert(other.adaDelta());
for (int i=0;i<dense_row_ada_dx_g.length;++i) {
Utils.add(get_ada_dx_g(i).raw(), other.get_ada_dx_g(i).raw());
}
}
add_processed_local(other.get_processed_local());
}
protected void div(float N) {
for (int i=0; i<dense_row_weights.length; ++i)
Utils.div(get_weights(i).raw(), N);
for (Neurons.Vector bias : biases) Utils.div(bias.raw(), N);
if (avg_activations != null)
for (Neurons.Vector avgac : avg_activations)
Utils.div(avgac.raw(), N);
if (has_momenta()) {
for (int i=0; i<dense_row_weights_momenta.length; ++i)
Utils.div(get_weights_momenta(i).raw(), N);
for (Neurons.Vector bias_momenta : biases_momenta) Utils.div(bias_momenta.raw(), N);
}
if (adaDelta()) {
for (int i=0;i<dense_row_ada_dx_g.length;++i) {
Utils.div(get_ada_dx_g(i).raw(), N);
}
}
}
double uniformDist(Random rand, double min, double max) {
return min + rand.nextFloat() * (max - min);
}
void randomizeWeights() {
for (int w=0; w<dense_row_weights.length; ++w) {
final Random rng = water.util.Utils.getDeterRNG(get_params().seed + 0xBAD5EED + w+1); //to match NeuralNet behavior
final double range = Math.sqrt(6. / (units[w] + units[w+1]));
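// e.g. (illustrative): connecting a 100-unit layer to a 200-unit layer gives range = sqrt(6/300) ~ 0.14,
// so UniformAdaptive weights start in (-0.14, 0.14).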
for( int i = 0; i < get_weights(w).rows(); i++ ) {
for( int j = 0; j < get_weights(w).cols(); j++ ) {
if (get_params().initial_weight_distribution == DeepLearning.InitialWeightDistribution.UniformAdaptive) {
// cf. http://machinelearning.wustl.edu/mlpapers/paper_files/AISTATS2010_GlorotB10.pdf
if (w==dense_row_weights.length-1 && get_params().classification)
get_weights(w).set(i,j, (float)(4.*uniformDist(rng, -range, range))); //Softmax might need an extra factor 4, since it's like a sigmoid
else
get_weights(w).set(i,j, (float)uniformDist(rng, -range, range));
}
else if (get_params().initial_weight_distribution == DeepLearning.InitialWeightDistribution.Uniform) {
get_weights(w).set(i,j, (float)uniformDist(rng, -get_params().initial_weight_scale, get_params().initial_weight_scale));
}
else if (get_params().initial_weight_distribution == DeepLearning.InitialWeightDistribution.Normal) {
get_weights(w).set(i,j, (float)(rng.nextGaussian() * get_params().initial_weight_scale));
}
}
}
}
}
// TODO: Add "subset randomize" function
// int count = Math.min(15, _previous.units);
// double min = -.1f, max = +.1f;
// //double min = -1f, max = +1f;
// for( int o = 0; o < units; o++ ) {
// for( int n = 0; n < count; n++ ) {
// int i = rand.nextInt(_previous.units);
// int w = o * _previous.units + i;
// _w[w] = uniformDist(rand, min, max);
// }
// }
/**
* Compute Variable Importance, based on
* GEDEON: DATA MINING OF INPUTS: ANALYSING MAGNITUDE AND FUNCTIONAL MEASURES
* @return variable importances for input features
*/
public float[] computeVariableImportances() {
float[] vi = new float[units[0]];
Arrays.fill(vi, 0f);
float[][] Qik = new float[units[0]][units[2]]; //importance of input i on output k
float[] sum_wj = new float[units[1]]; //sum of incoming weights into first hidden layer
float[] sum_wk = new float[units[2]]; //sum of incoming weights into output layer (or second hidden layer)
for (float[] Qi : Qik) Arrays.fill(Qi, 0f);
Arrays.fill(sum_wj, 0f);
Arrays.fill(sum_wk, 0f);
// compute sum of absolute incoming weights
for( int j = 0; j < units[1]; j++ ) {
for( int i = 0; i < units[0]; i++ ) {
float wij = get_weights(0).get(j, i);
sum_wj[j] += Math.abs(wij);
}
}
for( int k = 0; k < units[2]; k++ ) {
for( int j = 0; j < units[1]; j++ ) {
float wjk = get_weights(1).get(k,j);
sum_wk[k] += Math.abs(wjk);
}
}
// compute importance of input i on output k as product of connecting weights going through j
for( int i = 0; i < units[0]; i++ ) {
for( int k = 0; k < units[2]; k++ ) {
for( int j = 0; j < units[1]; j++ ) {
float wij = get_weights(0).get(j,i);
float wjk = get_weights(1).get(k,j);
//Qik[i][k] += Math.abs(wij)/sum_wj[j] * wjk; //Wong,Gedeon,Taggart '95
Qik[i][k] += Math.abs(wij)/sum_wj[j] * Math.abs(wjk)/sum_wk[k]; //Gedeon '97
}
}
}
// normalize Qik over all outputs k
for( int k = 0; k < units[2]; k++ ) {
float sumQk = 0;
for( int i = 0; i < units[0]; i++ ) sumQk += Qik[i][k];
for( int i = 0; i < units[0]; i++ ) Qik[i][k] /= sumQk;
}
// importance for feature i is the sum over k of i->k importances
for( int i = 0; i < units[0]; i++ ) vi[i] = Utils.sum(Qik[i]);
//normalize importances such that max(vi) = 1
Utils.div(vi, Utils.maxValue(vi));
return vi;
}
// compute stats on all nodes
public void computeStats() {
float[][] rate = get_params().adaptive_rate ? new float[units.length-1][] : null;
if (get_params().autoencoder && get_params().sparsity_beta > 0) {
for (int k = 0; k < get_params().hidden.length; k++) {
mean_a[k] = 0;
for (int j = 0; j < avg_activations[k].size(); j++)
mean_a[k] += avg_activations[k].get(j);
mean_a[k] /= avg_activations[k].size();
}
}
for( int y = 1; y < units.length; y++ ) {
mean_rate[y] = rms_rate[y] = 0;
mean_bias[y] = rms_bias[y] = 0;
mean_weight[y] = rms_weight[y] = 0;
for(int u = 0; u < biases[y-1].size(); u++) {
mean_bias[y] += biases[y-1].get(u);
}
if (rate != null) rate[y-1] = new float[get_weights(y-1).raw().length];
for(int u = 0; u < get_weights(y-1).raw().length; u++) {
mean_weight[y] += get_weights(y-1).raw()[u];
if (rate != null) {
// final float RMS_dx = (float)Math.sqrt(ada[y-1][2*u]+(float)get_params().epsilon);
// final float invRMS_g = (float)(1/Math.sqrt(ada[y-1][2*u+1]+(float)get_params().epsilon));
final float RMS_dx = Utils.approxSqrt(get_ada_dx_g(y-1).raw()[2*u]+(float)get_params().epsilon);
final float invRMS_g = Utils.approxInvSqrt(get_ada_dx_g(y-1).raw()[2*u+1]+(float)get_params().epsilon);
rate[y-1][u] = RMS_dx*invRMS_g; //not exactly right, RMS_dx should be from the previous time step -> but close enough for diagnostics.
mean_rate[y] += rate[y-1][u];
}
}
mean_bias[y] /= biases[y-1].size();
mean_weight[y] /= get_weights(y-1).size();
if (rate != null) mean_rate[y] /= rate[y-1].length;
for(int u = 0; u < biases[y-1].size(); u++) {
final double db = biases[y-1].get(u) - mean_bias[y];
rms_bias[y] += db * db;
}
for(int u = 0; u < get_weights(y-1).size(); u++) {
final double dw = get_weights(y-1).raw()[u] - mean_weight[y];
rms_weight[y] += dw * dw;
if (rate != null) {
final double drate = rate[y-1][u] - mean_rate[y];
rms_rate[y] += drate * drate;
}
}
rms_bias[y] = Utils.approxSqrt(rms_bias[y]/biases[y-1].size());
rms_weight[y] = Utils.approxSqrt(rms_weight[y]/get_weights(y-1).size());
if (rate != null) rms_rate[y] = Utils.approxSqrt(rms_rate[y]/rate[y-1].length);
// rms_bias[y] = (float)Math.sqrt(rms_bias[y]/biases[y-1].length);
// rms_weight[y] = (float)Math.sqrt(rms_weight[y]/weights[y-1].length);
// if (rate != null) rms_rate[y] = (float)Math.sqrt(rms_rate[y]/rate[y-1].length);
// Abort the run if weights or biases are unreasonably large (Note that all input values are normalized upfront)
// This can happen with Rectifier units when L1/L2/max_w2 are all set to 0, especially when using more than 1 hidden layer.
final double thresh = 1e10;
unstable |= mean_bias[y] > thresh || isNaN(mean_bias[y])
|| rms_bias[y] > thresh || isNaN(rms_bias[y])
|| mean_weight[y] > thresh || isNaN(mean_weight[y])
|| rms_weight[y] > thresh || isNaN(rms_weight[y]);
}
}
}
/**
* Constructor to restart from a checkpointed model
* @param cp Checkpoint to restart from
* @param destKey New destination key for the model
* @param jobKey New job key (job which updates the model)
* @param dataInfo Data info describing the adapted training frame for the new model
*/
public DeepLearningModel(final DeepLearningModel cp, final Key destKey, final Key jobKey, final DataInfo dataInfo) {
super(destKey, cp._dataKey, dataInfo._adaptedFrame.names(), dataInfo._adaptedFrame.domains(), cp._priorClassDist != null ? cp._priorClassDist.clone() : null, null);
final boolean store_best_model = (jobKey == null);
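// A null jobKey signals that this constructor stores a standalone copy (the best model so far)
// rather than setting up a checkpoint restart; the two branches below differ accordingly
// (deep clone + DONE state vs. shallow clone tied to the running job).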
this.jobKey = jobKey;
this._validationKey = cp._validationKey;
if (store_best_model) {
model_info = cp.model_info.deep_clone(); //don't want to interfere with model being built, just make a deep copy and store that
model_info.data_info = dataInfo.deep_clone(); //replace previous data_info with updated version that's passed in (contains enum for classification)
get_params().state = Job.JobState.DONE; //change the deep_clone'd state to DONE
_modelClassDist = cp._modelClassDist != null ? cp._modelClassDist.clone() : null;
} else {
model_info = (DeepLearningModelInfo) cp.model_info.clone(); //shallow clone is ok (won't modify the Checkpoint in K-V store during checkpoint restart)
model_info.data_info = dataInfo; //shallow clone is ok
get_params().checkpoint = cp._key; //it's only a "real" checkpoint if job != null, otherwise a best model copy
get_params().state = ((DeepLearning)UKV.get(jobKey)).state; //make the job state consistent
}
get_params().job_key = jobKey;
get_params().destination_key = destKey;
get_params().start_time = System.currentTimeMillis(); //for displaying the model progress
actual_best_model_key = cp.actual_best_model_key;
start_time = cp.start_time;
run_time = cp.run_time;
training_rows = cp.training_rows; //copy the value to display the right number on the model page before training has started
validation_rows = cp.validation_rows; //copy the value to display the right number on the model page before training has started
_bestError = cp._bestError;
// deep clone scoring history
errors = cp.errors.clone();
for (int i=0; i<errors.length;++i)
errors[i] = cp.errors[i].deep_clone();
// set proper timing
_timeLastScoreEnter = System.currentTimeMillis();
_timeLastScoreStart = 0;
_timeLastScoreEnd = 0;
_timeLastPrintStart = 0;
assert(Arrays.equals(_key._kb, destKey._kb));
}
public DeepLearningModel(final Key destKey, final Key jobKey, final Key dataKey, final DataInfo dinfo, final DeepLearning params, final float[] priorDist) {
super(destKey, dataKey, dinfo._adaptedFrame, priorDist);
this.jobKey = jobKey;
this._validationKey = params.validation != null ? params.validation._key : null;
run_time = 0;
start_time = System.currentTimeMillis();
_timeLastScoreEnter = start_time;
model_info = new DeepLearningModelInfo(params, dinfo);
actual_best_model_key = Key.makeSystem(Key.make().toString());
if (params.n_folds != 0) actual_best_model_key = null;
Object job = UKV.get(jobKey);
if (job instanceof DeepLearning)
get_params().state = ((DeepLearning)UKV.get(jobKey)).state; //make the job state consistent
else
get_params().state = ((Job.JobHandle)UKV.get(jobKey)).state; //make the job state consistent
if (!get_params().autoencoder) {
errors = new Errors[1];
errors[0] = new Errors();
errors[0].validation = (params.validation != null);
errors[0].num_folds = params.n_folds;
}
assert(Arrays.equals(_key._kb, destKey._kb));
}
public long _timeLastScoreEnter; //not transient: needed for HTML display page
transient private long _timeLastScoreStart;
transient private long _timeLastScoreEnd;
transient private long _timeLastPrintStart;
/**
*
* @param train training data from which the model is built (for epoch counting only)
* @param ftrain potentially downsampled training data for scoring
* @param ftest potentially downsampled validation data for scoring
* @param job_key key of the owning job
* @param vadaptor adaptor that maps the model's response domain to the validation set's domain (for confusion matrices)
* @return true if model building is ongoing
*/
boolean doScoring(Frame train, Frame ftrain, Frame ftest, Key job_key, Job.ValidatedJob.Response2CMAdaptor vadaptor) {
try {
final long now = System.currentTimeMillis();
epoch_counter = (float)model_info().get_processed_total()/training_rows;
final double time_last_iter_millis = now-_timeLastScoreEnter;
// Auto-tuning
// if multi-node and auto-tuning and at least 10 ms for communication (to avoid doing this on multi-JVM setups on the same node),
// then adjust the auto-tuning parameter 'actual_train_samples_per_iteration' such that the targeted ratio of comm to comp is achieved
// Note: actual communication time is estimated by the NetworkTest's collective test.
if (H2O.CLOUD.size() > 1 && get_params().train_samples_per_iteration == -2 && time_for_communication_us > 1e4) {
// Log.info("Time taken for communication: " + PrettyPrint.usecs((long)time_for_communication_us));
// Log.info("Time taken for Map/Reduce iteration: " + PrettyPrint.msecs((long)time_last_iter_millis, true));
final double comm_to_work_ratio = (time_for_communication_us *1e-3) / time_last_iter_millis;
// Log.info("Ratio of network communication to computation: " + String.format("%.3f", comm_to_work_ratio));
// Log.info("target_comm_to_work: " + get_params().target_ratio_comm_to_comp);
final double correction = get_params().target_ratio_comm_to_comp / comm_to_work_ratio;
// Log.warn("Suggested value for train_samples_per_iteration: " + get_params().actual_train_samples_per_iteration/correction);
actual_train_samples_per_iteration /= correction;
actual_train_samples_per_iteration = Math.max(1, actual_train_samples_per_iteration);
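// Example (assumed measurements): if the observed comm/comp ratio is 0.10 and the target is 0.05,
// correction = 0.5 and actual_train_samples_per_iteration doubles, so each M/R iteration does more
// local work relative to communication.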
}
run_time += time_last_iter_millis;
_timeLastScoreEnter = now;
boolean keep_running = (epoch_counter < get_params().epochs);
final long sinceLastScore = now -_timeLastScoreStart;
final long sinceLastPrint = now -_timeLastPrintStart;
final long samples = model_info().get_processed_total();
if (!keep_running || sinceLastPrint > get_params().score_interval*1000) {
_timeLastPrintStart = now;
Log.info("Training time: " + PrettyPrint.msecs(run_time, true)
+ ". Processed " + String.format("%,d", samples) + " samples" + " (" + String.format("%.3f", epoch_counter) + " epochs)."
+ " Speed: " + String.format("%.3f", 1000.*samples/run_time) + " samples/sec.");
}
// this is potentially slow - only do every so often
if( !keep_running ||
(sinceLastScore > get_params().score_interval*1000 //don't score too often
&&(double)(_timeLastScoreEnd-_timeLastScoreStart)/sinceLastScore < get_params().score_duty_cycle) ) { //duty cycle
final boolean printme = !get_params().quiet_mode;
final boolean adaptCM = (isClassifier() && vadaptor.needsAdaptation2CM());
_timeLastScoreStart = now;
if (get_params().diagnostics) model_info().computeStats();
Errors err = new Errors();
err.training_time_ms = run_time;
err.epoch_counter = epoch_counter;
err.training_samples = model_info().get_processed_total();
err.validation = ftest != null;
err.score_training_samples = ftrain.numRows();
if (get_params().autoencoder) {
if (printme) Log.info("Scoring the auto-encoder.");
// training
{
final Frame mse_frame = scoreAutoEncoder(ftrain);
final Vec l2 = mse_frame.anyVec();
Log.info("Mean reconstruction error on training data: " + l2.mean() + "\n");
err.train_mse = l2.mean();
mse_frame.delete();
}
} else {
if (printme) Log.info("Scoring the model.");
// compute errors
err.classification = isClassifier();
assert (err.classification == get_params().classification);
err.num_folds = get_params().n_folds;
err.train_confusion_matrix = new ConfusionMatrix();
final int hit_k = Math.min(nclasses(), get_params().max_hit_ratio_k);
if (err.classification && nclasses() > 2 && hit_k > 0) {
err.train_hitratio = new HitRatio();
err.train_hitratio.set_max_k(hit_k);
}
final String m = model_info().toString();
if (m.length() > 0) Log.info(m);
final Frame trainPredict = score(ftrain, false);
AUC trainAUC = null;
if (err.classification && nclasses() == 2) trainAUC = new AUC();
final double trainErr = calcError(ftrain, ftrain.lastVec(), trainPredict, trainPredict, "training",
printme, get_params().max_confusion_matrix_size, err.train_confusion_matrix, trainAUC, err.train_hitratio);
if (isClassifier()) err.train_err = trainErr;
if (trainAUC != null) err.trainAUC = trainAUC.data();
else err.train_mse = trainErr;
trainPredict.delete();
if (err.validation) {
assert ftest != null;
err.score_validation_samples = ftest.numRows();
err.valid_confusion_matrix = new ConfusionMatrix();
if (err.classification && nclasses() > 2 && hit_k > 0) {
err.valid_hitratio = new HitRatio();
err.valid_hitratio.set_max_k(hit_k);
}
final String adaptRespName = vadaptor.adaptedValidationResponse(responseName());
Vec adaptCMresp = null;
if (adaptCM) {
Vec[] v = ftest.vecs();
assert (ftest.find(adaptRespName) == v.length - 1); //make sure to have (adapted) response in the test set
adaptCMresp = ftest.remove(v.length - 1); //model would remove any extra columns anyway (need to keep it here for later)
}
final Frame validPredict = score(ftest, adaptCM);
final Frame hitratio_validPredict = new Frame(validPredict);
Vec orig_label = validPredict.vecs()[0];
// Adapt output response domain, in case validation domain is different from training domain
// Note: doesn't change predictions, just the *possible* label domain
if (adaptCM) {
assert (adaptCMresp != null);
assert (ftest.find(adaptRespName) == -1);
ftest.add(adaptRespName, adaptCMresp);
final Vec CMadapted = vadaptor.adaptModelResponse2CM(validPredict.vecs()[0]);
validPredict.replace(0, CMadapted); //replace label
validPredict.add("to_be_deleted", CMadapted); //keep the Vec around to be deleted later (no leak)
}
AUC validAUC = null;
if (err.classification && nclasses() == 2) validAUC = new AUC();
final double validErr = calcError(ftest, ftest.lastVec(), validPredict, hitratio_validPredict, "validation",
printme, get_params().max_confusion_matrix_size, err.valid_confusion_matrix, validAUC, err.valid_hitratio);
if (isClassifier()) err.valid_err = validErr;
if (validAUC != null) err.validAUC = validAUC.data();
else err.valid_mse = validErr;
validPredict.delete();
//also delete the replaced label
if (adaptCM) orig_label.remove(new Futures()).blockForPending();
}
if (get_params().variable_importances) {
if (!get_params().quiet_mode) Log.info("Computing variable importances.");
final float[] vi = model_info().computeVariableImportances();
err.variable_importances = new VarImp(vi, Arrays.copyOfRange(model_info().data_info().coefNames(), 0, vi.length));
}
// drop the confusion matrices for this scoring step if the number of output classes reaches the specified limit (keeps the scoring history small)
if (err.train_confusion_matrix.cm != null
&& err.train_confusion_matrix.cm.length - 1 >= get_params().max_confusion_matrix_size) {
err.train_confusion_matrix = null;
err.valid_confusion_matrix = null;
}
}
_timeLastScoreEnd = System.currentTimeMillis();
err.scoring_time = System.currentTimeMillis() - now;
// grow the errors array by one and append the latest score
if (errors == null) {
errors = new Errors[]{err};
} else {
Errors[] err2 = new Errors[errors.length + 1];
System.arraycopy(errors, 0, err2, 0, errors.length);
err2[err2.length - 1] = err;
errors = err2;
}
if (!get_params().autoencoder) {
// always keep a copy of the best model so far (based on the following criterion)
if (actual_best_model_key != null && (
// if we have a best_model in DKV, then compare against its error() (unless it's a different model as judged by the network size)
(UKV.get(actual_best_model_key) != null && (error() < UKV.<DeepLearningModel>get(actual_best_model_key).error() || !Arrays.equals(model_info().units, UKV.<DeepLearningModel>get(actual_best_model_key).model_info().units)))
||
// otherwise, compare against our own _bestError
(UKV.get(actual_best_model_key) == null && error() < _bestError)
) ) {
if (!get_params().quiet_mode)
Log.info("Error reduced from " + _bestError + " to " + error() + ". Storing best model so far under key " + actual_best_model_key.toString() + ".");
_bestError = error();
putMeAsBestModel(actual_best_model_key);
// debugging check
if (false) {
DeepLearningModel bestModel = UKV.get(actual_best_model_key);
final Frame fr = ftest != null ? ftest : ftrain;
final Frame bestPredict = bestModel.score(fr, ftest != null ? adaptCM : false);
final Frame hitRatio_bestPredict = new Frame(bestPredict);
// Adapt output response domain, in case validation domain is different from training domain
// Note: doesn't change predictions, just the *possible* label domain
if (adaptCM) {
final Vec CMadapted = vadaptor.adaptModelResponse2CM(bestPredict.vecs()[0]);
bestPredict.replace(0, CMadapted); //replace label
bestPredict.add("to_be_deleted", CMadapted); //keep the Vec around to be deleted later (no leak)
}
final double err3 = calcError(fr, fr.lastVec(), bestPredict, hitRatio_bestPredict, "cross-check",
printme, get_params().max_confusion_matrix_size, new water.api.ConfusionMatrix(), isClassifier() && nclasses() == 2 ? new AUC() : null, null);
if (isClassifier())
assert (ftest != null ? Math.abs(err.valid_err - err3) < 1e-5 : Math.abs(err.train_err - err3) < 1e-5);
else
assert (ftest != null ? Math.abs(err.valid_mse - err3) < 1e-5 : Math.abs(err.train_mse - err3) < 1e-5);
bestPredict.delete();
}
}
// else {
// // keep output JSON small
// if (errors.length > 1) {
// if (last_scored().trainAUC != null) last_scored().trainAUC.clear();
// if (last_scored().validAUC != null) last_scored().validAUC.clear();
// last_scored().variable_importances = null;
// }
// }
// print the freshly scored model to ASCII
for (String s : toString().split("\n")) Log.info(s);
if (printme) Log.info("Time taken for scoring and diagnostics: " + PrettyPrint.msecs(err.scoring_time, true));
}
}
if (model_info().unstable()) {
Log.warn(unstable_msg);
keep_running = false;
} else if ( (isClassifier() && last_scored().train_err <= get_params().classification_stop)
|| (!isClassifier() && last_scored().train_mse <= get_params().regression_stop) ) {
Log.info("Achieved requested predictive accuracy on the training data. Model building completed.");
keep_running = false;
}
update(job_key);
// System.out.println(this);
return keep_running;
}
catch (Exception ex) {
return false;
}
}
@Override protected void setCrossValidationError(Job.ValidatedJob job, double cv_error, ConfusionMatrix cm, AUCData auc, HitRatio hr) {
_have_cv_results = true;
if (!get_params().classification)
last_scored().valid_mse = cv_error;
else
last_scored().valid_err = cv_error;
last_scored().score_validation_samples = last_scored().score_training_samples / get_params().n_folds;
last_scored().num_folds = get_params().n_folds;
last_scored().valid_confusion_matrix = cm;
last_scored().validAUC = auc;
last_scored().valid_hitratio = hr;
DKV.put(this._key, this); //overwrite this model
}
@Override public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(model_info.toString());
sb.append(last_scored().toString());
return sb.toString();
}
public String toStringAll() {
StringBuilder sb = new StringBuilder();
sb.append(model_info.toStringAll());
sb.append(last_scored().toString());
return sb.toString();
}
/**
* This is an overridden version of Model.score(). Make either a prediction or a reconstruction.
* @param frame Test dataset
* @return A frame containing the prediction or reconstruction
*/
@Override
public Frame score(Frame frame) {
if (!get_params().autoencoder) {
return super.score(frame);
} else {
// Reconstruction
// Adapt the Frame layout - returns adapted frame and frame containing only
// newly created vectors
Frame[] adaptFrms = adapt(frame,false,false/*no response*/);
// Adapted frame containing all columns - mix of original vectors from fr
// and newly created vectors serving as adaptors
Frame adaptFrm = adaptFrms[0];
// Contains only newly created vectors. The frame eases deletion of these vectors.
Frame onlyAdaptFrm = adaptFrms[1];
final int len = model_info().data_info().fullN();
String prefix = "reconstr_";
assert(model_info().data_info()._responses == 0);
String[] coefnames = model_info().data_info().coefNames();
assert(len == coefnames.length);
for( int c=0; c<len; c++ )
adaptFrm.add(prefix+coefnames[c],adaptFrm.anyVec().makeZero());
new MRTask2() {
@Override public void map( Chunk chks[] ) {
double tmp [] = new double[_names.length];
float preds[] = new float [len];
final Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(model_info);
for( int row=0; row<chks[0]._len; row++ ) {
float p[] = score_autoencoder(chks, row, tmp, preds, neurons);
for( int c=0; c<preds.length; c++ )
chks[_names.length+c].set0(row,p[c]);
}
}
}.doAll(adaptFrm);
// Return the predicted columns
int x=_names.length, y=adaptFrm.numCols();
Frame f = adaptFrm.extractFrame(x, y); //this will call vec_impl() and we cannot call the delete() below just yet
onlyAdaptFrm.delete();
return f;
}
}
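  // Illustrative (hypothetical) usage sketch, not part of the original class: for a supervised model
  // this delegates to Model.score() and returns predictions; for an autoencoder it returns the
  // reconstruction in the expanded column space, e.g.:
  //   Frame preds = model.score(testFrame);            // supervised: typically label + per-class probabilities
  //   Frame reconstr = autoencModel.score(testFrame);  // autoencoder: "reconstr_<coefname>" columns
  //   reconstr.delete();                               // caller is responsible for deleting the returned frame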
/**
* Predict from raw double values representing the data
* @param data raw array containing categorical values (horizontalized to 1,0,0,1,0,0 etc.) and numerical values (0.35,1.24,5.3234,etc), both can contain NaNs
* @param preds predicted label and per-class probabilities (for classification), predicted target (regression), can contain NaNs
* @return preds, can contain NaNs
*/
@Override public float[] score0(double[] data, float[] preds) {
if (model_info().unstable()) {
Log.warn(unstable_msg);
throw new UnsupportedOperationException("Trying to predict with an unstable model.");
}
Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(model_info);
((Neurons.Input)neurons[0]).setInput(-1, data);
DeepLearningTask.step(-1, neurons, model_info, false, null);
float[] out = neurons[neurons.length - 1]._a.raw();
if (isClassifier()) {
assert (preds.length == out.length + 1);
for (int i = 0; i < preds.length - 1; ++i) {
preds[i + 1] = out[i];
if (Float.isNaN(preds[i + 1])) throw new RuntimeException("Predicted class probability NaN!");
}
preds[0] = ModelUtils.getPrediction(preds, data);
} else {
assert (preds.length == 1 && out.length == 1);
if (model_info().data_info()._normRespMul != null)
preds[0] = (float) (out[0] / model_info().data_info()._normRespMul[0] + model_info().data_info()._normRespSub[0]);
else
preds[0] = out[0];
if (Float.isNaN(preds[0])) throw new RuntimeException("Predicted regression target NaN!");
}
return preds;
}
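  // Note on the preds layout produced above (summarized from the code for readability):
  // classification: preds[0] = predicted class index, preds[1..nclasses] = per-class probabilities;
  // regression:     preds[0] = predicted target (de-standardized if the response was normalized).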
/**
* Score auto-encoded reconstruction (on-the-fly, without allocating the reconstruction as done in Frame score(Frame fr))
* @param frame Original data (can contain response, will be ignored)
* @return Frame containing one Vec with reconstruction error (MSE) of each reconstructed row, caller is responsible for deletion
*/
public Frame scoreAutoEncoder(Frame frame) {
final int len = _names.length;
// Adapt the Frame layout - returns adapted frame and frame containing only
// newly created vectors
Frame[] adaptFrms = adapt(frame,false,false/*no response*/);
// Adapted frame containing all columns - mix of original vectors from fr
// and newly created vectors serving as adaptors
Frame adaptFrm = adaptFrms[0];
// Contains only newly created vectors. The frame eases deletion of these vectors.
Frame onlyAdaptFrm = adaptFrms[1];
adaptFrm.add("Reconstruction.MSE", adaptFrm.anyVec().makeZero());
new MRTask2() {
@Override public void map( Chunk chks[] ) {
double tmp [] = new double[len];
final Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(model_info);
for( int row=0; row<chks[0]._len; row++ ) {
for( int i=0; i<_names.length; i++ )
tmp[i] = chks[i].at0(row); //original data
chks[len].set0(row, score_autoencoder(tmp, null, neurons)); //store the per-row reconstruction error (MSE) in the last column
}
}
}.doAll(adaptFrm);
// Return just the output columns
int x=_names.length, y=adaptFrm.numCols();
final Frame l2 = adaptFrm.extractFrame(x, y);
onlyAdaptFrm.delete();
return l2;
}
/**
* Extract the (dense) activations of a given hidden layer for every row of a dataset ("deep features").
* @param frame Original data (a response column, if present, is dropped for scoring and re-attached to the result)
* @param layer Index of the hidden layer to extract (0-based)
* @return Frame with one column per hidden unit of the requested layer ("DF.C1".."DF.Cn"), caller is responsible for deletion
*/
public Frame scoreDeepFeatures(Frame frame, final int layer) {
assert(layer >= 0 && layer < model_info().get_params().hidden.length);
final int len = nfeatures();
Vec resp = null;
if (isSupervised()) {
int ridx = frame.find(responseName());
if (ridx != -1) { // drop the response for scoring!
frame = new Frame(frame);
resp = frame.vecs()[ridx];
frame.remove(ridx);
}
}
// Adapt the Frame layout - returns adapted frame and frame containing only
// newly created vectors
Frame[] adaptFrms = adapt(frame,false,false/*no response*/);
// Adapted frame containing all columns - mix of original vectors from fr
// and newly created vectors serving as adaptors
Frame adaptFrm = adaptFrms[0];
// Contains only newly created vectors. The frame eases deletion of these vectors.
Frame onlyAdaptFrm = adaptFrms[1];
//create new features, will be dense
final int features = model_info().get_params().hidden[layer];
Vec[] vecs = adaptFrm.anyVec().makeZeros(features);
for (int j=0; j<features; ++j) {
adaptFrm.add("DF.C" + (j+1), vecs[j]);
}
new MRTask2() {
@Override public void map( Chunk chks[] ) {
double tmp [] = new double[len];
float df[] = new float [features];
final Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(model_info);
for( int row=0; row<chks[0]._len; row++ ) {
for( int i=0; i<len; i++ )
tmp[i] = chks[i].at0(row);
((Neurons.Input)neurons[0]).setInput(-1, tmp);
DeepLearningTask.step(-1, neurons, model_info, false, null);
float[] out = neurons[layer+1]._a.raw(); //extract the layer-th hidden feature
for( int c=0; c<df.length; c++ )
chks[_names.length+c].set0(row,out[c]);
}
}
}.doAll(adaptFrm);
// Return just the output columns
int x=_names.length, y=adaptFrm.numCols();
Frame ret = adaptFrm.extractFrame(x, y);
onlyAdaptFrm.delete();
if (resp != null) ret.prepend(responseName(), resp);
return ret;
}
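  // Illustrative (hypothetical) usage sketch, not part of the original class: extract the activations
  // of the first hidden layer as new columns "DF.C1".."DF.Cn" (the response, if present, is re-attached):
  //   Frame deepFeatures = model.scoreDeepFeatures(frame, 0); // layer index is 0-based
  //   ... use deepFeatures as input to another model ...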
// Make (potentially expanded) reconstruction
private float[] score_autoencoder(Chunk[] chks, int row_in_chunk, double[] tmp, float[] preds, Neurons[] neurons) {
assert(get_params().autoencoder);
assert(tmp.length == _names.length);
for( int i=0; i<tmp.length; i++ )
tmp[i] = chks[i].at0(row_in_chunk);
score_autoencoder(tmp, preds, neurons); // this fills preds, returns MSE error (ignored here)
return preds;
}
/**
* Helper to reconstruct original data into preds array and compute the reconstruction error (MSE)
* @param data Original data (unexpanded)
* @param preds Reconstruction (potentially expanded)
* @return reconstruction error
*/
private double score_autoencoder(double[] data, float[] preds, Neurons[] neurons) {
assert(model_info().get_params().autoencoder);
if (model_info().unstable()) {
Log.warn(unstable_msg);
throw new UnsupportedOperationException("Trying to predict with an unstable model.");
}
((Neurons.Input)neurons[0]).setInput(-1, data); // expands categoricals inside
DeepLearningTask.step(-1, neurons, model_info, false, null); // reconstructs data in expanded space
float[] in = neurons[0]._a.raw(); //input (expanded)
float[] out = neurons[neurons.length - 1]._a.raw(); //output (expanded)
assert(in.length == out.length);
// First normalize categorical reconstructions to be probabilities
// (such that they can be better compared to the input where one factor was 1 and the rest was 0)
// model_info().data_info().softMaxCategoricals(out,out); //only modifies the categoricals
// Compute MSE of reconstruction in expanded space (with categorical probabilities)
double l2 = 0;
for (int i = 0; i < in.length; ++i)
l2 += Math.pow((out[i] - in[i]), 2);
l2 /= in.length;
if (preds!=null) {
// Now scale back numerical columns to original data space (scale + shift)
model_info().data_info().unScaleNumericals(out, out); //only modifies the numericals
System.arraycopy(out, 0, preds, 0, out.length); //copy reconstruction into preds
}
return l2;
}
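  // Reconstruction error computed above, in the expanded (one-hot categoricals + standardized numericals)
  // space:  MSE = (1/N) * sum_i (out[i] - in[i])^2,  with N = number of expanded columns.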
/**
* Compute quantile-based threshold (in reconstruction error) to find outliers
* @param mse Vector containing reconstruction errors
* @param quantile Quantile for cut-off
* @return Threshold in MSE value for a point to be above the quantile
*/
public double calcOutlierThreshold(Vec mse, double quantile) {
Frame mse_frame = new Frame(Key.make(), new String[]{"Reconstruction.MSE"}, new Vec[]{mse});
QuantilesPage qp = new QuantilesPage();
qp.column = mse_frame.vec(0);
qp.source_key = mse_frame;
qp.quantile = quantile;
qp.invoke();
DKV.remove(mse_frame._key);
return qp.result;
}
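  // Illustrative (hypothetical) anomaly-detection sketch combining the two methods above, not part of
  // the original class:
  //   Frame mseFrame = model.scoreAutoEncoder(data);                        // per-row "Reconstruction.MSE"
  //   double thresh = model.calcOutlierThreshold(mseFrame.anyVec(), 0.99);  // 99th percentile cut-off
  //   // rows with reconstruction MSE above 'thresh' can be flagged as outliers
  //   mseFrame.delete();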
@Override public ModelAutobufferSerializer getModelSerializer() {
// Return a serializer which knows how to serialize keys
return new ModelAutobufferSerializer() {
@Override protected AutoBuffer postLoad(Model m, AutoBuffer ab) {
Job.hygiene(((DeepLearningModel)m).get_params());
return ab;
}
};
}
public boolean generateHTML(String title, StringBuilder sb) {
if (_key == null) {
DocGen.HTML.title(sb, "No model yet");
return true;
}
// optional JFrame creation for visualization of weights
// DeepLearningVisualization.visualize(this);
final String mse_format = "%g";
// final String cross_entropy_format = "%2.6f";
// stats for training and validation
final Errors error = last_scored();
DocGen.HTML.title(sb, title);
if (get_params().source == null || DKV.get(get_params().source._key) == null ||
(get_params().validation != null && DKV.get(get_params().validation._key) == null)) (Job.hygiene(get_params())).toHTML(sb);
else job().toHTML(sb);
sb.append("<div class='alert'>Actions: "
+ (jobKey != null && UKV.get(jobKey) != null && Job.isRunning(jobKey) ? "<i class=\"icon-stop\"></i>" + Cancel.link(jobKey, "Stop training") + ", " : "")
+ Inspect2.link("Inspect training data (" + _dataKey + ")", _dataKey) + ", "
+ (_validationKey != null ? (Inspect2.link("Inspect validation data (" + _validationKey + ")", _validationKey) + ", ") : "")
+ water.api.Predict.link(_key, "Score on dataset") + ", "
+ DeepLearning.link(_dataKey, "Compute new model", null, responseName(), _validationKey)
+ (actual_best_model_key != null && UKV.get(actual_best_model_key) != null && actual_best_model_key != _key ? ", " + DeepLearningModelView.link("Go to best model", actual_best_model_key) : "")
+ (jobKey == null || ((jobKey != null && UKV.get(jobKey) == null)) || (jobKey != null && UKV.get(jobKey) != null && Job.isEnded(jobKey)) ? ", <i class=\"icon-play\"></i>" + DeepLearning.link(_dataKey, "Continue training this model", _key, responseName(), _validationKey) : "") + ", "
+ UIUtils.qlink(SaveModel.class, "model", _key, "Save model") + ", "
+ "</div>");
DocGen.HTML.paragraph(sb, "Model Key: " + _key);
if (jobKey != null) DocGen.HTML.paragraph(sb, "Job Key: " + jobKey);
if (!get_params().autoencoder)
DocGen.HTML.paragraph(sb, "Model type: " + (get_params().classification ? " Classification" : " Regression") + ", predicting: " + responseName());
else
DocGen.HTML.paragraph(sb, "Model type: Auto-Encoder");
DocGen.HTML.paragraph(sb, "Number of model parameters (weights/biases): " + String.format("%,d", model_info().size()));
if (model_info.unstable()) {
DocGen.HTML.section(sb, "=======================================================================================");
DocGen.HTML.section(sb, unstable_msg.replace("\n"," "));
DocGen.HTML.section(sb, "=======================================================================================");
}
if (error == null) return true;
DocGen.HTML.title(sb, "Progress");
// update epoch counter every time the website is displayed
epoch_counter = training_rows > 0 ? (float)model_info().get_processed_total()/training_rows : 0;
final double progress = get_params().progress();
if (get_params() != null && get_params().diagnostics) {
DocGen.HTML.section(sb, "Status of Neuron Layers");
sb.append("<table class='table table-striped table-bordered table-condensed'>");
sb.append("<tr>");
sb.append("<th>").append("#").append("</th>");
sb.append("<th>").append("Units").append("</th>");
sb.append("<th>").append("Type").append("</th>");
sb.append("<th>").append("Dropout").append("</th>");
sb.append("<th>").append("L1").append("</th>");
sb.append("<th>").append("L2").append("</th>");
if (get_params().adaptive_rate) {
sb.append("<th>").append("Rate (Mean, RMS)").append("</th>");
} else {
sb.append("<th>").append("Rate").append("</th>");
sb.append("<th>").append("Momentum").append("</th>");
}
sb.append("<th>").append("Weight (Mean, RMS)").append("</th>");
sb.append("<th>").append("Bias (Mean, RMS)").append("</th>");
sb.append("</tr>");
Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(model_info()); //link the weights to the neurons, for easy access
for (int i=0; i<neurons.length; ++i) {
sb.append("<tr>");
sb.append("<td>").append("<b>").append(i+1).append("</b>").append("</td>");
sb.append("<td>").append("<b>").append(neurons[i].units).append("</b>").append("</td>");
sb.append("<td>").append(neurons[i].getClass().getSimpleName()).append("</td>");
if (i == 0) {
sb.append("<td>");
sb.append(Utils.formatPct(neurons[i].params.input_dropout_ratio));
sb.append("</td>");
sb.append("<td></td>");
sb.append("<td></td>");
sb.append("<td></td>");
if (!get_params().adaptive_rate) sb.append("<td></td>");
sb.append("<td></td>");
sb.append("<td></td>");
sb.append("</tr>");
continue;
}
else if (i < neurons.length-1) {
sb.append("<td>");
if (neurons[i].params.hidden_dropout_ratios == null)
sb.append(Utils.formatPct(0));
else
sb.append(Utils.formatPct(neurons[i].params.hidden_dropout_ratios[i - 1]));
sb.append("</td>");
} else {
sb.append("<td></td>");
}
final String format = "%g";
sb.append("<td>").append(neurons[i].params.l1).append("</td>");
sb.append("<td>").append(neurons[i].params.l2).append("</td>");
if (get_params().adaptive_rate) {
sb.append("<td>(").append(String.format(format, model_info.mean_rate[i])).
append(", ").append(String.format(format, model_info.rms_rate[i])).append(")</td>");
} else {
sb.append("<td>").append(String.format("%.5g", neurons[i].rate(error.training_samples))).append("</td>");
sb.append("<td>").append(String.format("%.5f", neurons[i].momentum(error.training_samples))).append("</td>");
}
sb.append("<td>(").append(String.format(format, model_info.mean_weight[i])).
append(", ").append(String.format(format, model_info.rms_weight[i])).append(")</td>");
sb.append("<td>(").append(String.format(format, model_info.mean_bias[i])).
append(", ").append(String.format(format, model_info.rms_bias[i])).append(")</td>");
sb.append("</tr>");
}
sb.append("</table>");
}
if (isClassifier() && !get_params().autoencoder) {
DocGen.HTML.section(sb, "Classification error on training data: " + Utils.formatPct(error.train_err));
if(error.validation) {
DocGen.HTML.section(sb, "Classification error on validation data: " + Utils.formatPct(error.valid_err));
} else if(error.num_folds > 0) {
DocGen.HTML.section(sb, "Classification error on " + error.num_folds + "-fold cross-validated training data"
+ (_have_cv_results ? ": " + Utils.formatPct(error.valid_err) : " is being computed - please reload this page later."));
}
} else {
DocGen.HTML.section(sb, "MSE on training data: " + String.format(mse_format, error.train_mse));
if(error.validation) {
DocGen.HTML.section(sb, "MSE on validation data: " + String.format(mse_format, error.valid_mse));
} else if(error.num_folds > 0) {
DocGen.HTML.section(sb, "MSE on " + error.num_folds + "-fold cross-validated training data"
+ (_have_cv_results ? ": " + String.format(mse_format, error.valid_mse) : " is being computed - please reload this page later."));
}
}
DocGen.HTML.paragraph(sb, "Training samples: " + String.format("%,d", model_info().get_processed_total()));
DocGen.HTML.paragraph(sb, "Epochs: " + String.format("%.3f", epoch_counter) + " / " + String.format("%.3f", get_params().epochs));
int cores = 0; for (H2ONode n : H2O.CLOUD._memary) cores += n._heartbeat._num_cpus;
DocGen.HTML.paragraph(sb, "Number of compute nodes: " + (model_info.get_params().single_node_mode ? ("1 (" + H2O.NUMCPUS + " threads)") : (H2O.CLOUD.size() + " (" + cores + " threads)")));
DocGen.HTML.paragraph(sb, "Training samples per iteration" + (
get_params().train_samples_per_iteration == -2 ? " (-2 -> auto-tuning): " :
get_params().train_samples_per_iteration == -1 ? " (-1 -> max. available data): " :
get_params().train_samples_per_iteration == 0 ? " (0 -> one epoch): " : " (user-given): ")
+ String.format("%,d", actual_train_samples_per_iteration));
final boolean isEnded = get_params().self() == null || (UKV.get(get_params().self()) != null && Job.isEnded(get_params().self()));
final long time_so_far = isEnded ? run_time : run_time + System.currentTimeMillis() - _timeLastScoreEnter;
if (time_so_far > 0) {
long time_for_speed = isEnded || H2O.CLOUD.size() > 1 ? run_time : time_so_far;
if (time_for_speed > 0)
DocGen.HTML.paragraph(sb, "Training speed: " + String.format("%,d", model_info().get_processed_total() * 1000 / time_for_speed) + " samples/s");
}
DocGen.HTML.paragraph(sb, "Training time: " + PrettyPrint.msecs(time_so_far, true));
if (progress > 0 && !isEnded)
DocGen.HTML.paragraph(sb, "Estimated time left: " +PrettyPrint.msecs((long)(time_so_far*(1-progress)/progress), true));
long score_train = error.score_training_samples;
long score_valid = error.score_validation_samples;
final boolean fulltrain = score_train==0 || score_train == training_rows;
final boolean fullvalid = error.validation && get_params().n_folds == 0 && (score_valid==0 || score_valid == validation_rows);
final String toolarge = " Confusion matrix not shown here - too large: number of classes (" + model_info.units[model_info.units.length-1]
+ ") is greater than the specified limit of " + get_params().max_confusion_matrix_size + ".";
boolean smallenough = model_info.units[model_info.units.length-1] <= get_params().max_confusion_matrix_size;
if (!error.validation) {
if (_have_cv_results) {
String cmTitle = "<div class=\"alert\">Scoring results reported for " + error.num_folds + "-fold cross-validated training data " + Inspect2.link(_dataKey) + ":</div>";
sb.append("<h5>" + cmTitle);
sb.append("</h5>");
}
else {
String cmTitle = "<div class=\"alert\">Scoring results reported on training data " + Inspect2.link(_dataKey) + (fulltrain ? "" : " (" + score_train + " samples)") + ":</div>";
sb.append("<h5>" + cmTitle);
sb.append("</h5>");
}
}
else {
RString v_rs = new RString("<a href='Inspect2.html?src_key=%$key'>%key</a>");
String cmTitle = "<div class=\"alert\">Scoring results reported on validation data " + Inspect2.link(_validationKey) + (fullvalid ? "" : " (" + score_valid + " samples)") + ":</div>";
sb.append("<h5>" + cmTitle);
sb.append("</h5>");
}
if (isClassifier()) {
// print AUC
if (error.validAUC != null) {
error.validAUC.toHTML(sb);
}
else if (error.trainAUC != null) {
error.trainAUC.toHTML(sb);
}
else {
if (error.validation) {
if (error.valid_confusion_matrix != null && smallenough) {
error.valid_confusion_matrix.toHTML(sb);
} else if (smallenough) sb.append("<h5>Confusion matrix on validation data is not yet computed.</h5>");
else sb.append(toolarge);
}
else if (_have_cv_results) {
if (error.valid_confusion_matrix != null && smallenough) {
error.valid_confusion_matrix.toHTML(sb);
} else if (smallenough) sb.append("<h5>Confusion matrix on " + error.num_folds + "-fold cross-validated training data is not yet computed.</h5>");
else sb.append(toolarge);
}
else {
if (error.train_confusion_matrix != null && smallenough) {
error.train_confusion_matrix.toHTML(sb);
} else if (smallenough) sb.append("<h5>Confusion matrix on training data is not yet computed.</h5>");
else sb.append(toolarge);
}
}
}
// Hit ratio
if (error.valid_hitratio != null) {
error.valid_hitratio.toHTML(sb);
} else if (error.train_hitratio != null) {
error.train_hitratio.toHTML(sb);
}
// Variable importance
if (error.variable_importances != null) {
error.variable_importances.toHTML(this, sb);
}
printCrossValidationModelsHTML(sb);
DocGen.HTML.title(sb, "Scoring history");
if (errors.length > 1) {
DocGen.HTML.paragraph(sb, "Time taken for last scoring and diagnostics: " + PrettyPrint.msecs(errors[errors.length-1].scoring_time, true));
// training
{
final long pts = fulltrain ? training_rows : score_train;
String training = "Number of training data samples for scoring: " + (fulltrain ? "all " : "") + pts;
if (pts < 1000 && training_rows >= 1000) training += " (low, scoring might be inaccurate -> consider increasing this number in the expert mode)";
if (pts > 100000 && errors[errors.length-1].scoring_time > 10000) training += " (large, scoring can be slow -> consider reducing this number in the expert mode or scoring manually)";
DocGen.HTML.paragraph(sb, training);
}
// validation
if (error.validation) {
final long ptsv = fullvalid ? validation_rows : score_valid;
String validation = "Number of validation data samples for scoring: " + (fullvalid ? "all " : "") + ptsv;
if (ptsv < 1000 && validation_rows >= 1000) validation += " (low, scoring might be inaccurate -> consider increasing this number in the expert mode)";
if (ptsv > 100000 && errors[errors.length-1].scoring_time > 10000) validation += " (large, scoring can be slow -> consider reducing this number in the expert mode or scoring manually)";
DocGen.HTML.paragraph(sb, validation);
}
if (isClassifier() && nclasses() != 2 /*binary classifier has its own conflicting D3 object (AUC)*/) {
// Plot training error
float[] err = new float[errors.length];
float[] samples = new float[errors.length];
for (int i=0; i<err.length; ++i) {
err[i] = (float)errors[i].train_err;
samples[i] = errors[i].training_samples;
}
new D3Plot(samples, err, "training samples", "classification error",
"classification error on training data").generate(sb);
// Plot validation error
if (error.validation) {
for (int i=0; i<err.length; ++i) {
err[i] = (float)errors[i].valid_err;
}
new D3Plot(samples, err, "training samples", "classification error",
"classification error on validation set").generate(sb);
}
}
// regression
else if (!isClassifier()) {
// Plot training MSE
float[] err = new float[errors.length-1];
float[] samples = new float[errors.length-1];
for (int i=0; i<err.length; ++i) {
err[i] = (float)errors[i+1].train_mse;
samples[i] = errors[i+1].training_samples;
}
new D3Plot(samples, err, "training samples", "MSE",
"regression error on training data").generate(sb);
// Plot validation MSE
if (error.validation) {
for (int i=0; i<err.length; ++i) {
err[i] = (float)errors[i+1].valid_mse;
}
new D3Plot(samples, err, "training samples", "MSE",
"regression error on validation data").generate(sb);
}
}
}
// String training = "Number of training set samples for scoring: " + error.score_training;
if (error.validation) {
// String validation = "Number of validation set samples for scoring: " + error.score_validation;
}
sb.append("<table class='table table-striped table-bordered table-condensed'>");
sb.append("<tr>");
sb.append("<th>Training Time</th>");
sb.append("<th>Training Epochs</th>");
sb.append("<th>Training Samples</th>");
if (isClassifier()) {
// sb.append("<th>Training MCE</th>");
sb.append("<th>Training Error</th>");
if (nclasses()==2) sb.append("<th>Training AUC</th>");
} else {
sb.append("<th>Training MSE</th>");
}
if (error.validation) {
if (isClassifier()) {
// sb.append("<th>Validation MCE</th>");
sb.append("<th>Validation Error</th>");
if (nclasses()==2) sb.append("<th>Validation AUC</th>");
} else {
sb.append("<th>Validation MSE</th>");
}
}
else if (error.num_folds > 0) {
if (isClassifier()) {
sb.append("<th>Cross-Validation Error</th>");
if (nclasses()==2) sb.append("<th>Cross-Validation AUC</th>");
} else {
sb.append("<th>Cross-Validation MSE</th>");
}
}
sb.append("</tr>");
for( int i = errors.length - 1; i >= 0; i-- ) {
final Errors e = errors[i];
sb.append("<tr>");
sb.append("<td>" + PrettyPrint.msecs(e.training_time_ms, true) + "</td>");
sb.append("<td>" + String.format("%g", e.epoch_counter) + "</td>");
sb.append("<td>" + String.format("%,d", e.training_samples) + "</td>");
if (isClassifier() && !get_params().autoencoder) {
sb.append("<td>" + Utils.formatPct(e.train_err) + "</td>");
if (nclasses()==2) {
if (e.trainAUC != null) sb.append("<td>" + Utils.formatPct(e.trainAUC.AUC()) + "</td>");
else sb.append("<td>" + "N/A" + "</td>");
}
} else {
sb.append("<td>" + String.format(mse_format, e.train_mse) + "</td>");
}
if(e.validation) {
if (isClassifier()) {
sb.append("<td>" + Utils.formatPct(e.valid_err) + "</td>");
if (nclasses()==2) {
if (e.validAUC != null) sb.append("<td>" + Utils.formatPct(e.validAUC.AUC()) + "</td>");
else sb.append("<td>" + "N/A" + "</td>");
}
} else {
sb.append("<td>" + String.format(mse_format, e.valid_mse) + "</td>");
}
}
else if(e.num_folds > 0) {
if (i == errors.length - 1 && _have_cv_results) {
if (isClassifier()) {
sb.append("<td>" + Utils.formatPct(e.valid_err) + "</td>");
if (nclasses() == 2) {
if (e.validAUC != null) sb.append("<td>" + Utils.formatPct(e.validAUC.AUC()) + "</td>");
else sb.append("<td>" + "N/A" + "</td>");
}
} else {
sb.append("<td>" + String.format(mse_format, e.valid_mse) + "</td>");
}
}
else {
sb.append("<td>N/A</td>");
if (nclasses() == 2) sb.append("<td>N/A</td>");
}
}
sb.append("</tr>");
}
sb.append("</table>");
return true;
}
public void toJavaHtml(StringBuilder sb) {
sb.append("<br /><br /><div class=\"pull-right\"><a href=\"#\" onclick=\'$(\"#javaModel\").toggleClass(\"hide\");\'" +
"class=\'btn btn-inverse btn-mini\'>Java Model</a></div><br /><div class=\"hide\" id=\"javaModel\">");
boolean featureAllowed = true; //isFeatureAllowed();
if (! featureAllowed) {
sb.append("<br/><div id=\'javaModelWarningBlock\' class=\"alert\" style=\"background:#eedd20;color:#636363;text-shadow:none;\">");
sb.append("<b>You have requested a premium feature and your H<sub>2</sub>O software is unlicensed.</b><br/><br/>");
sb.append("Please enter your email address below, and we will send you a trial license shortly.<br/>");
sb.append("This will also temporarily enable downloading Java models.<br/>");
sb.append("<form class=\'form-inline\'><input id=\"emailForJavaModel\" class=\"span5\" type=\"text\" placeholder=\"Email\"/> ");
sb.append("<a href=\"#\" onclick=\'processJavaModelLicense();\' class=\'btn btn-inverse\'>Send</a></form></div>");
sb.append("<div id=\"javaModelSource\" class=\"hide\"><pre style=\"overflow-y:scroll;\"><code class=\"language-java\">");
DocGen.HTML.escape(sb, toJava());
sb.append("</code></pre></div>");
}
else if( model_info().size() > 100000 ) {
String modelName = JCodeGen.toJavaId(_key.toString());
sb.append("<pre style=\"overflow-y:scroll;\"><code class=\"language-java\">");
sb.append("/* Java code is too large to display, download it directly.\n");
sb.append(" To obtain the code please invoke in your terminal:\n");
sb.append(" curl http:/").append(H2O.SELF.toString()).append("/h2o-model.jar > h2o-model.jar\n");
sb.append(" curl http:/").append(H2O.SELF.toString()).append("/2/").append(this.getClass().getSimpleName()).append("View.java?_modelKey=").append(_key).append(" > ").append(modelName).append(".java\n");
sb.append(" javac -cp h2o-model.jar -J-Xmx2g -J-XX:MaxPermSize=128m ").append(modelName).append(".java\n");
sb.append("*/");
sb.append("</code></pre>");
} else {
sb.append("<pre style=\"overflow-y:scroll;\"><code class=\"language-java\">");
DocGen.HTML.escape(sb, toJava());
sb.append("</code></pre>");
}
sb.append("</div>");
sb.append("<script type=\"text/javascript\">$(document).ready(showOrHideJavaModel);</script>");
}
@Override protected SB toJavaInit(SB sb, SB fileContextSB) {
sb = super.toJavaInit(sb, fileContextSB);
if (model_info().data_info()._nums > 0) {
JCodeGen.toStaticVar(sb, "NUMS", new double[model_info().data_info()._nums], "Workspace for storing numerical input variables.");
JCodeGen.toStaticVar(sb, "NORMMUL", model_info().data_info()._normMul, "Standardization/Normalization scaling factor for numerical variables.");
JCodeGen.toStaticVar(sb, "NORMSUB", model_info().data_info()._normSub, "Standardization/Normalization offset for numerical variables.");
}
if (model_info().data_info()._cats > 0) {
JCodeGen.toStaticVar(sb, "CATS", new int[model_info().data_info()._cats], "Workspace for storing categorical input variables.");
}
JCodeGen.toStaticVar(sb, "CATOFFSETS", model_info().data_info()._catOffsets, "Workspace for categorical offsets.");
if (model_info().data_info()._normRespMul != null) {
JCodeGen.toStaticVar(sb, "NORMRESPMUL", model_info().data_info()._normRespMul, "Standardization/Normalization scaling factor for response.");
JCodeGen.toStaticVar(sb, "NORMRESPSUB", model_info().data_info()._normRespSub, "Standardization/Normalization offset for response.");
}
if (get_params().hidden_dropout_ratios != null) {
JCodeGen.toStaticVar(sb, "HIDDEN_DROPOUT_RATIOS", get_params().hidden_dropout_ratios, "Hidden layer dropout ratios.");
}
Neurons[] neurons = DeepLearningTask.makeNeuronsForTesting(model_info());
int[] layers = new int[neurons.length];
for (int i=0;i<neurons.length;++i)
layers[i] = neurons[i].units;
JCodeGen.toStaticVar(sb, "NEURONS", layers, "Number of neurons for each layer.");
// activation storage
sb.i(1).p("// Storage for neuron activation values.").nl();
sb.i(1).p("public static final float[][] ACTIVATION = new float[][] {").nl();
for (int i=0; i<neurons.length; i++) {
String colInfoClazz = "Activation_"+i;
sb.i(2).p("/* ").p(neurons[i].getClass().getSimpleName()).p(" */ ");
sb.p(colInfoClazz).p(".VALUES");
if (i!=neurons.length-1) sb.p(',');
sb.nl();
fileContextSB.i().p("// Neuron activation values for ").p(neurons[i].getClass().getSimpleName()).p(" layer").nl();
JCodeGen.toClassWithArray(fileContextSB, null, colInfoClazz, new float[layers[i]]);
}
sb.i(1).p("};").nl();
// biases
sb.i(1).p("// Neuron bias values.").nl();
sb.i(1).p("public static final float[][] BIAS = new float[][] {").nl();
for (int i=0; i<neurons.length; i++) {
String colInfoClazz = "Bias_"+i;
sb.i(2).p("/* ").p(neurons[i].getClass().getSimpleName()).p(" */ ");
sb.p(colInfoClazz).p(".VALUES");
if (i!=neurons.length-1) sb.p(',');
sb.nl();
fileContextSB.i().p("// Neuron bias values for ").p(neurons[i].getClass().getSimpleName()).p(" layer").nl();
float[] bias = i == 0 ? null : new float[model_info().get_biases(i-1).size()];
if (i>0) {
for (int j=0; j<bias.length; ++j) bias[j] = model_info().get_biases(i-1).get(j);
}
JCodeGen.toClassWithArray(fileContextSB, null, colInfoClazz, bias);
}
sb.i(1).p("};").nl();
// weights
sb.i(1).p("// Connecting weights between neurons.").nl();
sb.i(1).p("public static final float[][] WEIGHT = new float[][] {").nl();
for (int i=0; i<neurons.length; i++) {
String colInfoClazz = "Weight_"+i;
sb.i(2).p("/* ").p(neurons[i].getClass().getSimpleName()).p(" */ ");
sb.p(colInfoClazz).p(".VALUES");
if (i!=neurons.length-1) sb.p(',');
sb.nl();
if (i > 0) {
fileContextSB.i().p("// Neuron weights connecting ").
p(neurons[i - 1].getClass().getSimpleName()).p(" and ").
p(neurons[i].getClass().getSimpleName()).
p(" layer").nl();
}
float[] weights = i == 0 ? null : new float[model_info().get_weights(i-1).rows()*model_info().get_weights(i-1).cols()];
if (i>0) {
final int rows = model_info().get_weights(i-1).rows();
final int cols = model_info().get_weights(i-1).cols();
for (int j=0; j<rows; ++j)
for (int k=0; k<cols; ++k)
weights[j*cols+k] = model_info().get_weights(i-1).get(j,k);
}
JCodeGen.toClassWithArray(fileContextSB, null, colInfoClazz, weights);
}
sb.i(1).p("};").nl();
return sb;
}
@Override protected void toJavaPredictBody( final SB bodySb, final SB classCtxSb, final SB fileCtxSb) {
SB model = new SB();
bodySb.i().p("java.util.Arrays.fill(preds,0f);").nl();
final int cats = model_info().data_info()._cats;
final int nums = model_info().data_info()._nums;
// initialize input layer
if (nums > 0) bodySb.i().p("java.util.Arrays.fill(NUMS,0f);").nl();
if (cats > 0) bodySb.i().p("java.util.Arrays.fill(CATS,0);").nl();
bodySb.i().p("int i = 0, ncats = 0;").nl();
if (cats > 0) {
bodySb.i().p("for(; i<"+cats+"; ++i) {").nl();
bodySb.i(1).p("if (!Double.isNaN(data[i])) {").nl();
bodySb.i(2).p("int c = (int) data[i];").nl();
if (model_info().data_info()._useAllFactorLevels)
bodySb.i(2).p("CATS[ncats++] = c + CATOFFSETS[i];").nl();
else
bodySb.i(2).p("if (c != 0) CATS[ncats++] = c + CATOFFSETS[i] - 1;").nl();
bodySb.i(1).p("}").nl();
bodySb.i().p("}").nl();
}
if (nums > 0) {
bodySb.i().p("final int n = data.length;").nl();
bodySb.i().p("for(; i<n; ++i) {").nl();
bodySb.i(1).p("NUMS[i" + (cats > 0 ? "-" + cats : "") + "] = Double.isNaN(data[i]) ? 0 : ");
if (model_info().data_info()._normMul != null) {
bodySb.p("(data[i] - NORMSUB[i" + (cats > 0 ? "-" + cats : "") + "])*NORMMUL[i" + (cats > 0 ? "-" + cats : "") + "];").nl();
} else {
bodySb.p("data[i];").nl();
}
bodySb.i(0).p("}").nl();
}
if (cats > 0) {
bodySb.i().p("for (i=0; i<ncats; ++i) ACTIVATION[0][CATS[i]] = 1f;").nl();
}
if (nums > 0) {
bodySb.i().p("for (i=0; i<NUMS.length; ++i) {").nl();
bodySb.i(1).p("ACTIVATION[0][CATOFFSETS[CATOFFSETS.length-1] + i] = Double.isNaN(NUMS[i]) ? 0f : (float) NUMS[i];").nl();
bodySb.i().p("}").nl();
}
boolean tanh=(get_params().activation == DeepLearning.Activation.Tanh || get_params().activation == DeepLearning.Activation.TanhWithDropout);
boolean relu=(get_params().activation == DeepLearning.Activation.Rectifier || get_params().activation == DeepLearning.Activation.RectifierWithDropout);
boolean maxout=(get_params().activation == DeepLearning.Activation.Maxout || get_params().activation == DeepLearning.Activation.MaxoutWithDropout);
// make prediction: forward propagation
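    // The generated loop below computes, for each unit r of layer i,
    //   ACTIVATION[i][r] = f( sum_c ACTIVATION[i-1][c] * WEIGHT[i][r*cols+c] + BIAS[i][r] )
    // where f is the Tanh/Rectifier nonlinearity for hidden layers (Maxout takes the maximum of the
    // incoming products instead of the sum), followed by Softmax for classification or the identity
    // for regression at the output layer.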
bodySb.i().p("for (i=1; i<ACTIVATION.length; ++i) {").nl();
bodySb.i(1).p("java.util.Arrays.fill(ACTIVATION[i],0f);").nl();
if (maxout) {
bodySb.i(1).p("float rmax = 0;").nl();
}
bodySb.i(1).p("for (int r=0; r<ACTIVATION[i].length; ++r) {").nl();
bodySb.i(2).p("final int cols = ACTIVATION[i-1].length;").nl();
if (maxout) {
bodySb.i(2).p("float cmax = Float.NEGATIVE_INFINITY;").nl();
}
bodySb.i(2).p("for (int c=0; c<cols; ++c) {").nl();
if (!maxout) {
bodySb.i(3).p("ACTIVATION[i][r] += ACTIVATION[i-1][c] * WEIGHT[i][r*cols+c];").nl();
} else {
bodySb.i(3).p("if (i<ACTIVATION.length-1) cmax = Math.max(ACTIVATION[i-1][c] * WEIGHT[i][r*cols+c], cmax);").nl();
bodySb.i(3).p("else ACTIVATION[i][r] += ACTIVATION[i-1][c] * WEIGHT[i][r*cols+c];").nl();
}
bodySb.i(2).p("}").nl();
if (maxout) {
bodySb.i(2).p("if (i<ACTIVATION.length-1) ACTIVATION[i][r] = Float.isInfinite(cmax) ? 0f : cmax;").nl();
}
bodySb.i(2).p("ACTIVATION[i][r] += BIAS[i][r];").nl();
if (maxout) {
bodySb.i(2).p("if (i<ACTIVATION.length-1) rmax = Math.max(rmax, ACTIVATION[i][r]);").nl();
}
bodySb.i(1).p("}").nl();
if (!maxout) bodySb.i(1).p("if (i<ACTIVATION.length-1) {").nl();
bodySb.i(2).p("for (int r=0; r<ACTIVATION[i].length; ++r) {").nl();
if (tanh) {
bodySb.i(3).p("ACTIVATION[i][r] = 1f - 2f / (1f + (float)Math.exp(2*ACTIVATION[i][r]));").nl();
} else if (relu) {
bodySb.i(3).p("ACTIVATION[i][r] = Math.max(0f, ACTIVATION[i][r]);").nl();
} else if (maxout) {
bodySb.i(3).p("if (rmax > 1 ) ACTIVATION[i][r] /= rmax;").nl();
}
if (get_params().hidden_dropout_ratios != null) {
if (maxout) bodySb.i(1).p("if (i<ACTIVATION.length-1) {").nl();
bodySb.i(3).p("ACTIVATION[i][r] *= HIDDEN_DROPOUT_RATIOS[i-1];").nl();
if (maxout) bodySb.i(1).p("}").nl();
}
bodySb.i(2).p("}").nl();
if (!maxout) bodySb.i(1).p("}").nl();
if (isClassifier()) {
bodySb.i(1).p("if (i == ACTIVATION.length-1) {").nl();
// softmax
bodySb.i(2).p("float max = ACTIVATION[i][0];").nl();
bodySb.i(2).p("for (int r=1; r<ACTIVATION[i].length; r++) {").nl();
bodySb.i(3).p("if (ACTIVATION[i][r]>max) max = ACTIVATION[i][r];").nl();
bodySb.i(2).p("}").nl();
bodySb.i(2).p("float scale = 0f;").nl();
bodySb.i(2).p("for (int r=0; r<ACTIVATION[i].length; r++) {").nl();
bodySb.i(3).p("ACTIVATION[i][r] = (float) Math.exp(ACTIVATION[i][r] - max);").nl();
bodySb.i(3).p("scale += ACTIVATION[i][r];").nl();
bodySb.i(2).p("}").nl();
bodySb.i(2).p("for (int r=0; r<ACTIVATION[i].length; r++) {").nl();
bodySb.i(3).p("if (Float.isNaN(ACTIVATION[i][r]))").nl();
bodySb.i(4).p("throw new RuntimeException(\"Numerical instability, predicted NaN.\");").nl();
bodySb.i(3).p("ACTIVATION[i][r] /= scale;").nl();
bodySb.i(3).p("preds[r+1] = ACTIVATION[i][r];").nl();
bodySb.i(2).p("}").nl();
bodySb.i(1).p("}").nl();
bodySb.i().p("}").nl();
} else {
bodySb.i().p("}").nl();
// regression: set preds[1], FillPreds0 will put it into preds[0]
if (model_info().data_info()._normRespMul != null) {
bodySb.i().p("preds[1] = (float) (ACTIVATION[ACTIVATION.length-1][0] / NORMRESPMUL[0] + NORMRESPSUB[0]);").nl();
}
else {
bodySb.i().p("preds[1] = ACTIVATION[ACTIVATION.length-1][0];").nl();
}
bodySb.i().p("if (Float.isNaN(preds[1])) throw new RuntimeException(\"Predicted regression target NaN!\");").nl();
}
fileCtxSb.p(model);
toJavaUnifyPreds(bodySb);
toJavaFillPreds0(bodySb);
}
// helper to push this model to another key (for keeping good models)
private void putMeAsBestModel(Key bestModelKey) {
final Key job = null;
final DeepLearningModel cp = this;
DeepLearningModel bestModel = new DeepLearningModel(cp, bestModelKey, job, model_info().data_info());
bestModel.get_params().state = Job.JobState.DONE;
bestModel.get_params().job_key = get_params().self();
bestModel.delete_and_lock(job);
bestModel.unlock(job);
assert (UKV.get(bestModelKey) != null);
assert (bestModel.compareTo(this) <= 0);
assert (((DeepLearningModel) UKV.get(bestModelKey)).error() == _bestError);
}
public void delete_best_model( ) {
if (actual_best_model_key != null && actual_best_model_key != _key) DKV.remove(actual_best_model_key);
}
public void delete_xval_models( ) {
if (get_params().xval_models != null) {
for (Key k : get_params().xval_models) {
UKV.<DeepLearningModel>get(k).delete_best_model();
UKV.<DeepLearningModel>get(k).delete();
}
}
}
transient private final String unstable_msg = "Job was aborted due to observed numerical instability (exponential growth)."
+ "\nTry a different initial weight distribution or a bounded activation function, add"
+ "\nregularization (L1, L2 or max_w2), use a smaller learning rate, or anneal it faster.";
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/deeplearning/DeepLearningTask.java
|
package hex.deeplearning;
import hex.FrameTask;
import water.H2O;
import water.H2O.H2OCountedCompleter;
import water.Job;
import water.util.Log;
import java.util.Arrays;
import java.util.Random;
public class DeepLearningTask extends FrameTask<DeepLearningTask> {
final private boolean _training;
private hex.deeplearning.DeepLearningModel.DeepLearningModelInfo _input;
hex.deeplearning.DeepLearningModel.DeepLearningModelInfo _output;
final public hex.deeplearning.DeepLearningModel.DeepLearningModelInfo model_info() { return _output; }
transient Neurons[] _neurons;
int _chunk_node_count = 1;
@Override protected boolean skipMissing() {
return _output.get_params().missing_values_handling == DeepLearning.MissingValuesHandling.Skip;
}
public DeepLearningTask(hex.deeplearning.DeepLearningModel.DeepLearningModelInfo input, float fraction){this(input,fraction,null);}
private DeepLearningTask(hex.deeplearning.DeepLearningModel.DeepLearningModelInfo input, float fraction, H2OCountedCompleter cmp){
super(input.get_params().self(),input.data_info(),cmp);
_training=true;
_input=input;
_useFraction=fraction;
_shuffle = _input.get_params().shuffle_training_data;
assert(_output == null);
}
// transfer ownership from input to output (which will be worked on)
@Override protected void setupLocal(){
_output = _input; //faster, good enough in this case (since the input was freshly deserialized by the Weaver)
_input = null;
_output.set_processed_local(0l);
}
// create local workspace (neurons)
// and link them to shared weights
@Override protected void chunkInit(){
_neurons = makeNeuronsForTraining(_output);
}
@Override public final void processRow(long seed, final double [] nums, final int numcats, final int [] cats, double [] responses){
if(_output.get_params().self() != null && !Job.isRunning(_output.get_params().self())) throw new Job.JobCancelledException();
if (model_info().get_params().reproducible) {
seed += model_info().get_processed_global(); //avoid periodicity
} else {
seed = new Random().nextLong();
}
((Neurons.Input)_neurons[0]).setInput(seed, nums, numcats, cats);
step(seed, _neurons, _output, _training, responses);
}
@Override protected void chunkDone(long n) {
if (_training) _output.add_processed_local(n);
}
@Override public void reduce(DeepLearningTask other){
if (other._output.get_processed_local() > 0 //other NNTask was active (its model_info should be used for averaging)
&& other._output != _output) //other NNTask worked on a different model_info
{
// avoid adding remote model info to unprocessed local data, still random
// (this can happen if we have no chunks on the master node)
if (_output.get_processed_local() == 0) {
_output = other._output;
_chunk_node_count = other._chunk_node_count;
} else {
_output.add(other._output);
_chunk_node_count += other._chunk_node_count;
}
}
if (other._output.unstable()) _output.set_unstable();
}
static long _lastWarn;
static long _warnCount;
@Override protected void postGlobal(){
if (H2O.CLOUD.size() > 1 && !_output.get_params().replicate_training_data) {
long now = System.currentTimeMillis();
if (_chunk_node_count < H2O.CLOUD.size() && (now - _lastWarn > 5000) && _warnCount < 3) {
// Log.info("Synchronizing across " + _chunk_node_count + " H2O node(s).");
Log.warn(H2O.CLOUD.size() - _chunk_node_count + " node(s) (out of " + H2O.CLOUD.size()
+ ") are not contributing to model updates. Consider setting replicate_training_data to true or using a larger training dataset (or fewer H2O nodes).");
_lastWarn = now;
_warnCount++;
}
}
if (!_output.get_params().replicate_training_data || H2O.CLOUD.size() == 1) {
_output.div(_chunk_node_count);
_output.add_processed_global(_output.get_processed_local());
_output.set_processed_local(0l);
}
assert(_input == null);
}
public static Neurons[] makeNeuronsForTraining(final DeepLearningModel.DeepLearningModelInfo minfo) {
return makeNeurons(minfo, true);
}
public static Neurons[] makeNeuronsForTesting(final DeepLearningModel.DeepLearningModelInfo minfo) {
return makeNeurons(minfo, false);
}
// Helper
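  // Layout produced: neurons[0] = Input, neurons[1..hidden.length] = hidden layers (with the
  // activation/dropout variant chosen by params.activation); the last entry is Softmax (classification),
  // Linear (regression), or, for an autoencoder, an extra reconstruction layer of size units[0]
  // using the non-dropout activation.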
private static Neurons[] makeNeurons(final DeepLearningModel.DeepLearningModelInfo minfo, boolean training) {
DataInfo dinfo = minfo.data_info();
final DeepLearning params = minfo.get_params();
final int[] h = params.hidden;
Neurons[] neurons = new Neurons[h.length + 2]; // input + hidden + output
// input
neurons[0] = new Neurons.Input(minfo.units[0], dinfo);
// hidden
for( int i = 0; i < h.length + (params.autoencoder ? 1 : 0); i++ ) {
int n = params.autoencoder && i == h.length ? minfo.units[0] : h[i];
switch( params.activation ) {
case Tanh:
neurons[i+1] = new Neurons.Tanh(n);
break;
case TanhWithDropout:
neurons[i+1] = params.autoencoder && i == h.length ? new Neurons.Tanh(n) : new Neurons.TanhDropout(n);
break;
case Rectifier:
neurons[i+1] = new Neurons.Rectifier(n);
break;
case RectifierWithDropout:
neurons[i+1] = params.autoencoder && i == h.length ? new Neurons.Rectifier(n) : new Neurons.RectifierDropout(n);
break;
case Maxout:
neurons[i+1] = new Neurons.Maxout(n);
break;
case MaxoutWithDropout:
neurons[i+1] = params.autoencoder && i == h.length ? new Neurons.Maxout(n) : new Neurons.MaxoutDropout(n);
break;
}
}
if(!params.autoencoder) {
if (params.classification)
neurons[neurons.length - 1] = new Neurons.Softmax(minfo.units[minfo.units.length - 1]);
else
neurons[neurons.length - 1] = new Neurons.Linear(1);
}
//copy parameters from NN, and set previous/input layer links
for( int i = 0; i < neurons.length; i++ ) {
neurons[i].init(neurons, i, params, minfo, training);
neurons[i]._input = neurons[0];
}
// // debugging
// for (Neurons n : neurons) Log.info(n.toString());
return neurons;
}
// forward/backward propagation
// assumption: layer 0 has _a filled with (horizontalized categoricals) double values
public static void step(long seed, Neurons[] neurons, DeepLearningModel.DeepLearningModelInfo minfo, boolean training, double[] responses) {
try {
for (int i=1; i<neurons.length-1; ++i) {
neurons[i].fprop(seed, training);
}
if (minfo.get_params().autoencoder) {
neurons[neurons.length - 1].fprop(seed, training);
if (training) {
for (int i=neurons.length-1; i>0; --i) {
neurons[i].bprop();
}
}
} else {
if (minfo.get_params().classification) {
((Neurons.Softmax) neurons[neurons.length - 1]).fprop();
if (training) {
for (int i = 1; i < neurons.length - 1; i++)
Arrays.fill(neurons[i]._e.raw(), 0);
int target_label;
if (Double.isNaN(responses[0])) { //missing response
target_label = Neurons.missing_int_value;
} else {
assert ((double) (int) responses[0] == responses[0]); //classification -> integer labels expected
target_label = (int) responses[0];
}
((Neurons.Softmax) neurons[neurons.length - 1]).bprop(target_label);
}
} else {
((Neurons.Linear) neurons[neurons.length - 1]).fprop();
if (training) {
for (int i = 1; i < neurons.length - 1; i++)
Arrays.fill(neurons[i]._e.raw(), 0);
float target_value;
if (Double.isNaN(responses[0])) { //missing response
target_value = Neurons.missing_real_value;
} else {
target_value = (float) responses[0];
}
((Neurons.Linear) neurons[neurons.length - 1]).bprop(target_value);
}
}
if (training) {
for (int i=neurons.length-2; i>0; --i)
neurons[i].bprop();
}
}
}
catch(RuntimeException ex) {
Log.warn(ex.getMessage());
minfo.set_unstable();
throw new Job.JobCancelledException("Canceling job due to numerical instability.");
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/deeplearning/DeepLearningTask2.java
|
package hex.deeplearning;
import water.DRemoteTask;
import water.fvec.Frame;
/**
* DRemoteTask-based Deep Learning.
* Every node has access to all the training data, which leads to optimal CPU utilization if the data fits on every node.
*/
public class DeepLearningTask2 extends DRemoteTask<DeepLearningTask2> {
/**
* Construct a DeepLearningTask2 where every node trains on the entire training dataset
* @param train Frame containing training data
* @param model_info Initial DeepLearningModelInfo (weights + biases)
* @param sync_fraction Fraction of the training data to use for one SGD iteration
*/
public DeepLearningTask2(Frame train, DeepLearningModel.DeepLearningModelInfo model_info, float sync_fraction) {
assert(sync_fraction > 0);
_fr = train;
_model_info = model_info;
_sync_fraction = sync_fraction;
}
/**
* Returns the aggregated DeepLearning model that was trained by all nodes (over all the training data)
* @return model_info object
*/
public DeepLearningModel.DeepLearningModelInfo model_info() {
return _res.model_info();
}
final private Frame _fr;
final private DeepLearningModel.DeepLearningModelInfo _model_info;
final private float _sync_fraction;
private DeepLearningTask _res;
/**
* Do the local computation: Perform one DeepLearningTask (with run_local=true) iteration.
* Pass over all the data (will be replicated in dfork() here), and use _sync_fraction random rows.
* This calls DeepLearningTask's reduce() between worker threads that update the same local model_info via Hogwild!
* Once the computation is done, reduce() will be called
*/
@Override
public void lcompute() {
_res = new DeepLearningTask(_model_info, _sync_fraction);
_res.setCompleter(this);
_res.asyncExec(0, _fr, true /*run_local*/);
}
/**
* Reduce between worker nodes, with network traffic (if more than one node is involved).
* After all reduce() calls are done, postGlobal() will be called.
* @param drt the other task whose model_info is added into this one
*/
@Override
public void reduce(DeepLearningTask2 drt) {
if (_res == null) _res = drt._res;
else {
_res._chunk_node_count += drt._res._chunk_node_count;
_res.model_info().add(drt._res.model_info()); //add models, but don't average yet
}
assert(_res.model_info().get_params().replicate_training_data);
}
/**
* Finish up the work after all nodes have reduced their models via the above reduce() method.
* All we do is average the models and add to the global training sample counter.
* After this returns, model_info() can be queried for the updated model.
*/
@Override
protected void postGlobal() {
assert(_res.model_info().get_params().replicate_training_data);
super.postGlobal();
_res.model_info().div(_res._chunk_node_count); //model averaging
_res.model_info().add_processed_global(_res.model_info().get_processed_local()); //switch from local counters to global counters
_res.model_info().set_processed_local(0l);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/deeplearning/Dropout.java
|
package hex.deeplearning;
import java.util.Arrays;
import java.util.Random;
/**
* Helper class for dropout training of Neural Nets
*/
public class Dropout {
private transient Random _rand;
private transient byte[] _bits;
private transient double _rate;
public double rate() { return _rate; }
public byte[] bits() { return _bits; }
public Dropout() {
_rate = 0.5;
}
@Override
public String toString() {
String s = "Dropout: " + super.toString();
s += "\nRandom: " + _rand.toString();
s += "\nDropout rate: " + _rate;
s += "\nbits: ";
for (int i=0; i< _bits.length*8; ++i) s += unit_active(i) ? "1":"0";
s += "\n";
return s;
}
Dropout(int units) {
_bits = new byte[(units+7)/8];
_rand = new Random(0);
_rate = 0.5;
}
Dropout(int units, double rate) {
this(units);
_rate = rate;
}
public void randomlySparsifyActivation(Neurons.Vector a, long seed) {
if (a instanceof Neurons.DenseVector)
randomlySparsifyActivation((Neurons.DenseVector) a, seed);
else if (a instanceof Neurons.SparseVector)
randomlySparsifyActivation((Neurons.SparseVector)a, seed);
else throw new UnsupportedOperationException("randomlySparsifyActivation not implemented for this type: " + a.getClass().getSimpleName());
}
// for input layer
private void randomlySparsifyActivation(Neurons.DenseVector a, long seed) {
if (_rate == 0) return;
setSeed(seed);
for( int i = 0; i < a.size(); i++ )
if (_rand.nextFloat() < _rate) a.set(i, 0);
}
private void randomlySparsifyActivation(Neurons.SparseVector a, long seed) {
if (_rate == 0) return;
setSeed(seed);
for (Neurons.SparseVector.Iterator it=a.begin(); !it.equals(a.end()); it.next())
if (_rand.nextFloat() < _rate) it.setValue(0f);
}
// for hidden layers
public void fillBytes(long seed) {
setSeed(seed);
if (_rate == 0.5) _rand.nextBytes(_bits);
else {
Arrays.fill(_bits, (byte)0);
for (int i=0;i<_bits.length*8;++i)
if (_rand.nextFloat() > _rate) _bits[i / 8] |= 1 << (i % 8);
}
}
public boolean unit_active(int o) {
return (_bits[o / 8] & (1 << (o % 8))) != 0;
}
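// Illustrative usage sketch (not part of the original source): for a hidden dropout layer,
// fillBytes(seed) draws a fresh bit mask per training row and unit_active(i) queries it.
// With rate 0.3, each bit is set with probability 0.7, i.e. roughly 70% of units stay active:
//   Dropout d = new Dropout(200, 0.3);   // hypothetical 200-unit layer
//   d.fillBytes(seed);                   // new mask for this row
//   for (int u = 0; u < 200; ++u)
//     if (d.unit_active(u)) { /* unit u participates in fprop/bprop */ }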
private void setSeed(long seed) {
if ((seed >>> 32) < 0x0000ffffL) seed |= 0x5b93000000000000L;
if (((seed << 32) >>> 32) < 0x0000ffffL) seed |= 0xdb910000L;
_rand.setSeed(seed);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/deeplearning/Neurons.java
|
package hex.deeplearning;
import hex.FrameTask;
import hex.deeplearning.DeepLearning.Loss;
import org.apache.hadoop.util.hash.Hash;
import org.apache.hadoop.util.hash.MurmurHash;
import water.Iced;
import water.MemoryManager;
import water.api.Request.API;
import water.util.Utils;
import java.nio.ByteBuffer;
import java.util.*;
/**
* This class implements the concept of a Neuron layer in a Neural Network
* During training, every MRTask2 F/J thread is expected to create these neurons for every map call (Cheap to make).
* These Neurons are NOT sent over the wire.
* The weights connecting the neurons are in a separate class (DeepLearningModel.DeepLearningModelInfo), and will be shared per node.
*/
public abstract class Neurons {
@API(help = "Number of neurons")
protected int units;
/**
* Constructor of a Neuron Layer
* @param units How many neurons are in this layer?
*/
Neurons(int units) {
this.units = units;
}
/**
* Print the status of this neuron layer
* @return populated String
*/
@Override
public String toString() {
String s = this.getClass().getSimpleName();
s += "\nNumber of Neurons: " + units;
s += "\nParameters:\n" + params.toString();
if (_dropout != null) s += "\nDropout:\n" + _dropout.toString();
return s;
}
/**
* Parameters (deep-cloned() from the user input, can be modified here, e.g. learning rate decay)
*/
protected transient DeepLearning params;
protected transient int _index; //which hidden layer it is
/**
* Layer state (one per neuron): activity, error
*/
public transient Vector _a; //can be sparse for input layer
public transient DenseVector _e;
/**
* References for feed-forward connectivity
*/
public Neurons _previous;
public Neurons _input;
DeepLearningModel.DeepLearningModelInfo _minfo; //reference to shared model info
public Matrix _w;
public DenseVector _b;
/**
* References for momentum training
*/
Matrix _wm;
DenseVector _bm;
/**
* References for ADADELTA
*/
Matrix _ada_dx_g;
DenseVector _bias_ada_dx_g;
/**
* For Dropout training
*/
protected Dropout _dropout;
/**
* Helper to shortcut bprop
*/
private boolean _shortcut = false;
public DenseVector _avg_a;
public static final int missing_int_value = Integer.MAX_VALUE; //encode missing label
public static final Float missing_real_value = Float.NaN; //encode missing regression target
/**
* Helper to check sanity of Neuron layers
* @param training whether training or testing is done
*/
void sanityCheck(boolean training) {
if (this instanceof Input) {
assert(_previous == null);
assert (!training || _dropout != null);
} else {
assert(_previous != null);
if (_minfo.has_momenta()) {
assert(_wm != null);
assert(_bm != null);
assert(_ada_dx_g == null);
}
if (_minfo.adaDelta()) {
if (params.rho == 0) throw new IllegalArgumentException("rho must be > 0 if epsilon is >0.");
if (params.epsilon == 0) throw new IllegalArgumentException("epsilon must be > 0 if rho is >0.");
assert(_minfo.adaDelta());
assert(_bias_ada_dx_g != null);
assert(_wm == null);
assert(_bm == null);
}
if (this instanceof MaxoutDropout || this instanceof TanhDropout || this instanceof RectifierDropout) {
assert (!training || _dropout != null);
}
}
}
/**
* Initialization of the parameters and connectivity of a Neuron layer
* @param neurons Array of all neuron layers, to establish feed-forward connectivity
* @param index Which layer am I?
* @param p User-given parameters (Job parental object hierarchy is not used)
* @param minfo Model information (weights/biases and their momenta)
* @param training Whether training is done or just testing (no need for dropout)
*/
public final void init(Neurons[] neurons, int index, DeepLearning p, final DeepLearningModel.DeepLearningModelInfo minfo, boolean training) {
_index = index-1;
params = (DeepLearning)p.clone();
params.rate *= Math.pow(params.rate_decay, index-1);
_a = new DenseVector(units);
if (!(this instanceof Output) && !(this instanceof Input)) {
_e = new DenseVector(units);
}
if (training && (this instanceof MaxoutDropout || this instanceof TanhDropout
|| this instanceof RectifierDropout || this instanceof Input) ) {
_dropout = this instanceof Input ? new Dropout(units, params.input_dropout_ratio) : new Dropout(units, params.hidden_dropout_ratios[_index]);
}
if (!(this instanceof Input)) {
_previous = neurons[_index]; //incoming neurons
_minfo = minfo;
_w = minfo.get_weights(_index); //incoming weights
_b = minfo.get_biases(_index); //bias for this layer (starting at hidden layer)
if(params.autoencoder && params.sparsity_beta > 0 && _index < params.hidden.length) {
_avg_a = minfo.get_avg_activations(_index);
}
if (minfo.has_momenta()) {
_wm = minfo.get_weights_momenta(_index); //incoming weights
_bm = minfo.get_biases_momenta(_index); //bias for this layer (starting at hidden layer)
}
if (minfo.adaDelta()) {
_ada_dx_g = minfo.get_ada_dx_g(_index);
_bias_ada_dx_g = minfo.get_biases_ada_dx_g(_index);
}
_shortcut = (params.fast_mode || (
// not doing fast mode, but also don't have anything else to update (neither momentum nor ADADELTA history), and no L1/L2
!params.adaptive_rate && !_minfo.has_momenta() && params.l1 == 0.0 && params.l2 == 0.0));
}
sanityCheck(training);
}
/**
* Forward propagation
* @param seed For seeding the RNG inside (for dropout)
* @param training Whether training is done or just testing (no need for dropout)
*/
protected abstract void fprop(long seed, boolean training);
/**
* Back propagation
*/
protected abstract void bprop();
void bprop_sparse(float r, float m) {
SparseVector prev_a = (SparseVector) _previous._a;
int start = prev_a.begin()._idx;
int end = prev_a.end()._idx;
for (int it = start; it < end; ++it) {
final int col = prev_a._indices[it];
final float previous_a = prev_a._values[it];
bprop_col(col, previous_a, r, m);
}
final int rows = _a.size();
final float max_w2 = params.max_w2;
for (int row = 0; row < rows; row++) {
if (max_w2 != Float.POSITIVE_INFINITY)
rescale_weights(_w, row, max_w2);
}
}
/**
* Backpropagation: w -= rate * dE/dw, where dE/dw = dE/dy * dy/dnet * dnet/dw
* This method adds the dnet/dw = activation term per unit
* @param row row index (update weights feeding to this neuron)
* @param partial_grad partial derivative dE/dnet = dE/dy * dy/dnet
* @param rate learning rate
* @param momentum momentum factor (needed only if ADADELTA isn't used)
*/
final void bprop(final int row, final float partial_grad, final float rate, final float momentum) {
// only correct weights if the gradient is large enough
if (_shortcut && partial_grad == 0f) return;
if (_w instanceof DenseRowMatrix && _previous._a instanceof DenseVector)
bprop_dense_row_dense(
(DenseRowMatrix) _w, (DenseRowMatrix) _wm, (DenseRowMatrix) _ada_dx_g,
(DenseVector) _previous._a, _previous._e, _b, _bm, row, partial_grad, rate, momentum);
else if (_w instanceof DenseRowMatrix && _previous._a instanceof SparseVector)
bprop_dense_row_sparse(
(DenseRowMatrix)_w, (DenseRowMatrix)_wm, (DenseRowMatrix)_ada_dx_g,
(SparseVector)_previous._a, _previous._e, _b, _bm, row, partial_grad, rate, momentum);
else
throw new UnsupportedOperationException("bprop for types not yet implemented.");
}
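// Minimal sketch of the plain-SGD branch dispatched above (illustrative only, ignoring
// momentum/Nesterov and ADADELTA): per incoming connection, the update is
//   grad    = partial_grad * previous_a - Math.signum(weight) * l1 - weight * l2;
//   weight += rate * grad;   // w -= rate * dE/dw, with the sign already folded into grad
// i.e. the chain rule dE/dw = (dE/dy * dy/dnet) * dnet/dw with dnet/dw = previous_a,
// plus the L1/L2 regularization terms.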
final void bprop_col(final int col, final float previous_a, final float rate, final float momentum) {
if (_w instanceof DenseColMatrix && _previous._a instanceof SparseVector)
bprop_dense_col_sparse(
(DenseColMatrix)_w, (DenseColMatrix)_wm, (DenseColMatrix)_ada_dx_g,
(SparseVector)_previous._a, _previous._e, _b, _bm, col, previous_a, rate, momentum);
else
throw new UnsupportedOperationException("bprop_col for types not yet implemented.");
}
/**
* Specialization of backpropagation for DenseRowMatrices and DenseVectors
* @param _w weight matrix
* @param _wm weight momentum matrix
* @param adaxg ADADELTA matrix (2 floats per weight)
* @param prev_a activation of previous layer
* @param prev_e error of previous layer
* @param _b bias vector
* @param _bm bias momentum vector
* @param row index of the neuron for which we back-propagate
* @param partial_grad partial derivative dE/dnet = dE/dy * dy/dnet
* @param rate learning rate
* @param momentum momentum factor (needed only if ADADELTA isn't used)
*/
private void bprop_dense_row_dense(
final DenseRowMatrix _w, final DenseRowMatrix _wm, final DenseRowMatrix adaxg,
final DenseVector prev_a, final DenseVector prev_e, final DenseVector _b, final DenseVector _bm,
final int row, final float partial_grad, float rate, final float momentum)
{
final float rho = (float)params.rho;
final float eps = (float)params.epsilon;
final float l1 = (float)params.l1;
final float l2 = (float)params.l2;
final float max_w2 = params.max_w2;
final boolean have_momenta = _minfo.has_momenta();
final boolean have_ada = _minfo.adaDelta();
final boolean nesterov = params.nesterov_accelerated_gradient;
final boolean update_prev = prev_e != null;
final boolean fast_mode = params.fast_mode;
final int cols = prev_a.size();
final int idx = row * cols;
float avg_grad2 = 0;
for( int col = 0; col < cols; col++ ) {
final float weight = _w.get(row,col);
if( update_prev ) prev_e.add(col, partial_grad * weight); // propagate the error dE/dnet to the previous layer, via connecting weights
final float previous_a = prev_a.get(col);
if (fast_mode && previous_a == 0) continue;
//this is the actual gradient dE/dw
final float grad = partial_grad * previous_a - Math.signum(weight) * l1 - weight * l2;
final int w = idx + col;
if (have_ada) {
assert(!have_momenta);
final float grad2 = grad*grad;
avg_grad2 += grad2;
float brate = computeAdaDeltaRateForWeight(grad, w, adaxg, rho, eps);
_w.raw()[w] += brate * grad;
} else {
if (!nesterov) {
final float delta = rate * grad;
_w.raw()[w] += delta;
if( have_momenta ) {
_w.raw()[w] += momentum * _wm.raw()[w];
_wm.raw()[w] = delta;
}
} else {
float tmp = grad;
if( have_momenta ) {
_wm.raw()[w] *= momentum;
_wm.raw()[w] += tmp;
tmp = _wm.raw()[w];
}
_w.raw()[w] += rate * tmp;
}
}
}
if (max_w2 != Float.POSITIVE_INFINITY)
rescale_weights(_w, row, max_w2);
if (have_ada) avg_grad2 /= cols;
update_bias(_b, _bm, row, partial_grad, avg_grad2, rate, momentum);
}
/**
* Specialization of backpropagation for DenseColMatrices and SparseVector for previous layer's activation and DenseVector for everything else
* @param w Weight matrix
* @param wm Momentum matrix
* @param adaxg ADADELTA matrix (2 floats per weight)
* @param prev_a sparse activation of previous layer
* @param prev_e error of previous layer
* @param b bias
* @param bm bias momentum
* @param rate learning rate
* @param momentum momentum factor (needed only if ADADELTA isn't used)
*/
private void bprop_dense_col_sparse(
final DenseColMatrix w, final DenseColMatrix wm, final DenseColMatrix adaxg,
final SparseVector prev_a, final DenseVector prev_e, final DenseVector b, final DenseVector bm,
final int col, final float previous_a, float rate, final float momentum)
{
final float rho = (float)params.rho;
final float eps = (float)params.epsilon;
final float l1 = (float)params.l1;
final float l2 = (float)params.l2;
final boolean have_momenta = _minfo.has_momenta();
final boolean have_ada = _minfo.adaDelta();
final boolean nesterov = params.nesterov_accelerated_gradient;
final boolean update_prev = prev_e != null;
final int cols = prev_a.size();
final int rows = _a.size();
for (int row = 0; row < rows; row++) {
final float partial_grad = _e.get(row) * (1f - _a.get(row) * _a.get(row));
final float weight = w.get(row,col);
if( update_prev ) prev_e.add(col, partial_grad * weight); // propagate the error dE/dnet to the previous layer, via connecting weights
assert (previous_a != 0); //only iterate over non-zeros!
if (_shortcut && partial_grad == 0f) continue;
//this is the actual gradient dE/dw
final float grad = partial_grad * previous_a - Math.signum(weight) * l1 - weight * l2;
if (have_ada) {
assert(!have_momenta);
float brate = computeAdaDeltaRateForWeight(grad, row, col, adaxg, rho, eps);
w.add(row,col, brate * grad);
} else {
if (!nesterov) {
final float delta = rate * grad;
w.add(row, col, delta);
// Log.info("for row = " + row + ", col = " + col + ", partial_grad = " + partial_grad + ", grad = " + grad);
if( have_momenta ) {
w.add(row, col, momentum * wm.get(row, col));
wm.set(row, col, delta);
}
} else {
float tmp = grad;
if( have_momenta ) {
float val = wm.get(row, col);
val *= momentum;
val += tmp;
tmp = val;
wm.set(row, col, val);
}
w.add(row, col, rate * tmp);
}
}
//this is called cols times, so we divide the (repeated) contribution by 1/cols
update_bias(b, bm, row, partial_grad/cols, grad*grad/cols, rate, momentum);
}
}
/**
* Specialization of backpropagation for DenseRowMatrices and SparseVector for previous layer's activation and DenseVector for everything else
* @param _w weight matrix
* @param _wm weight momentum matrix
* @param adaxg ADADELTA matrix (2 floats per weight)
* @param prev_a sparse activation of previous layer
* @param prev_e error of previous layer
* @param _b bias vector
* @param _bm bias momentum vector
* @param row index of the neuron for which we back-propagate
* @param partial_grad partial derivative dE/dnet = dE/dy * dy/dnet
* @param rate learning rate
* @param momentum momentum factor (needed only if ADADELTA isn't used)
*/
private void bprop_dense_row_sparse(
final DenseRowMatrix _w, final DenseRowMatrix _wm, final DenseRowMatrix adaxg,
final SparseVector prev_a, final DenseVector prev_e, final DenseVector _b, final DenseVector _bm,
final int row, final float partial_grad, float rate, final float momentum)
{
final float rho = (float)params.rho;
final float eps = (float)params.epsilon;
final float l1 = (float)params.l1;
final float l2 = (float)params.l2;
final float max_w2 = params.max_w2;
final boolean have_momenta = _minfo.has_momenta();
final boolean have_ada = _minfo.adaDelta();
final boolean nesterov = params.nesterov_accelerated_gradient;
final boolean update_prev = prev_e != null;
final int cols = prev_a.size();
final int idx = row * cols;
float avg_grad2 = 0;
int start = prev_a.begin()._idx;
int end = prev_a.end()._idx;
for (int it = start; it < end; ++it) {
final int col = prev_a._indices[it];
final float weight = _w.get(row,col);
if( update_prev ) prev_e.add(col, partial_grad * weight); // propagate the error dE/dnet to the previous layer, via connecting weights
final float previous_a = prev_a._values[it];
assert (previous_a != 0); //only iterate over non-zeros!
//this is the actual gradient dE/dw
final float grad = partial_grad * previous_a - Math.signum(weight) * l1 - weight * l2;
final int w = idx + col;
if (have_ada) {
assert(!have_momenta);
final float grad2 = grad*grad;
avg_grad2 += grad2;
float brate = computeAdaDeltaRateForWeight(grad, w, adaxg, rho, eps);
_w.raw()[w] += brate * grad;
} else {
if (!nesterov) {
final float delta = rate * grad;
_w.raw()[w] += delta;
if( have_momenta ) {
_w.raw()[w] += momentum * _wm.raw()[w];
_wm.raw()[w] = delta;
}
} else {
float tmp = grad;
if( have_momenta ) {
_wm.raw()[w] *= momentum;
_wm.raw()[w] += tmp;
tmp = _wm.raw()[w];
}
_w.raw()[w] += rate * tmp;
}
}
}
if (max_w2 != Float.POSITIVE_INFINITY)
rescale_weights(_w, row, max_w2);
if (have_ada) avg_grad2 /= prev_a.nnz();
update_bias(_b, _bm, row, partial_grad, avg_grad2, rate, momentum);
}
/**
* Helper to scale down a neuron's incoming weights if their squared sum exceeds max_w2 (rescaling is only done when the constraint is actually violated, to avoid doing it too often)
* C.f. Improving neural networks by preventing co-adaptation of feature detectors
* @param row index of the neuron for which to scale the weights
*/
private static void rescale_weights(final Matrix w, final int row, final float max_w2) {
final int cols = w.cols();
if (w instanceof DenseRowMatrix) {
rescale_weights((DenseRowMatrix)w, row, max_w2);
} else if (w instanceof DenseColMatrix) {
float r2 = 0;
for (int col=0; col<cols;++col)
r2 += w.get(row,col)*w.get(row,col);
if( r2 > max_w2) {
final float scale = Utils.approxSqrt(max_w2 / r2);
for( int col=0; col < cols; col++ ) w.set(row, col, w.get(row,col) * scale);
}
}
else throw new UnsupportedOperationException("rescale weights for " + w.getClass().getSimpleName() + " not yet implemented.");
}
// Specialization for DenseRowMatrix
private static void rescale_weights(final DenseRowMatrix w, final int row, final float max_w2) {
final int cols = w.cols();
final int idx = row * cols;
float r2 = Utils.sumSquares(w.raw(), idx, idx+cols);
// float r2 = Utils.approxSumSquares(w.raw(), idx, idx + cols);
if( r2 > max_w2) {
final float scale = Utils.approxSqrt(max_w2 / r2);
for( int c = 0; c < cols; c++ ) w.raw()[idx + c] *= scale;
}
}
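// Worked example (illustrative only): for a row with incoming weights {3, 4} and
// max_w2 = 4, r2 = 3*3 + 4*4 = 25 > 4, so scale = sqrt(4/25) = 0.4 and the weights
// become {1.2, 1.6}, whose squared sum equals max_w2 again (up to the approxSqrt error).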
/**
* Helper to compute the reconstruction error for auto-encoders (part of the gradient computation)
* @param row neuron index
* @return difference between the output (auto-encoder output layer activation) and the target (input layer activation)
*/
protected float autoEncoderError(int row) {
assert (_minfo.get_params().autoencoder && _index == _minfo.get_params().hidden.length);
assert (params.loss == Loss.MeanSquare);
return (_input._a.get(row) - _a.get(row));
}
/**
* Compute learning rate with AdaDelta
* http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf
* @param grad gradient
* @param row which neuron is to be updated
* @param col weight from which incoming neuron
* @param ada_dx_g Matrix holding helper values (2 floats per weight)
* @param rho hyper-parameter #1
* @param eps hyper-parameter #2
* @return learning rate
*/
private static float computeAdaDeltaRateForWeight(final float grad, final int row, final int col,
final DenseColMatrix ada_dx_g,
final float rho, final float eps) {
ada_dx_g.set(2*row+1, col, rho * ada_dx_g.get(2*row+1, col) + (1f - rho) * grad * grad);
final float rate = Utils.approxSqrt((ada_dx_g.get(2*row, col) + eps)/(ada_dx_g.get(2*row+1, col) + eps));
ada_dx_g.set(2*row, col, rho * ada_dx_g.get(2*row, col) + (1f - rho) * rate * rate * grad * grad);
return rate;
}
/**
* Compute learning rate with AdaDelta, specialized for DenseRowMatrix
* @param grad gradient
* @param w neuron index
* @param ada_dx_g Matrix holding helper values (2 floats per weight)
* @param rho hyper-parameter #1
* @param eps hyper-parameter #2
* @return learning rate
*/
private static float computeAdaDeltaRateForWeight(final float grad, final int w,
final DenseRowMatrix ada_dx_g,
final float rho, final float eps) {
ada_dx_g.raw()[2*w+1] = rho * ada_dx_g.raw()[2*w+1] + (1f - rho) * grad * grad;
final float rate = Utils.approxSqrt((ada_dx_g.raw()[2*w] + eps)/(ada_dx_g.raw()[2*w+1] + eps));
ada_dx_g.raw()[2*w] = rho * ada_dx_g.raw()[2*w] + (1f - rho) * rate * rate * grad * grad;
return rate;
}
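// The two helpers above (and the bias variant below) implement the standard ADADELTA
// recurrences, with E[g^2] stored at the odd slot and E[dx^2] at the even slot:
//   E[g^2]  = rho * E[g^2]  + (1 - rho) * grad^2
//   rate    = sqrt((E[dx^2] + eps) / (E[g^2] + eps))
//   E[dx^2] = rho * E[dx^2] + (1 - rho) * (rate * grad)^2
// so each weight gets its own adaptive step size without a global learning rate.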
/**
* Compute learning rate with AdaDelta, specialized for DenseVector (Bias)
* @param grad2 squared gradient
* @param row neuron index
* @param bias_ada_dx_g Matrix holding helper values (2 floats per weight)
* @param rho hyper-parameter #1
* @param eps hyper-parameter #2
* @return learning rate
*/
private static float computeAdaDeltaRateForBias(final float grad2, final int row,
final DenseVector bias_ada_dx_g,
final float rho, final float eps) {
bias_ada_dx_g.raw()[2*row+1] = rho * bias_ada_dx_g.raw()[2*row+1] + (1f - rho) * grad2;
final float rate = Utils.approxSqrt((bias_ada_dx_g.raw()[2*row ] + eps)/(bias_ada_dx_g.raw()[2*row+1] + eps));
bias_ada_dx_g.raw()[2*row] = rho * bias_ada_dx_g.raw()[2*row ] + (1f - rho) * rate * rate * grad2;
return rate;
}
/**
* Helper to enforce learning rule to satisfy sparsity constraint:
* Computes the (rolling) average activation for each (hidden) neuron.
*/
void compute_sparsity() {
if (_avg_a != null) {
for (int row = 0; row < _avg_a.size(); row++) {
_avg_a.set(row, (float) 0.999 * (_avg_a.get(row)) + (float) 0.001 * (_a.get(row)));
}
}
}
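// Illustrative numbers (not part of the original source): the rolling average above is an
// exponential moving average with decay 0.999, e.g. with a current average of 0.20 and a
// new activation of 0.50 it moves to 0.999*0.20 + 0.001*0.50 = 0.2003, i.e. an effective
// averaging horizon of roughly 1000 recent samples.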
/**
* Helper to update the bias values
* @param _b bias vector
* @param _bm bias momentum vector
* @param row index of the neuron for which we back-propagate
* @param partial_grad partial derivative dE/dnet = dE/dy * dy/dnet
* @param avg_grad2 average squared gradient for this neuron's incoming weights (only for ADADELTA)
* @param rate learning rate
* @param momentum momentum factor (needed only if ADADELTA isn't used)
*/
void update_bias(final DenseVector _b, final DenseVector _bm, final int row,
float partial_grad, final float avg_grad2, float rate, final float momentum) {
final boolean have_momenta = _minfo.has_momenta();
final boolean have_ada = _minfo.adaDelta();
final float l1 = (float)params.l1;
final float l2 = (float)params.l2;
final float bias = _b.get(row);
partial_grad -= Math.signum(bias) * l1 + bias * l2;
if (have_ada) {
final float rho = (float)params.rho;
final float eps = (float)params.epsilon;
rate = computeAdaDeltaRateForBias(avg_grad2, row, _bias_ada_dx_g, rho, eps);
}
if (!params.nesterov_accelerated_gradient) {
final float delta = rate * partial_grad;
_b.add(row, delta);
if (have_momenta) {
_b.add(row, momentum * _bm.get(row));
_bm.set(row, delta);
}
} else {
float d = partial_grad;
if (have_momenta) {
_bm.set(row, _bm.get(row) * momentum);
_bm.add(row, d);
d = _bm.get(row);
}
_b.add(row, rate * d);
}
//update for sparsity constraint
if (params.autoencoder && params.sparsity_beta > 0 && !(this instanceof Output) && !(this instanceof Input) && (_index != params.hidden.length)) {
_b.add(row, -(float) (rate * params.sparsity_beta * (_avg_a._data[row] - params.average_activation)));
}
if (Float.isInfinite(_b.get(row))) _minfo.set_unstable();
}
/**
* The learning rate
* @param n The number of training samples seen so far (for rate_annealing greater than 0)
* @return Learning rate
*/
public float rate(long n) {
return (float)(params.rate / (1 + params.rate_annealing * n));
}
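// Worked example (illustrative only): with rate = 0.01 and rate_annealing = 1e-6, the
// effective rate after 1e6 training samples is 0.01 / (1 + 1e-6 * 1e6) = 0.01 / 2 = 0.005.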
protected float momentum() {
return momentum(-1);
}
/**
* The momentum - real number in [0, 1)
* Can be a linear ramp from momentum_start to momentum_stable, over momentum_ramp training samples
* @param n The number of training samples seen so far
* @return momentum
*/
public float momentum(long n) {
double m = params.momentum_start;
if( params.momentum_ramp > 0 ) {
final long num = n != -1 ? n : _minfo.get_processed_total(); //use the model's global sample counter when called via momentum()
if( num >= params.momentum_ramp )
m = params.momentum_stable;
else
m += (params.momentum_stable - params.momentum_start) * num / params.momentum_ramp;
}
return (float)m;
}
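// Worked example (illustrative only): with momentum_start = 0.5, momentum_stable = 0.99 and
// momentum_ramp = 1e6, the momentum after 5e5 samples is 0.5 + (0.99 - 0.5) * 0.5 = 0.745,
// and it stays at 0.99 once 1e6 samples have been processed.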
/**
* Input layer of the Neural Network
* This layer is different from other layers as it has no incoming weights,
* but instead gets its activation values from the training points.
*/
public static class Input extends Neurons {
private FrameTask.DataInfo _dinfo; //training data
SparseVector _svec;
DenseVector _dvec;
Input(int units, final FrameTask.DataInfo d) {
super(units);
_dinfo = d;
_a = new DenseVector(units);
_dvec = (DenseVector)_a;
}
@Override protected void bprop() { throw new UnsupportedOperationException(); }
@Override protected void fprop(long seed, boolean training) { throw new UnsupportedOperationException(); }
/**
* One of two methods to set layer input values. This one is for raw double data, e.g. for scoring
* @param seed For seeding the RNG inside (for input dropout)
* @param data Data (training columns and responses) to extract the training columns
* from to be mapped into the input neuron layer
*/
public void setInput(long seed, final double[] data) {
// Log.info("Data: " + ArrayUtils.toString(data));
assert(_dinfo != null);
double [] nums = MemoryManager.malloc8d(_dinfo._nums); // a bit wasteful - reallocated each time
int [] cats = MemoryManager.malloc4(_dinfo._cats); // a bit wasteful - reallocated each time
int i = 0, ncats = 0;
for(; i < _dinfo._cats; ++i){
// This can occur when testing data has categorical levels that are not part of training (or if there's a missing value)
if (Double.isNaN(data[i])) {
if (_dinfo._catMissing[i]!=0) cats[ncats++] = (_dinfo._catOffsets[i+1]-1); //use the extra level made during training
else {
if (!_dinfo._useAllFactorLevels)
throw new IllegalArgumentException("Model was built without missing categorical factors in column "
+ _dinfo.coefNames()[i] + ", but found unknown (or missing) categorical factors during scoring."
+ "\nThe model needs to be built with use_all_factor_levels=true for this to work.");
// else just leave all activations at 0, and since all factor levels were enabled,
// this is OK (missing or new categorical doesn't activate any levels seen during training)
}
} else {
int c = (int)data[i];
if (_dinfo._useAllFactorLevels)
cats[ncats++] = c + _dinfo._catOffsets[i];
else if (c!=0)
cats[ncats++] = c + _dinfo._catOffsets[i] - 1;
}
}
final int n = data.length; // data contains only input features - no response is included
for(;i < n;++i){
double d = data[i];
if(_dinfo._normMul != null) d = (d - _dinfo._normSub[i-_dinfo._cats])*_dinfo._normMul[i-_dinfo._cats];
nums[i-_dinfo._cats] = d; //can be NaN for missing numerical data
}
setInput(seed, nums, ncats, cats);
}
/**
* The second method used to set input layer values. This one is used directly by FrameTask.processRow() and by the method above.
* @param seed For seeding the RNG inside (for input dropout)
* @param nums Array containing numerical values, can be NaN
* @param numcat Number of horizontalized categorical non-zero values (i.e., those not being the first factor of a class)
* @param cats Array of indices, the first numcat values are the input layer unit (==column) indices for the non-zero categorical values
* (This allows this array to be re-usable by the caller, without re-allocating each time)
*/
public void setInput(long seed, final double[] nums, final int numcat, final int[] cats) {
_a = _dvec;
Arrays.fill(_a.raw(), 0f);
// random projection from fullN down to max_categorical_features
if (params.max_categorical_features < _dinfo.fullN() - _dinfo._nums) {
assert(nums.length == _dinfo._nums);
final int M = nums.length + params.max_categorical_features;
final boolean random_projection = false;
final boolean hash_trick = true;
if (random_projection) {
final int N = _dinfo.fullN();
assert (_a.size() == M);
// sparse random projection
for (int i = 0; i < M; ++i) {
for (int c = 0; c < numcat; ++c) {
int j = cats[c];
Random rng = new Random(params.seed + i*N + j);
float val = 0;
final float rnd = rng.nextFloat();
if (rnd < 1. / 6.) val = (float) Math.sqrt(3);
if (rnd > 5. / 6.) val = -(float) Math.sqrt(3);
_a.add(i, 1f * val);
}
Random rng = new Random(params.seed + i*N + _dinfo.numStart());
for (int n = 0; n < nums.length; ++n) {
float val = 0;
final float rnd = rng.nextFloat();
if (rnd < 1. / 6.) val = (float) Math.sqrt(3);
if (rnd > 5. / 6.) val = -(float) Math.sqrt(3);
_a.set(i, (Double.isNaN(nums[n]) ? 0f /*Always do MeanImputation during scoring*/ : (float) nums[n]) * val);
}
}
} else if (hash_trick) {
// Use hash trick for categorical features
assert (_a.size() == M);
// hash N-nums.length down to M-nums.length = cM (#categorical slots - always use all numerical features)
final int cM = params.max_categorical_features;
assert (_a.size() == M);
Hash murmur = MurmurHash.getInstance();
for (int i = 0; i < numcat; ++i) {
ByteBuffer buf = ByteBuffer.allocate(4);
int hashval = murmur.hash(buf.putInt(cats[i]).array(), 4, (int)params.seed); // turn horizontalized categorical integer into another integer, based on seed
// int hashval = cats[i] ^ (int)params.seed; // turn horizontalized categorical integer into another integer, based on seed
_a.add(Math.abs(hashval % cM), 1f); // restrict to limited range
}
for (int i = 0; i < nums.length; ++i)
_a.set(cM + i, Double.isNaN(nums[i]) ? 0f /*Always do MeanImputation during scoring*/ : (float) nums[i]);
}
} else {
for (int i = 0; i < numcat; ++i) _a.set(cats[i], 1f); // one-hot encode categoricals
for (int i = 0; i < nums.length; ++i)
_a.set(_dinfo.numStart() + i, Double.isNaN(nums[i]) ? 0f /*Always do MeanImputation during scoring*/ : (float) nums[i]);
}
// Log.info("Input Layer: " + ArrayUtils.toString(_a.raw()));
// Input Dropout
if (_dropout == null) return;
seed += params.seed + 0x1337B4BE;
_dropout.randomlySparsifyActivation(_a, seed);
if (params.sparse) {
_svec = new SparseVector(_dvec);
_a = _svec;
}
}
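// Illustrative sketch of the hash trick used above (not part of the original source),
// writing cM for max_categorical_features: each expanded categorical index cats[i] is
// hashed (MurmurHash of its 4 bytes, seeded with params.seed) and folded into one of the
// first cM input units, while the numerical features keep their own dedicated slots:
//   int slot = Math.abs(murmur.hash(ByteBuffer.allocate(4).putInt(cats[i]).array(), 4, (int) params.seed) % cM);
//   _a.add(slot, 1f);                 // collisions simply accumulate
//   _a.set(cM + j, (float) nums[j]);  // j-th numerical feature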
}
/**
* Tanh neurons - most common, most stable
*/
public static class Tanh extends Neurons {
public Tanh(int units) { super(units); }
@Override protected void fprop(long seed, boolean training) {
gemv((DenseVector)_a, _w, _previous._a, _b, _dropout != null ? _dropout.bits() : null);
final int rows = _a.size();
for( int row = 0; row < rows; row++ )
_a.set(row, 1f - 2f / (1f + (float)Math.exp(2*_a.get(row)))); //evals faster than tanh(x), but is slightly less numerically stable - OK
compute_sparsity();
}
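// The expression above is algebraically identical to tanh:
//   1 - 2 / (1 + e^(2x)) = (e^(2x) - 1) / (e^(2x) + 1) = tanh(x)
// but needs only one exponential per unit, which is why it is preferred here.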
// Computing partial derivative g = dE/dnet = dE/dy * dy/dnet, where dE/dy is the backpropagated error
// dy/dnet = (1 - a^2) for y(net) = tanh(net)
@Override protected void bprop() {
float m = momentum();
float r = _minfo.adaDelta() ? 0 : rate(_minfo.get_processed_total()) * (1f - m);
if (_w instanceof DenseRowMatrix) {
final int rows = _a.size();
for (int row = 0; row < rows; row++) {
if (_minfo.get_params().autoencoder && _index == _minfo.get_params().hidden.length)
_e.set(row, autoEncoderError(row));
float g = _e.get(row) * (1f - _a.get(row) * _a.get(row));
bprop(row, g, r, m);
}
}
else {
bprop_sparse(r, m);
}
}
}
/**
* Tanh neurons with dropout
*/
public static class TanhDropout extends Tanh {
public TanhDropout(int units) { super(units); }
@Override protected void fprop(long seed, boolean training) {
if (training) {
seed += params.seed + 0xDA7A6000;
_dropout.fillBytes(seed);
super.fprop(seed, true);
}
else {
super.fprop(seed, false);
Utils.mult(_a.raw(), (float)(1-params.hidden_dropout_ratios[_index]));
}
}
}
/**
* Maxout neurons
*/
public static class Maxout extends Neurons {
public Maxout(int units) { super(units); }
@Override protected void fprop(long seed, boolean training) {
float max = 0;
final int rows = _a.size();
if (_previous._a instanceof DenseVector) {
for( int row = 0; row < rows; row++ ) {
_a.set(row, 0);
if( !training || _dropout == null || _dropout.unit_active(row) ) {
_a.set(row, Float.NEGATIVE_INFINITY);
for( int i = 0; i < _previous._a.size(); i++ )
_a.set(row, Math.max(_a.get(row), _w.get(row, i) * _previous._a.get(i)));
if (Float.isInfinite(-_a.get(row))) _a.set(row, 0); //catch the case where there is dropout (and/or input sparsity) -> no max found!
_a.add(row, _b.get(row));
max = Math.max(_a.get(row), max);
}
}
if( max > 1 ) Utils.div(_a.raw(), max);
}
else {
SparseVector x = (SparseVector)_previous._a;
for( int row = 0; row < _a.size(); row++ ) {
_a.set(row, 0);
if( !training || _dropout == null || _dropout.unit_active(row) ) {
// _a.set(row, Float.NEGATIVE_INFINITY);
// for( int i = 0; i < _previous._a.size(); i++ )
// _a.set(row, Math.max(_a.get(row), _w.get(row, i) * _previous._a.get(i)));
float mymax = Float.NEGATIVE_INFINITY;
int start = x.begin()._idx;
int end = x.end()._idx;
for (int it = start; it < end; ++it) {
mymax = Math.max(mymax, _w.get(row, x._indices[it]) * x._values[it]);
}
_a.set(row, mymax);
if (Float.isInfinite(-_a.get(row))) _a.set(row, 0); //catch the case where there is dropout (and/or input sparsity) -> no max found!
_a.add(row, _b.get(row));
max = Math.max(_a.get(row), max);
}
}
if( max > 1f ) Utils.div(_a.raw(), max);
}
compute_sparsity();
}
@Override protected void bprop() {
float m = momentum();
float r = _minfo.adaDelta() ? 0 : rate(_minfo.get_processed_total()) * (1f - m);
if (_w instanceof DenseRowMatrix) {
final int rows = _a.size();
for( int row = 0; row < rows; row++ ) {
assert (!_minfo.get_params().autoencoder);
// if (_minfo.get_params().autoencoder && _index == _minfo.get_params().hidden.length)
// _e.set(row, autoEncoderError(row));
float g = _e.get(row);
// if( _a[o] < 0 ) Not sure if we should be using maxout with a hard zero bottom
// g = 0;
bprop(row, g, r, m);
}
}
else {
bprop_sparse(r, m);
}
}
}
/**
* Maxout neurons with dropout
*/
public static class MaxoutDropout extends Maxout {
public MaxoutDropout(int units) { super(units); }
@Override protected void fprop(long seed, boolean training) {
if (training) {
seed += params.seed + 0x51C8D00D;
_dropout.fillBytes(seed);
super.fprop(seed, true);
}
else {
super.fprop(seed, false);
Utils.mult(_a.raw(), (float)(1-params.hidden_dropout_ratios[_index]));
}
}
}
/**
* Rectifier linear unit (ReLU) neurons
*/
public static class Rectifier extends Neurons {
public Rectifier(int units) { super(units); }
@Override protected void fprop(long seed, boolean training) {
gemv((DenseVector)_a, _w, _previous._a, _b, _dropout != null ? _dropout.bits() : null);
final int rows = _a.size();
for( int row = 0; row < rows; row++ )
_a.set(row, Math.max(_a.get(row), 0f));
compute_sparsity(); // update the rolling average activations once per fprop (consistent with Tanh and Maxout)
}
@Override protected void bprop() {
float m = momentum();
float r = _minfo.adaDelta() ? 0 : rate(_minfo.get_processed_total()) * (1f - m);
final int rows = _a.size();
if (_w instanceof DenseRowMatrix) {
for (int row = 0; row < rows; row++) {
//(d/dx)(max(0,x)) = 1 if x > 0, otherwise 0
if (_minfo.get_params().autoencoder && _index == _minfo.get_params().hidden.length)
_e.set(row, autoEncoderError(row));
float g = _a.get(row) > 0f ? _e.get(row) : 0f;
bprop(row, g, r, m);
}
}
else {
bprop_sparse(r, m);
}
}
}
/**
* Rectifier linear unit (ReLU) neurons with dropout
*/
public static class RectifierDropout extends Rectifier {
public RectifierDropout(int units) { super(units); }
@Override protected void fprop(long seed, boolean training) {
if (training) {
seed += params.seed + 0x3C71F1ED;
_dropout.fillBytes(seed);
super.fprop(seed, true);
}
else {
super.fprop(seed, false);
Utils.mult(_a.raw(), (float)(1-params.hidden_dropout_ratios[_index]));
}
}
}
/**
* Abstract class for Output neurons
*/
public static abstract class Output extends Neurons {
Output(int units) { super(units); }
protected void fprop(long seed, boolean training) { throw new UnsupportedOperationException(); }
protected void bprop() { throw new UnsupportedOperationException(); }
}
/**
* Output neurons for classification - Softmax
*/
public static class Softmax extends Output {
public Softmax(int units) { super(units); }
protected void fprop() {
gemv((DenseVector) _a, (DenseRowMatrix) _w, (DenseVector) _previous._a, _b, null);
final float max = Utils.maxValue(_a.raw());
float scale = 0f;
final int rows = _a.size();
for( int row = 0; row < rows; row++ ) {
_a.set(row, (float)Math.exp(_a.get(row) - max));
scale += _a.get(row);
}
for( int row = 0; row < rows; row++ ) {
if (Float.isNaN(_a.get(row))) {
_minfo.set_unstable();
throw new RuntimeException("Numerical instability, predicted NaN.");
}
_a.raw()[row] /= scale;
}
}
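// Worked example (illustrative only): subtracting the max keeps exp() from overflowing.
// For raw outputs {1, 2, 3}: exp({-2, -1, 0}) = {0.135, 0.368, 1.0}, scale = 1.503, and the
// resulting class probabilities are approximately {0.090, 0.245, 0.665}, summing to 1.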
/**
* Backpropagation for classification
* Update every weight as follows: w += -rate * dE/dw
* Compute dE/dw via chain rule: dE/dw = dE/dy * dy/dnet * dnet/dw, where net = sum(xi*wi)+b and y = activation function
* @param target actual class label
*/
protected void bprop(int target) {
assert (target != missing_int_value); // no correction of weights/biases for missing label
float m = momentum();
float r = _minfo.adaDelta() ? 0 : rate(_minfo.get_processed_total()) * (1f - m);
float g; //partial derivative dE/dy * dy/dnet
final int rows = _a.size();
for( int row = 0; row < rows; row++ ) {
final float t = (row == target ? 1f : 0f);
final float y = _a.get(row);
//dy/dnet = derivative of softmax = (1-y)*y
if (params.loss == Loss.CrossEntropy) {
//nothing else needed, -dCE/dy * dy/dnet = target - y
//cf. http://www.stanford.edu/group/pdplab/pdphandbook/handbookch6.html
g = t - y;
} else {
assert(params.loss == Loss.MeanSquare);
//-dMSE/dy = target-y
g = (t - y) * (1f - y) * y;
}
// this call expects dE/dnet
bprop(row, g, r, m);
}
}
}
/**
* Output neurons for regression - Linear
*/
public static class Linear extends Output {
public Linear(int units) { super(units); }
protected void fprop() {
gemv((DenseVector)_a, _w, _previous._a, _b, _dropout != null ? _dropout.bits() : null);
}
/**
* Backpropagation for regression
* @param target floating-point target value
*/
protected void bprop(float target) {
assert (target != missing_real_value);
if (params.loss != Loss.MeanSquare) throw new UnsupportedOperationException("Regression is only implemented for MeanSquare error.");
final int row = 0;
// Computing partial derivative: dE/dnet = dE/dy * dy/dnet = dE/dy * 1
final float g = target - _a.get(row); //for MSE -dMSE/dy = target-y
float m = momentum();
float r = _minfo.adaDelta() ? 0 : rate(_minfo.get_processed_total()) * (1f - m);
bprop(row, g, r, m);
}
}
/**
* Mat-Vec Plus Add (with optional row dropout)
* @param res = a*x+y (pre-allocated, will be overwritten)
* @param a matrix of size rows x cols
* @param x vector of length cols
* @param y vector of length rows
* @param row_bits if not null, check bits of this byte[] to determine whether a row is used or not
*/
static void gemv_naive(final float[] res, final float[] a, final float[] x, final float[] y, byte[] row_bits) {
final int cols = x.length;
final int rows = y.length;
assert(res.length == rows);
for(int row = 0; row<rows; row++) {
res[row] = 0;
if( row_bits != null && (row_bits[row / 8] & (1 << (row % 8))) == 0) continue;
for(int col = 0; col<cols; col++)
res[row] += a[row*cols+col] * x[col];
res[row] += y[row];
}
}
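// Illustrative usage (not part of the original source), computing res = a*x + y for a
// 2x3 row-major matrix:
//   float[] a   = {1, 2, 3,  4, 5, 6};
//   float[] x   = {1, 0, 2};
//   float[] y   = {10, 20};
//   float[] res = new float[2];
//   gemv_naive(res, a, x, y, null);   // res == {1+0+6+10, 4+0+12+20} == {17, 36}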
/**
* Optimized Mat-Vec Plus Add (with optional row dropout)
* Optimization: Partial sums can be evaluated in parallel
* @param res = a*x+y (pre-allocated, will be overwritten)
* @param a matrix of size rows x cols
* @param x vector of length cols
* @param y vector of length rows
* @param row_bits if not null, check bits of this byte[] to determine whether a row is used or not
*/
static void gemv_row_optimized(final float[] res, final float[] a, final float[] x, final float[] y, final byte[] row_bits) {
final int cols = x.length;
final int rows = y.length;
assert(res.length == rows);
final int extra=cols-cols%8;
final int multiple = (cols/8)*8-1;
int idx = 0;
for (int row = 0; row<rows; row++) {
res[row] = 0;
if( row_bits == null || (row_bits[row / 8] & (1 << (row % 8))) != 0) {
float psum0 = 0, psum1 = 0, psum2 = 0, psum3 = 0, psum4 = 0, psum5 = 0, psum6 = 0, psum7 = 0;
for (int col = 0; col < multiple; col += 8) {
int off = idx + col;
psum0 += a[off ] * x[col ];
psum1 += a[off + 1] * x[col + 1];
psum2 += a[off + 2] * x[col + 2];
psum3 += a[off + 3] * x[col + 3];
psum4 += a[off + 4] * x[col + 4];
psum5 += a[off + 5] * x[col + 5];
psum6 += a[off + 6] * x[col + 6];
psum7 += a[off + 7] * x[col + 7];
}
res[row] += psum0 + psum1 + psum2 + psum3;
res[row] += psum4 + psum5 + psum6 + psum7;
for (int col = extra; col < cols; col++)
res[row] += a[idx + col] * x[col];
res[row] += y[row];
}
idx += cols;
}
}
/**
* Helper to do a generic gemv: res = a*x + y
* @param res Dense result
* @param a Matrix (sparse or dense)
* @param x Vector (sparse or dense)
* @param y Dense vector to add to result
* @param row_bits Bit mask for which rows to use
*/
static void gemv(final DenseVector res, final Matrix a, final Vector x, final DenseVector y, byte[] row_bits) {
if (a instanceof DenseRowMatrix && x instanceof DenseVector)
gemv(res, (DenseRowMatrix)a, (DenseVector)x, y, row_bits); //default
else if (a instanceof DenseColMatrix && x instanceof SparseVector)
gemv(res, (DenseColMatrix)a, (SparseVector)x, y, row_bits); //fast for really sparse
else if (a instanceof DenseRowMatrix && x instanceof SparseVector)
gemv(res, (DenseRowMatrix) a, (SparseVector) x, y, row_bits); //try
else if (a instanceof DenseColMatrix && x instanceof DenseVector)
gemv(res, (DenseColMatrix) a, (DenseVector) x, y, row_bits); //try
else throw new UnsupportedOperationException("gemv for matrix " + a.getClass().getSimpleName() + " and vector + " + x.getClass().getSimpleName() + " not yet implemented.");
}
static void gemv(final DenseVector res, final DenseRowMatrix a, final DenseVector x, final DenseVector y, byte[] row_bits) {
gemv_row_optimized(res.raw(), a.raw(), x.raw(), y.raw(), row_bits);
}
static void gemv_naive(final DenseVector res, final DenseRowMatrix a, final DenseVector x, final DenseVector y, byte[] row_bits) {
gemv_naive(res.raw(), a.raw(), x.raw(), y.raw(), row_bits);
}
//TODO: make optimized version for col matrix
static void gemv(final DenseVector res, final DenseColMatrix a, final DenseVector x, final DenseVector y, byte[] row_bits) {
final int cols = x.size();
final int rows = y.size();
assert(res.size() == rows);
for(int r = 0; r<rows; r++) {
res.set(r, 0);
}
for(int c = 0; c<cols; c++) {
final float val = x.get(c);
for(int r = 0; r<rows; r++) {
if( row_bits != null && (row_bits[r / 8] & (1 << (r % 8))) == 0) continue;
res.add(r, a.get(r,c) * val);
}
}
for(int r = 0; r<rows; r++) {
if( row_bits != null && (row_bits[r / 8] & (1 << (r % 8))) == 0) continue;
res.add(r, y.get(r));
}
}
static void gemv(final DenseVector res, final DenseRowMatrix a, final SparseVector x, final DenseVector y, byte[] row_bits) {
final int rows = y.size();
assert(res.size() == rows);
for(int r = 0; r<rows; r++) {
res.set(r, 0);
if( row_bits != null && (row_bits[r / 8] & (1 << (r % 8))) == 0) continue;
int start = x.begin()._idx;
int end = x.end()._idx;
for (int it = start; it < end; ++it) {
res.add(r, a.get(r, x._indices[it]) * x._values[it]);
}
res.add(r, y.get(r));
}
}
static void gemv(final DenseVector res, final DenseColMatrix a, final SparseVector x, final DenseVector y, byte[] row_bits) {
final int rows = y.size();
assert(res.size() == rows);
for(int r = 0; r<rows; r++) {
res.set(r, 0);
}
int start = x.begin()._idx;
int end = x.end()._idx;
for (int it = start; it < end; ++it) {
final float val = x._values[it];
if (val == 0f) continue;
for(int r = 0; r<rows; r++) {
if( row_bits != null && (row_bits[r / 8] & (1 << (r % 8))) == 0) continue;
res.add(r, a.get(r,x._indices[it]) * val);
}
}
for(int r = 0; r<rows; r++) {
if( row_bits != null && (row_bits[r / 8] & (1 << (r % 8))) == 0) continue;
res.add(r, y.get(r));
}
}
static void gemv(final DenseVector res, final SparseRowMatrix a, final SparseVector x, final DenseVector y, byte[] row_bits) {
final int rows = y.size();
assert(res.size() == rows);
for(int r = 0; r<rows; r++) {
res.set(r, 0);
if( row_bits != null && (row_bits[r / 8] & (1 << (r % 8))) == 0) continue;
// iterate over all non-empty columns for this row
TreeMap<Integer, Float> row = a.row(r);
Set<Map.Entry<Integer,Float>> set = row.entrySet();
for (Map.Entry<Integer,Float> e : set) {
final float val = x.get(e.getKey());
if (val != 0f) res.add(r, e.getValue() * val); //TODO: iterate over both iterators and only add where there are matching indices
}
res.add(r, y.get(r));
}
}
static void gemv(final DenseVector res, final SparseColMatrix a, final SparseVector x, final DenseVector y, byte[] row_bits) {
final int rows = y.size();
assert(res.size() == rows);
for(int r = 0; r<rows; r++) {
res.set(r, 0);
}
for(int c = 0; c<a.cols(); c++) {
TreeMap<Integer, Float> col = a.col(c);
final float val = x.get(c);
if (val == 0f) continue;
for (Map.Entry<Integer,Float> e : col.entrySet()) {
final int r = e.getKey();
if( row_bits != null && (row_bits[r / 8] & (1 << (r % 8))) == 0) continue;
// iterate over all non-empty columns for this row
res.add(r, e.getValue() * val);
}
}
for(int r = 0; r<rows; r++) {
if( row_bits != null && (row_bits[r / 8] & (1 << (r % 8))) == 0) continue;
res.add(r, y.get(r));
}
}
/**
* Abstract vector interface
*/
public abstract interface Vector {
public abstract float get(int i);
public abstract void set(int i, float val);
public abstract void add(int i, float val);
public abstract int size();
public abstract float[] raw();
}
/**
* Dense vector implementation
*/
public static class DenseVector extends Iced implements Vector {
private float[] _data;
DenseVector(int len) { _data = new float[len]; }
DenseVector(float[] v) { _data = v; }
@Override public float get(int i) { return _data[i]; }
@Override public void set(int i, float val) { _data[i] = val; }
@Override public void add(int i, float val) { _data[i] += val; }
@Override public int size() { return _data.length; }
@Override public float[] raw() { return _data; }
}
/**
* Sparse vector implementation
*/
public static class SparseVector extends Iced implements Vector {
private int[] _indices;
private float[] _values;
private int _size;
private int _nnz;
@Override public int size() { return _size; }
public int nnz() { return _nnz; }
SparseVector(float[] v) { this(new DenseVector(v)); }
SparseVector(final DenseVector dv) {
_size = dv.size();
// first count non-zeros
for (int i=0; i<dv._data.length; ++i) {
if (dv.get(i) != 0.0f) {
_nnz++;
}
}
// only allocate what's needed
_indices = new int[_nnz];
_values = new float[_nnz];
// fill values
int idx = 0;
for (int i=0; i<dv._data.length; ++i) {
if (dv.get(i) != 0.0f) {
_indices[idx] = i;
_values[idx] = dv.get(i);
idx++;
}
}
assert(idx == nnz());
}
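// Illustrative usage (not part of the original source): a dense vector {0, 3, 0, 5}
// compresses to nnz() == 2 with indices {1, 3} and values {3, 5}, and is walked via the
// iterator as in Dropout.randomlySparsifyActivation():
//   SparseVector sv = new SparseVector(new DenseVector(new float[]{0f, 3f, 0f, 5f}));
//   for (Iterator it = sv.begin(); !it.equals(sv.end()); it.next())
//     System.out.println(it);   // prints "1 -> 3.0" then "3 -> 5.0"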
/**
* Slow path access to i-th element
* @param i element index
* @return real value
*/
@Override public float get(int i) {
final int idx = Arrays.binarySearch(_indices, i);
return idx < 0 ? 0f : _values[idx];
}
@Override
public void set(int i, float val) {
throw new UnsupportedOperationException("setting values in a sparse vector is not implemented.");
}
@Override
public void add(int i, float val) {
throw new UnsupportedOperationException("adding values in a sparse vector is not implemented.");
}
@Override
public float[] raw() {
throw new UnsupportedOperationException("raw access to the data in a sparse vector is not implemented.");
}
/**
* Iterator over a sparse vector
*/
public class Iterator {
int _idx; //which nnz
Iterator(int id) { _idx = id; }
Iterator next() {
_idx++;
return this;
}
// boolean hasNext() {
// return _idx < _indices.length-1;
// }
boolean equals(Iterator other) {
return _idx == other._idx;
}
@Override
public String toString() {
return index() + " -> " + value();
}
float value() { return _values[_idx]; }
int index() { return _indices[_idx]; }
void setValue(float val) { _values[_idx] = val; }
}
public Iterator begin() { return new Iterator(0); }
public Iterator end() { return new Iterator(_indices.length); }
}
/**
* Abstract matrix interface
*/
public abstract interface Matrix {
abstract float get(int row, int col);
abstract void set(int row, int col, float val);
abstract void add(int row, int col, float val);
abstract int cols();
abstract int rows();
abstract long size();
abstract float[] raw();
}
/**
* Dense row matrix implementation
*/
public final static class DenseRowMatrix extends Iced implements Matrix {
private float[] _data;
private int _cols;
private int _rows;
DenseRowMatrix(int rows, int cols) { this(new float[cols*rows], rows, cols); }
DenseRowMatrix(float[] v, int rows, int cols) { _data = v; _rows = rows; _cols = cols; }
@Override public float get(int row, int col) { assert(row<_rows && col<_cols); return _data[row*_cols + col]; }
@Override public void set(int row, int col, float val) { assert(row<_rows && col<_cols); _data[row*_cols + col] = val; }
@Override public void add(int row, int col, float val) { assert(row<_rows && col<_cols); _data[row*_cols + col] += val; }
@Override public int cols() { return _cols; }
@Override public int rows() { return _rows; }
@Override public long size() { return (long)_rows*(long)_cols; }
public float[] raw() { return _data; }
}
/**
* Dense column matrix implementation
*/
public final static class DenseColMatrix extends Iced implements Matrix {
private float[] _data;
private int _cols;
private int _rows;
DenseColMatrix(int rows, int cols) { this(new float[cols*rows], rows, cols); }
DenseColMatrix(float[] v, int rows, int cols) { _data = v; _rows = rows; _cols = cols; }
DenseColMatrix(DenseRowMatrix m, int rows, int cols) { this(rows, cols); for (int row=0;row<rows;++row) for (int col=0;col<cols;++col) set(row,col, m.get(row,col)); }
@Override public float get(int row, int col) { assert(row<_rows && col<_cols); return _data[col*_rows + row]; }
@Override public void set(int row, int col, float val) { assert(row<_rows && col<_cols); _data[col*_rows + row] = val; }
@Override public void add(int row, int col, float val) { assert(row<_rows && col<_cols); _data[col*_rows + row] += val; }
@Override public int cols() { return _cols; }
@Override public int rows() { return _rows; }
@Override public long size() { return (long)_rows*(long)_cols; }
public float[] raw() { return _data; }
}
/**
* Sparse row matrix implementation
*/
public final static class SparseRowMatrix implements Matrix {
private TreeMap<Integer, Float>[] _rows;
private int _cols;
SparseRowMatrix(int rows, int cols) { this(null, rows, cols); }
SparseRowMatrix(Matrix v, int rows, int cols) {
_rows = new TreeMap[rows];
for (int row=0;row<rows;++row) _rows[row] = new TreeMap<Integer, Float>();
_cols = cols;
if (v!=null)
for (int row=0;row<rows;++row)
for (int col=0;col<cols;++col)
if (v.get(row,col) != 0f)
add(row,col, v.get(row,col));
}
@Override public float get(int row, int col) { Float v = _rows[row].get(col); if (v == null) return 0f; else return v; }
@Override public void add(int row, int col, float val) { set(row,col,get(row,col)+val); }
@Override public void set(int row, int col, float val) { _rows[row].put(col, val); }
@Override public int cols() { return _cols; }
@Override public int rows() { return _rows.length; }
@Override public long size() { return (long)_rows.length*(long)_cols; }
TreeMap<Integer, Float> row(int row) { return _rows[row]; }
public float[] raw() { throw new UnsupportedOperationException("raw access to the data in a sparse matrix is not implemented."); }
}
/**
* Sparse column matrix implementation
*/
static final class SparseColMatrix implements Matrix {
private TreeMap<Integer, Float>[] _cols;
private int _rows;
SparseColMatrix(int rows, int cols) { this(null, rows, cols); }
SparseColMatrix(Matrix v, int rows, int cols) {
_rows = rows;
_cols = new TreeMap[cols];
for (int col=0;col<cols;++col) _cols[col] = new TreeMap<Integer, Float>();
if (v!=null)
for (int row=0;row<rows;++row)
for (int col=0;col<cols;++col)
if (v.get(row,col) != 0f)
add(row,col, v.get(row,col));
}
@Override public float get(int row, int col) { Float v = _cols[col].get(row); if (v == null) return 0f; else return v; }
@Override public void add(int row, int col, float val) { set(row,col,get(row,col)+val); }
@Override public void set(int row, int col, float val) { _cols[col].put(row, val); }
@Override public int cols() { return _cols.length; }
@Override public int rows() { return _rows; }
@Override public long size() { return (long)_rows*(long)_cols.length; }
TreeMap<Integer, Float> col(int col) { return _cols[col]; }
public float[] raw() { throw new UnsupportedOperationException("raw access to the data in a sparse matrix is not implemented."); }
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/drf/DRF.java
|
package hex.drf;
import static hex.drf.TreeMeasuresCollector.asSSE;
import static hex.drf.TreeMeasuresCollector.asVotes;
import static water.util.Utils.div;
import static water.util.Utils.sum;
import hex.ConfusionMatrix;
import hex.VarImp;
import hex.drf.TreeMeasuresCollector.TreeMeasures;
import hex.drf.TreeMeasuresCollector.TreeSSE;
import hex.drf.TreeMeasuresCollector.TreeVotes;
import hex.gbm.*;
import hex.gbm.DTree.DecidedNode;
import hex.gbm.DTree.LeafNode;
import hex.gbm.DTree.TreeModel.CompressedTree;
import hex.gbm.DTree.TreeModel.TreeStats;
import hex.gbm.DTree.UndecidedNode;
import java.util.Arrays;
import java.util.Random;
import water.*;
import water.H2O.H2OCountedCompleter;
import water.api.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.util.*;
import water.util.Log.Tag.Sys;
// Random Forest Trees
public class DRF extends SharedTreeModelBuilder<DRF.DRFModel> {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
static final boolean DEBUG_DETERMINISTIC = false; // enable this for a deterministic version of DRF. It will use the same seed for each execution. I would prefer to read this property from system properties.
@API(help = "Columns to randomly select at each level, or -1 for sqrt(#cols)", filter = Default.class, lmin=-1, lmax=100000)
int mtries = -1;
@API(help = "Sample rate, from 0. to 1.0", filter = Default.class, dmin=0, dmax=1, importance=ParamImportance.SECONDARY)
float sample_rate = 0.6666667f;
@API(help = "Seed for the random number generator (autogenerated)", filter = Default.class)
long seed = -1; // To follow R-semantics, each call of RF should provide different seed. -1 means seed autogeneration
@API(help = "Check non-contiguous group splits for categorical predictors", filter = Default.class, hide = true)
boolean do_grpsplit = true;
@API(help="Run on one node only; no network overhead but fewer cpus used. Suitable for small datasets.", filter=myClassFilter.class, importance=ParamImportance.SECONDARY)
public boolean build_tree_one_node = false;
class myClassFilter extends DRFCopyDataBoolean { myClassFilter() { super("source"); } }
@API(help = "Computed number of split features", importance=ParamImportance.EXPERT)
protected int _mtry; // FIXME remove and replace by mtries
@API(help = "Autogenerated seed", importance=ParamImportance.EXPERT)
protected long _seed; // FIXME remove and replace by seed
// Fixed seed generator for DRF
private static final Random _seedGenerator = Utils.getDeterRNG(0xd280524ad7fe0602L);
// --- Private data handled only on master node
// Classification or Regression:
// Tree votes/SSE of individual trees on OOB rows
private transient TreeMeasures _treeMeasuresOnOOB;
// Tree votes/SSE per individual features on permutated OOB rows
private transient TreeMeasures[/*features*/] _treeMeasuresOnSOOB;
/** DRF model holding serialized tree and implementing logic for scoring a row */
public static class DRFModel extends DTree.TreeModel {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help = "Model parameters", json = true)
private final DRF parameters; // This is used purely for printing values out.
@Override public final DRF get_params() { return parameters; }
@Override public final Request2 job() { return get_params(); }
@API(help = "Number of columns picked at each split") final int mtries;
@API(help = "Sample rate") final float sample_rate;
@API(help = "Seed") final long seed;
// Params that do not affect model quality:
//
public DRFModel(DRF params, Key key, Key dataKey, Key testKey, String names[], String domains[][], String[] cmDomain, int ntrees, int max_depth, int min_rows, int nbins, int mtries, float sample_rate, long seed, int num_folds, float[] priorClassDist, float[] classDist) {
super(key,dataKey,testKey,names,domains,cmDomain,ntrees, max_depth, min_rows, nbins, num_folds, priorClassDist, classDist);
this.parameters = Job.hygiene((DRF) params.clone());
this.mtries = mtries;
this.sample_rate = sample_rate;
this.seed = seed;
}
private DRFModel(DRFModel prior, DTree[] trees, TreeStats tstats) {
super(prior, trees, tstats);
this.parameters = prior.parameters;
this.mtries = prior.mtries;
this.sample_rate = prior.sample_rate;
this.seed = prior.seed;
}
private DRFModel(DRFModel prior, double err, ConfusionMatrix cm, VarImp varimp, AUCData validAUC) {
super(prior, err, cm, varimp, validAUC);
this.parameters = prior.parameters;
this.mtries = prior.mtries;
this.sample_rate = prior.sample_rate;
this.seed = prior.seed;
}
private DRFModel(DRFModel prior, Key[][] treeKeys, double[] errs, ConfusionMatrix[] cms, TreeStats tstats, VarImp varimp, AUCData validAUC) {
super(prior, treeKeys, errs, cms, tstats, varimp, validAUC);
this.parameters = prior.parameters;
this.mtries = prior.mtries;
this.sample_rate = prior.sample_rate;
this.seed = prior.seed;
}
@Override protected TreeModelType getTreeModelType() { return TreeModelType.DRF; }
@Override protected float[] score0(double data[], float preds[]) {
float[] p = super.score0(data, preds);
int ntrees = ntrees();
if (p.length==1) { if (ntrees>0) div(p, ntrees); } // regression - compute avg over all trees
else { // classification
float s = sum(p);
if (s>0) div(p, s); // unify over all classes
p[0] = ModelUtils.getPrediction(p, data);
}
return p;
}
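// Illustrative example (not part of the original source): for a 3-class forest where the
// trees cast per-class votes preds = {_, 10, 25, 15}, the classification branch above
// normalizes them to {_, 0.2, 0.5, 0.3} and stores the index of the winning class in p[0].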
@Override protected void generateModelDescription(StringBuilder sb) {
DocGen.HTML.paragraph(sb,"mtries: "+mtries+", Sample rate: "+sample_rate+", Seed: "+seed);
if (testKey==null && sample_rate==1f) {
sb.append("<div class=\"alert alert-danger\">There are no out-of-bag data to compute out-of-bag error estimate, since sampling rate is 1!</div>");
}
}
@Override protected void toJavaUnifyPreds(SB bodySb) {
if (isClassifier()) {
bodySb.i().p("float sum = 0;").nl();
bodySb.i().p("for(int i=1; i<preds.length; i++) sum += preds[i];").nl();
bodySb.i().p("if (sum>0) for(int i=1; i<preds.length; i++) preds[i] /= sum;").nl();
} else bodySb.i().p("preds[1] = preds[1]/NTREES;").nl();
}
@Override protected void setCrossValidationError(ValidatedJob job, double cv_error, water.api.ConfusionMatrix cm, AUCData auc, HitRatio hr) {
DRFModel drfm = ((DRF)job).makeModel(this, cv_error, cm.cm == null ? null : new ConfusionMatrix(cm.cm, cms[0].nclasses()), this.varimp, auc);
drfm._have_cv_results = true;
DKV.put(this._key, drfm); //overwrite this model
}
}
public Frame score( Frame fr ) { return ((DRFModel)UKV.get(dest())).score(fr); }
@Override protected Log.Tag.Sys logTag() { return Sys.DRF__; }
@Override protected DRFModel makeModel(Key outputKey, Key dataKey, Key testKey, int ntrees, String[] names, String[][] domains, String[] cmDomain, float[] priorClassDist, float[] classDist) {
return new DRFModel(this,outputKey,dataKey,validation==null?null:testKey,names,domains,cmDomain,ntrees, max_depth, min_rows, nbins, mtries, sample_rate, _seed, n_folds, priorClassDist, classDist);
}
@Override protected DRFModel makeModel( DRFModel model, double err, ConfusionMatrix cm, VarImp varimp, AUCData validAUC) {
return new DRFModel(model, err, cm, varimp, validAUC);
}
@Override protected DRFModel makeModel( DRFModel model, DTree ktrees[], TreeStats tstats) {
return new DRFModel(model, ktrees, tstats);
}
@Override protected DRFModel updateModel(DRFModel model, DRFModel checkpoint, boolean overwriteCheckpoint) {
// Do not forget to clone trees in case we are not going to overwrite the checkpoint
Key[][] treeKeys = null;
if (!overwriteCheckpoint) throw H2O.unimpl("Cloning of model trees is not implemented yet!");
else treeKeys = checkpoint.treeKeys;
return new DRFModel(model, treeKeys, checkpoint.errs, checkpoint.cms, checkpoint.treeStats, checkpoint.varimp, checkpoint.validAUC);
}
public DRF() { description = "Distributed RF"; ntrees = 50; max_depth = 20; min_rows = 1; }
/** Return the query link to this page */
public static String link(Key k, String content) {
RString rs = new RString("<a href='/2/DRF.query?source=%$key'>%content</a>");
rs.replace("key", k.toString());
rs.replace("content", content);
return rs.toString();
}
// ==========================================================================
/** Compute a DRF tree.
*
* Start by splitting all the data according to some criteria (minimize
* variance at the leaves). Record on each row which split it goes to, and
* assign a split number to it (for next pass). On *this* pass, use the
* split-number to build a per-split histogram, with a per-histogram-bucket
* variance. */
@Override protected void execImpl() {
try {
logStart();
buildModel(seed);
if (n_folds > 0) CrossValUtils.crossValidate(this);
} finally {
remove(); // Remove Job
// Ugly hack updating job state carried as parameters inside a model
state = UKV.<Job>get(self()).state;
new TAtomic<DRFModel>() {
@Override
public DRFModel atomic(DRFModel m) {
if (m != null) m.get_params().state = state;
return m;
}
}.invoke(dest());
}
}
@Override protected Response redirect() {
return DRFProgressPage.redirect(this, self(), dest());
}
@SuppressWarnings("unused")
@Override protected void init() {
super.init();
// Initialize local variables
_mtry = (mtries==-1) ? // classification: mtry=sqrt(_ncols), regression: mtry=_ncols/3
( classification ? Math.max((int)Math.sqrt(_ncols),1) : Math.max(_ncols/3,1)) : mtries;
if (!(1 <= _mtry && _mtry <= _ncols)) throw new IllegalArgumentException("Computed mtry should be in interval <1,#cols> but it is " + _mtry);
if (!(0.0 < sample_rate && sample_rate <= 1.0)) throw new IllegalArgumentException("Sample rate should be in interval (0,1] but it is " + sample_rate);
if (DEBUG_DETERMINISTIC && seed == -1) _seed = 0x1321e74a0192470cL; // fixed version of seed
else if (seed == -1) _seed = _seedGenerator.nextLong(); else _seed = seed;
if (sample_rate==1f && validation==null)
  Log.warn(Sys.DRF__, "Sample rate is 100% and no validation dataset is specified. There are no OOB data to compute out-of-bag error estimation!");
}
@Override protected void initAlgo(DRFModel initialModel) {
// Initialize TreeVotes for classification, MSE arrays for regression
if (importance) initTreeMeasurements();
}
@Override protected void initWorkFrame(DRFModel initialModel, Frame fr) {
// Append number of trees participating in on-the-fly scoring
fr.add("OUT_BAG_TREES", response.makeZero());
// Prepare working columns
new SetWrkTask().doAll(fr);
// If there was a checkpoint, recompute the tree and oob columns based on predictions from previous trees,
// but only if OOB validation is requested.
if (validation==null && checkpoint!=null) {
Timer t = new Timer();
// Compute oob votes for each output level
new OOBScorer(_ncols, _nclass, sample_rate, initialModel.treeKeys).doAll(fr);
Log.info(logTag(), "Reconstructing oob stats from checkpointed model took " + t);
}
}
@Override protected DRFModel buildModel( DRFModel model, final Frame fr, String names[], String domains[][], final Timer t_build ) {
// The RNG used to pick split columns
Random rand = createRNG(_seed);
// To be deterministic, draw the random numbers already used for previous trees,
// putting the random generator into the same state
for (int i=0; i<_ntreesFromCheckpoint; i++) rand.nextLong();
int tid;
DTree[] ktrees = null;
// Prepare tree statistics
TreeStats tstats = model.treeStats!=null ? model.treeStats : new TreeStats();
// Build trees until we hit the limit
for( tid=0; tid<ntrees; tid++) { // Building tid-tree
if (tid!=0 || checkpoint==null) { // do not make initial scoring if the model already exists
model = doScoring(model, fr, ktrees, tid, tstats, tid==0, !hasValidation(), build_tree_one_node);
}
// At each iteration build K trees (K = nclass = response column domain size)
// TODO: parallelize more? build more than k trees at each time, we need to care about temporary data
// Idea: launch more DRF at once.
Timer kb_timer = new Timer();
ktrees = buildNextKTrees(fr,_mtry,sample_rate,rand,tid);
Log.info(logTag(), (tid+1) + ". tree was built " + kb_timer.toString());
if( !Job.isRunning(self()) ) break; // If canceled during building, do not bulkscore
// Check latest predictions
tstats.updateBy(ktrees);
}
if( Job.isRunning(self()) ) { // perform final scoring only if the job was not cancelled
model = doScoring(model, fr, ktrees, tid, tstats, true, !hasValidation(), build_tree_one_node);
// Make sure that we did not miss any votes
assert !importance || _treeMeasuresOnOOB.npredictors() == _treeMeasuresOnSOOB[0/*variable*/].npredictors() : "Missing some tree votes in variable importance voting?!";
}
return model;
}
private void initTreeMeasurements() {
assert importance : "Tree votes should be initialized only if variable importance is requested!";
// Preallocate tree votes
if (classification) {
_treeMeasuresOnOOB = new TreeVotes(ntrees);
_treeMeasuresOnSOOB = new TreeVotes[_ncols];
for (int i=0; i<_ncols; i++) _treeMeasuresOnSOOB[i] = new TreeVotes(ntrees);
} else {
_treeMeasuresOnOOB = new TreeSSE(ntrees);
_treeMeasuresOnSOOB = new TreeSSE[_ncols];
for (int i=0; i<_ncols; i++) _treeMeasuresOnSOOB[i] = new TreeSSE(ntrees);
}
}
/** On-the-fly version for varimp. After generating a new tree, its tree votes are collected on shuffled
* OOB rows and variable importance is recomputed.
* <p>
* The <a href="http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm#varimp">page</a> says:
* <cite>
* "In every tree grown in the forest, put down the oob cases and count the number of votes cast for the correct class.
* Now randomly permute the values of variable m in the oob cases and put these cases down the tree.
* Subtract the number of votes for the correct class in the variable-m-permuted oob data from the number of votes
* for the correct class in the untouched oob data.
* The average of this number over all trees in the forest is the raw importance score for variable m."
* </cite>
* </p>
* */
@Override
protected VarImp doVarImpCalc(final DRFModel model, DTree[] ktrees, final int tid, final Frame fTrain, boolean scale) {
// Check if we have already serialized 'ktrees'-trees in the model
assert model.ntrees()-1-_ntreesFromCheckpoint == tid : "Cannot compute DRF varimp since 'ktrees' are not serialized in the model! tid="+tid;
assert _treeMeasuresOnOOB.npredictors()-1 == tid : "Tree votes over OOB rows for this tree (var ktrees) were not found!";
// Compute tree votes over shuffled data
final CompressedTree[/*nclass*/] theTree = model.ctree(tid); // get the last tree FIXME we should pass only keys
final int nclasses = model.nclasses();
Futures fs = new Futures();
for (int var=0; var<_ncols; var++) {
final int variable = var;
H2OCountedCompleter task4var = classification ? new H2OCountedCompleter() {
@Override public void compute2() {
// Compute this tree's votes over all data for the given variable
TreeVotes cd = TreeMeasuresCollector.collectVotes(theTree, nclasses, fTrain, _ncols, sample_rate, variable);
assert cd.npredictors() == 1;
asVotes(_treeMeasuresOnSOOB[variable]).append(cd);
tryComplete();
}
} : /* regression */ new H2OCountedCompleter() {
@Override public void compute2() {
// Compute this tree's votes over all data for the given variable
TreeSSE cd = TreeMeasuresCollector.collectSSE(theTree, nclasses, fTrain, _ncols, sample_rate, variable);
assert cd.npredictors() == 1;
asSSE(_treeMeasuresOnSOOB[variable]).append(cd);
tryComplete();
}
};
fs.add(task4var);
H2O.submitTask(task4var); // Fork computation
}
fs.blockForPending(); // Wait for results
// Compute varimp for individual features (_ncols)
final float[] varimp = new float[_ncols]; // output variable importance
final float[] varimpSD = new float[_ncols]; // output variable importance sd
for (int var=0; var<_ncols; var++) {
double[/*2*/] imp = classification ? asVotes(_treeMeasuresOnSOOB[var]).imp(asVotes(_treeMeasuresOnOOB)) : asSSE(_treeMeasuresOnSOOB[var]).imp(asSSE(_treeMeasuresOnOOB));
varimp [var] = (float) imp[0];
varimpSD[var] = (float) imp[1];
}
return new VarImp.VarImpMDA(varimp, varimpSD, model.ntrees());
}
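  // Illustrative sketch only (hypothetical helper, not used by the algorithm above): how a
  // per-variable mean-decrease-accuracy importance could be derived from per-tree correct-vote
  // counts on untouched vs. permuted OOB rows, mirroring the formula in
  // TreeMeasuresCollector.TreeVotes.imp(). All names and arguments here are illustrative.
  @SuppressWarnings("unused")
  private static double[] sketchVarImpFromVotes(long[] votesUntouched, long[] votesPermuted, long[] oobRows) {
    assert votesUntouched.length == votesPermuted.length && votesPermuted.length == oobRows.length;
    int ntrees = votesUntouched.length;
    double sum = 0, sumSq = 0;
    for (int t = 0; t < ntrees; t++) {
      // Per-tree drop in accuracy caused by permuting the variable on that tree's OOB rows
      double delta = ((double) (votesUntouched[t] - votesPermuted[t])) / oobRows[t];
      sum += delta;
      sumSq += delta * delta;
    }
    double imp = sum / ntrees;                                     // raw importance = mean drop in accuracy
    double sd  = Math.sqrt((sumSq / ntrees - imp * imp) / ntrees); // its standard error over trees
    return new double[] { imp, sd };
  }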
@Override public boolean supportsBagging() { return true; }
/** Fill work columns:
* - classification: set 1 in the corresponding wrk col according to row response
* - regression: copy response into work column (there is only 1 work column) */
private class SetWrkTask extends MRTask2<SetWrkTask> {
@Override public void map( Chunk chks[] ) {
Chunk cy = chk_resp(chks);
for( int i=0; i<cy._len; i++ ) {
if( cy.isNA0(i) ) continue;
if (classification) {
int cls = (int)cy.at80(i);
chk_work(chks,cls).set0(i,1L);
} else {
float pred = (float) cy.at0(i);
chk_work(chks,0).set0(i,pred);
}
}
}
}
// --------------------------------------------------------------------------
// Build the next random k-trees representing tid-th tree
private DTree[] buildNextKTrees(Frame fr, int mtrys, float sample_rate, Random rand, int tid) {
// We're going to build K (nclass) trees - each focused on correcting
// errors for a single class.
final DTree[] ktrees = new DTree[_nclass];
// Initial set of histograms. All trees; one leaf per tree (the root
// leaf); all columns
DHistogram hcs[][][] = new DHistogram[_nclass][1/*just root leaf*/][_ncols];
// Adjust nbins for the top-levels
int adj_nbins = Math.max((1<<(10-0)),nbins);
// Use the same seed for all k-trees. NOTE: this is only to give all
// k-trees a fair (identical) sampling view
long rseed = rand.nextLong();
// Initially setup as-if an empty-split had just happened
for( int k=0; k<_nclass; k++ ) {
assert (_distribution!=null && classification) || (_distribution==null && !classification);
if( _distribution == null || _distribution[k] != 0 ) { // Ignore missing classes
// The Boolean Optimization cannot be applied here for RF !
// This optimization assumes the 2nd tree of a 2-class system is the
// inverse of the first. This is false for DRF (and true for GBM) -
// DRF picks a random different set of columns for the 2nd tree.
//if( k==1 && _nclass==2 ) continue;
ktrees[k] = new DRFTree(fr,_ncols,(char)nbins,(char)_nclass,min_rows,mtrys,rseed);
boolean isBinom = classification;
new DRFUndecidedNode(ktrees[k],-1, DHistogram.initialHist(fr,_ncols,adj_nbins,hcs[k][0],do_grpsplit,isBinom) ); // The "root" node
}
}
// Sample - mark the lines by putting 'OUT_OF_BAG' into nid(<klass>) vector
Timer t_1 = new Timer();
Sample ss[] = new Sample[_nclass];
for( int k=0; k<_nclass; k++)
if (ktrees[k] != null) ss[k] = new Sample((DRFTree)ktrees[k], sample_rate).dfork(0,new Frame(vec_nids(fr,k),vec_resp(fr,k)), build_tree_one_node);
for( int k=0; k<_nclass; k++)
if( ss[k] != null ) ss[k].getResult();
Log.debug(Sys.DRF__, "Sampling took: + " + t_1);
int[] leafs = new int[_nclass]; // Define a "working set" of leaf splits, from leafs[i] to tree._len for each tree i
// ----
// One Big Loop till the ktrees are of proper depth.
// Adds a layer to the trees each pass.
Timer t_2 = new Timer();
int depth=0;
for( ; depth<max_depth; depth++ ) {
if( !Job.isRunning(self()) ) return null;
hcs = buildLayer(fr, ktrees, leafs, hcs, true, build_tree_one_node);
// If we did not make any new splits, then the tree is split-to-death
if( hcs == null ) break;
}
Log.debug(Sys.DRF__, "Tree build took: " + t_2);
// Each tree bottomed-out in a DecidedNode; go 1 more level and insert
// LeafNodes to hold predictions.
Timer t_3 = new Timer();
for( int k=0; k<_nclass; k++ ) {
DTree tree = ktrees[k];
if( tree == null ) continue;
int leaf = leafs[k] = tree.len();
for( int nid=0; nid<leaf; nid++ ) {
if( tree.node(nid) instanceof DecidedNode ) {
DecidedNode dn = tree.decided(nid);
for( int i=0; i<dn._nids.length; i++ ) {
int cnid = dn._nids[i];
if( cnid == -1 || // Bottomed out (predictors or responses known constant)
tree.node(cnid) instanceof UndecidedNode || // Or chopped off for depth
(tree.node(cnid) instanceof DecidedNode && // Or not possible to split
((DecidedNode)tree.node(cnid))._split.col()==-1) ) {
LeafNode ln = new DRFLeafNode(tree,nid);
ln._pred = dn.pred(i); // Set prediction into the leaf
dn._nids[i] = ln.nid(); // Mark a leaf here
}
}
// Handle the trivial non-splitting tree
if( nid==0 && dn._split.col() == -1 )
new DRFLeafNode(tree,-1,0);
}
}
} // -- k-trees are done
Log.debug(Sys.DRF__, "Nodes propagation: " + t_3);
// ----
// Move rows into the final leaf rows
Timer t_4 = new Timer();
CollectPreds cp = new CollectPreds(ktrees,leafs).doAll(fr,build_tree_one_node);
if (importance) {
if (classification) asVotes(_treeMeasuresOnOOB).append(cp.rightVotes, cp.allRows); // Track right votes over OOB rows for this tree
else /* regression */ asSSE (_treeMeasuresOnOOB).append(cp.sse, cp.allRows);
}
Log.debug(Sys.DRF__, "CollectPreds done: " + t_4);
// Collect leaves stats
for (int i=0; i<ktrees.length; i++)
if( ktrees[i] != null )
ktrees[i].leaves = ktrees[i].len() - leafs[i];
// DEBUG: Print the generated K trees
//printGenerateTrees(ktrees);
return ktrees;
}
// Read the 'tree' columns, do model-specific math and put the results in the
// fs[] array, and return the sum. Dividing any fs[] element by the sum
// turns the results into a probability distribution.
@Override protected float score1( Chunk chks[], float fs[/*nclass*/], int row ) {
float sum=0;
for( int k=0; k<_nclass; k++ ) // Sum across likelihoods
sum+=(fs[k+1]=(float)chk_tree(chks,k).at0(row));
if (_nclass == 1) sum /= (float)chk_oobt(chks).at0(row); // for regression, average over the trees that voted for this row (only trees for which the row was out-of-bag)
return sum;
}
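  // Tiny sketch (illustration only, not called anywhere): the normalization mentioned above -
  // dividing each per-class accumulator by the class sum turns raw vote counts into a
  // probability distribution; index 0 is reserved for the predicted label.
  @SuppressWarnings("unused")
  private static void sketchNormalize(float[] fs, float sum) {
    if (sum > 0)
      for (int k = 1; k < fs.length; k++) fs[k] /= sum;
  }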
@Override protected boolean inBagRow(Chunk[] chks, int row) {
return chk_oobt(chks).at80(row) == 0;
}
// Collect and write predictions into leafs.
private class CollectPreds extends MRTask2<CollectPreds> {
/* @IN */ final DTree _trees[]; // Read-only, shared (except at the histograms in the Nodes)
/* @OUT */ long rightVotes; // number of right votes over OOB rows (performed by this tree) represented by DTree[] _trees
/* @OUT */ long allRows; // number of all OOB rows (sampled by this tree)
/* @OUT */ float sse; // Sum of squares for this tree only
CollectPreds(DTree trees[], int leafs[]) { _trees=trees; }
@Override public void map( Chunk[] chks ) {
final Chunk y = importance ? chk_resp(chks) : null; // Response
final float [] rpred = importance ? new float [1+_nclass] : null; // Row prediction
final double[] rowdata = importance ? new double[_ncols] : null; // Pre-allocated row data
final Chunk oobt = chk_oobt(chks); // Out-of-bag rows counter over all trees
// Iterate over all rows
for( int row=0; row<oobt._len; row++ ) {
boolean wasOOBRow = false;
// For all tree (i.e., k-classes)
for( int k=0; k<_nclass; k++ ) {
final DTree tree = _trees[k];
if( tree == null ) continue; // Empty class is ignored
// If we have all constant responses, then we do not split even the
// root and the residuals should be zero.
if( tree.root() instanceof LeafNode ) continue;
final Chunk nids = chk_nids(chks,k); // Node-ids for this tree/class
final Chunk ct = chk_tree(chks,k); // k-tree working column holding votes for given row
int nid = (int)nids.at80(row); // Get Node to decide from
// Update only out-of-bag rows
// This is out-of-bag row - but we would like to track on-the-fly prediction for the row
if( isOOBRow(nid) ) { // The row should be OOB for all k-trees !!!
assert k==0 || wasOOBRow : "Something is wrong: k-class trees oob row computing is broken! All k-trees should agree on oob row!";
wasOOBRow = true;
nid = oob2Nid(nid);
if( tree.node(nid) instanceof UndecidedNode ) // If we bottomed out the tree
nid = tree.node(nid).pid(); // Then take parent's decision
DecidedNode dn = tree.decided(nid); // Must have a decision point
if( dn._split.col() == -1 ) // Unable to decide?
dn = tree.decided(tree.node(nid).pid()); // Then take parent's decision
int leafnid = dn.ns(chks,row); // Decide down to a leafnode
// Setup Tree(i) - on the fly prediction of i-tree for row-th row
// - for classification: cumulative number of votes for this row
// - for regression: cumulative sum of prediction of each tree - has to be normalized by number of trees
double prediction = ((LeafNode)tree.node(leafnid)).pred(); // Prediction for this k-class and this row
if (importance) rpred[1+k] = (float) prediction; // for both regression and classification
ct.set0(row, (float)(ct.at0(row) + prediction));
// For this tree this row is out-of-bag - i.e., a tree voted for this row
oobt.set0(row, _nclass>1?1:oobt.at0(row)+1); // for regression track number of trees, for classification boolean flag is enough
}
// reset help column for this row and this k-class
nids.set0(row,0);
} /* end of k-trees iteration */
if (importance) {
if (wasOOBRow && !y.isNA0(row)) {
if (classification) {
int treePred = ModelUtils.getPrediction(rpred, data_row(chks,row, rowdata));
int actuPred = (int) y.at80(row);
if (treePred==actuPred) rightVotes++; // No miss !
} else { // regression
float treePred = rpred[1];
float actuPred = (float) y.at0(row);
sse += (actuPred-treePred)*(actuPred-treePred);
}
allRows++;
}
}
}
}
@Override public void reduce(CollectPreds mrt) {
rightVotes += mrt.rightVotes;
allRows += mrt.allRows;
sse += mrt.sse;
}
}
// A standard DTree with a few more bits. Support for sampling during
// training, and replaying the sample later on the identical dataset to
// e.g. compute OOBEE.
static class DRFTree extends DTree {
final int _mtrys; // Number of columns to choose amongst in splits
final long _seeds[]; // One seed for each chunk, for sampling
final transient Random _rand; // RNG for split decisions & sampling
DRFTree( Frame fr, int ncols, char nbins, char nclass, int min_rows, int mtrys, long seed ) {
super(fr._names, ncols, nbins, nclass, min_rows, seed);
_mtrys = mtrys;
_rand = createRNG(seed);
_seeds = new long[fr.vecs()[0].nChunks()];
for( int i=0; i<_seeds.length; i++ )
_seeds[i] = _rand.nextLong();
}
// Return a deterministic chunk-local RNG. Can be kinda expensive.
@Override public Random rngForChunk( int cidx ) {
long seed = _seeds[cidx];
return createRNG(seed);
}
}
@Override protected DecidedNode makeDecided( UndecidedNode udn, DHistogram hs[] ) {
return new DRFDecidedNode(udn,hs);
}
// DRF DTree decision node: same as the normal DecidedNode, but specifies a
// decision algorithm given complete histograms on all columns.
// DRF algo: find the lowest error amongst a random mtry columns.
static class DRFDecidedNode extends DecidedNode {
DRFDecidedNode( UndecidedNode n, DHistogram hs[] ) { super(n,hs); }
@Override public DRFUndecidedNode makeUndecidedNode( DHistogram hs[] ) {
return new DRFUndecidedNode(_tree,_nid, hs);
}
// Find the column with the best split (lowest score).
@Override public DTree.Split bestCol( UndecidedNode u, DHistogram hs[] ) {
DTree.Split best = new DTree.Split(-1,-1,null,(byte)0,Double.MAX_VALUE,Double.MAX_VALUE,0L,0L,0,0);
if( hs == null ) return best;
for( int i=0; i<u._scoreCols.length; i++ ) {
int col = u._scoreCols[i];
DTree.Split s = hs[col].scoreMSE(col);
if( s == null ) continue;
if( s.se() < best.se() ) best = s;
if( s.se() <= 0 ) break; // No point in looking further!
}
return best;
}
}
// DRF DTree undecided node: same as the normal UndecidedNode, but specifies
// a list of columns to score on now, and then decide over later.
// DRF algo: pick a random mtry columns
static class DRFUndecidedNode extends UndecidedNode {
DRFUndecidedNode( DTree tree, int pid, DHistogram[] hs ) { super(tree,pid, hs); }
// Randomly select mtry columns to 'score' in following pass over the data.
@Override public int[] scoreCols( DHistogram[] hs ) {
DRFTree tree = (DRFTree)_tree;
int[] cols = new int[hs.length];
int len=0;
// Gather all active columns to choose from.
for( int i=0; i<hs.length; i++ ) {
if( hs[i]==null ) continue; // Ignore not-tracked cols
assert hs[i]._min < hs[i]._maxEx && hs[i].nbins() > 1 : "broken histo range "+hs[i];
cols[len++] = i; // Gather active column
}
int choices = len; // Number of columns I can choose from
assert choices > 0;
// Draw up to mtry columns at random without replacement.
for( int i=0; i<tree._mtrys; i++ ) {
if( len == 0 ) break; // Out of choices!
int idx2 = tree._rand.nextInt(len);
int col = cols[idx2]; // The chosen column
cols[idx2] = cols[--len]; // Compress out of array; do not choose again
cols[len] = col; // Swap chosen in just after 'len'
}
assert choices - len > 0;
return Arrays.copyOfRange(cols,len,choices);
}
}
static class DRFLeafNode extends LeafNode {
DRFLeafNode( DTree tree, int pid ) { super(tree,pid); }
DRFLeafNode( DTree tree, int pid, int nid ) { super(tree,pid,nid); }
// Insert just the prediction: stored as a single 4-byte float per leaf.
@Override protected AutoBuffer compress(AutoBuffer ab) { assert !Double.isNaN(pred()); return ab.put4f((float)pred()); }
@Override protected int size() { return 4; }
}
// Deterministic sampling
static class Sample extends MRTask2<Sample> {
final DRFTree _tree;
final float _rate;
Sample( DRFTree tree, float rate ) { _tree = tree; _rate = rate; }
@Override public void map( Chunk nids, Chunk ys ) {
Random rand = _tree.rngForChunk(nids.cidx());
for( int row=0; row<nids._len; row++ )
if( rand.nextFloat() >= _rate || Double.isNaN(ys.at0(row)) ) {
nids.set0(row, OUT_OF_BAG); // Flag row as being ignored by sampling
}
}
}
/**
* Cross-Validate a DRF model by building new models on N train/test holdout splits
* @param splits Frames containing train/test splits
* @param cv_preds Array of Frames to store the predictions for each cross-validation run
* @param offsets Array to store the offsets of starting row indices for each cross-validation run
* @param i Which fold of cross-validation to perform
*/
@Override public void crossValidate(Frame[] splits, Frame[] cv_preds, long[] offsets, int i) {
// Train a clone with slightly modified parameters (to account for cross-validation)
DRF cv = (DRF) this.clone();
cv.genericCrossValidation(splits, offsets, i);
cv_preds[i] = ((DRFModel) UKV.get(cv.dest())).score(cv.validation); // cv_preds is escaping the context of this function and needs to be DELETED by the caller!!!
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/drf/OOBScorer.java
|
package hex.drf;
import java.util.Arrays;
import java.util.Random;
import hex.gbm.DTree.TreeModel.CompressedTree;
import hex.gbm.*;
import water.*;
import water.fvec.Chunk;
/**
 * Computes OOB scores over all trees and rows
 * and reconstructs the <code>ntree_id, oobt</code> fields in the given frame.
 *
 * <p>It prepares votes per tree and also marks
 * rows which were considered out-of-bag.</p>
*/
/* package */ class OOBScorer extends DTreeScorer<OOBScorer> {
/* @IN */ final protected float _rate;
public OOBScorer(int ncols, int nclass, float rate, Key[][] treeKeys) {
super(ncols,nclass,treeKeys);
_rate = rate;
}
@Override public void map(Chunk[] chks) {
double[] data = new double[_ncols];
float [] preds = new float[_nclass+1];
int ntrees = _trees.length;
Chunk coobt = chk_oobt(chks);
Chunk cys = chk_resp(chks);
for( int tidx=0; tidx<ntrees; tidx++) { // tree
// OOB RNG for this tree
Random rng = rngForTree(_trees[tidx], coobt.cidx());
for (int row=0; row<coobt._len; row++) {
if( rng.nextFloat() >= _rate || Double.isNaN(cys.at0(row)) ) {
// Mark oob row and store number of trees voting for this row (only for regression)
coobt.set0(row, _nclass>1?1:coobt.at0(row)+1);
// Make a prediction
for (int i=0;i<_ncols;i++) data[i] = chks[i].at0(row);
Arrays.fill(preds, 0);
score0(data, preds, _trees[tidx]);
if (_nclass==1) preds[1]=preds[0]; // Only for regression, keep consistency
// Write tree predictions
for (int c=0;c<_nclass;c++) { // over all class
if (preds[1+c] != 0) {
Chunk ctree = chk_tree(chks, c);
ctree.set0(row, (float)(ctree.at0(row) + preds[1+c]));
}
}
}
}
}
}
private Random rngForTree(CompressedTree[] ts, int cidx) {
return ts[0].rngForChunk(cidx); // the k-class set of trees shares the same random number generator
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/drf/TreeMeasuresCollector.java
|
package hex.drf;
import hex.ShuffleTask;
import hex.gbm.DTreeUtils;
import hex.gbm.DTree.TreeModel.CompressedTree;
import java.util.Arrays;
import java.util.Random;
import water.Iced;
import water.MRTask2;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.util.ModelUtils;
import water.util.Utils;
/** Score given tree model and preserve errors per tree in form of votes (for classification)
* or MSE (for regression).
*
 * This is different from the Model.score() function since the MR task
 * uses an inverted loop: first over all trees, then over all rows in a chunk.
*/
public class TreeMeasuresCollector extends MRTask2<TreeMeasuresCollector> {
/* @IN */ final private float _rate;
/* @IN */ private CompressedTree[/*N*/][/*nclasses*/] _trees; // FIXME: Pass only tree-keys since serialized trees are passed over wire !!!
/* @IN */ final private int _var;
/* @IN */ final private boolean _oob;
/* @IN */ final private int _ncols;
/* @IN */ final private int _nclasses;
/* @IN */ final private boolean _classification;
/* @INOUT */ private final int _ntrees;
/* @OUT */ private long [/*ntrees*/] _votes; // Number of correct votes per tree (for classification only)
/* @OUT */ private long [/*ntrees*/] _nrows; // Number of scored row per tree (for classification/regression)
/* @OUT */ private float[/*ntrees*/] _sse; // Sum of squared errors per tree (for regression only)
private TreeMeasuresCollector(CompressedTree[/*N*/][/*nclasses*/] trees, int nclasses, int ncols, float rate, int variable) {
assert trees.length > 0;
assert nclasses == trees[0].length;
_trees = trees; _ncols = ncols;
_rate = rate; _var = variable;
_oob = true; _ntrees = trees.length;
_nclasses = nclasses;
_classification = (nclasses>1);
}
@Override public void map(Chunk[] chks) {
double[] data = new double[_ncols];
float [] preds = new float[_nclasses+1];
Chunk cresp = chk_resp(chks);
int nrows = cresp._len;
int [] oob = new int[2+Math.round((1f-_rate)*nrows*1.2f+0.5f)]; // preallocate
int [] soob = null;
// Prepare output data
_nrows = new long[_ntrees];
_votes = _classification ? new long[_ntrees] : null;
_sse = _classification ? null : new float[_ntrees];
long seedForOob = ShuffleTask.seed(cresp.cidx()); // seed for shuffling oob samples
// Start iteration
for( int tidx=0; tidx<_ntrees; tidx++) { // tree
// OOB RNG for this tree
Random rng = rngForTree(_trees[tidx], cresp.cidx());
// Collect oob rows and permute them
oob = ModelUtils.sampleOOBRows(nrows, _rate, rng, oob); // reuse the same array for sampling
int oobcnt = oob[0]; // Get number of sample rows
if (_var>=0) {
if (soob==null || soob.length < oobcnt) soob = new int[oobcnt];
Utils.shuffleArray(oob, oobcnt, soob, seedForOob, 1); // Shuffle array and copy results into <code>soob</code>
}
for(int j = 1; j < 1+oobcnt; j++) {
int row = oob[j];
if (cresp.isNA0(row)) continue; // we cannot deal with this row anyhow
// Do scoring:
// - prepare a row data
for (int i=0;i<_ncols;i++) data[i] = chks[i].at0(row); // fill the row of predictor values
// - permute variable
if (_var>=0) data[_var] = chks[_var].at0(soob[j-1]);
else assert soob==null;
// - score data
Arrays.fill(preds, 0);
// - score only the tree
score0(data, preds, _trees[tidx]);
// - derive a prediction
if (_classification) {
int pred = ModelUtils.getPrediction(preds, data);
int actu = (int) cresp.at80(row);
// assert preds[pred] > 0 : "There should be a vote for at least one class.";
// - collect only correct votes
if (pred == actu) _votes[tidx]++;
} else { /* regression */
float pred = preds[0]; // Important!
float actu = (float) cresp.at0(row);
_sse[tidx] += (actu-pred)*(actu-pred);
}
// - collect rows which were used for voting
_nrows[tidx]++;
//if (_var<0) System.err.println("VARIMP OOB row: " + (cresp._start+row) + " : " + Arrays.toString(data) + " tree/actu: " + pred + "/" + actu);
}
}
// Clean-up
_trees = null;
}
@Override public void reduce( TreeMeasuresCollector t ) { Utils.add(_votes,t._votes); Utils.add(_nrows, t._nrows); Utils.add(_sse, t._sse); }
public TreeVotes resultVotes() { return new TreeVotes(_votes, _nrows, _ntrees); }
public TreeSSE resultSSE () { return new TreeSSE (_sse, _nrows, _ntrees); }
/* This is a copy of score0 method from DTree:615 */
private void score0(double data[], float preds[], CompressedTree[] ts) {
DTreeUtils.scoreTree(data, preds, ts);
}
private Chunk chk_resp( Chunk chks[] ) { return chks[_ncols]; }
private Random rngForTree(CompressedTree[] ts, int cidx) {
return _oob ? ts[0].rngForChunk(cidx) : new DummyRandom(); // the k-class set of trees shares the same random number generator
}
/* For bulk scoring
public static TreeVotes collect(TreeModel tmodel, Frame f, int ncols, float rate, int variable) {
CompressedTree[][] trees = new CompressedTree[tmodel.ntrees()][];
for (int tidx = 0; tidx < tmodel.ntrees(); tidx++) trees[tidx] = tmodel.ctree(tidx);
return new TreeVotesCollector(trees, tmodel.nclasses(), ncols, rate, variable).doAll(f).result();
}*/
public static TreeVotes collectVotes(CompressedTree[/*nclass || 1 for regression*/] tree, int nclasses, Frame f, int ncols, float rate, int variable) {
return new TreeMeasuresCollector(new CompressedTree[][] {tree}, nclasses, ncols, rate, variable).doAll(f).resultVotes();
}
public static TreeSSE collectSSE(CompressedTree[/*nclass || 1 for regression*/] tree, int nclasses, Frame f, int ncols, float rate, int variable) {
return new TreeMeasuresCollector(new CompressedTree[][] {tree}, nclasses, ncols, rate, variable).doAll(f).resultSSE();
}
private static final class DummyRandom extends Random {
@Override public final float nextFloat() { return 1.0f; }
}
/** A simple holder for set of different tree measurements. */
public static abstract class TreeMeasures<T extends TreeMeasures> extends Iced {
/** Actual number of trees whose votes are stored in this object */
protected int _ntrees;
/** Number of processed rows per tree. */
protected long[/*ntrees*/] _nrows;
public TreeMeasures(int initialCapacity) { _nrows = new long[initialCapacity]; }
public TreeMeasures(long[] nrows, int ntrees) { _nrows = nrows; _ntrees = ntrees;}
/** Returns number of rows which were used during voting per individual tree. */
public final long[] nrows() { return _nrows; }
/** Returns number of voting predictors */
public final int npredictors() { return _ntrees; }
/** Returns the accuracy of an individual tree. */
public abstract double accuracy(int tidx);
public final double[] accuracy() {
double[] r = new double[_ntrees];
// Average of all trees
for (int tidx=0; tidx<_ntrees; tidx++) r[tidx] = accuracy(tidx);
return r;
}
/** Compute variable importance with respect to given votes.
 * The given {@code T} object represents correct votes.
 * This object represents votes over shuffled data.
 *
 * @param right individual tree measurements performed over non-shuffled data.
* @return computed importance and standard deviation
*/
public abstract double[/*2*/] imp(T right);
public abstract T append(T t);
}
/** A class holding tree votes. */
public static class TreeVotes extends TreeMeasures<TreeVotes> {
/** Number of correct votes per tree */
private long[/*ntrees*/] _votes;
public TreeVotes(int initialCapacity) {
super(initialCapacity);
_votes = new long[initialCapacity];
}
public TreeVotes(long[] votes, long[] nrows, int ntrees) {
super(nrows, ntrees);
_votes = votes;
}
/** Returns the number of correct votes per tree. */
public final long[] votes() { return _votes; }
/** Returns the accuracy of an individual tree. */
@Override public final double accuracy(int tidx) {
assert tidx < _nrows.length && tidx < _votes.length;
return ((double) _votes[tidx]) / _nrows[tidx];
}
/** Compute variable importance with respect to given votes.
* The given {@link TreeVotes} object represents correct votes.
* This object represents votes over shuffled data.
*
 * @param right individual tree votes collected over non-shuffled data.
* @return computed importance and standard deviation
*/
@Override public final double[/*2*/] imp(TreeVotes right) {
assert npredictors() == right.npredictors();
int ntrees = npredictors();
double imp = 0;
double sd = 0;
// Over all trees
for (int tidx = 0; tidx < ntrees; tidx++) {
assert right.nrows()[tidx] == nrows()[tidx];
double delta = ((double) (right.votes()[tidx] - votes()[tidx])) / nrows()[tidx];
imp += delta;
sd += delta * delta;
}
double av = imp / ntrees;
double csd = Math.sqrt( (sd/ntrees - av*av) / ntrees );
return new double[] { av, csd};
}
/** Append one tree's votes to the list of trees. */
public TreeVotes append(long rightVotes, long allRows) {
assert _votes.length > _ntrees && _votes.length == _nrows.length : "TreeVotes inconsistency!";
_votes[_ntrees] = rightVotes;
_nrows[_ntrees] = allRows;
_ntrees++;
return this;
}
@Override public TreeVotes append(final TreeVotes tv) {
for (int i=0; i<tv.npredictors(); i++)
append(tv._votes[i], tv._nrows[i]);
return this;
}
}
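  // Hedged usage sketch (illustration only, never invoked): building two TreeVotes holders,
  // one from untouched OOB rows and one from rows with a variable permuted, then asking for
  // the resulting importance estimate. The literal vote counts below are made up.
  @SuppressWarnings("unused")
  private static double[] sketchTreeVotesUsage() {
    TreeVotes untouched = new TreeVotes(3); // capacity for 3 trees
    TreeVotes permuted  = new TreeVotes(3);
    // append(correctVotes, oobRows) once per tree, in the same tree order for both holders
    untouched.append(90, 100).append(85, 100).append(88, 100);
    permuted .append(70, 100).append(60, 100).append(65, 100);
    // imp() returns { mean drop in per-tree accuracy, its standard error }
    return permuted.imp(untouched);
  }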
/** A simple holder for SSE per tree. */
public static class TreeSSE extends TreeMeasures<TreeSSE> {
/** SSE per tree */
private float[/*ntrees*/] _sse;
public TreeSSE(int initialCapacity) {
super(initialCapacity);
_sse = new float[initialCapacity];
}
public TreeSSE(float[] sse, long[] nrows, int ntrees) {
super(nrows, ntrees);
_sse = sse;
}
@Override public double accuracy(int tidx) {
return _sse[tidx] / _nrows[tidx];
}
@Override public double[] imp(TreeSSE right) {
assert npredictors() == right.npredictors();
int ntrees = npredictors();
double imp = 0;
double sd = 0;
// Over all trees
for (int tidx = 0; tidx < ntrees; tidx++) {
assert right.nrows()[tidx] == nrows()[tidx]; // check that we iterate over same OOB rows
double delta = ((double) (_sse[tidx] - right._sse[tidx])) / nrows()[tidx];
imp += delta;
sd += delta * delta;
}
double av = imp / ntrees;
double csd = Math.sqrt( (sd/ntrees - av*av) / ntrees );
return new double[] { av, csd };
}
@Override public TreeSSE append(TreeSSE t) {
for (int i=0; i<t.npredictors(); i++)
append(t._sse[i], t._nrows[i]);
return this;
}
/** Append one tree's SSE to the list of trees. */
public TreeSSE append(float sse, long allRows) {
assert _sse.length > _ntrees && _sse.length == _nrows.length : "TreeSSE inconsistency!";
_sse [_ntrees] = sse;
_nrows[_ntrees] = allRows;
_ntrees++;
return this;
}
}
public static TreeVotes asVotes(TreeMeasures tm) { return (TreeVotes) tm; }
public static TreeSSE asSSE (TreeMeasures tm) { return (TreeSSE) tm; }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/gapstat/GapStatistic.java
|
package hex.gapstat;
import hex.KMeans2;
import water.*;
import water.Job;
import water.api.DocGen;
import water.fvec.*;
import water.util.Log;
import java.util.Random;
import static water.util.Utils.getDeterRNG;
/**
* Gap Statistic
* This is an algorithm for estimating the optimal number of clusters in p-dimensional data.
* @author spencer_aiello
*
*/
public class GapStatistic extends Job.ColumnsJob {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
static final String DOC_GET = "gap statistic";
@API(help = "Number of Monte Carlo Bootstrap Replicates", filter = Default.class, lmin = 1, lmax = 100000, json = true)
public int b_max = 10;
@API(help = "The number maximum number of clusters to consider, must be at least 2.", filter = Default.class, json = true, lmin = 2, lmax = 10000)
public int k_max = 10;
@API(help = "Fraction of data size to replicate in each MC simulation.", filter = Default.class, json = true, dmin = 0, dmax = 1)
public double bootstrap_fraction = .33;
@API(help = "Max iteratiors per clustering.")
public int max_iter = 50;
@API(help = "A random seed.", filter = Default.class, json = true)
public long seed = new Random().nextLong();
@Override protected void execImpl() {
logStart();
GapStatisticModel model = initModel();
buildModel(model);
cleanup();
remove();
}
private GapStatisticModel initModel() {
try {
source.read_lock(self());
int ks = k_max;
double[] wks = new double[ks];
double[] wkbs = new double[ks];
double[] sk = new double[ks];
return new GapStatisticModel(destination_key, source._key, source, k_max, wks, wkbs, sk, k_max, b_max, 1, 0);
}
finally {
source.unlock(self());
}
}
private void buildModel(GapStatisticModel gs_model) {
try {
source.read_lock(self());
if (gs_model == null) gs_model = UKV.get(dest());
gs_model.delete_and_lock(self());
for (int k = 1; k <= k_max; ++k) {
if (this.isCancelledOrCrashed()) {
throw new JobCancelledException();
}
KMeans2 km = new KMeans2();
km.source = source;
km.cols = cols;
km.max_iter = max_iter;
km.k = k;
km.initialization = KMeans2.Initialization.Furthest;
km.invoke();
KMeans2.KMeans2Model res = UKV.get(km.dest());
Futures fs = new Futures();
DKV.remove(Key.make(km.dest()+"_clusters"), fs);
gs_model.wks[k - 1] = Math.log(res.mse());
double[] bwkbs = new double[b_max];
for (int b = 0; b < b_max; ++b) {
if (this.isCancelledOrCrashed()) {
throw new JobCancelledException();
}
Frame bs = new MRTask2() {
@Override public void map(Chunk[] chks, NewChunk[] nchks) {
final Random rng = getDeterRNG(seed + chks[0].cidx());
for (int row = 0; row < Math.floor(bootstrap_fraction * chks[0]._len); ++row) {
for (int col = 0; col < chks.length; ++ col) {
if (source.vecs()[col].isConst()) {
nchks[col].addNum(source.vecs()[col].max());
continue;
}
if (source.vecs()[col].isEnum()) {
nchks[col].addEnum((int)chks[col].at8(row));
continue;
}
double d = rng.nextDouble() * (source.vecs()[col].max() - source.vecs()[col].min()) + source.vecs()[col].min(); // uniform draw over the column's [min, max] range
nchks[col].addNum(d);
}
}
}
}.doAll(source.numCols(), source).outputFrame(source.names(), source.domains());
KMeans2 km_bs = new KMeans2();
km_bs.source = bs;
km_bs.cols = cols;
km_bs.max_iter = max_iter;
km_bs.k = k;
km_bs.initialization = KMeans2.Initialization.Furthest;
km_bs.invoke();
KMeans2.KMeans2Model res_bs = UKV.get(km_bs.dest());
fs = new Futures();
DKV.remove(Key.make(km_bs.dest()+"_clusters"), fs);
bwkbs[b] = Math.log(res_bs.mse());
gs_model.b++;
gs_model.update(self());
}
double sum_bwkbs = 0.;
for (double d: bwkbs) sum_bwkbs += d;
gs_model.wkbs[k - 1] = sum_bwkbs / b_max;
double sk_2 = 0.;
for (double d: bwkbs) {
sk_2 += (d - gs_model.wkbs[k - 1]) * (d - gs_model.wkbs[k - 1]) * 1. / (double) b_max;
}
gs_model.sk[k - 1] = Math.sqrt(sk_2) * Math.sqrt(1 + 1. / (double) b_max);
gs_model.k++;
for(int i = 0; i < gs_model.wks.length; ++i) gs_model.gap_stats[i] = gs_model.wkbs[i] - gs_model.wks[i];
gs_model.update(self());
}
}
catch(JobCancelledException ex) {
Log.info("Gap Statistic Computation was cancelled.");
}
catch(Exception ex) {
ex.printStackTrace();
throw new RuntimeException(ex);
}
finally {
if (gs_model != null) {
gs_model = UKV.get(dest());
gs_model.unlock(self());
}
source.unlock(self());
emptyLTrash();
}
}
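  // Minimal sketch (illustration only, not used by the job): how the gap statistic and a
  // "best k" could be derived from the quantities computed above - wks[k-1] = log(W_k) on the
  // data, wkbs[k-1] = bootstrap average of log(W_k) on reference data, sk[k-1] = its standard
  // error. The rule is: pick the smallest k with Gap(k) >= Gap(k+1) - s(k+1).
  @SuppressWarnings("unused")
  private static int sketchBestK(double[] wks, double[] wkbs, double[] sk) {
    int kmax = wks.length;
    double[] gap = new double[kmax];
    for (int i = 0; i < kmax; ++i) gap[i] = wkbs[i] - wks[i];
    for (int i = 0; i < kmax - 1; ++i)
      if (gap[i] >= gap[i + 1] - sk[i + 1]) return i + 1; // k is 1-based
    return kmax; // no smaller k satisfied the criterion
  }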
@Override protected Response redirect() {
return GapStatisticProgressPage.redirect(this, self(), dest());
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/gapstat/GapStatisticModel.java
|
package hex.gapstat;
import water.Job;
import water.Key;
import water.Model;
import water.api.DocGen;
import water.api.Request.API;
import water.fvec.Frame;
import water.util.D3Plot;
public class GapStatisticModel extends Model implements Job.Progress {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
// @API(help = "Number of clusters to build in each iteration.")
final int ks;
@API(help = "The initial pooled within cluster sum of squares for each iteration.")
final double[] wks;
@API(help = "The log of the Wks.")
final double[] wkbs;
@API(help = "The standard error from the Monte Carlo simulated data for each iteration.")
final double[] sk;
// @API(help = "k_max.")
final int k_max;
// @API(help = "b_max.")
final int b_max;
// @API(help = "The current value of k_max: (2 <= k <= k_max).")
int k;
// @API(help = "The current value of B (1 <= b <= B.")
int b;
@API(help = "The gap statistics per value of k.")
double[] gap_stats;
@API(help = "Optimal number of clusters.")
int k_best = 1;
public GapStatisticModel(Key selfKey, Key dataKey, Frame fr, int ks, double[] wks, double[] log_wks, double[] sk, int k_max, int b_max, int k, int b) {
super(selfKey, dataKey, fr, /* priorClassDistribution */ null);
this.ks = ks;
this.wks = wks;
this.wkbs = log_wks;
this.sk = sk;
this.k_max = k_max;
this.b_max = b_max;
this.k = k;
this.b = b;
this.gap_stats = new double[this.wks.length];
}
public double[] wks() { return wks; }
public double[] wkbs() { return wkbs; }
public double[] sk() {return sk; }
public double[] gaps() {return gap_stats; }
@Override
public float progress() {
// float p1 = (float) ((double) (k - 1) / (double) k_max);
return (float) (( (double) (k - 1) / (double) k_max ) + (double) b / (double) ( b_max * k_max ));
}
@Override protected float[] score0(double[] data, float[] preds) {
throw new UnsupportedOperationException();
}
@Override public String toString(){
return String.format("Gap Statistic Model (key=%s , trained on %s):\n", _key, _dataKey);
}
public void generateHTML(String title, StringBuilder sb) {
if(title != null && !title.isEmpty()) DocGen.HTML.title(sb, title);
DocGen.HTML.paragraph(sb, "Model Key: " + _key);
// sb.append("<div class='alert'>Actions: " + Predict.link(_key, "Predict on dataset") + ", "
// + NaiveBayes.link(_dataKey, "Compute new model") + "</div>");
DocGen.HTML.section(sb, "Gap Statistic Output:");
//Log Pooled Variances...
DocGen.HTML.section(sb, "Log of the Pooled Cluster Within Sum of Squares per value of k");
sb.append("<span style='display: inline-block;'>");
sb.append("<table class='table table-striped table-bordered'>");
double[] log_wks = wks();
sb.append("<tr>");
for (int i = 0; i <log_wks.length; ++i) {
if (log_wks[i] == 0) continue;
sb.append("<th>").append(i+1).append("</th>");
}
sb.append("</tr>");
sb.append("<tr>");
for (double log_wk : log_wks) {
if (log_wk == 0) continue;
sb.append("<td>").append(log_wk).append("</td>");
}
sb.append("</tr>");
sb.append("</table></span>");
//Monte Carlo Bootstrap averages
DocGen.HTML.section(sb, "Monte Carlo Bootstrap Replicate Averages of the Log of the Pooled Cluster Within SS per value of k");
sb.append("<span style='display: inline-block;'>");
sb.append("<table class='table table-striped table-bordered'>");
double[] log_wkbs = wkbs();
sb.append("<tr>");
for (int i = 0; i <log_wkbs.length; ++i) {
if (log_wkbs[i] == 0) continue;
sb.append("<th>").append(i+1).append("</th>");
}
sb.append("</tr>");
sb.append("<tr>");
for (double log_wkb : log_wkbs) {
if (log_wkb == 0) continue;
sb.append("<td>").append(log_wkb).append("</td>");
}
sb.append("</tr>");
sb.append("</table></span>");
//standard errors
DocGen.HTML.section(sb, "Standard Error for the Monte Carlo Bootstrap Replicate Averages of the Log of the Pooled Cluster Within SS per value of k");
sb.append("<span style='display: inline-block;'>");
sb.append("<table class='table table-striped table-bordered'>");
double[] sks = sk();
sb.append("<tr>");
for (int i = 0; i <sks.length; ++i) {
if (sks[i] == 0) continue;
sb.append("<th>").append(i+1).append("</th>");
}
sb.append("</tr>");
sb.append("<tr>");
for (double sk1 : sks) {
if (sk1 == 0) continue;
sb.append("<td>").append(sk1).append("</td>");
}
sb.append("</tr>");
sb.append("</table></span>");
//Gap computation
DocGen.HTML.section(sb, "Gap Statistic per value of k");
sb.append("<span style='display: inline-block;'>");
sb.append("<table class='table table-striped table-bordered'>");
sb.append("<tr>");
for (int i = 0; i < log_wkbs.length; ++i) {
if (log_wkbs[i] == 0) continue;
sb.append("<th>").append(i+1).append("</th>");
}
sb.append("</tr>");
double[] gaps = gaps();
sb.append("<tr>");
for (double gap : gaps) {
if (gap == 0) continue;
sb.append("<td>").append(gap).append("</td>");
}
sb.append("</tr>");
sb.append("</table></span>");
//Compute optimal k: min k such that G_k >= G_(k+1) - s_(k+1)
int kmin = -1;
for (int i = 0; i < gaps.length-1; ++i) {
int cur_k = i + 1;
if(gaps[cur_k] == 0) {
kmin = 0;
k_best = 1; //= kmin;
break;
}
if (i == gaps.length - 1) {
kmin = cur_k;
k_best = kmin;
break;
}
if ( gaps[i] >= (gaps[i+1] - sks[i+1])) {
kmin = cur_k;
k_best = kmin;
break;
}
}
if (kmin <= 0) k_best = 1;
if (log_wks[log_wks.length -1] != 0) {
DocGen.HTML.section(sb, "Best k:");
if (kmin <= 1) {
sb.append("No optimal number of clusters found (best k = 1).");
} else {
sb.append("k = ").append(kmin);
}
} else {
DocGen.HTML.section(sb, "Best k so far:");
if (kmin <= 1) {
sb.append("No k computed yet...");
} else {
sb.append("k = ").append(kmin);
}
}
if (k_best <= 0) k_best = 1;
float[] K = new float[ks];
float[] wks_y = new float[ks];
for(int i = 0; i < wks.length; ++i){
assert wks.length == ks;
K[i] = i + 1;
wks_y[i] = (float)wks[i];
}
DocGen.HTML.section(sb, "Elbow Plot");
sb.append("<br />");
D3Plot plt = new D3Plot(K, wks_y, "k (Number of clusters)", " log( W_k ) ", "Elbow Plot", true, false);
plt.generate(sb);
float[] gs = new float[ks];
String[] names = new String[ks];
for (int i = 0; i < gs.length; ++i) {
names[i] = "k = " + (i+1);
gs[i] = (float)gap_stats[i];
}
DocGen.HTML.section(sb, "Gap Statistics");
sb.append("<br />");
DocGen.HTML.graph(sb, "gapstats", "g_varimp",
DocGen.HTML.toJSArray(new StringBuilder(), names, null, gap_stats.length),
DocGen.HTML.toJSArray(new StringBuilder(), gs , null, gap_stats.length)
);
// D3Plot plt2 = new D3Plot(K, gs, "k (Number of clusters)", " Gap Statistics ", "Gap Statistic Elbow Plot", true, false);
// plt2.generate(sb);
DocGen.HTML.section(sb, "Gap Statistics Less Standard Errors");
sb.append("<br />");
float[] new_gs = new float[gs.length];
for (int i = 0; i < gs.length; ++i) {
new_gs[i] = (float) (gs[i] - sks[i]);
}
DocGen.HTML.graph(sb, "g_minus_err", "g_varimp",
DocGen.HTML.toJSArray(new StringBuilder(), names, null, gap_stats.length),
DocGen.HTML.toJSArray(new StringBuilder(), new_gs , null, gap_stats.length)
);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/gapstat/GapStatisticModelView.java
|
package hex.gapstat;
import water.DKV;
import water.Key;
import water.Request2;
import water.api.DocGen;
import water.api.Request;
public class GapStatisticModelView extends Request2 {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="Gap Statistic Model Key", required = true, filter = GSKeyFilter.class)
Key _modelKey;
class GSKeyFilter extends H2OKey { public GSKeyFilter() { super("",true); } }
@API(help="Gap Statistic Model")
GapStatisticModel gap_model;
public static String link(String txt, Key model) {
return "<a href='/2/GapStatisticModelView.html?_modelKey=" + model + "'>" + txt + "</a>";
}
public static Response redirect(Request req, Key modelKey) {
return Response.redirect(req, "/2/GapStatisticModelView", "_modelKey", modelKey);
}
@Override public boolean toHTML(StringBuilder sb){
gap_model.generateHTML("", sb);
return true;
}
@Override protected Response serve() {
gap_model = DKV.get(_modelKey).get();
return Response.done(this);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/gapstat/GapStatisticProgressPage.java
|
package hex.gapstat;
import water.DKV;
import water.Job;
import water.Key;
import water.Value;
import water.api.Progress2;
import water.api.Request;
public class GapStatisticProgressPage extends Progress2 {
/** Return {@link Response} for finished job. */
@Override protected Response jobDone(final Key dst) {
return GapStatisticModelView.redirect(this, dst);
}
public static Response redirect(Request req, Key jobkey, Key dest) {
return Response.redirect(req, "/2/GapStatisticProgressPage", JOB_KEY, jobkey, DEST_KEY, dest );
}
@Override public boolean toHTML( StringBuilder sb ) {
Job jjob = Job.findJob(job_key);
if (jjob ==null) return true;
Value v = DKV.get(jjob.dest());
if(v != null){
GapStatisticModel m = v.get();
m.generateHTML("Gap Statistic", sb);
} else
sb.append("<b>No model yet.</b>");
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/gbm/DBinomHistogram.java
|
package hex.gbm;
import java.util.*;
import water.MemoryManager;
import water.util.Utils;
import water.util.Utils.IcedBitSet;
/**
A Histogram, computed in parallel over a Vec.
<p>
Sums (and sums-of-squares) of binomials - 0 or 1. Sums-of-squares==sums in this case.
<p>
@author Cliff Click
*/
public class DBinomHistogram extends DHistogram<DBinomHistogram> {
private long _sums[]; // Sums (& square-sums since only 0 & 1 allowed), shared, atomically incremented
public DBinomHistogram( String name, final int nbins, byte isInt, float min, float maxEx, long nelems, boolean doGrpSplit ) {
super(name,nbins,isInt,min,maxEx,nelems,doGrpSplit);
}
@Override boolean isBinom() { return true; }
@Override public double mean(int b) {
long n = _bins[b];
return n>0 ? (double)_sums[b]/n : 0;
}
@Override public double var (int b) {
long n = _bins[b];
if( n<=1 ) return 0;
return (_sums[b] - (double)_sums[b]*_sums[b]/n)/(n-1);
}
// Big allocation of arrays
@Override void init0() {
_sums = MemoryManager.malloc8(_nbin);
}
// Add one row to a bin found via simple linear interpolation.
// Compute bin min/max.
// Compute response mean & variance.
@Override void incr0( int b, double y ) {
Utils.AtomicLongArray.incr(_sums,b);
}
// Merge two equal histograms together. Done in a F/J reduce, so no
// synchronization needed.
@Override void add0( DBinomHistogram dsh ) {
Utils.add(_sums,dsh._sums);
}
// Compute a "score" for a column; lower score "wins" (is a better split).
// Score is the sum of the MSEs when the data is split at a single point.
// mses[1] == MSE for splitting between bins 0 and 1.
// mses[n] == MSE for splitting between bins n-1 and n.
@Override public DTree.Split scoreMSE( int col ) {
final int nbins = nbins();
assert nbins > 1;
// Store indices from sort to determine group split later
Integer idx[] = new Integer[nbins];
for(int b = 0; b < nbins; b++) idx[b] = b;
// Sort predictor levels in ascending order of mean response within each bin
if(_isInt == 2 && _step == 1.0f && nbins >= 4 && _doGrpSplit) {
final Double[] means = new Double[nbins];
for(int b = 0; b < nbins; b++) means[b] = mean(b);
Arrays.sort(idx, new Comparator<Integer>() {
@Override public int compare(Integer o1, Integer o2) { return means[o1].compareTo(means[o2]); }
});
}
// Compute mean/var for cumulative bins from 0 to nbins inclusive.
long sums0[] = MemoryManager.malloc8(nbins+1);
long ns0[] = MemoryManager.malloc8(nbins+1);
for( int b=1; b<=nbins; b++ ) {
long m0 = sums0[b-1], m1 = _sums[idx[b-1]];
long k0 = ns0 [b-1], k1 = _bins[idx[b-1]];
if( k0==0 && k1==0 ) continue;
sums0[b] = m0+m1;
ns0 [b] = k0+k1;
}
long tot = ns0[nbins];
// If we see zero variance, we must have a constant response in this
// column. Normally this situation is cut out before we even try to split, but we might
// have NA's in THIS column...
if( sums0[nbins] == 0 || sums0[nbins] == tot ) { assert isConstantResponse(); return null; }
// Compute mean/var for cumulative bins from nbins to 0 inclusive.
long sums1[] = MemoryManager.malloc8(nbins+1);
long ns1[] = MemoryManager.malloc8(nbins+1);
for( int b=nbins-1; b>=0; b-- ) {
long m0 = sums1[b+1], m1 = _sums[idx[b]];
long k0 = ns1 [b+1], k1 = _bins[idx[b]];
if( k0==0 && k1==0 ) continue;
sums1[b] = m0+m1;
ns1 [b] = k0+k1;
assert ns0[b]+ns1[b]==tot;
}
// Now roll the split-point across the bins. There are 2 ways to do this:
// split left/right based on being less than some value, or being equal/
// not-equal to some value. Equal/not-equal makes sense for categoricals
// but both splits could work for any integral datatype. Do the less-than
// splits first.
int best=0; // The no-split
double best_se0=Double.MAX_VALUE; // Best squared error
double best_se1=Double.MAX_VALUE; // Best squared error
byte equal=0; // Ranged check
for( int b=1; b<=nbins-1; b++ ) {
if( _bins[idx[b]] == 0 ) continue; // Ignore empty splits
// We're making an unbiased estimator, so that MSE==Var.
// Then Squared Error = MSE*N = Var*N
// = (ssqs/N - mean^2)*N
// = ssqs - N*mean^2
// = ssqs - N*(sum/N)(sum/N)
// = ssqs - sum^2/N
// For binomial, ssqs == sum, so further reduces:
// = sum - sum^2/N
double se0 = sums0[b]; se0 -= se0*se0/ns0[b];
double se1 = sums1[b]; se1 -= se1*se1/ns1[b];
if( (se0+se1 < best_se0+best_se1) || // Strictly less error?
// Or tied MSE, then pick split towards middle bins
(se0+se1 == best_se0+best_se1 &&
Math.abs(b -(nbins>>1)) < Math.abs(best-(nbins>>1))) ) {
best_se0 = se0; best_se1 = se1;
best = b;
}
}
// For integral columns we can also try an equality-based split
if( _isInt > 0 && _step == 1.0f && // For any integral (not float) column
_maxEx-_min > 2 ) { // Also need more than 2 (boolean) choices to actually try a new split pattern
for( int b=1; b<=nbins-1; b++ ) {
if( _bins[idx[b]] == 0 ) continue; // Ignore empty splits
long N = ns0[b+0] + ns1[b+1];
if( N == 0 ) continue;
double sums = sums0[b+0]+sums1[b+1];
double sumb = _sums[idx[b+0]];
double si = sums - sums*sums/ N ; // Left+right, excluding 'b'
double sx = sumb - sumb*sumb/_bins[idx[b]]; // Just 'b'
if( si+sx < best_se0+best_se1 ) { // Strictly less error?
best_se0 = si; best_se1 = sx;
best = b; equal = 1; // Equality check
}
}
}
if( best==0 ) return null; // No place to split
assert best > 0 : "Must actually pick a split "+best;
long n0 = equal == 0 ? ns0[best] : ns0[best]+ ns1[best+1];
long n1 = equal == 0 ? ns1[best] : _bins[idx[best]] ;
double p0 = equal == 0 ? sums0[best] : sums0[best]+sums1[best+1];
double p1 = equal == 0 ? sums1[best] : _sums[idx[best]] ;
// For categorical predictors, set bits for levels grouped to right of split
IcedBitSet bs = null;
if(_isInt == 2 && _step == 1.0f && nbins >= 4 && _doGrpSplit) {
// Small cats: always use 4B to store and prepend offset # of zeros at front
// Big cats: save offset and store only nbins # of bits that are left after trimming
int offset = (int)_min;
if(_maxEx <= 32) {
equal = 2;
bs = new IcedBitSet(32);
for(int i = best; i < nbins; i++)
bs.set(idx[i] + offset);
} else {
equal = 3;
bs = new IcedBitSet(nbins, offset);
for(int i = best; i < nbins; i++)
bs.set(idx[i]);
}
}
return new DTree.Split(col,best,bs,equal,best_se0,best_se1,n0,n1,p0/n0,p1/n1);
}
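  // Illustrative sketch (hypothetical helper, not called by the scorer above): the squared-error
  // identity used in scoreMSE for 0/1 responses. Because each response is 0 or 1, the sum of
  // squares equals the sum, so SE = ssqs - sum^2/N collapses to sum - sum^2/N on each side.
  @SuppressWarnings("unused")
  private static double sketchBinomSplitSE(long sumLeft, long nLeft, long sumRight, long nRight) {
    double seLeft  = sumLeft  - (double) sumLeft  * sumLeft  / nLeft;
    double seRight = sumRight - (double) sumRight * sumRight / nRight;
    return seLeft + seRight; // lower total squared error == better split point
  }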
@Override public long byteSize0() {
return 8*1 + // 1 more internal array
  24 + (_sums.length<<3);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/gbm/DHistogram.java
|
package hex.gbm;
import sun.misc.Unsafe;
import water.*;
import water.fvec.Frame;
import water.fvec.Vec;
import water.nbhm.UtilUnsafe;
import water.util.Utils;
/**
A Histogram, computed in parallel over a Vec.
<p>
A {@code DHistogram} bins every value added to it, and computes the vec
min and max (for use in the next split), and response mean and variance for each
bin. {@code DHistogram}s are initialized with a min, max and number-of-
elements to be added (all of which are generally available from a Vec).
Bins run from min to max in uniform sizes. If the {@code DHistogram} can
determine that fewer bins are needed (e.g. boolean columns run from 0 to 1,
but only ever take on 2 values, so only 2 bins are needed), then fewer bins
are used.
<p>
   {@code DHistogram}s are shared per-node, and atomically updated. There's an
{@code add} call to help cross-node reductions. The data is stored in
primitive arrays, so it can be sent over the wire.
<p>
If we are successively splitting rows (e.g. in a decision tree), then a
fresh {@code DHistogram} for each split will dynamically re-bin the data.
Each successive split will logarithmically divide the data. At the first
split, outliers will end up in their own bins - but perhaps some central
bins may be very full. At the next split(s), the full bins will get split,
and again until (with a log number of splits) each bin holds roughly the
same amount of data. This dynamic binning resolves a lot of problems with
picking the proper bin count or limits - generally a few more tree levels
will equal any fancy but fixed-size binning strategy.
<p>
@author Cliff Click
*/
public abstract class DHistogram<TDH extends DHistogram> extends Iced {
public final transient String _name; // Column name (for debugging)
public final byte _isInt; // 0: float col, 1: int col, 2: enum & int col
public final char _nbin; // Bin count
public final float _step; // Linear interpolation step per bin
public final float _min, _maxEx; // Conservative Min/Max over whole collection. _maxEx is Exclusive.
public int _bins[]; // Bins, shared, atomically incremented
public final boolean _doGrpSplit;
// Atomically updated float min/max
protected float _min2, _maxIn; // Min/Max, shared, atomically updated. _maxIn is Inclusive.
private static final Unsafe _unsafe = UtilUnsafe.getUnsafe();
static private final long _min2Offset;
static private final long _max2Offset;
static {
try {
_min2Offset = _unsafe.objectFieldOffset(DHistogram.class.getDeclaredField("_min2"));
_max2Offset = _unsafe.objectFieldOffset(DHistogram.class.getDeclaredField("_maxIn"));
} catch( Exception e ) {
throw H2O.fail();
}
}
public void setMin( float min ) {
int imin = Float.floatToRawIntBits(min);
float old = _min2;
while( min < old && !_unsafe.compareAndSwapInt(this, _min2Offset, Float.floatToRawIntBits(old), imin ) )
old = _min2;
}
// Find Inclusive _max2
public void setMax( float max ) {
int imax = Float.floatToRawIntBits(max);
float old = _maxIn;
while( max > old && !_unsafe.compareAndSwapInt(this, _max2Offset, Float.floatToRawIntBits(old), imax ) )
old = _maxIn;
}
public DHistogram( String name, final int nbins, final byte isInt, final float min, final float maxEx, long nelems, boolean doGrpSplit ) {
assert nelems > 0;
assert nbins >= 1;
    assert maxEx > min : "Caller ensures "+maxEx+">"+min+", since if max==min the column "+name+" is all constants";
_isInt = isInt;
_name = name;
_doGrpSplit = doGrpSplit;
_min=min;
_maxEx=maxEx; // Set Exclusive max
_min2 = Float.MAX_VALUE; // Set min/max to outer bounds
_maxIn= -Float.MAX_VALUE;
// See if we can show there are fewer unique elements than nbins.
// Common for e.g. boolean columns, or near leaves.
int xbins = nbins;
float step;
if( isInt>0 && maxEx-min <= nbins ) {
assert ((long)min)==min; // No overflow
xbins = (char)((long)maxEx-(long)min); // Shrink bins
assert xbins > 1; // Caller ensures enough range to bother
step = 1.0f; // Fixed stepsize
} else {
step = (maxEx-min)/nbins; // Step size for linear interpolation
assert step > 0;
}
_step = 1.0f/step; // Use multiply instead of division during frequent binning math
_nbin = (char)xbins;
// Do not allocate the big arrays here; wait for scoreCols to pick which cols will be used.
}
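  // Illustrative sketch of the bin-shrinking above: for a boolean column with
  // min=0 and maxEx=2 (find_maxEx(1,1)==2) and nbins=20, isInt>0 and
  // maxEx-min==2 <= 20, so xbins==2 and step==1.0f - one bin per level instead
  // of 20 mostly-empty bins.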
abstract boolean isBinom();
// Interpolate d to find bin#
int bin( float col_data ) {
if( Float.isNaN(col_data) ) return 0; // Always NAs to bin 0
if (Float.isInfinite(col_data)) // Put infinity to most left/right bin
if (col_data<0) return 0;
else return _bins.length-1;
// When the model is exposed to new test data, we could have data that is
// out of range of any bin - however this binning call only happens during
// model-building.
assert _min <= col_data && col_data < _maxEx : "Coldata "+col_data+" out of range "+this;
int idx1 = (int)((col_data-_min)*_step);
assert 0 <= idx1 && idx1 <= _bins.length;
if( idx1 == _bins.length) idx1--; // Roundoff error allows idx1 to hit upper bound, so truncate
return idx1;
}
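  // Illustrative sketch: with _min=0, _maxEx=10 and 5 bins, _step is stored as
  // 5/10 = 0.5, so for example
  //   bin(7.3f) == (int)((7.3f-0)*0.5f) == 3   // values in [6,8) land in bin 3
  //   bin(Float.NaN) == 0                      // NAs always go to bin 0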
float binAt( int b ) { return _min+b/_step; }
public int nbins() { return _nbin; }
public int bins(int b) { return _bins[b]; }
public float mins(int b) { return _min2; }
public float maxsIn(int b) { return _maxIn; } // Always an Inclusive max
abstract public double mean(int b);
abstract public double var (int b);
// Big allocation of arrays
abstract void init0();
final void init() {
assert _bins == null;
_bins = MemoryManager.malloc4(_nbin);
init0();
}
// Add one row to a bin found via simple linear interpolation.
// Compute bin min/max.
// Compute response mean & variance.
abstract void incr0( int b, double y );
final void incr( float col_data, double y ) {
assert Float.isNaN(col_data) || Float.isInfinite(col_data) || (_min <= col_data && col_data < _maxEx) : "col_data "+col_data+" out of range "+this;
int b = bin(col_data); // Compute bin# via linear interpolation
Utils.AtomicIntArray.incr(_bins,b); // Bump count in bin
// Track actual lower/upper bound per-bin
if (!Float.isInfinite(col_data)) {
setMin(col_data);
setMax(col_data);
}
if( y != 0 ) incr0(b,y);
}
// Merge two equal histograms together. Done in a F/J reduce, so no
// synchronization needed.
abstract void add0( TDH dsh );
void add( TDH dsh ) {
assert _isInt == dsh._isInt && _nbin == dsh._nbin && _step == dsh._step &&
_min == dsh._min && _maxEx == dsh._maxEx;
assert (_bins == null && dsh._bins == null) || (_bins != null && dsh._bins != null);
if( _bins == null ) return;
Utils.add(_bins,dsh._bins);
if( _min2 > dsh._min2 ) _min2 = dsh._min2 ;
if( _maxIn < dsh._maxIn ) _maxIn = dsh._maxIn;
add0(dsh);
}
// Inclusive min & max
public float find_min () { return _min2 ; }
public float find_maxIn() { return _maxIn; }
// Exclusive max
public float find_maxEx() { return find_maxEx(_maxIn,_isInt); }
static public float find_maxEx(float maxIn, int isInt ) {
float ulp = Math.ulp(maxIn);
if( isInt > 0 && 1 > ulp ) ulp = 1;
return maxIn+ulp;
}
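  // Illustrative usage sketch:
  //   find_maxEx(10f, 1) == 11.0f               // integer column: bump by at least 1
  //   find_maxEx(10f, 0) == 10f + Math.ulp(10f) // float column: bump by one ulp only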
// Compute a "score" for a column; lower score "wins" (is a better split).
// Score is the sum of the MSEs when the data is split at a single point.
// mses[1] == MSE for splitting between bins 0 and 1.
// mses[n] == MSE for splitting between bins n-1 and n.
abstract public DTree.Split scoreMSE( int col );
// The initial histogram bins are setup from the Vec rollups.
static public DHistogram[] initialHist(Frame fr, int ncols, int nbins, DHistogram hs[], boolean doGrpSplit, boolean isBinom) {
Vec vecs[] = fr.vecs();
for( int c=0; c<ncols; c++ ) {
Vec v = vecs[c];
final float minIn = (float)Math.max(v.min(),-Float.MAX_VALUE); // inclusive vector min
final float maxIn = (float)Math.min(v.max(), Float.MAX_VALUE); // inclusive vector max
final float maxEx = find_maxEx(maxIn,v.isInt()?1:0); // smallest exclusive max
final long vlen = v.length();
hs[c] = v.naCnt()==vlen || v.min()==v.max() ? null :
make(fr._names[c],nbins,(byte)(v.isEnum() ? 2 : (v.isInt()?1:0)),minIn,maxEx,vlen,doGrpSplit,isBinom);
}
return hs;
}
static public DHistogram make( String name, final int nbins, byte isInt, float min, float maxEx, long nelems, boolean doGrpSplit, boolean isBinom ) {
return isBinom
? new DBinomHistogram(name,nbins,isInt,min,maxEx,nelems,doGrpSplit)
: new DRealHistogram(name,nbins,isInt,min,maxEx,nelems,doGrpSplit);
}
// Check for a constant response variable
public boolean isConstantResponse() {
double m = Double.NaN;
for( int b=0; b<_bins.length; b++ ) {
if( _bins[b] == 0 ) continue;
if( var(b) > 1e-14 ) return false;
double mean = mean(b);
if( mean != m )
if( Double.isNaN(m) ) m=mean;
else if(Math.abs(m - mean) > 1e-6) return false;
}
return true;
}
// Pretty-print a histogram
@Override public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(_name).append(":").append(_min).append("-").append(_maxEx).append(" step="+(1/_step)+" nbins="+nbins()+" isInt="+_isInt);
if( _bins != null ) {
for( int b=0; b<_bins.length; b++ ) {
sb.append(String.format("\ncnt=%d, [%f - %f], mean/var=", _bins[b],mins(b),maxsIn(b)));
sb.append(String.format("%6.2f/%6.2f,", mean(b), var(b)));
}
sb.append('\n');
}
return sb.toString();
}
abstract public long byteSize0();
public long byteSize() {
long sum = 8+8; // Self header
sum += 1+2; // enum; nbin
sum += 4+4+4+4+4; // step,min,max,min2,max2
    sum += 8*1;      // 1 internal array
if( _bins == null ) return sum;
    // + 24 (array header) + len<<3 (array body; conservative for an int[])
    sum += 24 + (_bins.length<<3); // parenthesized so the shift applies to the length only
sum += byteSize0();
return sum;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/gbm/DRealHistogram.java
|
package hex.gbm;
import water.MemoryManager;
import water.util.Utils;
import java.util.Arrays;
import java.util.Comparator;
/**
A Histogram, computed in parallel over a Vec.
<p>
Sums and sums-of-squares of doubles
<p>
@author Cliff Click
*/
public class DRealHistogram extends DHistogram<DRealHistogram> {
private double _sums[], _ssqs[]; // Sums & square-sums, shared, atomically incremented
public DRealHistogram( String name, final int nbins, byte isInt, float min, float maxEx, long nelems, boolean doGrpSplit ) {
super(name,nbins,isInt,min,maxEx,nelems,doGrpSplit);
}
@Override boolean isBinom() { return false; }
@Override public double mean(int b) {
long n = _bins[b];
return n>0 ? _sums[b]/n : 0;
}
@Override public double var (int b) {
long n = _bins[b];
if( n<=1 ) return 0;
return (_ssqs[b] - _sums[b]*_sums[b]/n)/(n-1);
}
// Big allocation of arrays
@Override void init0() {
_sums = MemoryManager.malloc8d(_nbin);
_ssqs = MemoryManager.malloc8d(_nbin);
}
// Add one row to a bin found via simple linear interpolation.
// Compute response mean & variance.
  // Done racily inside F/J map calls, so updates must be atomic
@Override void incr0( int b, double y ) {
Utils.AtomicDoubleArray.add(_sums,b,y);
Utils.AtomicDoubleArray.add(_ssqs,b,y*y);
}
// Same, except square done by caller
void incr1( int b, double y, double yy ) {
Utils.AtomicDoubleArray.add(_sums,b,y);
Utils.AtomicDoubleArray.add(_ssqs,b,yy);
}
// Merge two equal histograms together.
// Done in a F/J reduce, so no synchronization needed.
@Override void add0( DRealHistogram dsh ) {
Utils.add(_sums,dsh._sums);
Utils.add(_ssqs,dsh._ssqs);
}
// Compute a "score" for a column; lower score "wins" (is a better split).
// Score is the sum of the MSEs when the data is split at a single point.
// mses[1] == MSE for splitting between bins 0 and 1.
// mses[n] == MSE for splitting between bins n-1 and n.
@Override public DTree.Split scoreMSE( int col ) {
final int nbins = nbins();
assert nbins > 1;
// Store indices from sort to determine group split later
Integer idx[] = new Integer[nbins];
for(int b = 0; b < nbins; b++) idx[b] = b;
// Sort predictor levels in ascending order of mean response within each bin
if(_isInt == 2 && _step == 1.0f && nbins >= 4 && _doGrpSplit) {
final double[] means = new double[nbins];
System.arraycopy(_sums, 0, means, 0, nbins);
Arrays.sort(idx, new Comparator<Integer>() {
@Override
public int compare(Integer o1, Integer o2) {
return ((Double)means[o1]).compareTo(means[o2]);
}
});
}
// Compute mean/var for cumulative bins from 0 to nbins inclusive.
double sums0[] = MemoryManager.malloc8d(nbins+1);
double ssqs0[] = MemoryManager.malloc8d(nbins+1);
long ns0[] = MemoryManager.malloc8 (nbins+1);
for( int b=1; b<=nbins; b++ ) {
double m0 = sums0[b-1], m1 = _sums[b-1];
double s0 = ssqs0[b-1], s1 = _ssqs[b-1];
long k0 = ns0 [b-1], k1 = _bins[b-1];
if( k0==0 && k1==0 ) continue;
sums0[b] = m0+m1;
ssqs0[b] = s0+s1;
ns0 [b] = k0+k1;
}
long tot = ns0[nbins];
// If we see zero variance, we must have a constant response in this
// column. Normally this situation is cut out before we even try to split, but we might
// have NA's in THIS column...
if( ssqs0[nbins]*tot - sums0[nbins]*sums0[nbins] == 0 ) {
assert isConstantResponse(); return null;
}
// Compute mean/var for cumulative bins from nbins to 0 inclusive.
double sums1[] = MemoryManager.malloc8d(nbins+1);
double ssqs1[] = MemoryManager.malloc8d(nbins+1);
long ns1[] = MemoryManager.malloc8 (nbins+1);
for( int b=nbins-1; b>=0; b-- ) {
double m0 = sums1[b+1], m1 = _sums[b];
double s0 = ssqs1[b+1], s1 = _ssqs[b];
long k0 = ns1 [b+1], k1 = _bins[b];
if( k0==0 && k1==0 ) continue;
sums1[b] = m0+m1;
ssqs1[b] = s0+s1;
ns1 [b] = k0+k1;
assert ns0[b]+ns1[b]==tot;
}
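    // Illustrative sketch: with 3 bins holding 3, 2 and 5 rows, the cumulative
    // counts are ns0 = {0,3,5,10} (rows strictly below each candidate split) and
    // ns1 = {10,7,5,0} (rows at or above it), so ns0[b]+ns1[b]==10 for every b,
    // as the assert above checks; sums0/ssqs0 and sums1/ssqs1 accumulate the
    // response sums and squared sums the same way.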
// Now roll the split-point across the bins. There are 2 ways to do this:
// split left/right based on being less than some value, or being equal/
    // not-equal to some value. Equal/not-equal makes sense for categoricals
// but both splits could work for any integral datatype. Do the less-than
// splits first.
int best=0; // The no-split
double best_se0=Double.MAX_VALUE; // Best squared error
double best_se1=Double.MAX_VALUE; // Best squared error
byte equal=0; // Ranged check
for( int b=1; b<=nbins-1; b++ ) {
if( _bins[b] == 0 ) continue; // Ignore empty splits
// We're making an unbiased estimator, so that MSE==Var.
// Then Squared Error = MSE*N = Var*N
// = (ssqs/N - mean^2)*N
// = ssqs - N*mean^2
// = ssqs - N*(sum/N)(sum/N)
// = ssqs - sum^2/N
double se0 = ssqs0[b] - sums0[b]*sums0[b]/ns0[b];
double se1 = ssqs1[b] - sums1[b]*sums1[b]/ns1[b];
if( (se0+se1 < best_se0+best_se1) || // Strictly less error?
// Or tied MSE, then pick split towards middle bins
(se0+se1 == best_se0+best_se1 &&
Math.abs(b -(nbins>>1)) < Math.abs(best-(nbins>>1))) ) {
best_se0 = se0; best_se1 = se1;
best = b;
}
}
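    // Worked example (illustrative) of the squared-error identity above: for
    // responses {1,2,3} on one side of a split, sum=6, ssqs=14, N=3, so
    //   se = 14 - 6*6/3 = 2
    // which equals the direct sum of squared deviations from the mean 2:
    //   (1-2)^2 + (2-2)^2 + (3-2)^2 = 2.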
    // For integral (non-float) columns with more than 2 levels, also try an equality-based split
if( _isInt > 0 && _step == 1.0f && // For any integral (not float) column
_maxEx-_min > 2 ) { // Also need more than 2 (boolean) choices to actually try a new split pattern
for( int b=1; b<=nbins-1; b++ ) {
if( _bins[b] == 0 ) continue; // Ignore empty splits
long N = ns0[b+0] + ns1[b+1];
double sums = sums0[b+0]+sums1[b+1];
double ssqs = ssqs0[b+0]+ssqs1[b+1];
if( N == 0 ) continue;
double si = ssqs - sums * sums / N ; // Left+right, excluding 'b'
double sx = _ssqs[b] - _sums[b]*_sums[b]/_bins[b]; // Just 'b'
if( si+sx < best_se0+best_se1 ) { // Strictly less error?
best_se0 = si; best_se1 = sx;
best = b; equal = 1; // Equality check
}
}
}
if( best==0 ) return null; // No place to split
assert best > 0 : "Must actually pick a split "+best;
long n0 = equal == 0 ? ns0[best] : ns0[best]+ ns1[best+1];
long n1 = equal == 0 ? ns1[best] : _bins[best] ;
double p0 = equal == 0 ? sums0[best] : sums0[best]+sums1[best+1];
double p1 = equal == 0 ? sums1[best] : _sums[best] ;
// For categorical predictors, set bits for levels grouped to right of split
Utils.IcedBitSet bs = null;
if(_isInt == 2 && _step == 1.0f && nbins >= 4 && _doGrpSplit) {
// Small cats: always use 4B to store and prepend offset # of zeros at front
// Big cats: save offset and store only nbins # of bits that are left after trimming
int offset = (int)_min;
if(_maxEx <= 32) {
equal = 2;
bs = new Utils.IcedBitSet(32);
for(int i = best; i < nbins; i++)
bs.set(idx[i] + offset);
} else {
equal = 3;
bs = new Utils.IcedBitSet(nbins, offset);
for(int i = best; i < nbins; i++)
bs.set(idx[i]);
}
}
return new DTree.Split(col,best,bs,equal,best_se0,best_se1,n0,n1,p0/n0,p1/n1);
}
@Override public long byteSize0() {
    return 8*2 + // 2 more internal arrays
      24 + (_sums.length<<3) + // parenthesized so each shift applies to the array length only
      24 + (_ssqs.length<<3);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/gbm/DTree.java
|
package hex.gbm;
import static hex.gbm.SharedTreeModelBuilder.createRNG;
import hex.ConfusionMatrix;
import hex.VarImp;
import hex.gbm.DTree.TreeModel.CompressedTree;
import hex.gbm.DTree.TreeModel.TreeVisitor;
import water.*;
import water.api.*;
import water.api.Request.API;
import water.fvec.Chunk;
import water.license.LicenseManager;
import water.util.*;
import water.util.Utils.IcedBitSet;
import java.util.*;
/**
A Decision Tree, laid over a Frame of Vecs, and built distributed.
This class defines an explicit Tree structure, as a collection of {@code DTree}
{@code Node}s. The Nodes are numbered with a unique {@code _nid}. Users
need to maintain their own mapping from their data to a {@code _nid}, where
the obvious technique is to have a Vec of {@code _nid}s (ints), one per each
element of the data Vecs.
Each {@code Node} has a {@code DHistogram}, describing summary data about the
rows. The DHistogram requires a pass over the data to be filled in, and we
expect to fill in all rows for Nodes at the same depth at the same time.
i.e., a single pass over the data will fill in all leaf Nodes' DHistograms
at once.
@author Cliff Click
*/
public class DTree extends Iced {
final String[] _names; // Column names
final int _ncols; // Active training columns
final char _nbins; // Max number of bins to split over
final char _nclass; // #classes, or 1 for regression trees
final int _min_rows; // Fewest allowed rows in any split
final long _seed; // RNG seed; drives sampling seeds if necessary
private Node[] _ns; // All the nodes in the tree. Node 0 is the root.
int _len; // Resizable array
public DTree( String[] names, int ncols, char nbins, char nclass, int min_rows ) { this(names,ncols,nbins,nclass,min_rows,-1); }
public DTree( String[] names, int ncols, char nbins, char nclass, int min_rows, long seed ) {
_names = names; _ncols = ncols; _nbins=nbins; _nclass=nclass; _min_rows = min_rows; _ns = new Node[1]; _seed = seed; }
public final Node root() { return _ns[0]; }
// One-time local init after wire transfer
void init_tree( ) { for( int j=0; j<_len; j++ ) _ns[j]._tree = this; }
// Return Node i
public final Node node( int i ) {
if( i >= _len ) throw new ArrayIndexOutOfBoundsException(i);
return _ns[i];
}
public final UndecidedNode undecided( int i ) { return (UndecidedNode)node(i); }
public final DecidedNode decided( int i ) { return ( DecidedNode)node(i); }
// Get a new node index, growing innards on demand
private synchronized int newIdx(Node n) {
if( _len == _ns.length ) _ns = Arrays.copyOf(_ns,_len<<1);
_ns[_len] = n;
return _len++;
}
// Return a deterministic chunk-local RNG. Can be kinda expensive.
// Override this in, e.g. Random Forest algos, to get a per-chunk RNG
public Random rngForChunk( int cidx ) { throw H2O.fail(); }
public final int len() { return _len; }
public final void len(int len) { _len = len; }
// Public stats about tree
public int leaves;
public int depth;
// --------------------------------------------------------------------------
// Abstract node flavor
public static abstract class Node extends Iced {
transient protected DTree _tree; // Make transient, lest we clone the whole tree
final protected int _pid; // Parent node id, root has no parent and uses -1
final protected int _nid; // My node-ID, 0 is root
Node( DTree tree, int pid, int nid ) {
_tree = tree;
_pid=pid;
tree._ns[_nid=nid] = this;
}
Node( DTree tree, int pid ) {
_tree = tree;
_pid=pid;
_nid = tree.newIdx(this);
}
// Recursively print the decision-line from tree root to this child.
StringBuilder printLine(StringBuilder sb ) {
if( _pid==-1 ) return sb.append("[root]");
DecidedNode parent = _tree.decided(_pid);
parent.printLine(sb).append(" to ");
return parent.printChild(sb,_nid);
}
abstract public StringBuilder toString2(StringBuilder sb, int depth);
abstract protected AutoBuffer compress(AutoBuffer ab);
abstract protected int size();
public final int nid() { return _nid; }
public final int pid() { return _pid; }
}
// --------------------------------------------------------------------------
// Records a column, a bin to split at within the column, and the MSE.
public static class Split extends Iced {
final int _col, _bin; // Column to split, bin where being split
final IcedBitSet _bs; // For binary y and categorical x (with >= 4 levels), split into 2 non-contiguous groups
final byte _equal; // Split is 0: <, 1: == with single split point, 2: == with group split (<= 32 levels), 3: == with group split (> 32 levels)
final double _se0, _se1; // Squared error of each subsplit
final long _n0, _n1; // Rows in each final split
final double _p0, _p1; // Predicted value for each split
public Split( int col, int bin, IcedBitSet bs, byte equal, double se0, double se1, long n0, long n1, double p0, double p1 ) {
_col = col; _bin = bin; _bs = bs; _equal = equal;
_n0 = n0; _n1 = n1; _se0 = se0; _se1 = se1;
_p0 = p0; _p1 = p1;
}
public final double se() { return _se0+_se1; }
public final int col() { return _col; }
public final int bin() { return _bin; }
public final long rowsLeft () { return _n0; }
public final long rowsRight() { return _n1; }
/** Returns empirical improvement in mean-squared error.
*
* <p>Formula for node splitting space into two subregions R1,R2 with predictions y1, y2:</p>
* <code>i2(R1,R2) ~ w1*w2 / (w1+w2) * (y1 - y2)^2</code>
*
*
* <p>For more information see (35), (45) in the paper
* <a href="www-stat.stanford.edu/~jhf/ftp/trebst.pdf"><i>J. Friedman - Greedy Function Approximation: A Gradient boosting machine</i></a></p> */
public final float improvement() {
double d = (_p0-_p1);
return (float) ( d*d*_n0*_n1 / (_n0+_n1) );
}
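    // Worked example (illustrative): with n0=10 rows predicting p0=1.0 and
    // n1=30 rows predicting p1=3.0,
    //   improvement = (1.0-3.0)^2 * 10*30 / (10+30) = 4 * 300/40 = 30.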
// Split-at dividing point. Don't use the step*bin+bmin, due to roundoff
// error we can have that point be slightly higher or lower than the bin
// min/max - which would allow values outside the stated bin-range into the
// split sub-bins. Always go for a value which splits the nearest two
// elements.
float splat(DHistogram hs[]) {
DHistogram h = hs[_col];
assert _bin > 0 && _bin < h.nbins();
if( _equal == 1 ) { assert h.bins(_bin)!=0; return h.binAt(_bin); }
// Find highest non-empty bin below the split
int x=_bin-1;
while( x >= 0 && h.bins(x)==0 ) x--;
// Find lowest non-empty bin above the split
int n=_bin;
while( n < h.nbins() && h.bins(n)==0 ) n++;
// Lo is the high-side of the low non-empty bin, rounded to int for int columns
// Hi is the low -side of the hi non-empty bin, rounded to int for int columns
// Example: Suppose there are no empty bins, and we are splitting an
// integer column at 48.4 (more than nbins, so step != 1.0, perhaps
// step==1.8). The next lowest non-empty bin is from 46.6 to 48.4, and
// we set lo=48.4. The next highest non-empty bin is from 48.4 to 50.2
// and we set hi=48.4. Since this is an integer column, we round lo to
// 48 (largest integer below the split) and hi to 49 (smallest integer
// above the split). Finally we average them, and split at 48.5.
float lo = h.binAt(x+1);
float hi = h.binAt(n );
if( h._isInt > 0 ) lo = h._step==1 ? lo-1 : (float)Math.floor(lo);
if( h._isInt > 0 ) hi = h._step==1 ? hi : (float)Math.ceil (hi);
return (lo+hi)/2.0f;
}
// Split a DHistogram. Return null if there is no point in splitting
// this bin further (such as there's fewer than min_row elements, or zero
// error in the response column). Return an array of DHistograms (one
// per column), which are bounded by the split bin-limits. If the column
// has constant data, or was not being tracked by a prior DHistogram
// (for being constant data from a prior split), then that column will be
// null in the returned array.
public DHistogram[] split( int way, char nbins, int min_rows, DHistogram hs[], float splat ) {
long n = way==0 ? _n0 : _n1;
if( n < min_rows || n <= 1 ) return null; // Too few elements
double se = way==0 ? _se0 : _se1;
if( se <= 1e-30 ) return null; // No point in splitting a perfect prediction
// Build a next-gen split point from the splitting bin
int cnt=0; // Count of possible splits
DHistogram nhists[] = new DHistogram[hs.length]; // A new histogram set
for( int j=0; j<hs.length; j++ ) { // For every column in the new split
DHistogram h = hs[j]; // old histogram of column
if( h == null ) continue; // Column was not being tracked?
int adj_nbins = Math.max(h.nbins()>>1,nbins);
// min & max come from the original column data, since splitting on an
// unrelated column will not change the j'th columns min/max.
// Tighten min/max based on actual observed data for tracked columns
float min, maxEx;
if( h._bins == null ) { // Not tracked this last pass?
min = h._min; // Then no improvement over last go
maxEx = h._maxEx;
} else { // Else pick up tighter observed bounds
min = h.find_min(); // Tracked inclusive lower bound
if( h.find_maxIn() == min ) continue; // This column will not split again
maxEx = h.find_maxEx(); // Exclusive max
}
// Tighter bounds on the column getting split: exactly each new
// DHistogram's bound are the bins' min & max.
if( _col==j ) {
if( _equal != 0 ) { // Equality split; no change on unequals-side
if( way == 1 ) continue; // but know exact bounds on equals-side - and this col will not split again
} else { // Less-than split
if( h._bins[_bin]==0 )
throw H2O.unimpl(); // Here I should walk up & down same as split() above.
float split = splat;
if( h._isInt > 0 ) split = (float)Math.ceil(split);
if( way == 0 ) maxEx= split;
else min = split;
}
}
if( Utils.equalsWithinOneSmallUlp(min, maxEx) ) continue; // This column will not split again
if( h._isInt > 0 && !(min+1 < maxEx ) ) continue; // This column will not split again
if( min > maxEx ) continue; // Happens for all-NA subsplits
assert min < maxEx && n > 1 : ""+min+"<"+maxEx+" n="+n;
nhists[j] = DHistogram.make(h._name,adj_nbins,h._isInt,min,maxEx,n,h._doGrpSplit,h.isBinom());
cnt++; // At least some chance of splitting
}
return cnt == 0 ? null : nhists;
}
public static StringBuilder ary2str( StringBuilder sb, int w, long xs[] ) {
sb.append('[');
for( long x : xs ) UndecidedNode.p(sb,x,w).append(",");
return sb.append(']');
}
public static StringBuilder ary2str( StringBuilder sb, int w, float xs[] ) {
sb.append('[');
for( float x : xs ) UndecidedNode.p(sb,x,w).append(",");
return sb.append(']');
}
public static StringBuilder ary2str( StringBuilder sb, int w, double xs[] ) {
sb.append('[');
for( double x : xs ) UndecidedNode.p(sb,(float)x,w).append(",");
return sb.append(']');
}
@Override public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{"+_col+"/");
UndecidedNode.p(sb,_bin,2);
sb.append(", se0=").append(_se0);
sb.append(", se1=").append(_se1);
sb.append(", n0=" ).append(_n0 );
sb.append(", n1=" ).append(_n1 );
return sb.append("}").toString();
}
}
// --------------------------------------------------------------------------
// An UndecidedNode: Has a DHistogram which is filled in (in parallel
// with other histograms) in a single pass over the data. Does not contain
// any split-decision.
public static abstract class UndecidedNode extends Node {
public transient DHistogram[] _hs;
public final int _scoreCols[]; // A list of columns to score; could be null for all
public UndecidedNode( DTree tree, int pid, DHistogram[] hs ) {
super(tree,pid);
assert hs.length==tree._ncols;
_scoreCols = scoreCols(_hs=hs);
}
// Pick a random selection of columns to compute best score.
// Can return null for 'all columns'.
abstract public int[] scoreCols( DHistogram[] hs );
// Make the parent of this Node use a -1 NID to prevent the split that this
// node otherwise induces. Happens if we find out too-late that we have a
// perfect prediction here, and we want to turn into a leaf.
public void do_not_split( ) {
if( _pid == -1 ) return; // skip root
DecidedNode dn = _tree.decided(_pid);
for( int i=0; i<dn._nids.length; i++ )
if( dn._nids[i]==_nid )
{ dn._nids[i] = -1; return; }
throw H2O.fail();
}
@Override public String toString() {
final String colPad=" ";
final int cntW=4, mmmW=4, menW=5, varW=5;
final int colW=cntW+1+mmmW+1+mmmW+1+menW+1+varW;
StringBuilder sb = new StringBuilder();
sb.append("Nid# ").append(_nid).append(", ");
printLine(sb).append("\n");
if( _hs == null ) return sb.append("_hs==null").toString();
final int ncols = _hs.length;
for( int j=0; j<ncols; j++ )
if( _hs[j] != null )
p(sb,_hs[j]._name+String.format(", %4.1f-%4.1f",_hs[j]._min,_hs[j]._maxEx),colW).append(colPad);
sb.append('\n');
for( int j=0; j<ncols; j++ ) {
if( _hs[j] == null ) continue;
p(sb,"cnt" ,cntW).append('/');
p(sb,"min" ,mmmW).append('/');
p(sb,"max" ,mmmW).append('/');
p(sb,"mean",menW).append('/');
p(sb,"var" ,varW).append(colPad);
}
sb.append('\n');
// Max bins
int nbins=0;
for( int j=0; j<ncols; j++ )
if( _hs[j] != null && _hs[j].nbins() > nbins ) nbins = _hs[j].nbins();
for( int i=0; i<nbins; i++ ) {
for( int j=0; j<ncols; j++ ) {
DHistogram h = _hs[j];
if( h == null ) continue;
if( i < h.nbins() && h._bins != null ) {
p(sb, h.bins(i),cntW).append('/');
p(sb, h.binAt(i),mmmW).append('/');
p(sb, h.binAt(i+1),mmmW).append('/');
p(sb, h.mean(i),menW).append('/');
p(sb, h.var (i),varW).append(colPad);
} else {
p(sb,"",colW).append(colPad);
}
}
sb.append('\n');
}
sb.append("Nid# ").append(_nid);
return sb.toString();
}
static private StringBuilder p(StringBuilder sb, String s, int w) {
return sb.append(Log.fixedLength(s,w));
}
static private StringBuilder p(StringBuilder sb, long l, int w) {
return p(sb,Long.toString(l),w);
}
static private StringBuilder p(StringBuilder sb, double d, int w) {
String s = Double.isNaN(d) ? "NaN" :
((d==Float.MAX_VALUE || d==-Float.MAX_VALUE || d==Double.MAX_VALUE || d==-Double.MAX_VALUE) ? " -" :
(d==0?" 0":Double.toString(d)));
if( s.length() <= w ) return p(sb,s,w);
s = String.format("% 4.2f",d);
if( s.length() > w )
s = String.format("%4.1f",d);
if( s.length() > w )
s = String.format("%4.0f",d);
return p(sb,s,w);
}
@Override public StringBuilder toString2(StringBuilder sb, int depth) {
for( int d=0; d<depth; d++ ) sb.append(" ");
return sb.append("Undecided\n");
}
@Override protected AutoBuffer compress(AutoBuffer ab) { throw H2O.fail(); }
@Override protected int size() { throw H2O.fail(); }
}
// --------------------------------------------------------------------------
// Internal tree nodes which split into several children over a single
// column. Includes a split-decision: which child does this Row belong to?
// Does not contain a histogram describing how the decision was made.
public static abstract class DecidedNode extends Node {
public final Split _split; // Split: col, equal/notequal/less/greater, nrows, MSE
public final float _splat; // Split At point: lower bin-edge of split
// _equals\_nids[] \ 0 1
// ----------------+----------
// F | < >=
// T | != ==
public final int _nids[]; // Children NIDS for the split LEFT, RIGHT
transient byte _nodeType; // Complex encoding: see the compressed struct comments
transient int _size = 0; // Compressed byte size of this subtree
// Make a correctly flavored Undecided
public abstract UndecidedNode makeUndecidedNode(DHistogram hs[]);
// Pick the best column from the given histograms
public abstract Split bestCol( UndecidedNode u, DHistogram hs[] );
public DecidedNode( UndecidedNode n, DHistogram hs[] ) {
super(n._tree,n._pid,n._nid); // Replace Undecided with this DecidedNode
_nids = new int[2]; // Split into 2 subsets
_split = bestCol(n,hs); // Best split-point for this tree
if( _split._col == -1 ) { // No good split?
// Happens because the predictor columns cannot split the responses -
// which might be because all predictor columns are now constant, or
// because all responses are now constant.
_splat = Float.NaN;
Arrays.fill(_nids,-1);
return;
}
_splat = (_split._equal == 0 || _split._equal == 1) ? _split.splat(hs) : -1; // Split-at value (-1 for group-wise splits)
final char nbins = _tree._nbins;
final int min_rows = _tree._min_rows;
for( int b=0; b<2; b++ ) { // For all split-points
// Setup for children splits
DHistogram nhists[] = _split.split(b,nbins,min_rows,hs,_splat);
assert nhists==null || nhists.length==_tree._ncols;
_nids[b] = nhists == null ? -1 : makeUndecidedNode(nhists)._nid;
}
}
// Bin #.
public int bin( Chunk chks[], int row ) {
float d = (float)chks[_split._col].at0(row); // Value to split on for this row
if( Float.isNaN(d) ) // Missing data?
return 0; // NAs always to bin 0
// Note that during *scoring* (as opposed to training), we can be exposed
// to data which is outside the bin limits.
if(_split._equal == 0)
return d < _splat ? 0 : 1;
else if(_split._equal == 1)
return d != _splat ? 0 : 1;
else
return _split._bs.contains((int)d) ? 1 : 0;
// return _split._equal ? (d != _splat ? 0 : 1) : (d < _splat ? 0 : 1);
}
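    // Illustrative sketch: for a less-than split (_equal==0) with _splat=48.5f,
    // a row value of 47 routes to child 0 and 50 to child 1; NaN always routes
    // to child 0. For a group split (_equal==2 or 3) the bitset decides: levels
    // contained in _bs go to child 1.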
public int ns( Chunk chks[], int row ) { return _nids[bin(chks,row)]; }
public double pred( int nid ) { return nid==0 ? _split._p0 : _split._p1; }
@Override public String toString() {
if( _split._col == -1 ) return "Decided has col = -1";
int col = _split._col;
if( _split._equal == 1 )
return
_tree._names[col]+" != "+_splat+"\n"+
_tree._names[col]+" == "+_splat+"\n";
else if( _split._equal == 2 || _split._equal == 3 )
return
_tree._names[col]+" != "+_split._bs.toString()+"\n"+
_tree._names[col]+" == "+_split._bs.toString()+"\n";
return
_tree._names[col]+" < "+_splat+"\n"+
_splat+" <="+_tree._names[col]+"\n";
}
StringBuilder printChild( StringBuilder sb, int nid ) {
int i = _nids[0]==nid ? 0 : 1;
assert _nids[i]==nid : "No child nid "+nid+"? " +Arrays.toString(_nids);
sb.append("[").append(_tree._names[_split._col]);
sb.append(_split._equal != 0
? (i==0 ? " != " : " == ")
: (i==0 ? " < " : " >= "));
sb.append((_split._equal == 2 || _split._equal == 3) ? _split._bs.toString() : _splat).append("]");
return sb;
}
@Override public StringBuilder toString2(StringBuilder sb, int depth) {
for( int i=0; i<_nids.length; i++ ) {
for( int d=0; d<depth; d++ ) sb.append(" ");
sb.append(_nid).append(" ");
if( _split._col < 0 ) sb.append("init");
else {
sb.append(_tree._names[_split._col]);
sb.append(_split._equal != 0
? (i==0 ? " != " : " == ")
: (i==0 ? " < " : " >= "));
sb.append((_split._equal == 2 || _split._equal == 3) ? _split._bs.toString() : _splat).append("\n");
}
if( _nids[i] >= 0 && _nids[i] < _tree._len )
_tree.node(_nids[i]).toString2(sb,depth+1);
}
return sb;
}
// Size of this subtree; sets _nodeType also
@Override public final int size(){
if( _size != 0 ) return _size; // Cached size
assert _nodeType == 0:"unexpected node type: " + _nodeType;
if(_split._equal != 0)
_nodeType |= _split._equal == 1 ? 4 : (_split._equal == 2 ? 8 : 12);
// int res = 7; // 1B node type + flags, 2B colId, 4B float split val
// 1B node type + flags, 2B colId, 4B split val/small group or (2B offset + 2B size) + large group
int res = _split._equal == 3 ? 7 + _split._bs.numBytes() : 7;
Node left = _tree.node(_nids[0]);
int lsz = left.size();
res += lsz;
if( left instanceof LeafNode ) _nodeType |= (byte)(48 << 0*2);
else {
int slen = lsz < 256 ? 0 : (lsz < 65535 ? 1 : (lsz<(1<<24) ? 2 : 3));
_nodeType |= slen; // Set the size-skip bits
res += (slen+1); //
}
Node rite = _tree.node(_nids[1]);
if( rite instanceof LeafNode ) _nodeType |= (byte)(48 << 1*2);
res += rite.size();
assert (_nodeType&0x33) != 51;
assert res != 0;
return (_size = res);
}
// Compress this tree into the AutoBuffer
@Override public AutoBuffer compress(AutoBuffer ab) {
int pos = ab.position();
if( _nodeType == 0 ) size(); // Sets _nodeType & _size both
ab.put1(_nodeType); // Includes left-child skip-size bits
assert _split._col != -1; // Not a broken root non-decision?
ab.put2((short)_split._col);
// Save split-at-value or group
if(_split._equal == 0 || _split._equal == 1)
ab.put4f(_splat);
else if(_split._equal == 2) {
/* byte[] ary = MemoryManager.malloc1(4);
for(int i = 0; i < 4; i++)
ary[i] = _split._bs._val[i];
ab.putA1(ary, 4); */
ab.putA1(_split._bs._val, 4);
} else {
assert _split._equal == 3;
ab.put2((char)_split._bs._offset);
ab.put2((char)_split._bs.numBytes());
ab.putA1(_split._bs._val, _split._bs.numBytes());
}
Node left = _tree.node(_nids[0]);
if( (_nodeType&48) == 0 ) { // Size bits are optional for left leaves !
int sz = left.size();
if(sz < 256) ab.put1( sz);
else if (sz < 65535) ab.put2((short)sz);
else if (sz < (1<<24)) ab.put3( sz);
else ab.put4( sz); // 1<<31-1
}
// now write the subtree in
left.compress(ab);
Node rite = _tree.node(_nids[1]);
rite.compress(ab);
assert _size == ab.position()-pos:"reported size = " + _size + " , real size = " + (ab.position()-pos);
return ab;
}
}
public static abstract class LeafNode extends Node {
public double _pred;
public LeafNode( DTree tree, int pid ) { super(tree,pid); }
public LeafNode( DTree tree, int pid, int nid ) { super(tree,pid,nid); }
@Override public String toString() { return "Leaf#"+_nid+" = "+_pred; }
@Override public final StringBuilder toString2(StringBuilder sb, int depth) {
for( int d=0; d<depth; d++ ) sb.append(" ");
sb.append(_nid).append(" ");
return sb.append("pred=").append(_pred).append("\n");
}
public final double pred() { return _pred; }
public final void pred(double pred) { _pred = pred; }
}
static public final boolean isRootNode(Node n) { return n._pid == -1; }
// --------------------------------------------------------------------------
public static abstract class TreeModel extends water.Model {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="Expected max trees") public final int N;
@API(help="MSE rate as trees are added") public final double [] errs;
@API(help="Keys of actual trees built") public final Key [/*N*/][/*nclass*/] treeKeys; // Always filled, but 2-binary classifiers can contain null for 2nd class
@API(help="Maximum tree depth") public final int max_depth;
@API(help="Fewest allowed observations in a leaf") public final int min_rows;
@API(help="Bins in the histograms") public final int nbins;
// For classification models, we'll do a Confusion Matrix right in the
// model (for now - really should be separate).
@API(help="Testing key for cm and errs") public final Key testKey;
// Confusion matrix per each generated tree or null
@API(help="Confusion Matrix computed on training dataset, cm[actual][predicted]") public final ConfusionMatrix cms[/*CM-per-tree*/];
@API(help="Confusion matrix domain.") public final String[] cmDomain;
@API(help="Variable importance for individual input variables.") public final VarImp varimp; // NOTE: in future we can have an array of different variable importance measures (per method)
@API(help="Tree statistics") public final TreeStats treeStats;
@API(help="AUC for validation dataset") public final AUCData validAUC;
@API(help="Whether this is transformed from speedrf") public boolean isFromSpeeDRF=false;
private final int num_folds;
private transient volatile CompressedTree[/*N*/][/*nclasses OR 1 for regression*/] _treeBitsCache;
public TreeModel( Key key, Key dataKey, Key testKey, String names[], String domains[][], String[] cmDomain, int ntrees, int max_depth, int min_rows, int nbins, int num_folds, float[] priorClassDist, float[] classDist) {
this(key, dataKey, testKey, names, domains, cmDomain, ntrees, max_depth, min_rows, nbins, num_folds,
priorClassDist, classDist,
new Key[0][], new ConfusionMatrix[0], new double[0], null, null, null);
}
private TreeModel( Key key, Key dataKey, Key testKey, String names[], String domains[][], String[] cmDomain, int ntrees, int max_depth, int min_rows, int nbins, int num_folds,
float[] priorClassDist, float[] classDist,
Key[][] treeKeys, ConfusionMatrix[] cms, double[] errs, TreeStats treeStats, VarImp varimp, AUCData validAUC) {
super(key,dataKey,names,domains,priorClassDist, classDist);
this.N = ntrees;
this.max_depth = max_depth; this.min_rows = min_rows; this.nbins = nbins;
this.num_folds = num_folds;
this.treeKeys = treeKeys;
this.treeStats = treeStats;
      this.cmDomain = cmDomain!=null ? cmDomain : new String[0];
this.testKey = testKey;
this.cms = cms;
this.errs = errs;
this.varimp = varimp;
this.validAUC = validAUC;
}
// Simple copy ctor, null value of parameter means copy from prior-model
protected TreeModel(TreeModel prior, Key[][] treeKeys, double[] errs, ConfusionMatrix[] cms, TreeStats tstats, VarImp varimp, AUCData validAUC) {
super(prior._key,prior._dataKey,prior._names,prior._domains, prior._priorClassDist,prior._modelClassDist,prior.training_start_time,prior.training_duration_in_ms);
this.N = prior.N;
this.testKey = prior.testKey;
this.max_depth = prior.max_depth;
this.min_rows = prior.min_rows;
this.nbins = prior.nbins;
this.cmDomain = prior.cmDomain;
this.num_folds = prior.num_folds;
if (treeKeys != null) this.treeKeys = treeKeys; else this.treeKeys = prior.treeKeys;
if (errs != null) this.errs = errs; else this.errs = prior.errs;
if (cms != null) this.cms = cms; else this.cms = prior.cms;
if (tstats != null) this.treeStats = tstats; else this.treeStats = prior.treeStats;
if (varimp != null) this.varimp = varimp; else this.varimp = prior.varimp;
if (validAUC != null) this.validAUC = validAUC; else this.validAUC = prior.validAUC;
}
// Additional copy ctors to update specific fields
public TreeModel(TreeModel prior, DTree[] tree, double err, ConfusionMatrix cm, TreeStats tstats) {
this(prior, append(prior.treeKeys, tree), Utils.append(prior.errs, err), Utils.append(prior.cms, cm), tstats, null, null);
}
public TreeModel(TreeModel prior, DTree[] tree, TreeStats tstats) {
this(prior, append(prior.treeKeys, tree), null, null, tstats, null, null);
}
public TreeModel(TreeModel prior, double err, ConfusionMatrix cm, VarImp varimp, AUCData validAUC) {
this(prior, null, Utils.append(prior.errs, err), Utils.append(prior.cms, cm), null, varimp, validAUC);
}
public enum TreeModelType {
UNKNOWN,
GBM,
DRF,
}
protected TreeModelType getTreeModelType() { return TreeModelType.UNKNOWN; }
    /** Returns the producer key if the model is under construction, else null.
     * <p>The implementation looks for a write lock; if one is present, the locking key (the producer) is returned.</p>
     *
     * <p>WARNING: the method is strictly for UI use and does not provide any atomicity!</p>*/
private final Key getProducer() {
return FetchProducer.fetch(_key);
}
private final boolean isProduced() {
return getProducer()!=null;
}
private static final class FetchProducer extends DTask<FetchProducer> {
final private Key _key;
private Key _producer;
public static Key fetch(Key key) {
FetchProducer fp = new FetchProducer(key);
if (key.home()) fp.compute2();
else fp = RPC.call(key.home_node(), fp).get();
return fp._producer;
}
private FetchProducer(Key k) { _key = k; }
@Override public void compute2() {
Lockable l = UKV.get(_key);
_producer = l!=null && l._lockers!=null && l._lockers.length > 0 ? l._lockers[0] : null;
tryComplete();
}
@Override public byte priority() { return H2O.ATOMIC_PRIORITY; }
}
private static final Key[][] append(Key[][] prior, DTree[] tree ) {
if (tree==null) return prior;
prior = Arrays.copyOf(prior, prior.length+1);
Key ts[] = prior[prior.length-1] = new Key[tree.length];
for( int c=0; c<tree.length; c++ )
if( tree[c] != null ) {
ts[c] = tree[c].save();
}
return prior;
}
/** Number of trees in current model. */
public int ntrees() { return treeKeys.length; }
// Most recent ConfusionMatrix
@Override public ConfusionMatrix cm() {
ConfusionMatrix[] cms = this.cms; // Avoid race update; read it once
if(cms != null && cms.length > 0){
int n = cms.length-1;
while(n > 0 && cms[n] == null)--n;
return cms[n] == null?null:cms[n];
} else return null;
}
@Override public VarImp varimp() { return varimp; }
@Override public double mse() {
if(errs != null && errs.length > 0){
int n = errs.length-1;
while(n > 0 && Double.isNaN(errs[n]))--n;
return errs[n];
} else return Double.NaN;
}
@Override protected float[] score0(double data[], float preds[]) {
// Prefetch trees into the local cache if it is necessary
// Invoke scoring
Arrays.fill(preds,0);
for( int tidx=0; tidx<treeKeys.length; tidx++ )
score0(data, preds, tidx);
return preds;
}
/** Returns i-th tree represented by an array of k-trees. */
public final CompressedTree[] ctree(int tidx) {
if (_treeBitsCache==null) {
synchronized(this) {
if (_treeBitsCache==null) _treeBitsCache = new CompressedTree[ntrees()][];
}
}
if (_treeBitsCache[tidx]==null) {
synchronized(this) {
if (_treeBitsCache[tidx]==null) {
Key[] k = treeKeys[tidx];
CompressedTree[] ctree = new CompressedTree[nclasses()];
          for (int i = 0; i < nclasses(); i++) // binary classifiers can contain null for the second tree
if (k[i]!=null) ctree[i] = UKV.get(k[i]);
_treeBitsCache[tidx] = ctree;
}
}
}
return _treeBitsCache[tidx];
}
// Score per line per tree
public void score0(double data[], float preds[], int treeIdx) {
CompressedTree ts[] = ctree(treeIdx);
DTreeUtils.scoreTree(data, preds, ts);
}
/** Delete model trees */
public void delete_trees() {
Futures fs = new Futures();
delete_trees(fs);
fs.blockForPending();
}
public Futures delete_trees(Futures fs) {
for (int tid = 0; tid < treeKeys.length; tid++) /* over all trees */
for (int cid = 0; cid < treeKeys[tid].length; cid++) /* over all classes */
// 2-binary classifiers can contain null for the second
if (treeKeys[tid][cid]!=null) DKV.remove(treeKeys[tid][cid], fs);
return fs;
}
    // If the model is deleted then all its trees have to be deleted as well
@Override public Futures delete_impl(Futures fs) {
delete_trees(fs);
super.delete_impl(fs);
return fs;
}
@Override public ModelAutobufferSerializer getModelSerializer() {
// Return a serializer which knows how to serialize keys
return new ModelAutobufferSerializer() {
@Override protected AutoBuffer postSave(Model m, AutoBuffer ab) {
int ntrees = treeKeys.length;
ab.put4(ntrees);
for (int i=0; i<ntrees; i++) {
CompressedTree[] ts = ctree(i);
ab.putA(ts);
}
return ab;
}
@Override protected AutoBuffer postLoad(Model m, AutoBuffer ab) {
int ntrees = ab.get4();
Futures fs = new Futures();
for (int i=0; i<ntrees; i++) {
CompressedTree[] ts = ab.getA(CompressedTree.class);
for (int j=0; j<ts.length; j++) {
Key k = ((TreeModel) m).treeKeys[i][j];
              assert k == null && ts[j] == null || k != null && ts[j] != null : "Inconsistency in model serialization: key is null but model is not null, OR vice versa!";
if (k!=null) {
UKV.put(k, ts[j], fs);
}
}
}
fs.blockForPending();
return ab;
}
};
}
public void generateHTML(String title, StringBuilder sb) {
DocGen.HTML.title(sb,title);
sb.append("<div class=\"alert\">").append("Actions: ");
if (_dataKey != null)
sb.append(Inspect2.link("Inspect training data ("+_dataKey.toString()+")", _dataKey)).append(", ");
sb.append(Predict.link(_key,"Score on dataset")).append(", ");
if (_dataKey != null)
sb.append(UIUtils.builderModelLink(this.getClass(), _dataKey, responseName(), "Compute new model")).append(", ");
sb.append(UIUtils.qlink(SaveModel.class, "model", _key, "Save model")).append(", ");
if (isProduced()) { // looks at locker field and check W-locker guy
sb.append("<i class=\"icon-stop\"></i> ").append(Cancel.link(getProducer(), "Stop training this model"));
} else {
sb.append("<i class=\"icon-play\"></i> ").append(UIUtils.builderLink(this.getClass(), _dataKey, responseName(), this._key, "Continue training this model"));
}
sb.append("</div>");
DocGen.HTML.paragraph(sb,"Model Key: "+_key);
DocGen.HTML.paragraph(sb,"Max depth: "+max_depth+", Min rows: "+min_rows+", Nbins:"+nbins+", Trees: " + ntrees());
generateModelDescription(sb);
sb.append("</pre>");
String[] domain = cmDomain; // Domain of response col
// Generate a display using the last scored Model. Not all models are
// scored immediately (since scoring can be a big part of model building).
ConfusionMatrix cm = null;
int last = cms.length-1;
while( last > 0 && cms[last]==null ) last--;
cm = 0 <= last && last < cms.length ? cms[last] : null;
// Display the CM
if( cm != null && domain != null ) {
// Top row of CM
assert cm._arr.length==domain.length;
DocGen.HTML.title(sb,"Scoring");
if( testKey == null ) {
if (_have_cv_results)
sb.append("<div class=\"alert\">Reported on ").append(num_folds).append("-fold cross-validated training data</div>");
else {
sb.append("<div class=\"alert\">Reported on ").append(title.contains("DRF") ? "out-of-bag" : "training").append(" data");
if (num_folds > 0) sb.append(" (cross-validation results are being computed - please reload this page later)");
sb.append(".");
if (_priorClassDist!=null && _modelClassDist!=null) sb.append("<br />Data were resampled to balance class distribution.");
sb.append("</div>");
}
} else {
RString rs = new RString("<div class=\"alert\">Reported on <a href='Inspect2.html?src_key=%$key'>%key</a></div>");
rs.replace("key", testKey);
DocGen.HTML.paragraph(sb,rs.toString());
}
if (validAUC == null) { //AUC shows the CM already
// generate HTML for CM
DocGen.HTML.section(sb, "Confusion Matrix");
cm.toHTML(sb, domain);
}
}
if( errs != null ) {
if (!isClassifier() && num_folds > 0) {
if (_have_cv_results)
DocGen.HTML.section(sb, num_folds + "-fold cross-validated Mean Squared Error: " + String.format("%5.3f", errs[errs.length-1]));
else
DocGen.HTML.section(sb, num_folds + "-fold cross-validated Mean Squared Error is being computed - please reload this page later.");
}
DocGen.HTML.section(sb,"Mean Squared Error by Tree");
DocGen.HTML.arrayHead(sb);
sb.append("<tr style='min-width:60px'><th>Trees</th>");
last = errs.length-1-(_have_cv_results?1:0); // for regressor reports all errors (except for cross-validated result)
for( int i=last; i>=0; i-- )
sb.append("<td style='min-width:60px'>").append(i).append("</td>");
sb.append("</tr>");
sb.append("<tr><th class='warning'>MSE</th>");
for( int i=last; i>=0; i-- )
sb.append(!Double.isNaN(errs[i]) ? String.format("<td style='min-width:60px'>%5.3f</td>",errs[i]) : "<td style='min-width:60px'>---</td>");
sb.append("</tr>");
DocGen.HTML.arrayTail(sb);
}
// Show AUC for binary classifiers
if (validAUC != null) generateHTMLAUC(sb);
// Show tree stats
if (treeStats != null) generateHTMLTreeStats(sb);
// Show variable importance
if (varimp != null) {
generateHTMLVarImp(sb);
}
printCrossValidationModelsHTML(sb);
}
static final String NA = "---";
protected void generateHTMLTreeStats(StringBuilder sb) {
DocGen.HTML.section(sb,"Tree stats");
DocGen.HTML.arrayHead(sb);
sb.append("<tr><th> </th>").append("<th>Min</th><th>Mean</th><th>Max</th></tr>");
boolean valid = treeStats.isValid();
sb.append("<tr><th>Depth</th>")
.append("<td>").append(valid ? treeStats.minDepth : NA).append("</td>")
.append("<td>").append(valid ? treeStats.meanDepth : NA).append("</td>")
.append("<td>").append(valid ? treeStats.maxDepth : NA).append("</td></tr>");
sb.append("<th>Leaves</th>")
.append("<td>").append(valid ? treeStats.minLeaves : NA).append("</td>")
.append("<td>").append(valid ? treeStats.meanLeaves : NA).append("</td>")
.append("<td>").append(valid ? treeStats.maxLeaves : NA).append("</td></tr>");
DocGen.HTML.arrayTail(sb);
}
protected void generateHTMLVarImp(StringBuilder sb) {
if (varimp!=null) {
// Set up variable names for importance
varimp.setVariables(Arrays.copyOf(_names, _names.length-1));
varimp.toHTML(this, sb);
}
}
protected void generateHTMLAUC(StringBuilder sb) {
validAUC.toHTML(sb);
}
public static class TreeStats extends Iced {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="Minimal tree depth.") public int minDepth = Integer.MAX_VALUE;
@API(help="Maximum tree depth.") public int maxDepth = Integer.MIN_VALUE;
@API(help="Average tree depth.") public float meanDepth;
@API(help="Minimal num. of leaves.") public int minLeaves = Integer.MAX_VALUE;
@API(help="Maximum num. of leaves.") public int maxLeaves = Integer.MIN_VALUE;
@API(help="Average num. of leaves.") public float meanLeaves;
transient long sumDepth = 0;
transient long sumLeaves = 0;
transient int numTrees = 0;
public boolean isValid() { return minDepth <= maxDepth; }
public void updateBy(DTree[] ktrees) {
if (ktrees==null) return;
for (int i=0; i<ktrees.length; i++) {
DTree tree = ktrees[i];
if( tree == null ) continue;
if (minDepth > tree.depth) minDepth = tree.depth;
if (maxDepth < tree.depth) maxDepth = tree.depth;
if (minLeaves > tree.leaves) minLeaves = tree.leaves;
if (maxLeaves < tree.leaves) maxLeaves = tree.leaves;
sumDepth += tree.depth;
sumLeaves += tree.leaves;
numTrees++;
meanDepth = ((float)sumDepth / numTrees);
meanLeaves = ((float)sumLeaves / numTrees);
}
}
public void setNumTrees(int i) { numTrees = i; }
}
// --------------------------------------------------------------------------
// Highly compressed tree encoding:
// tree: 1B nodeType, 2B colId, 4B splitVal, left-tree-size, left, right
// nodeType: (from lsb):
// 2 bits (1,2) skip-tree-size-size,
// 2 bits (4,8) operator flag (0 --> <, 1 --> ==, 2 --> small (4B) group, 3 --> big (var size) group),
// 1 bit ( 16) left leaf flag,
// 1 bit ( 32) left leaf type flag (0: subtree, 1: small cat, 2: big cat, 3: float)
// 1 bit ( 64) right leaf flag,
// 1 bit (128) right leaf type flag (0: subtree, 1: small cat, 2: big cat, 3: float)
// left, right: tree | prediction
// prediction: 4 bytes of float
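  // Illustrative decoding sketch for the scheme above: assuming nodeType==0xF0
  // (left-leaf bits 0x30 and right-leaf bits 0xC0 set, operator bits 0 => '<'):
  //   lmask = 0xF0 & 0x33 = 0x30        -> left child is a 4-byte float prediction
  //   rmask = (0xF0 & 0xC0) >> 2 = 0x30 -> right child is a 4-byte float prediction
  // so the node serializes to 1B type + 2B colId + 4B splitVal + 4B + 4B = 15 bytes,
  // matching DecidedNode.size() (7 + 4 + 4) for a node with two leaf children.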
public static class CompressedTree extends Iced {
final byte [] _bits;
final int _nclass;
final long _seed;
public CompressedTree( byte [] bits, int nclass, long seed ) { _bits = bits; _nclass = nclass; _seed = seed; }
public float score( final double row[] ) {
AutoBuffer ab = new AutoBuffer(_bits);
while(true) {
int nodeType = ab.get1();
int colId = ab.get2();
if( colId == 65535 ) return scoreLeaf(ab);
// boolean equal = ((nodeType&4)==4);
int equal = (nodeType&12) >> 2;
assert (equal >= 0 && equal <= 3): "illegal equal value " + equal+" at "+ab.position()+" in bitpile "+Arrays.toString(_bits);
// Extract value or group to split on
float splitVal = -1;
boolean grpContains = false;
if(equal == 0 || equal == 1) {
splitVal = ab.get4f();
} else {
int off = (equal == 3) ? ab.get2() : 0; // number of zero-bits skipped during serialization
int sz = (equal == 3) ? ab.get2() : 4; // size of serialized bitset (part containing some non-zeros) in bytes
int idx = (int)row[colId]; // the input value driving decision
if(Double.isNaN(row[colId]) || idx < off ) {
grpContains = false;
ab.skip(sz);
} else {
idx = idx - off;
int bbskip = idx >> 3;
if (sz-bbskip>0) {
ab.skip(bbskip);
grpContains = (ab.get1() & ((byte)1 << (idx % 8))) != 0;
ab.skip(sz-bbskip-1);
            } else { // value is not in the bit set at all (it falls outside the stored range)
grpContains = false;
ab.skip(sz);
}
}
}
// Compute the amount to skip.
int lmask = nodeType & 0x33;
int rmask = (nodeType & 0xC0) >> 2;
int skip = 0;
switch(lmask) {
case 0: skip = ab.get1(); break;
case 1: skip = ab.get2(); break;
case 2: skip = ab.get3(); break;
case 3: skip = ab.get4(); break;
case 16: skip = _nclass < 256?1:2; break; // Small leaf
case 48: skip = 4; break; // skip the prediction
default: assert false:"illegal lmask value " + lmask+" at "+ab.position()+" in bitpile "+Arrays.toString(_bits);
}
// WARNING: Generated code has to be consistent with this code:
// - Double.NaN < 3.7f => return false => BUT left branch has to be selected (i.e., ab.position())
        // - Double.NaN != 3.7f => return true => left branch has to be selected (i.e., ab.position())
if( !Double.isNaN(row[colId]) ) { // NaNs always go to bin 0
if( ( equal==0 && ((float)row[colId]) >= splitVal) ||
( equal==1 && ((float)row[colId]) == splitVal) ||
( (equal==2 || equal==3) && grpContains )) {
ab.position(ab.position()+skip); // Skip to the right subtree
lmask = rmask; // And set the leaf bits into common place
}
} /* else Double.isNaN() is true => use left branch */
if( (lmask&16)==16 ) return scoreLeaf(ab);
}
}
private float scoreLeaf( AutoBuffer ab ) { return ab.get4f(); }
public Random rngForChunk( int cidx ) {
Random rand = createRNG(_seed);
      // Argh - needs polishing
for( int i=0; i<cidx; i++ ) rand.nextLong();
long seed = rand.nextLong();
return createRNG(seed);
}
}
/** Abstract visitor class for serialized trees.*/
public static abstract class TreeVisitor<T extends Exception> {
// Override these methods to get walker behavior.
protected void pre ( int col, float fcmp, IcedBitSet gcmp, int equal ) throws T { }
protected void mid ( int col, float fcmp, int equal ) throws T { }
protected void post( int col, float fcmp, int equal ) throws T { }
protected void leaf( float pred ) throws T { }
long result( ) { return 0; } // Override to return simple results
protected final TreeModel _tm;
protected final CompressedTree _ct;
private final AutoBuffer _ts;
protected int _depth; // actual depth
protected int _nodes; // number of visited nodes
public TreeVisitor( TreeModel tm, CompressedTree ct ) {
_tm = tm;
_ts = new AutoBuffer((_ct=ct)._bits);
}
// Call either the single-class leaf or the full-prediction leaf
private final void leaf2( int mask ) throws T {
assert (mask==0 || ( (mask&16)==16 && (mask&32)==32) ) : "Unknown mask: " + mask; // Is a leaf or a special leaf on the top of tree
leaf(_ts.get4f());
}
public final void visit() throws T {
int nodeType = _ts.get1();
int col = _ts.get2();
if( col==65535 ) { leaf2(nodeType); return; }
// float fcmp = _ts.get4f();
// boolean equal = ((nodeType&4)==4);
int equal = (nodeType&12) >> 2;
// Extract value or group to split on
float fcmp = -1;
IcedBitSet gcmp = null;
if(equal == 0 || equal == 1)
fcmp = _ts.get4f();
else {
int off = (equal == 3) ? _ts.get2() : 0;
int sz = (equal == 3) ? _ts.get2() : 4;
byte[] buf = MemoryManager.malloc1(sz);
_ts.read(buf, 0, sz);
gcmp = new IcedBitSet(buf, sz << 3, off);
}
// Compute the amount to skip.
int lmask = nodeType & 0x33;
int rmask = (nodeType & 0xC0) >> 2;
int skip = 0;
switch(lmask) {
case 0: skip = _ts.get1(); break;
case 1: skip = _ts.get2(); break;
case 2: skip = _ts.get3(); break;
case 3: skip = _ts.get4(); break;
case 16: skip = _ct._nclass < 256?1:2; break; // Small leaf
case 48: skip = 4; break; // skip is always 4 for direct leaves (see DecidedNode.size() and LeafNode.size() methods)
default: assert false:"illegal lmask value " + lmask;
}
pre(col,fcmp,gcmp,equal); // Pre-walk
_depth++;
if( (lmask & 0x10)==16 ) leaf2(lmask); else visit();
mid(col,fcmp,equal); // Mid-walk
if( (rmask & 0x10)==16 ) leaf2(rmask); else visit();
_depth--;
post(col,fcmp,equal);
_nodes++;
}
}
StringBuilder toString(final String res, CompressedTree ct, final StringBuilder sb ) {
new TreeVisitor<RuntimeException>(this,ct) {
@Override protected void pre( int col, float fcmp, IcedBitSet gcmp, int equal ) {
for( int i=0; i<_depth; i++ ) sb.append(" ");
if(equal == 2 || equal == 3)
sb.append(_names[col]).append("==").append(gcmp.toString()).append('\n');
else
sb.append(_names[col]).append(equal==1?"==":"< ").append(fcmp).append('\n');
}
@Override protected void leaf( float pred ) {
for( int i=0; i<_depth; i++ ) sb.append(" ");
sb.append(res).append("=").append(pred).append(";\n");
}
}.visit();
return sb;
}
// For GBM: learn_rate. For DRF: mtries, sample_rate, seed.
abstract protected void generateModelDescription(StringBuilder sb);
// Determine whether feature is licensed.
private boolean isFeatureAllowed() {
boolean featureAllowed = false;
try {
if (treeStats.numTrees <= 10) {
featureAllowed = true;
}
else {
if (getTreeModelType() == TreeModelType.GBM) {
featureAllowed = H2O.licenseManager.isFeatureAllowed(LicenseManager.FEATURE_GBM_SCORING);
}
else if (getTreeModelType() == TreeModelType.DRF) {
featureAllowed = H2O.licenseManager.isFeatureAllowed(LicenseManager.FEATURE_RF_SCORING);
}
}
}
catch (Exception xe) {}
return featureAllowed;
}
public void toJavaHtml( StringBuilder sb ) {
if( treeStats == null ) return; // No trees yet
sb.append("<br /><br /><div class=\"pull-right\"><a href=\"#\" onclick=\'$(\"#javaModel\").toggleClass(\"hide\");\'" +
"class=\'btn btn-inverse btn-mini\'>Java Model</a></div><br /><div class=\"hide\" id=\"javaModel\">");
boolean featureAllowed = isFeatureAllowed();
if (! featureAllowed) {
sb.append("<br/><div id=\'javaModelWarningBlock\' class=\"alert\" style=\"background:#eedd20;color:#636363;text-shadow:none;\">");
sb.append("<b>You have requested a premium feature (> 10 trees) and your H<sub>2</sub>O software is unlicensed.</b><br/><br/>");
sb.append("Please enter your email address below, and we will send you a trial license shortly.<br/>");
sb.append("This will also temporarily enable downloading Java models.<br/>");
sb.append("<form class=\'form-inline\'><input id=\"emailForJavaModel\" class=\"span5\" type=\"text\" placeholder=\"Email\"/> ");
sb.append("<a href=\"#\" onclick=\'processJavaModelLicense();\' class=\'btn btn-inverse\'>Send</a></form></div>");
sb.append("<div id=\"javaModelSource\" class=\"hide\">");
}
if( ntrees() * treeStats.meanLeaves > 5000 ) {
String modelName = JCodeGen.toJavaId(_key.toString());
sb.append("<pre style=\"overflow-y:scroll;\"><code class=\"language-java\">");
sb.append("/* Java code is too large to display, download it directly.\n");
sb.append(" To obtain the code please invoke in your terminal:\n");
sb.append(" curl http:/").append(H2O.SELF.toString()).append("/h2o-model.jar > h2o-model.jar\n");
sb.append(" curl http:/").append(H2O.SELF.toString()).append("/2/").append(this.getClass().getSimpleName()).append("View.java?_modelKey=").append(_key).append(" > ").append(modelName).append(".java\n");
sb.append(" javac -cp h2o-model.jar -J-Xmx2g -J-XX:MaxPermSize=128m ").append(modelName).append(".java\n");
if (GEN_BENCHMARK_CODE)
sb.append(" java -cp h2o-model.jar:. -Xmx2g -XX:MaxPermSize=256m -XX:ReservedCodeCacheSize=256m ").append(modelName).append('\n');
sb.append("*/");
sb.append("</code></pre>");
} else {
sb.append("<pre style=\"overflow-y:scroll;\"><code class=\"language-java\">");
DocGen.HTML.escape(sb, toJava());
sb.append("</code></pre>");
}
    if (!featureAllowed) sb.append("</div>"); // close license block
sb.append("</div>");
sb.append("<script type=\"text/javascript\">$(document).ready(showOrHideJavaModel);</script>");
}
@Override protected SB toJavaInit(SB sb, SB fileContextSB) {
sb = super.toJavaInit(sb, fileContextSB);
String modelName = JCodeGen.toJavaId(_key.toString());
// Generate main method with benchmark
if (GEN_BENCHMARK_CODE) {
sb.i().p("/**").nl();
sb.i().p(" * Sample program harness providing an example of how to call predict().").nl();
sb.i().p(" */").nl();
sb.i().p("public static void main(String[] args) throws Exception {").nl();
sb.i(1).p("int iters = args.length > 0 ? Integer.valueOf(args[0]) : DEFAULT_ITERATIONS;").nl();
sb.i(1).p(modelName).p(" model = new ").p(modelName).p("();").nl();
sb.i(1).p("model.bench(iters, DataSample.DATA, new float[NCLASSES+1], NTREES);").nl();
sb.i().p("}").nl();
sb.di(1);
sb.p(TO_JAVA_BENCH_FUNC);
}
JCodeGen.toStaticVar(sb, "NTREES", ntrees(), "Number of trees in this model.");
JCodeGen.toStaticVar(sb, "NTREES_INTERNAL", ntrees()*nclasses(), "Number of internal trees in this model (= NTREES*NCLASSES).");
if (GEN_BENCHMARK_CODE) JCodeGen.toStaticVar(sb, "DEFAULT_ITERATIONS", 10000, "Default number of iterations.");
    // Generate the data in a separate class so we do not bloat the constant pool of the model class
if (GEN_BENCHMARK_CODE) {
if( _dataKey != null ) {
Value dataval = DKV.get(_dataKey);
if (dataval != null) {
water.fvec.Frame frdata = dataval.get();
water.fvec.Frame frsub = frdata.subframe(_names);
JCodeGen.toClass(fileContextSB, "// Sample of data used by benchmark\nclass DataSample", "DATA", frsub, 10, "Sample test data.");
}
}
}
return sb;
}
// Convert Tree model to Java
@Override protected void toJavaPredictBody( final SB bodySb, final SB classCtxSb, final SB fileCtxSb) {
    // AD-HOC maximum number of trees per forest - effectively bounds the constant pool size of each Forest class (all UTF strings + references to static classes).
    // TODO: in the future this could be made a generator parameter, as could maxIters
final int maxfsize = 4000;
int fidx = 0; // forest index
int treesInForest = 0;
SB forest = new SB();
    // divide trees into small forests of at most maxfsize trees each
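    // For example (illustrative arithmetic only): with maxfsize = 4000 and 10,000 trees for
    // one class, the generator emits Forest_0, Forest_1 and Forest_2 holding 4000, 4000 and
    // 2000 trees respectively, each forest class carrying its own constant pool.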
/* DEBUG line */ bodySb.i().p("// System.err.println(\"Row (gencode.predict): \" + java.util.Arrays.toString(data));").nl();
bodySb.i().p("java.util.Arrays.fill(preds,0f);").nl();
if (isFromSpeeDRF) {
bodySb.i().p("// Call forest predicting class ").p(0).nl();
bodySb.i().p("preds").p(" =").p(" Forest_").p(fidx).p("_class_").p(0).p(".predict(data, maxIters - " + fidx * maxfsize + ");").nl();
}
for( int c=0; c<nclasses(); c++ ) {
toJavaForestBegin(bodySb, forest, c, fidx++, maxfsize);
for( int i=0; i < treeKeys.length; i++ ) {
CompressedTree cts[] = ctree(i);
if( cts[c] == null ) continue;
if (!isFromSpeeDRF) {
forest.i().p("if (iters-- > 0) pred").p(" +=").p(" Tree_").p(i).p("_class_").p(c).p(".predict(data);").nl();
} else {
forest.i().p("pred[(int)").p(" Tree_").p(i).p("_class_").p(c).p(".predict(data) + 1] += 1;").nl();
}
// append representation of tree predictor
toJavaTreePredictFct(fileCtxSb, cts[c], i, c);
if (++treesInForest == maxfsize) {
toJavaForestEnd(bodySb, forest, c, fidx);
toJavaForestBegin(bodySb, forest, c, fidx++, maxfsize);
treesInForest = 0;
}
}
toJavaForestEnd(bodySb, forest, c, fidx);
treesInForest = 0;
fidx = 0;
}
fileCtxSb.p(forest);
toJavaUnifyPreds(bodySb);
toJavaFillPreds0(bodySb);
}
/* Numeric type used in generated code to hold predicted value between the calls. */
static final String PRED_TYPE = "float";
private void toJavaForestBegin(SB predictBody, SB forest, int c, int fidx, int maxTreesInForest) {
// ugly hack here
if (!isFromSpeeDRF) {
predictBody.i().p("// Call forest predicting class ").p(c).nl();
predictBody.i().p("preds[").p(c + 1).p("] +=").p(" Forest_").p(fidx).p("_class_").p(c).p(".predict(data, maxIters - " + fidx * maxTreesInForest + ");").nl();
forest.i().p("// Forest representing a subset of trees scoring class ").p(c).nl();
forest.i().p("class Forest_").p(fidx).p("_class_").p(c).p(" {").nl().ii(1);
forest.i().p("public static ").p(PRED_TYPE).p(" predict(double[] data, int maxIters) {").nl().ii(1);
forest.i().p(PRED_TYPE).p(" pred = 0;").nl();
forest.i().p("int iters = maxIters;").nl();
} else {
forest.i().p("// Forest representing a subset of trees scoring class ").p(c).nl();
forest.i().p("class Forest_").p(fidx).p("_class_").p(c).p(" {").nl().ii(1);
forest.i().p("public static ").p(PRED_TYPE).p("[] predict(double[] data, int maxIters) {").nl().ii(1);
forest.i().p(PRED_TYPE).p("[] pred = new float["+(nclasses()+1)+"];").nl();
forest.i().p("java.util.Arrays.fill(pred,0f);").nl();
forest.i().p("int iters = maxIters;").nl();
}
}
private void toJavaForestEnd(SB predictBody, SB forest, int c, int fidx) {
if (!isFromSpeeDRF) {
forest.i().p("return pred;").nl();
forest.i().p("}").di(1).nl(); // end of function
forest.i().p("}").di(1).nl(); // end of forest classs
} else {
if (c ==0) {
forest.i().p("float sum = 0;").nl();
forest.i().p("for (int i=1; i <= " + nclasses() + "; i++) {").p("sum += pred[i];").p("}").nl();
forest.i().p("for (int i=1; i <= " + nclasses() + "; i++) {").p("pred[i] /= sum;").p("}").nl();
}
forest.i().p("return pred;").nl();
forest.i().p("}").di(1).nl(); // end of function
forest.i().p("}").di(1).nl(); // end of forest classs
}
}
// Produce prediction code for one tree
protected void toJavaTreePredictFct(final SB sb, final CompressedTree cts, int treeIdx, int classIdx) {
// generate top-level class definition
sb.nl();
sb.i().p("// Tree predictor for ").p(treeIdx).p("-tree and ").p(classIdx).p("-class").nl();
sb.i().p("class Tree_").p(treeIdx).p("_class_").p(classIdx).p(" {").nl().ii(1);
new TreeJCodeGen(this,cts, sb).generate();
sb.i().p("}").nl(); // close the class
}
@Override protected String toJavaDefaultMaxIters() { return String.valueOf(this.N); }
}
// Build a compressed-tree struct
public TreeModel.CompressedTree compress() {
int sz = root().size();
if( root() instanceof LeafNode ) sz += 3; // Oops - tree-stump
AutoBuffer ab = new AutoBuffer(sz);
if( root() instanceof LeafNode ) // Oops - tree-stump
ab.put1(0).put2((char)65535); // Flag it special so the decompress doesn't look for top-level decision
root().compress(ab); // Compress whole tree
assert ab.position() == sz;
return new TreeModel.CompressedTree(ab.buf(),_nclass,_seed);
}
/** Save this tree into DKV store under default random Key. */
public Key save() { return save(defaultTreeKey()); }
/** Save this tree into DKV store under the given Key. */
public Key save(Key k) {
CompressedTree ts = compress();
UKV.put(k, ts);
return k;
}
private Key defaultTreeKey() {
return Key.makeSystem("__Tree_"+Key.rand());
}
private static final SB TO_JAVA_BENCH_FUNC = new SB().
nl().
p(" /**").nl().
p(" * Run a predict() benchmark with the generated model and some synthetic test data.").nl().
p(" *").nl().
p(" * @param iters number of iterations to run; each iteration predicts on every sample (i.e. row) in the test data").nl().
p(" * @param data test data to predict on").nl().
p(" * @param preds output predictions").nl().
p(" * @param ntrees number of trees").nl().
p(" */").nl().
p(" public void bench(int iters, double[][] data, float[] preds, int ntrees) {").nl().
p(" System.out.println(\"Iterations: \" + iters);").nl().
p(" System.out.println(\"Data rows : \" + data.length);").nl().
p(" System.out.println(\"Trees : \" + ntrees + \"x\" + (preds.length-1));").nl().
nl().
p(" long startMillis;").nl().
p(" long endMillis;").nl().
p(" long deltaMillis;").nl().
p(" double deltaSeconds;").nl().
p(" double samplesPredicted;").nl().
p(" double samplesPredictedPerSecond;").nl().
p(" System.out.println(\"Starting timing phase of \"+iters+\" iterations...\");").nl().
nl().
p(" startMillis = System.currentTimeMillis();").nl().
p(" for (int i=0; i<iters; i++) {").nl().
p(" // Uncomment the nanoTime logic for per-iteration prediction times.").nl().
p(" // long startTime = System.nanoTime();").nl().
nl().
p(" for (double[] row : data) {").nl().
p(" predict(row, preds);").nl().
p(" // System.out.println(java.util.Arrays.toString(preds) + \" : \" + (DOMAINS[DOMAINS.length-1]!=null?(DOMAINS[DOMAINS.length-1][(int)preds[0]]+\"~\"+DOMAINS[DOMAINS.length-1][(int)row[row.length-1]]):(preds[0] + \" ~ \" + row[row.length-1])) );").nl().
p(" }").nl().
nl().
p(" // long ttime = System.nanoTime()-startTime;").nl().
p(" // System.out.println(i+\". iteration took \" + (ttime) + \"ns: scoring time per row: \" + ttime/data.length +\"ns, scoring time per row and tree: \" + ttime/data.length/ntrees + \"ns\");").nl().
nl().
p(" if ((i % 1000) == 0) {").nl().
p(" System.out.println(\"finished \"+i+\" iterations (of \"+iters+\")...\");").nl().
p(" }").nl().
p(" }").nl().
p(" endMillis = System.currentTimeMillis();").nl().
nl().
p(" deltaMillis = endMillis - startMillis;").nl().
p(" deltaSeconds = (double)deltaMillis / 1000.0;").nl().
p(" samplesPredicted = data.length * iters;").nl().
p(" samplesPredictedPerSecond = samplesPredicted / deltaSeconds;").nl().
p(" System.out.println(\"finished in \"+deltaSeconds+\" seconds.\");").nl().
p(" System.out.println(\"samplesPredicted: \" + samplesPredicted);").nl().
p(" System.out.println(\"samplesPredictedPerSecond: \" + samplesPredictedPerSecond);").nl().
p(" }").nl().
nl();
static class TreeJCodeGen extends TreeVisitor<RuntimeException> {
    public static final int MAX_NODES = (1 << 12) / 4; // limit on the number of decision nodes per generated method
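    // (1 << 12) / 4 == 1024. Once the walked tree exceeds this many decision nodes, pre()
    // below emits a call to predictN(data) and continues code generation in a fresh static
    // method; presumably this keeps each generated method well below the JVM's 64 KB
    // bytecode-per-method limit (our reading of the code, not a documented guarantee).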
final byte _bits[] = new byte [100];
final float _fs [] = new float[100];
final SB _sbs [] = new SB [100];
final int _nodesCnt[] = new int [100];
SB _sb;
SB _csb;
SB _grpsplit;
int _subtrees = 0;
int _grpcnt = 0;
public TreeJCodeGen(TreeModel tm, CompressedTree ct, SB sb) {
super(tm, ct);
_sb = sb;
_csb = new SB();
_grpsplit = new SB();
}
// code preamble
protected void preamble(SB sb, int subtree) throws RuntimeException {
String subt = subtree>0?String.valueOf(subtree):"";
sb.i().p("static final ").p(TreeModel.PRED_TYPE).p(" predict").p(subt).p("(double[] data) {").nl().ii(1); // predict method for one tree
sb.i().p(TreeModel.PRED_TYPE).p(" pred = ");
}
// close the code
protected void closure(SB sb) throws RuntimeException {
sb.p(";").nl();
sb.i(1).p("return pred;").nl().di(1);
sb.i().p("}").nl();
// sb.p(_grpsplit).di(1);
}
@Override protected void pre( int col, float fcmp, IcedBitSet gcmp, int equal ) {
if(equal == 2 || equal == 3 && gcmp != null) {
_grpsplit.i(1).p("// ").p(gcmp.toString()).nl();
_grpsplit.i(1).p("public static final byte[] GRPSPLIT").p(_grpcnt).p(" = new byte[] ").p(gcmp.toStrArray()).p(";").nl();
}
if( _depth > 0 ) {
int b = _bits[_depth-1];
assert b > 0 : Arrays.toString(_bits)+"\n"+_sb.toString();
if( b==1 ) _bits[_depth-1]=3;
if( b==1 || b==2 ) _sb.p('\n').i(_depth).p("?");
if( b==2 ) _sb.p(' ').pj(_fs[_depth-1]); // Dump the leaf containing float value
if( b==2 || b==3 ) _sb.p('\n').i(_depth).p(":");
}
if (_nodes>MAX_NODES) {
_sb.p("predict").p(_subtrees).p("(data)");
_nodesCnt[_depth] = _nodes;
_sbs[_depth] = _sb;
_sb = new SB();
_nodes = 0;
preamble(_sb, _subtrees);
_subtrees++;
}
      // All NAs always go to the left
_sb.p(" (Double.isNaN(data[").p(col).p("]) || ");
if(equal == 0 || equal == 1)
if (!_tm.isFromSpeeDRF) {
_sb.p("(float) data[").p(col).p(" /* ").p(_tm._names[col]).p(" */").p("] ").p(equal == 1 ? "!= " : "< ").pj(fcmp); // then left and then right (left is !=)
} else {
_sb.p("(float) data[").p(col).p(" /* ").p(_tm._names[col]).p(" */").p("] ").p(equal == 1 ? "!= " : "<= ").pj(fcmp); // then left and then right (left is !=)
}
else {
_sb.p("!water.genmodel.GeneratedModel.grpContains(GRPSPLIT").p(_grpcnt).p(", ").p(gcmp._offset).p(", (int) data[").p(col).p(" /* ").p(_tm._names[col]).p(" */").p("])");
_grpcnt++;
}
assert _bits[_depth]==0;
_bits[_depth]=1;
}
@Override protected void leaf( float pred ) {
      assert _depth==0 || _bits[_depth-1] > 0 : Arrays.toString(_bits); // the tree can be degenerate
      if( _depth==0) { // degenerate tree (a single leaf)
_sb.pj(pred);
} else if( _bits[_depth-1] == 1 ) { // No prior leaf; just memorize this leaf
_bits[_depth-1]=2; _fs[_depth-1]=pred;
} else { // Else==2 (prior leaf) or 3 (prior tree)
if( _bits[_depth-1] == 2 ) _sb.p(" ? ").pj(_fs[_depth-1]).p(" ");
else _sb.p('\n').i(_depth);
_sb.p(": ").pj(pred);
}
}
@Override protected void post( int col, float fcmp, int equal ) {
_sb.p(')');
_bits[_depth]=0;
if (_sbs[_depth]!=null) {
closure(_sb);
_csb.p(_sb);
_sb = _sbs[_depth];
_nodes = _nodesCnt[_depth];
_sbs[_depth] = null;
}
}
public void generate() {
preamble(_sb, _subtrees++); // TODO: Need to pass along group split BitSet
visit();
closure(_sb);
_sb.p(_grpsplit).di(1);
_sb.p(_csb);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/gbm/DTreeScorer.java
|
package hex.gbm;
import hex.gbm.DTree.TreeModel.CompressedTree;
import water.*;
import water.fvec.Chunk;
public abstract class DTreeScorer<T extends DTreeScorer<T>> extends MRTask2<T> {
/* @IN */ final protected int _ncols;
/* @IN */ final protected int _nclass;
/* @IN */ final protected Key[][] _treeKeys;
protected transient CompressedTree[][] _trees;
public DTreeScorer(int ncols, int nclass, Key[][] treeKeys) {
_ncols = ncols;
_nclass = nclass;
_treeKeys = treeKeys;
}
@Override protected final void setupLocal() {
int ntrees = _treeKeys.length;
_trees = new CompressedTree[ntrees][];
for (int t=0; t<ntrees; t++) {
Key[] treek = _treeKeys[t];
_trees[t] = new CompressedTree[treek.length];
// FIXME remove get by introducing fetch class for all trees
for (int i=0; i<treek.length; i++) {
if (treek[i]!=null)
_trees[t][i] = UKV.get(treek[i]);
}
}
}
protected final Chunk chk_oobt(Chunk chks[]) { return chks[_ncols+1+_nclass+_nclass+_nclass]; }
protected final Chunk chk_tree(Chunk chks[], int c) { return chks[_ncols+1+c]; }
protected final Chunk chk_resp( Chunk chks[] ) { return chks[_ncols]; }
protected void score0(double data[], float preds[], CompressedTree[] ts) {
DTreeUtils.scoreTree(data, preds, ts);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/gbm/DTreeUtils.java
|
package hex.gbm;
import hex.gbm.DTree.TreeModel.CompressedTree;
/** Toolkit class providing various useful methods for tree models */
public class DTreeUtils {
/**
* Score given tree on the row of data.
*
* @param data row of data
* @param preds array to hold resulting prediction
* @param ts a tree representation (single regression tree, or multi tree)
*/
public static void scoreTree(double data[], float preds[], CompressedTree[] ts) {
for( int c=0; c<ts.length; c++ )
if( ts[c] != null )
preds[ts.length==1?0:c+1] += ts[c].score(data);
}
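  // Illustrative usage sketch (variable names here are hypothetical, not part of this class):
  // for a 3-class model, preds must have length nclass+1 because slot 0 is reserved for the
  // final label and per-class scores are accumulated into slots 1..3:
  //   float[] preds = new float[4];
  //   DTreeUtils.scoreTree(row, preds, treesForOneIteration);
  // For a single regression tree (ts.length == 1) the score is added into preds[0] instead.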
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/gbm/GBM.java
|
package hex.gbm;
import hex.ConfusionMatrix;
import hex.VarImp;
import hex.VarImp.VarImpRI;
import hex.gbm.DTree.DecidedNode;
import hex.gbm.DTree.LeafNode;
import hex.gbm.DTree.Split;
import hex.gbm.DTree.TreeModel.TreeStats;
import hex.gbm.DTree.UndecidedNode;
import water.*;
import water.api.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.util.*;
import water.util.Log.Tag.Sys;
import java.util.Arrays;
import static water.util.ModelUtils.getPrediction;
import static water.util.Utils.div;
// Gradient Boosted Trees
//
// Based on "Elements of Statistical Learning, Second Edition, page 387"
public class GBM extends SharedTreeModelBuilder<GBM.GBMModel> {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help = "Distribution for computing loss function. AUTO selects gaussian for continuous and multinomial for categorical response", filter = Default.class, json=true, importance=ParamImportance.CRITICAL)
public Family family = Family.AUTO;
@API(help = "Learning rate, from 0. to 1.0", filter = Default.class, dmin=0, dmax=1, json=true, importance=ParamImportance.SECONDARY)
public double learn_rate = 0.1;
@API(help = "Grid search parallelism", filter = Default.class, lmax = 4, gridable=false, importance=ParamImportance.SECONDARY)
public int grid_parallelism = 1;
@API(help = "Seed for the random number generator - only for balancing classes (autogenerated)", filter = Default.class)
long seed = -1; // To follow R-semantics, each call of GBM with imbalance should provide different seed. -1 means seed autogeneration
@API(help = "Perform Group Splitting Categoricals", filter=Default.class)
public boolean group_split = true;
/** Distribution functions */
// Note: AUTO will select gaussian for continuous, and multinomial for categorical response
// TODO: Replace with drop-down that displays different distributions depending on cont/cat response
public enum Family {
AUTO, bernoulli
}
/** Sum of variable empirical improvement in squared-error. The value is not scaled! */
private transient float[/*nfeatures*/] _improvPerVar;
public static class GBMModel extends DTree.TreeModel {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help = "Model parameters", json = true)
final private GBM parameters;
@Override public final GBM get_params() { return parameters; }
@Override public final Request2 job() { return get_params(); }
@API(help = "Learning rate, from 0. to 1.0") final double learn_rate;
@API(help = "Distribution for computing loss function. AUTO selects gaussian for continuous and multinomial for categorical response")
final Family family;
@API(help = "Initially predicted value (for zero trees)")
double initialPrediction;
public GBMModel(GBM job, Key key, Key dataKey, Key testKey, String names[], String domains[][], String[] cmDomain, int ntrees, int max_depth, int min_rows, int nbins, double learn_rate, Family family, int num_folds, float[] priorClassDist, float[] classDist) {
super(key,dataKey,testKey,names,domains,cmDomain,ntrees,max_depth,min_rows,nbins,num_folds,priorClassDist,classDist);
this.parameters = Job.hygiene((GBM) job.clone());
this.learn_rate = learn_rate;
this.family = family;
}
private GBMModel(GBMModel prior, DTree[] trees, double err, ConfusionMatrix cm, TreeStats tstats) {
super(prior, trees, err, cm, tstats);
this.parameters = prior.parameters;
this.learn_rate = prior.learn_rate;
this.family = prior.family;
this.initialPrediction = prior.initialPrediction;
}
private GBMModel(GBMModel prior, DTree[] trees, TreeStats tstats) {
super(prior, trees, tstats);
this.parameters = prior.parameters;
this.learn_rate = prior.learn_rate;
this.family = prior.family;
this.initialPrediction = prior.initialPrediction;
}
private GBMModel(GBMModel prior, double err, ConfusionMatrix cm, VarImp varimp, AUCData validAUC) {
super(prior, err, cm, varimp, validAUC);
this.parameters = prior.parameters;
this.learn_rate = prior.learn_rate;
this.family = prior.family;
this.initialPrediction = prior.initialPrediction;
}
private GBMModel(GBMModel prior, Key[][] treeKeys, double[] errs, ConfusionMatrix[] cms, TreeStats tstats, VarImp varimp, AUCData validAUC) {
super(prior, treeKeys, errs, cms, tstats, varimp, validAUC);
this.parameters = prior.parameters;
this.learn_rate = prior.learn_rate;
this.family = prior.family;
}
@Override protected TreeModelType getTreeModelType() { return TreeModelType.GBM; }
@Override protected float[] score0(double[] data, float[] preds) {
float[] p = super.score0(data, preds); // These are f_k(x) in Algorithm 10.4
if(family == Family.bernoulli) {
double fx = p[1] + initialPrediction;
p[2] = 1.0f/(float)(1f+Math.exp(-fx));
p[1] = 1f-p[2];
p[0] = getPrediction(p, data);
return p;
}
if (nclasses()>1) { // classification
// Because we call Math.exp, we have to be numerically stable or else
// we get Infinities, and then shortly NaN's. Rescale the data so the
// largest value is +/-1 and the other values are smaller.
// See notes here: http://www.hongliangjie.com/2011/01/07/logsum/
float maxval=Float.NEGATIVE_INFINITY;
float dsum=0;
if (nclasses()==2) p[2] = - p[1];
// Find a max
for( int k=1; k<p.length; k++) maxval = Math.max(maxval,p[k]);
assert !Float.isInfinite(maxval) : "Something is wrong with GBM trees since returned prediction is " + Arrays.toString(p);
for(int k=1; k<p.length;k++)
dsum+=(p[k]=(float)Math.exp(p[k]-maxval));
div(p,dsum);
p[0] = getPrediction(p, data);
} else { // regression
// Prediction starts from the mean response, and adds predicted residuals
preds[0] += initialPrediction;
}
return p;
}
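    // Illustrative sketch only (not part of the original model; the method name is ours):
    // the numerically stable rescaling used in score0 above, shown on a plain array of
    // per-class tree sums. Subtracting the maximum before exponentiating cannot change the
    // normalized result, but it prevents Math.exp from overflowing to Infinity.
    static float[] toProbabilitiesSketch(float[] treeSums) {
      float max = Float.NEGATIVE_INFINITY;
      for (float f : treeSums) max = Math.max(max, f);        // find the largest score
      float sum = 0f;
      float[] p = new float[treeSums.length];
      for (int k = 0; k < treeSums.length; k++)
        sum += (p[k] = (float) Math.exp(treeSums[k] - max));  // shifted exponentials
      for (int k = 0; k < p.length; k++) p[k] /= sum;         // normalize to a distribution
      return p;
    }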
@Override protected void generateModelDescription(StringBuilder sb) {
DocGen.HTML.paragraph(sb,"Learn rate: "+learn_rate);
}
@Override protected void toJavaUnifyPreds(SB bodyCtxSB) {
if(family == Family.bernoulli) {
bodyCtxSB.i().p("// Compute Probabilities for Bernoulli 0-1 classifier").nl();
bodyCtxSB.i().p("double fx = preds[1] + "+initialPrediction+";").nl();
bodyCtxSB.i().p("preds[2] = 1.0f/(float)(1.0f+Math.exp(-fx))").nl();
bodyCtxSB.i().p("preds[1] = 1.0f-preds[2]").nl();
}
else if (isClassifier()) {
bodyCtxSB.i().p("// Compute Probabilities for classifier (scale via http://www.hongliangjie.com/2011/01/07/logsum/)").nl();
bodyCtxSB.i().p("float dsum = 0, maxval = Float.NEGATIVE_INFINITY;").nl();
if (nclasses()==2) {
bodyCtxSB.i().p("preds[2] = -preds[1];").nl();
}
bodyCtxSB.i().p("for(int i=1; i<preds.length; i++) maxval = Math.max(maxval, preds[i]);").nl();
bodyCtxSB.i().p("for(int i=1; i<preds.length; i++) dsum += (preds[i]=(float) Math.exp(preds[i] - maxval));").nl();
bodyCtxSB.i().p("for(int i=1; i<preds.length; i++) preds[i] = preds[i] / dsum;").nl();
}
else {
bodyCtxSB.i().p("// Compute Regression").nl();
bodyCtxSB.i().p("preds[1] += "+initialPrediction+";").nl();
}
}
@Override protected void setCrossValidationError(ValidatedJob job, double cv_error, water.api.ConfusionMatrix cm, AUCData auc, HitRatio hr) {
GBMModel gbmm = ((GBM)job).makeModel(this, cv_error, cm.cm == null ? null : new ConfusionMatrix(cm.cm, cms[0].nclasses()), this.varimp, auc);
gbmm._have_cv_results = true;
DKV.put(this._key, gbmm); //overwrite this model
}
}
public Frame score( Frame fr ) { return ((GBMModel)UKV.get(dest())).score(fr); }
@Override protected Log.Tag.Sys logTag() { return Sys.GBM__; }
@Override protected GBMModel makeModel(Key outputKey, Key dataKey, Key testKey, int ntrees, String[] names, String[][] domains, String[] cmDomain, float[] priorClassDist, float[] classDist) {
return new GBMModel(this, outputKey, dataKey, validation==null?null:testKey, names, domains, cmDomain, ntrees, max_depth, min_rows, nbins, learn_rate, family, n_folds,priorClassDist,classDist);
}
@Override protected GBMModel makeModel( GBMModel model, double err, ConfusionMatrix cm, VarImp varimp, AUCData validAUC) {
return new GBMModel(model, err, cm, varimp, validAUC);
}
@Override protected GBMModel makeModel(GBMModel model, DTree[] ktrees, TreeStats tstats) {
return new GBMModel(model, ktrees, tstats);
}
public GBM() { description = "Distributed GBM"; importance = true; }
@Override protected GBMModel updateModel(GBMModel additionModel, GBMModel checkpoint, boolean overwriteCheckpoint) {
    // Do not forget to clone the trees in case we are not going to overwrite the checkpoint
Key[][] treeKeys = null;
if (!overwriteCheckpoint) throw H2O.unimpl("Cloning of tree models is not implemented yet!");
else treeKeys = checkpoint.treeKeys;
return new GBMModel(additionModel, treeKeys, checkpoint.errs, checkpoint.cms, checkpoint.treeStats, checkpoint.varimp, checkpoint.validAUC);
}
/** Return the query link to this page */
public static String link(Key k, String content) {
RString rs = new RString("<a href='/2/GBM.query?source=%$key'>%content</a>");
rs.replace("key", k.toString());
rs.replace("content", content);
return rs.toString();
}
@Override protected void execImpl() {
try {
logStart();
buildModel(seed);
if (n_folds > 0) CrossValUtils.crossValidate(this);
} finally {
remove(); // Remove Job
state = UKV.<Job>get(self()).state;
new TAtomic<GBMModel>() {
@Override
public GBMModel atomic(GBMModel m) {
if (m != null) m.get_params().state = state;
return m;
}
}.invoke(dest());
}
}
@Override public int gridParallelism() {
return grid_parallelism;
}
@Override protected Response redirect() {
return GBMProgressPage.redirect(this, self(), dest());
}
@Override protected void initAlgo( GBMModel initialModel) {
// Initialize gbm-specific data structures
if (importance) _improvPerVar = new float[initialModel.nfeatures()];
// assert (family != Family.bernoulli) || (_nclass == 2) : "Bernoulli requires the response to be a 2-class categorical";
if(family == Family.bernoulli && _nclass != 2)
throw new IllegalArgumentException("Bernoulli requires the response to be a 2-class categorical");
}
@Override protected void initWorkFrame(GBMModel initialModel, Frame fr) {
// Tag out rows missing the response column
new ExcludeNAResponse().doAll(fr);
// Initial value is mean(y)
final double mean = (float) fr.vec(initialModel.responseName()).mean();
// Initialize working response based on given loss function
if (_nclass == 1) { /* regression */
initialModel.initialPrediction = mean; // Regression initially predicts the response mean
new MRTask2() {
@Override public void map(Chunk[] chks) {
Chunk tr = chk_tree(chks, 0); // there is only one tree for regression
for (int i=0; i<tr._len; i++) tr.set0(i, mean);
}
}.doAll(fr);
} else if(family == Family.bernoulli) {
// Initial value is log( mean(y)/(1-mean(y)) )
final float init = (float) Math.log(mean/(1.0f-mean));
initialModel.initialPrediction = init;
new MRTask2() {
@Override public void map(Chunk[] chks) {
Chunk tr = chk_tree(chks, 0); // only the tree for y = 0 is used
for (int i=0; i<tr._len; i++) tr.set0(i, init);
}
}.doAll(fr);
} else { /* multinomial */
/* Preserve 0s in working columns */
}
// Update tree fields based on checkpoint
if (checkpoint!=null) {
Timer t = new Timer();
new ResidualsCollector(_ncols, _nclass, initialModel.treeKeys).doAll(fr);
Log.info(logTag(), "Reconstructing tree residuals stats from checkpointed model took " + t);
}
}
// ==========================================================================
// Compute a GBM tree.
// Start by splitting all the data according to some criteria (minimize
// variance at the leaves). Record on each row which split it goes to, and
// assign a split number to it (for next pass). On *this* pass, use the
// split-number to build a per-split histogram, with a per-histogram-bucket
// variance.
@Override protected GBMModel buildModel( GBMModel model, final Frame fr, String names[], String domains[][], Timer t_build ) {
// Build trees until we hit the limit
int tid;
DTree[] ktrees = null; // Trees
TreeStats tstats = model.treeStats!=null ? model.treeStats : new TreeStats();
for( tid=0; tid<ntrees; tid++) {
      // During the first iteration the model contains 0 trees, then 1 tree, then 2 trees, ...
      // BUT if no validation set is specified, the model does not participate in voting;
      // the on-the-fly computed data are used instead
      if (tid!=0 || checkpoint==null) { // skip the initial scoring if the model already exists
model = doScoring(model, fr, ktrees, tid, tstats, false, false, false);
}
// ESL2, page 387
// Step 2a: Compute prediction (prob distribution) from prior tree results:
// Work <== f(Tree)
new ComputeProb().doAll(fr);
// ESL2, page 387
// Step 2b i: Compute residuals from the prediction (probability distribution)
// Work <== f(Work)
new ComputeRes().doAll(fr);
// ESL2, page 387, Step 2b ii, iii, iv
Timer kb_timer = new Timer();
ktrees = buildNextKTrees(fr);
Log.info(Sys.GBM__, (tid+1) + ". tree was built in " + kb_timer.toString());
if( !Job.isRunning(self()) ) break; // If canceled during building, do not bulkscore
// Check latest predictions
tstats.updateBy(ktrees);
}
// Final scoring (skip if job was cancelled)
if (Job.isRunning(self())) {
model = doScoring(model, fr, ktrees, tid, tstats, true, false, false);
}
return model;
}
// --------------------------------------------------------------------------
// Tag out rows missing the response column
class ExcludeNAResponse extends MRTask2<ExcludeNAResponse> {
@Override public void map( Chunk chks[] ) {
Chunk ys = chk_resp(chks);
for( int row=0; row<ys._len; row++ )
if( ys.isNA0(row) )
for( int t=0; t<_nclass; t++ )
chk_nids(chks,t).set0(row,-1);
}
}
// --------------------------------------------------------------------------
// Compute Prediction from prior tree results.
  // Classification (multinomial): Probability distribution of log-likelihoods
// Prob_k = exp(Work_k)/sum_all_K exp(Work_k)
// Classification (bernoulli): Probability of y = 1 given logit link function
// Prob_0 = 1/(1 + exp(Work)), Prob_1 = 1/(1 + exp(-Work))
// Regression: Just prior tree results
// Work <== f(Tree)
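  //
  // Illustrative numbers (ours, not from the code): for a 3-class multinomial row with
  // Work = {1.0, 3.0, 1.0}, Prob = {e^1, e^3, e^1} / (e^1 + e^3 + e^1) ~= {0.11, 0.79, 0.11};
  // for bernoulli with Work = 0 the row gets Prob_0 = Prob_1 = 0.5.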
class ComputeProb extends MRTask2<ComputeProb> {
@Override public void map( Chunk chks[] ) {
Chunk ys = chk_resp(chks);
if( family == Family.bernoulli ) {
Chunk tr = chk_tree(chks,0);
Chunk wk = chk_work(chks,0);
for( int row = 0; row < ys._len; row++)
// wk.set0(row, 1.0f/(1f+Math.exp(-tr.at0(row))) ); // Prob_1
wk.set0(row, 1.0f/(1f+Math.exp(tr.at0(row))) ); // Prob_0
} else if( _nclass > 1 ) { // Classification
float fs[] = new float[_nclass+1];
for( int row=0; row<ys._len; row++ ) {
float sum = score1(chks,fs,row);
if( Float.isInfinite(sum) ) // Overflow (happens for constant responses)
for( int k=0; k<_nclass; k++ )
chk_work(chks,k).set0(row,Float.isInfinite(fs[k+1])?1.0f:0.0f);
else
for( int k=0; k<_nclass; k++ ) // Save as a probability distribution
chk_work(chks,k).set0(row,fs[k+1]/sum);
}
} else { // Regression
Chunk tr = chk_tree(chks,0); // Prior tree sums
Chunk wk = chk_work(chks,0); // Predictions
for( int row=0; row<ys._len; row++ )
wk.set0(row,(float)tr.at0(row));
}
}
}
// Read the 'tree' columns, do model-specific math and put the results in the
// fs[] array, and return the sum. Dividing any fs[] element by the sum
// turns the results into a probability distribution.
@Override protected float score1( Chunk chks[], float fs[/*nclass*/], int row ) {
if(family == Family.bernoulli) {
fs[1] = 1.0f/(float)(1f+Math.exp(chk_tree(chks,0).at0(row)));
fs[2] = 1f-fs[1];
return fs[1]+fs[2];
}
    if( _nclass == 1 ) // Regression?
return fs[0]=(float)chk_tree(chks,0).at0(row); // Regression.
if( _nclass == 2 ) { // The Boolean Optimization
// This optimization assumes the 2nd tree of a 2-class system is the
// inverse of the first. Fill in the missing tree
fs[1] = (float)Math.exp(chk_tree(chks,0).at0(row));
      fs[2] = 1.0f/fs[1]; // exp(-d) === 1/exp(d)
return fs[1]+fs[2];
}
float sum=0;
    for( int k=0; k<_nclass; k++ ) // Sum across the likelihoods
sum+=(fs[k+1]=(float)Math.exp(chk_tree(chks,k).at0(row)));
return sum;
}
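  // Worked example of the 2-class ("Boolean Optimization") branch above, with an illustrative
  // tree sum t = 1.2: fs[1] = e^1.2 ~= 3.32 and fs[2] = 1/fs[1] = e^-1.2 ~= 0.30, so after
  // dividing by the returned sum (~3.62) the class probabilities are roughly 0.92 and 0.08.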
// --------------------------------------------------------------------------
// Compute Residuals from Actuals
// Work <== f(Work)
class ComputeRes extends MRTask2<ComputeRes> {
@Override public void map( Chunk chks[] ) {
Chunk ys = chk_resp(chks);
if(family == Family.bernoulli) {
for(int row = 0; row < ys._len; row++) {
if( ys.isNA0(row) ) continue;
int y = (int)ys.at80(row); // zero-based response variable
Chunk wk = chk_work(chks,0);
// wk.set0(row, y-(float)wk.at0(row)); // wk.at0(row) is Prob_1
wk.set0(row, y-1f+(float)wk.at0(row)); // wk.at0(row) is Prob_0
}
} else if( _nclass > 1 ) { // Classification
for( int row=0; row<ys._len; row++ ) {
if( ys.isNA0(row) ) continue;
int y = (int)ys.at80(row); // zero-based response variable
// Actual is '1' for class 'y' and '0' for all other classes
for( int k=0; k<_nclass; k++ ) {
if( _distribution[k] != 0 ) {
Chunk wk = chk_work(chks,k);
wk.set0(row, (y==k?1f:0f)-(float)wk.at0(row) );
}
}
}
} else { // Regression
Chunk wk = chk_work(chks,0); // Prediction==>Residuals
for( int row=0; row<ys._len; row++ )
wk.set0(row, (float)(ys.at0(row)-wk.at0(row)) );
}
}
}
// --------------------------------------------------------------------------
// Build the next k-trees, which is trying to correct the residual error from
  // the prior trees. From ESL2, page 387. Step 2b ii, iii.
private DTree[] buildNextKTrees(Frame fr) {
// We're going to build K (nclass) trees - each focused on correcting
// errors for a single class.
final DTree[] ktrees = new DTree[_nclass];
// Initial set of histograms. All trees; one leaf per tree (the root
// leaf); all columns
DHistogram hcs[][][] = new DHistogram[_nclass][1/*just root leaf*/][_ncols];
// Adjust nbins for the top-levels
int adj_nbins = Math.max((1<<(10-0)),nbins);
for( int k=0; k<_nclass; k++ ) {
// Initially setup as-if an empty-split had just happened
if( _distribution == null || _distribution[k] != 0 ) {
// The Boolean Optimization
// This optimization assumes the 2nd tree of a 2-class system is the
// inverse of the first. This is false for DRF (and true for GBM) -
// DRF picks a random different set of columns for the 2nd tree.
if( k==1 && _nclass==2 ) continue;
ktrees[k] = new DTree(fr._names,_ncols,(char)nbins,(char)_nclass,min_rows);
new GBMUndecidedNode(ktrees[k],-1,DHistogram.initialHist(fr,_ncols,adj_nbins,hcs[k][0],group_split,false) ); // The "root" node
}
}
int[] leafs = new int[_nclass]; // Define a "working set" of leaf splits, from here to tree._len
// ----
// ESL2, page 387. Step 2b ii.
// One Big Loop till the ktrees are of proper depth.
// Adds a layer to the trees each pass.
int depth=0;
for( ; depth<max_depth; depth++ ) {
if( !Job.isRunning(self()) ) return null;
hcs = buildLayer(fr, ktrees, leafs, hcs, false, false);
// If we did not make any new splits, then the tree is split-to-death
if( hcs == null ) break;
}
// Each tree bottomed-out in a DecidedNode; go 1 more level and insert
// LeafNodes to hold predictions.
for( int k=0; k<_nclass; k++ ) {
DTree tree = ktrees[k];
if( tree == null ) continue;
int leaf = leafs[k] = tree.len();
for( int nid=0; nid<leaf; nid++ ) {
if( tree.node(nid) instanceof DecidedNode ) {
DecidedNode dn = tree.decided(nid);
for( int i=0; i<dn._nids.length; i++ ) {
int cnid = dn._nids[i];
if( cnid == -1 || // Bottomed out (predictors or responses known constant)
tree.node(cnid) instanceof UndecidedNode || // Or chopped off for depth
(tree.node(cnid) instanceof DecidedNode && // Or not possible to split
((DecidedNode)tree.node(cnid))._split.col()==-1) )
dn._nids[i] = new GBMLeafNode(tree,nid).nid(); // Mark a leaf here
}
// Handle the trivial non-splitting tree
if( nid==0 && dn._split.col() == -1 )
new GBMLeafNode(tree,-1,0);
}
}
} // -- k-trees are done
// ----
// ESL2, page 387. Step 2b iii. Compute the gammas, and store them back
// into the tree leaves. Includes learn_rate.
// For classification (bernoulli):
// gamma_i = sum res_i / sum p_i*(1 - p_i) where p_i = y_i - res_i
// For classification (multinomial):
// gamma_i_k = (nclass-1)/nclass * (sum res_i / sum (|res_i|*(1-|res_i|)))
// For regression (gaussian):
// gamma_i = sum res_i / count(res_i)
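    // Worked example for the regression case (illustrative numbers): a leaf that received
    // residuals {0.5, -0.2, 0.3} has sum(res) = 0.6 and count = 3, so gamma = 0.6/3 = 0.2
    // and the stored leaf prediction becomes learn_rate * 0.2.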
GammaPass gp = new GammaPass(ktrees,leafs).doAll(fr);
double m1class = _nclass > 1 && family != Family.bernoulli ? (double)(_nclass-1)/_nclass : 1.0; // K-1/K for multinomial
for( int k=0; k<_nclass; k++ ) {
final DTree tree = ktrees[k];
if( tree == null ) continue;
for( int i=0; i<tree._len-leafs[k]; i++ ) {
double g = gp._gss[k][i] == 0 // Constant response?
? (gp._rss[k][i]==0?0:1000) // Cap (exponential) learn, instead of dealing with Inf
: learn_rate*m1class*gp._rss[k][i]/gp._gss[k][i];
assert !Double.isNaN(g);
((LeafNode)tree.node(leafs[k]+i))._pred = g;
}
}
// ----
// ESL2, page 387. Step 2b iv. Cache the sum of all the trees, plus the
// new tree, in the 'tree' columns. Also, zap the NIDs for next pass.
// Tree <== f(Tree)
// Nids <== 0
new MRTask2() {
@Override public void map( Chunk chks[] ) {
// For all tree/klasses
for( int k=0; k<_nclass; k++ ) {
final DTree tree = ktrees[k];
if( tree == null ) continue;
final Chunk nids = chk_nids(chks,k);
final Chunk ct = chk_tree(chks,k);
for( int row=0; row<nids._len; row++ ) {
int nid = (int)nids.at80(row);
if( nid < 0 ) continue;
// Prediction stored in Leaf is cut to float to be deterministic in reconstructing
// <tree_klazz> fields from tree prediction
ct.set0(row, (float)(ct.at0(row) + (float) ((LeafNode)tree.node(nid))._pred));
nids.set0(row,0);
}
}
}
}.doAll(fr);
// Collect leaves stats
for (int i=0; i<ktrees.length; i++)
if( ktrees[i] != null )
ktrees[i].leaves = ktrees[i].len() - leafs[i];
// DEBUG: Print the generated K trees
// printGenerateTrees(ktrees);
return ktrees;
}
// ---
// ESL2, page 387. Step 2b iii.
// Nids <== f(Nids)
private class GammaPass extends MRTask2<GammaPass> {
final DTree _trees[]; // Read-only, shared (except at the histograms in the Nodes)
final int _leafs[]; // Number of active leaves (per tree)
// Per leaf: sum(res);
double _rss[/*tree/klass*/][/*tree-relative node-id*/];
    // Per leaf: multinomial: sum(|res|*(1-|res|)), gaussian: sum(1), bernoulli: sum((y-res)*(1-y+res))
double _gss[/*tree/klass*/][/*tree-relative node-id*/];
GammaPass(DTree trees[], int leafs[]) { _leafs=leafs; _trees=trees; }
@Override public void map( Chunk[] chks ) {
_gss = new double[_nclass][];
_rss = new double[_nclass][];
final Chunk resp = chk_resp(chks); // Response for this frame
// For all tree/klasses
for( int k=0; k<_nclass; k++ ) {
final DTree tree = _trees[k];
final int leaf = _leafs[k];
if( tree == null ) continue; // Empty class is ignored
// A leaf-biased array of all active Tree leaves.
final double gs[] = _gss[k] = new double[tree._len-leaf];
final double rs[] = _rss[k] = new double[tree._len-leaf];
final Chunk nids = chk_nids(chks,k); // Node-ids for this tree/class
final Chunk ress = chk_work(chks,k); // Residuals for this tree/class
// If we have all constant responses, then we do not split even the
// root and the residuals should be zero.
if( tree.root() instanceof LeafNode ) continue;
for( int row=0; row<nids._len; row++ ) { // For all rows
int nid = (int)nids.at80(row); // Get Node to decide from
if( nid < 0 ) continue; // Missing response
if( tree.node(nid) instanceof UndecidedNode ) // If we bottomed out the tree
nid = tree.node(nid)._pid; // Then take parent's decision
DecidedNode dn = tree.decided(nid); // Must have a decision point
if( dn._split._col == -1 ) // Unable to decide?
dn = tree.decided(nid = dn._pid); // Then take parent's decision
int leafnid = dn.ns(chks,row); // Decide down to a leafnode
assert leaf <= leafnid && leafnid < tree._len;
assert tree.node(leafnid) instanceof LeafNode;
          // Note: I know which leaf/region I end up in, but I do not care about
// the prediction presented by the tree. For GBM, we compute the
// sum-of-residuals (and sum/abs/mult residuals) for all rows in the
// leaf, and get our prediction from that.
nids.set0(row,leafnid);
assert !ress.isNA0(row);
// Compute numerator (rs) and denominator (gs) of gamma
double res = ress.at0(row);
double ares = Math.abs(res);
if(family == Family.bernoulli) {
double prob = resp.at0(row) - res;
gs[leafnid-leaf] += prob*(1-prob);
} else
gs[leafnid-leaf] += _nclass > 1 ? ares*(1-ares) : 1;
rs[leafnid-leaf] += res;
}
}
}
@Override public void reduce( GammaPass gp ) {
Utils.add(_gss,gp._gss);
Utils.add(_rss,gp._rss);
}
}
@Override protected DecidedNode makeDecided( UndecidedNode udn, DHistogram hs[] ) {
return new GBMDecidedNode(udn,hs);
}
// ---
// GBM DTree decision node: same as the normal DecidedNode, but
// specifies a decision algorithm given complete histograms on all
// columns. GBM algo: find the lowest error amongst *all* columns.
static class GBMDecidedNode extends DecidedNode {
GBMDecidedNode( UndecidedNode n, DHistogram[] hs ) { super(n,hs); }
@Override public UndecidedNode makeUndecidedNode(DHistogram[] hs ) {
return new GBMUndecidedNode(_tree,_nid,hs);
}
// Find the column with the best split (lowest score). Unlike RF, GBM
// scores on all columns and selects splits on all columns.
@Override public DTree.Split bestCol( UndecidedNode u, DHistogram[] hs ) {
DTree.Split best = new DTree.Split(-1,-1,null,(byte)0,Double.MAX_VALUE,Double.MAX_VALUE,0L,0L,0,0);
if( hs == null ) return best;
for( int i=0; i<hs.length; i++ ) {
if( hs[i]==null || hs[i].nbins() <= 1 ) continue;
DTree.Split s = hs[i].scoreMSE(i);
if( s == null ) continue;
if( best == null || s.se() < best.se() ) best = s;
if( s.se() <= 0 ) break; // No point in looking further!
}
return best;
}
}
// ---
// GBM DTree undecided node: same as the normal UndecidedNode, but specifies
// a list of columns to score on now, and then decide over later.
// GBM algo: use all columns
static class GBMUndecidedNode extends UndecidedNode {
GBMUndecidedNode( DTree tree, int pid, DHistogram hs[] ) { super(tree,pid,hs); }
// Randomly select mtry columns to 'score' in following pass over the data.
// In GBM, we use all columns (as opposed to RF, which uses a random subset).
@Override public int[] scoreCols( DHistogram[] hs ) { return null; }
}
// ---
static class GBMLeafNode extends LeafNode {
GBMLeafNode( DTree tree, int pid ) { super(tree,pid); }
GBMLeafNode( DTree tree, int pid, int nid ) { super(tree,pid,nid); }
// Insert just the predictions: a single byte/short if we are predicting a
// single class, or else the full distribution.
@Override protected AutoBuffer compress(AutoBuffer ab) { assert !Double.isNaN(_pred); return ab.put4f((float)_pred); }
@Override protected int size() { return 4; }
}
/** Compute relative variable importance for GBM model.
*
   * See formulas (45) and (35) in Friedman: Greedy Function Approximation: A Gradient Boosting Machine.
   * The algorithm used here can also compute individual feature importance per output class. */
@Override protected VarImp doVarImpCalc(GBMModel model, DTree[] ktrees, int tid, Frame validationFrame, boolean scale) {
assert model.ntrees()-1-_ntreesFromCheckpoint == tid : "varimp computation expect model with already serialized trees: tid="+tid;
// Iterates over k-tree
for (DTree t : ktrees) { // Iterate over trees
if (t!=null) {
for (int n = 0; n< t.len()-t.leaves; n++)
if (t.node(n) instanceof DecidedNode) { // it is split node
Split split = t.decided(n)._split;
if (split.col()!=-1) // Skip impossible splits ~ leafs
_improvPerVar[split.col()] += split.improvement(); // least squares improvement
}
}
}
// Compute variable importance for all trees in model
float[] varimp = new float[model.nfeatures()];
int ntreesTotal = model.ntrees() * model.nclasses();
int maxVar = 0;
for (int var=0; var<_improvPerVar.length; var++) {
varimp[var] = _improvPerVar[var] / ntreesTotal;
if (varimp[var] > varimp[maxVar]) maxVar = var;
}
    // Scale varimp relative to the most important variable (the maximum becomes 1)
if (scale) {
float maxVal = varimp[maxVar];
for (int var=0; var<varimp.length; var++) varimp[var] /= maxVal;
}
return new VarImpRI(varimp);
}
/**
* Cross-Validate a GBM model by building new models on N train/test holdout splits
* @param splits Frames containing train/test splits
* @param cv_preds Array of Frames to store the predictions for each cross-validation run
* @param offsets Array to store the offsets of starting row indices for each cross-validation run
* @param i Which fold of cross-validation to perform
*/
@Override public void crossValidate(Frame[] splits, Frame[] cv_preds, long[] offsets, int i) {
// Train a clone with slightly modified parameters (to account for cross-validation)
GBM cv = (GBM) this.clone();
cv.genericCrossValidation(splits, offsets, i);
cv_preds[i] = ((GBMModel) UKV.get(cv.dest())).score(cv.validation);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/gbm/ResidualsCollector.java
|
package hex.gbm;
import java.util.Arrays;
import water.Key;
import water.fvec.Chunk;
public class ResidualsCollector extends DTreeScorer<ResidualsCollector> {
public ResidualsCollector(int ncols, int nclass, Key[][] treeKeys) {
super(ncols, nclass, treeKeys);
}
@Override public void map(Chunk[] chks) {
double[] data = new double[_ncols];
float [] preds = new float[_nclass+1];
int ntrees = _trees.length;
Chunk cys = chk_resp(chks);
for( int tidx=0; tidx<ntrees; tidx++) { // tree
for (int row=0; row<cys._len; row++) {
// Make a prediction
for (int i=0;i<_ncols;i++) data[i] = chks[i].at0(row);
Arrays.fill(preds, 0);
score0(data, preds, _trees[tidx]);
if (_nclass==1) preds[1]=preds[0]; // regression shortcut
// Write tree predictions
for (int c=0;c<_nclass;c++) { // over all class
if (preds[1+c] != 0) {
Chunk ctree = chk_tree(chks, c);
ctree.set0(row, (float)(ctree.at0(row) + preds[1+c]));
}
}
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/gbm/SharedTreeModelBuilder.java
|
package hex.gbm;
import water.api.AUCData;
import static water.util.MRUtils.sampleFrameStratified;
import static water.util.ModelUtils.getPrediction;
import hex.ConfusionMatrix;
import hex.VarImp;
import hex.drf.DRF;
import hex.rng.MersenneTwisterRNG;
import jsr166y.CountedCompleter;
import water.*;
import water.H2O.H2OCountedCompleter;
import water.Job.ValidatedJob;
import water.api.AUC;
import water.api.DocGen;
import water.api.ParamImportance;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.Log;
import water.util.Log.Tag.Sys;
import water.util.MRUtils;
import water.util.ModelUtils;
import water.util.Utils;
import java.util.Arrays;
import java.util.Random;
/**
* Shared (distributed) trees builder.
*
* <p>Used for both <em>Gradient Boosted Method</em> (see {@link GBM}) and <em>Random
* Forest</em> (see {@link DRF}), and really could be used for any decision-tree builder.</p>
*
 * <p>While this is wholly an H<sub>2</sub>O design, we found these papers afterwards that
 * describe our design fairly well:</p>
* <ul>
* <li><a href="http://www.cse.wustl.edu/~kilian/papers/fr819-tyreeA.pdf">Parallel GBRT</a></li>
* <li><a href="http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf">Streaming parallel decision tree</a></li>
* </ul>
*
* <p>Note that our <em>dynamic histogram</em> technique is different (surely faster, and
* probably less mathematically clean). I'm sure a host of other smaller details
* differ also - but in the Big Picture the paper and our algorithm are similar.</p>
*/
public abstract class SharedTreeModelBuilder<TM extends DTree.TreeModel> extends ValidatedJob {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help = "Number of trees. Grid Search, comma sep values:50,100,150,200", filter = Default.class, lmin=1, lmax=1000000, json=true, importance=ParamImportance.CRITICAL)
public int ntrees = 50;
@API(help = "Maximum tree depth. Grid Search, comma sep values:5,7", filter = Default.class, lmin=1, lmax=10000, json=true, importance=ParamImportance.CRITICAL)
public int max_depth = 5;
@API(help = "Fewest allowed observations in a leaf (in R called 'nodesize'). Grid Search, comma sep values", filter = Default.class, lmin=1, json=true, importance=ParamImportance.SECONDARY)
public int min_rows = 10;
@API(help = "Build a histogram of this many bins, then split at the best point", filter = Default.class, lmin=2, lmax=10000, json=true, importance=ParamImportance.SECONDARY)
public int nbins = 20;
@API(help = "Perform scoring after each iteration (can be slow)", filter = Default.class, json=true)
public boolean score_each_iteration = false;
@API(help = "Compute variable importance (true/false).", filter = Default.class )
protected boolean importance = false; // compute variable importance
/**
* For imbalanced data, balance training data class counts via
* over/under-sampling. This can result in improved predictive accuracy.
*/
@API(help = "Balance training data class counts via over/under-sampling (for imbalanced data)", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public boolean balance_classes = false;
/**
* When classes are balanced, limit the resulting dataset size to the
* specified multiple of the original dataset size.
*/
@API(help = "Maximum relative size of the training data after balancing class counts (can be less than 1.0)", filter = Default.class, json = true, dmin=1e-3, importance = ParamImportance.EXPERT)
public float max_after_balance_size = Float.POSITIVE_INFINITY;
@API(help = "Model checkpoint to start building a new model from", filter = Default.class, json = true, required = false)
public Key checkpoint;
@API(help = "Overwrite checkpoint", filter = Default.class, json = true, required = false)
public boolean overwrite_checkpoint = true;
// @API(help = "Active feature columns")
protected int _ncols;
// @API(help = "Rows in training dataset")
protected long _nrows;
// @API(help = "Number of classes")
protected int _nclass;
@API(help = "Class distribution")
protected long _distribution[];
// Distribution of classes in response
protected float[] _priorClassDist = null;
// New distribution of classes if input frame was modified (resampled, balanced)
protected float[] _modelClassDist = null;
// Number of trees inherited from checkpoint
protected int _ntreesFromCheckpoint;
/** Maximal number of supported levels in response. */
public static final int MAX_SUPPORTED_LEVELS = 1000;
/** Marker for already decided row. */
static public final int DECIDED_ROW = -1;
/** Marker for sampled out rows */
static public final int OUT_OF_BAG = -2;
@Override public float progress(){
Value value = DKV.get(dest());
DTree.TreeModel m = value != null ? (DTree.TreeModel) value.get() : null;
return m == null ? 0 : cv_progress(m.ntrees() / (float) m.N);
}
// Verify input parameters
@Override protected void init() {
super.init();
// Check parameters
assert 0 <= ntrees && ntrees < 1000000; // Sanity check
// Should be handled by input
//assert response.isEnum() : "Response is not enum";
assert (classification && (response.isInt() || response.isEnum())) || // Classify Int or Enums
(!classification && !response.isEnum()) : "Classification="+classification + " and response="+response.isInt(); // Regress Int or Float
if (source.numRows() - response.naCnt() <=0)
throw new IllegalArgumentException("Dataset contains too many NAs!");
_ncols = _train.length;
_nrows = source.numRows() - response.naCnt();
assert (_nrows>0) : "Dataset contains no rows - validation of input parameters is probably broken!";
// Transform response to enum
// TODO: moved to shared model job
if( !response.isEnum() && classification ) {
response = response.toEnum();
gtrash(response); //_gen_enum = true;
}
_nclass = response.isEnum() ? (char)(response.domain().length) : 1;
if (classification && _nclass <= 1)
throw new IllegalArgumentException("Constant response column!");
if (_nclass > MAX_SUPPORTED_LEVELS)
throw new IllegalArgumentException("Too many levels in response column!");
int usableColumns = 0;
    assert _ncols == _train.length : "Number of selected train columns does not correspond to the number of columns!";
for (int i = 0; i < _ncols; i++) {
Vec v = _train[i];
if (v.isBad() || v.isConst()) continue;
usableColumns++;
}
    if (usableColumns==0) throw new IllegalArgumentException("There is no usable column to generate the model!");
    if (checkpoint!=null && DKV.get(checkpoint)==null) throw new IllegalArgumentException("Checkpoint "+checkpoint.toString() + " does not exist!");
}
@Override protected Key defaultDestKey() {
if (checkpoint!=null && overwrite_checkpoint)
return checkpoint;
else
return super.defaultDestKey();
}
// --------------------------------------------------------------------------
// Driver for model-building.
public void buildModel(long seed) {
final Key outputKey = dest();
final Key dataKey = source != null ? source._key : null;
final Key testKey = validation != null ? validation._key : dataKey;
// Lock the input datasets against deletes
source.read_lock(self());
if( validation != null && !source._key.equals(validation._key) )
validation.read_lock(self());
// Prepare a frame for this tree algorithm run
Frame fr = new Frame(_names, _train);
fr.add(_responseName,response);
final Frame frm = new Frame(fr); // Model-Frame; no extra columns
String names[] = frm.names();
String domains[][] = frm.domains();
// For doing classification on Integer (not Enum) columns, we want some
// handy names in the Model. This really should be in the Model code.
String[] domain = response.domain();
    if( domain == null && _nclass > 1 ) // No names? Something is wrong since we converted the response to enum already!
      assert false : "Response domain names should always be present for classification";
if( domain == null ) domain = new String[] {"r"}; // For regression, give a name to class 0
// Compute class distribution
if (classification) {
MRUtils.ClassDist cdmt = new MRUtils.ClassDist(_nclass).doAll(response);
_distribution = cdmt.dist();
_priorClassDist = cdmt.rel_dist();
}
// Handle imbalanced classes by stratified over/under-sampling
// initWorkFrame sets the modeled class distribution, and model.score() corrects the probabilities back using the distribution ratios
float[] trainSamplingFactors;
if (classification && balance_classes) {
int response_idx = fr.find(_responseName);
trainSamplingFactors = new float[domain.length]; //leave initialized to 0 -> will be filled up below
Frame stratified = sampleFrameStratified(
fr, fr.lastVec(), trainSamplingFactors, (long)(max_after_balance_size*fr.numRows()), seed, true, false);
if (stratified != fr) {
fr = stratified;
_nrows = fr.numRows();
response = fr.vecs()[response_idx];
// Recompute distribution since the input frame was modified
MRUtils.ClassDist cdmt = new MRUtils.ClassDist(_nclass).doAll(response);
_distribution = cdmt.dist();
_modelClassDist = cdmt.rel_dist();
gtrash(stratified);
}
}
Log.info(logTag(), "Prior class distribution: " + Arrays.toString(_priorClassDist));
Log.info(logTag(), "Model class distribution: " + Arrays.toString(_modelClassDist));
// Also add to the basic working Frame these sets:
// nclass Vecs of current forest results (sum across all trees)
// nclass Vecs of working/temp data
// nclass Vecs of NIDs, allowing 1 tree per class
// Current forest values: results of summing the prior M trees
for( int i=0; i<_nclass; i++ )
fr.add("Tree_"+domain[i], response.makeZero());
// Initial work columns. Set-before-use in the algos.
for( int i=0; i<_nclass; i++ )
fr.add("Work_"+domain[i], response.makeZero());
    // One tree per class; each tree needs a NIDs column. For empty classes use a -1
    // NID, signifying an empty regression tree.
for( int i=0; i<_nclass; i++ )
fr.add("NIDs_"+domain[i], response.makeCon(_distribution==null ? 0 : (_distribution[i]==0?-1:0)));
// Timer for model building
Timer bm_timer = new Timer();
long before = System.currentTimeMillis();
// Fetch checkpoint
    assert checkpoint==null || (!(overwrite_checkpoint && checkpoint!=null) || outputKey==checkpoint): "If the checkpoint is to be overwritten then the output key has to equal the checkpoint key";
TM checkpointModel = checkpoint!=null ? (TM) UKV.get(checkpoint) : null;
// Create an INITIAL MODEL based on given parameters
TM model = makeModel(outputKey, dataKey, testKey, checkpointModel!=null?ntrees+checkpointModel.ntrees():ntrees,names, domains, getCMDomain(), _priorClassDist, _modelClassDist);
// Update the model by a checkpoint
if (checkpointModel!=null) {
      checkpointModel.read_lock(self()); // lock it for read to prevent any other job from starting to work on it
try {
// Create a new initial model based on given checkpoint
// TODO: check compatibility of parameters !
model = updateModel(model, checkpointModel, overwrite_checkpoint);
_ntreesFromCheckpoint = checkpointModel.ntrees();
} finally { checkpointModel.unlock(self()); }
}
// Save the model ! (delete_and_lock has side-effect of saving model into DKV)
if (checkpoint!=null && overwrite_checkpoint)
model.write_lock(self()); // do not delete previous model since it would trigger delete of stored trees which we need
else
model.delete_and_lock(self()); // we can safely delete any previous model since this one should be the first one
// Prepare and cache adapted validation dataset if it is necessary
prepareValidationWithModel(model);
try {
      // Initialize the algorithm
initAlgo(model);
// Init working frame
initWorkFrame(model, fr);
// Compute the model
model = buildModel(model, fr, names, domains, bm_timer);
//} catch (Throwable t) { t.printStackTrace();
} finally {
model.unlock(self()); // Update and unlock model
cleanUp(fr,bm_timer); // Shared cleanup
model.start_training(before);
model.stop_training();
}
}
// Tree model cleanup
protected void cleanUp(Frame fr, Timer t_build) {
//super.cleanUp(fr, t_build);
Log.info(logTag(),"Modeling done in "+t_build);
// Remove temp vectors; cleanup the Frame
while( fr.numCols() > _ncols+1/*Do not delete the response vector*/ )
UKV.remove(fr.remove(fr.numCols()-1)._key);
// Unlock the input datasets against deletes
source.unlock(self());
if( validation != null && !source._key.equals(validation._key) )
validation.unlock(self());
}
transient long _timeLastScoreStart, _timeLastScoreEnd, _firstScore;
protected TM doScoring(TM model, Frame fTrain, DTree[] ktrees, int tid, DTree.TreeModel.TreeStats tstats, boolean finalScoring, boolean oob, boolean build_tree_one_node ) {
long now = System.currentTimeMillis();
if( _firstScore == 0 ) _firstScore=now;
long sinceLastScore = now-_timeLastScoreStart;
Score sc = null;
// If validation is specified we use a model for scoring, so we need to update it!
// First we save model with trees (i.e., make them available for scoring)
// and then update it with resulting error
model = makeModel(model, ktrees, tstats);
model.update(self());
// Now model already contains tid-trees in serialized form
if( score_each_iteration ||
finalScoring ||
(now-_firstScore < 4000) || // Score every time for 4 secs
// Throttle scoring to keep the cost sane; limit to a 10% duty cycle & every 4 secs
(sinceLastScore > 4000 && // Limit scoring updates to every 4sec
(double)(_timeLastScoreEnd-_timeLastScoreStart)/sinceLastScore < 0.1) ) { // 10% duty cycle
_timeLastScoreStart = now;
// Perform scoring - first get adapted validation response
Response2CMAdaptor vadaptor = getValidAdaptor();
sc = new Score().doIt(model, fTrain, vadaptor, oob, build_tree_one_node).report(logTag(),tid,ktrees);
_timeLastScoreEnd = System.currentTimeMillis();
}
// Compute variable importance for this tree if necessary
VarImp varimp = null;
if (importance && ktrees!=null) { // compute this tree votes but skip the first scoring call which is done over empty forest
Timer vi_timer = new Timer();
varimp = doVarImpCalc(model, ktrees, tid-1, fTrain, false);
Log.info(logTag(), "Computation of variable importance with "+tid+"th-tree took: " + vi_timer.toString());
}
// Double update - after scoring
model = makeModel(model,
sc==null ? Double.NaN : sc.mse(),
sc==null ? null : (_nclass>1? new ConfusionMatrix(sc._cm):null),
varimp,
sc==null ? null : (_nclass==2 ? makeAUC(toCMArray(sc._cms), ModelUtils.DEFAULT_THRESHOLDS) : null)
);
model.update(self());
return model;
}
protected abstract VarImp doVarImpCalc(TM model, DTree[] ktrees, int tid, Frame validationFrame, boolean scale);
private ConfusionMatrix[] toCMArray(long[][][] cms) {
int n = cms.length;
ConfusionMatrix[] res = new ConfusionMatrix[n];
for (int i = 0; i < n; i++) res[i] = new ConfusionMatrix(cms[i]);
return res;
}
public boolean supportsBagging() { return false; }
// --------------------------------------------------------------------------
  // Convenience accessor for a complex chunk layout.
// Wish I could name the array elements nicer...
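  // Layout implied by the accessors below (indices into the working Frame/Chunk array):
  //   [0 .. _ncols-1]                ... predictor columns
  //   [_ncols]                       ... response column
  //   next _nclass columns           ... per-class "Tree_" columns (forest sums so far)
  //   next _nclass columns           ... per-class "Work_" columns (working/temp data)
  //   next _nclass columns           ... per-class "NIDs_" columns (current node id per row)
  //   one trailing column            ... OOB marker, only for builders that support bagging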
protected Chunk chk_resp( Chunk chks[] ) { return chks[_ncols]; }
protected Chunk chk_tree( Chunk chks[], int c ) { return chks[_ncols+1+c]; }
protected Chunk chk_work( Chunk chks[], int c ) { return chks[_ncols+1+_nclass+c]; }
protected Chunk chk_nids( Chunk chks[], int t ) { return chks[_ncols+1+_nclass+_nclass+t]; }
  // Out-of-bag trees counter - only one, since it is shared across the k trees
protected Chunk chk_oobt(Chunk chks[]) { return chks[_ncols+1+_nclass+_nclass+_nclass]; }
protected final Vec vec_nids( Frame fr, int t) { return fr.vecs()[_ncols+1+_nclass+_nclass+t]; }
protected final Vec vec_resp( Frame fr, int t) { return fr.vecs()[_ncols]; }
protected final Vec vec_tree( Frame fr, int c ) { return fr.vecs()[_ncols+1+c]; }
protected double[] data_row( Chunk chks[], int row, double[] data) {
assert data.length == _ncols;
for(int f=0; f<_ncols; f++) data[f] = chks[f].at0(row);
return data;
}
// --------------------------------------------------------------------------
// Fuse 2 conceptual passes into one:
//
// Pass 1: Score a prior partially-built tree model, and make new Node
// assignments to every row. This involves pulling out the current
// assigned DecidedNode, "scoring" the row against that Node's
// decision criteria, and assigning the row to a new child
// UndecidedNode (and giving it an improved prediction).
//
// Pass 2: Build new summary DHistograms on the new child UndecidedNodes
// every row got assigned into. Collect counts, mean, variance, min,
// max per bin, per column.
//
// The result is a set of DHistogram arrays; one DHistogram array for
  // each unique 'leaf' in the tree being histogrammed in parallel. These have
  // node IDs (nids) from 'leaf' to 'tree._len'. Each DHistogram array is
// for all the columns in that 'leaf'.
//
// The other result is a prediction "score" for the whole dataset, based on
// the previous passes' DHistograms.
public class ScoreBuildHistogram extends MRTask2<ScoreBuildHistogram> {
final int _k; // Which tree
final DTree _tree; // Read-only, shared (except at the histograms in the Nodes)
final int _leaf; // Number of active leaves (per tree)
// Histograms for every tree, split & active column
final DHistogram _hcs[/*tree-relative node-id*/][/*column*/];
final boolean _subset; // True if working a subset of cols
public ScoreBuildHistogram(H2OCountedCompleter cc, int k, DTree tree, int leaf, DHistogram hcs[][], boolean subset) {
super(cc);
_k = k;
_tree= tree;
_leaf= leaf;
_hcs = hcs;
_subset = subset;
}
// Once-per-node shared init
@Override public void setupLocal( ) {
// Init all the internal tree fields after shipping over the wire
_tree.init_tree();
// Allocate local shared memory histograms
for( int l=_leaf; l<_tree._len; l++ ) {
DTree.UndecidedNode udn = _tree.undecided(l);
DHistogram hs[] = _hcs[l-_leaf];
int sCols[] = udn._scoreCols;
if( sCols != null ) { // Sub-selecting just some columns?
for( int j=0; j<sCols.length; j++) // For tracked cols
hs[sCols[j]].init();
} else { // Else all columns
for( int j=0; j<_ncols; j++) // For all columns
if( hs[j] != null ) // Tracking this column?
hs[j].init();
}
}
}
@Override public void map( Chunk[] chks ) {
assert chks.length==_ncols+4;
final Chunk tree = chks[_ncols+1];
final Chunk wrks = chks[_ncols+2];
final Chunk nids = chks[_ncols+3];
// Pass 1: Score a prior partially-built tree model, and make new Node
// assignments to every row. This involves pulling out the current
// assigned DecidedNode, "scoring" the row against that Node's decision
// criteria, and assigning the row to a new child UndecidedNode (and
// giving it an improved prediction).
int nnids[] = new int[nids._len];
if( _leaf > 0) // Prior pass exists?
score_decide(chks,nids,wrks,tree,nnids);
else // Just flag all the NA rows
for( int row=0; row<nids._len; row++ )
if( isDecidedRow((int)nids.at0(row)) ) nnids[row] = -1;
// Pass 2: accumulate all rows, cols into histograms
if( _subset ) accum_subset(chks,nids,wrks,nnids);
else accum_all (chks, wrks,nnids);
}
@Override public void reduce( ScoreBuildHistogram sbh ) {
// Merge histograms
if( sbh._hcs == _hcs ) return; // Local histograms all shared; free to merge
// Distributed histograms need a little work
for( int i=0; i<_hcs.length; i++ ) {
DHistogram hs1[] = _hcs[i], hs2[] = sbh._hcs[i];
if( hs1 == null ) _hcs[i] = hs2;
else if( hs2 != null )
for( int j=0; j<hs1.length; j++ )
if( hs1[j] == null ) hs1[j] = hs2[j];
else if( hs2[j] != null )
hs1[j].add(hs2[j]);
}
}
// Pass 1: Score a prior partially-built tree model, and make new Node
// assignments to every row. This involves pulling out the current
// assigned DecidedNode, "scoring" the row against that Node's decision
// criteria, and assigning the row to a new child UndecidedNode (and
// giving it an improved prediction).
private void score_decide(Chunk chks[], Chunk nids, Chunk wrks, Chunk tree, int nnids[]) {
for( int row=0; row<nids._len; row++ ) { // Over all rows
int nid = (int)nids.at80(row); // Get Node to decide from
if( isDecidedRow(nid)) { // already done
nnids[row] = (nid-_leaf);
continue;
}
// Score row against current decisions & assign new split
boolean oob = isOOBRow(nid);
if( oob ) nid = oob2Nid(nid); // sampled away - we track the position in the tree
DTree.DecidedNode dn = _tree.decided(nid);
if (dn._split._col == -1 && DTree.isRootNode(dn)) { nnids[row] = (nid-_leaf); continue; }
if( dn._split._col == -1 ) { // Might have a leftover non-split
nid = dn._pid; // Use the parent split decision then
int xnid = oob ? nid2Oob(nid) : nid;
nids.set0(row, xnid);
nnids[row] = xnid-_leaf;
dn = _tree.decided(nid); // Parent steers us
}
assert !isDecidedRow(nid);
nid = dn.ns(chks,row); // Move down the tree 1 level
if( !isDecidedRow(nid) ) {
int xnid = oob ? nid2Oob(nid) : nid;
nids.set0(row, xnid);
nnids[row] = xnid-_leaf;
} else {
nnids[row] = nid-_leaf;
}
}
}
// All rows, some cols, accumulate histograms
private void accum_subset(Chunk chks[], Chunk nids, Chunk wrks, int nnids[]) {
for( int row=0; row<nnids.length; row++ ) { // Over all rows
int nid = nnids[row]; // Get Node to decide from
        if( nid >= 0 ) { // rows with nid < 0 already predict perfectly or were sampled away (OOB); skip them
assert !Double.isNaN(wrks.at0(row)); // Already marked as sampled-away
DHistogram nhs[] = _hcs[nid];
int sCols[] = _tree.undecided(nid+_leaf)._scoreCols; // Columns to score (null, or a list of selected cols)
for( int j=0; j<sCols.length; j++) { // For tracked cols
final int c = sCols[j];
nhs[c].incr((float)chks[c].at0(row),wrks.at0(row)); // Histogram row/col
}
}
}
}
// All rows, all cols, accumulate histograms. This is the hot hot inner
// loop of GBM, so we do some non-standard optimizations. The rows in this
  // chunk are spread out amongst a modest set of NodeIDs/splits.  Visiting the
  // rows in row-order would touch the NIDs in effectively random order, and the
  // naive hot path updates the histograms racily (via
// atomic updates) - once-per-row. This optimized version updates the
// histograms once-per-NID, but requires pre-sorting the rows by NID.
private void accum_all(Chunk chks[], Chunk wrks, int nnids[]) {
final DHistogram hcs[][] = _hcs;
// Sort the rows by NID, so we visit all the same NIDs in a row
// Find the count of unique NIDs in this chunk
int nh[] = new int[hcs.length+1];
for( int i : nnids ) if( i >= 0 ) nh[i+1]++;
// Rollup the histogram of rows-per-NID in this chunk
for( int i=0; i<hcs.length; i++ ) nh[i+1] += nh[i];
// Splat the rows into NID-groups
int rows[] = new int[nnids.length];
for( int row=0; row<nnids.length; row++ )
if( nnids[row] >= 0 )
rows[nh[nnids[row]]++] = row;
// rows[] has Chunk-local ROW-numbers now, in-order, grouped by NID.
// nh[] lists the start of each new NID, and is indexed by NID+1.
accum_all2(chks,wrks,nh,rows);
}
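    // Illustrative sketch (not part of the original code; the helper name groupRowsByNid is
    // hypothetical and unused): the grouping done inline by accum_all above is a counting
    // sort keyed by NID. Negative ids (decided/OOB rows) are skipped; nh[] is first built as
    // per-NID counts, turned into prefix sums (group start offsets), then consumed while
    // splatting rows[] into NID-contiguous order.
    private int[] groupRowsByNid(int nnids[], int numNids) {
      int nh[] = new int[numNids+1];
      for( int i : nnids ) if( i >= 0 ) nh[i+1]++;        // count rows per NID
      for( int i=0; i<numNids; i++ ) nh[i+1] += nh[i];    // prefix sum -> group start offsets
      int rows[] = new int[nnids.length];
      for( int row=0; row<nnids.length; row++ )
        if( nnids[row] >= 0 )
          rows[nh[nnids[row]]++] = row;                   // splat each row into its NID group
      return rows;
    }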
// For all columns, for all NIDs, for all ROWS...
private void accum_all2(Chunk chks[], Chunk wrks, int nh[], int[] rows) {
final DHistogram hcs[][] = _hcs;
// Local temp arrays, no atomic updates.
int bins[] = new int [nbins];
double sums[] = new double[nbins];
double ssqs[] = new double[nbins];
// For All Columns
for( int c=0; c<_ncols; c++) { // for all columns
Chunk chk = chks[c];
// For All NIDs
for( int n=0; n<hcs.length; n++ ) {
final DRealHistogram rh = ((DRealHistogram)hcs[n][c]);
if( rh==null ) continue; // Ignore untracked columns in this split
final int lo = n==0 ? 0 : nh[n-1];
final int hi = nh[n];
float min = rh._min2;
float max = rh._maxIn;
        // While most of the time we are limited to nbins, we allow more bins
        // in a few cases: top-level splits have few total bins across all
        // the (few) splits, so it's safe to bin more; also categoricals want
        // to split one bin per level, no matter how many levels.
if( rh._bins.length >= bins.length ) { // Grow bins if needed
bins = new int [rh._bins.length];
sums = new double[rh._bins.length];
ssqs = new double[rh._bins.length];
}
// Gather all the data for this set of rows, for 1 column and 1 split/NID
// Gather min/max, sums and sum-squares.
for( int xrow=lo; xrow<hi; xrow++ ) {
int row = rows[xrow];
float col_data = (float)chk.at0(row);
if( col_data < min ) min = col_data;
if( col_data > max ) max = col_data;
int b = rh.bin(col_data); // Compute bin# via linear interpolation
bins[b]++; // Bump count in bin
double resp = wrks.at0(row);
sums[b] += resp;
ssqs[b] += resp*resp;
}
// Add all the data into the Histogram (atomically add)
rh.setMin(min); // Track actual lower/upper bound per-bin
rh.setMax(max);
for( int b=0; b<rh._bins.length; b++ ) { // Bump counts in bins
if( bins[b] != 0 ) { Utils.AtomicIntArray.add(rh._bins,b,bins[b]); bins[b]=0; }
if( ssqs[b] != 0 ) { rh.incr1(b,sums[b],ssqs[b]); sums[b]=ssqs[b]=0; }
}
}
}
}
}
// --------------------------------------------------------------------------
// Build an entire layer of all K trees
protected DHistogram[][][] buildLayer(final Frame fr, final DTree ktrees[], final int leafs[], final DHistogram hcs[][][], boolean subset, boolean build_tree_one_node) {
// Build K trees, one per class.
// Build up the next-generation tree splits from the current histograms.
// Nearly all leaves will split one more level. This loop nest is
// O( #active_splits * #bins * #ncols )
// but is NOT over all the data.
H2OCountedCompleter sb1ts[] = new H2OCountedCompleter[_nclass];
Vec vecs[] = fr.vecs();
for( int k=0; k<_nclass; k++ ) {
final DTree tree = ktrees[k]; // Tree for class K
if( tree == null ) continue;
// Build a frame with just a single tree (& work & nid) columns, so the
// nested MRTask2 ScoreBuildHistogram in ScoreBuildOneTree does not try
// to close other tree's Vecs when run in parallel.
Frame fr2 = new Frame(Arrays.copyOf(fr._names,_ncols+1), Arrays.copyOf(vecs,_ncols+1));
fr2.add(fr._names[_ncols+1+k],vecs[_ncols+1+k]);
fr2.add(fr._names[_ncols+1+_nclass+k],vecs[_ncols+1+_nclass+k]);
fr2.add(fr._names[_ncols+1+_nclass+_nclass+k],vecs[_ncols+1+_nclass+_nclass+k]);
// Start building one of the K trees in parallel
H2O.submitTask(sb1ts[k] = new ScoreBuildOneTree(k,tree,leafs,hcs,fr2, subset, build_tree_one_node));
}
// Block for all K trees to complete.
boolean did_split=false;
for( int k=0; k<_nclass; k++ ) {
final DTree tree = ktrees[k]; // Tree for class K
if( tree == null ) continue;
sb1ts[k].join();
if( ((ScoreBuildOneTree)sb1ts[k])._did_split ) did_split=true;
}
// The layer is done.
return did_split ? hcs : null;
}
private class ScoreBuildOneTree extends H2OCountedCompleter {
final int _k; // The tree
final DTree _tree;
final int _leafs[/*nclass*/];
final DHistogram _hcs[/*nclass*/][][];
final Frame _fr2;
final boolean _build_tree_one_node;
final boolean _subset; // True if working a subset of cols
boolean _did_split;
ScoreBuildOneTree( int k, DTree tree, int leafs[], DHistogram hcs[][][], Frame fr2, boolean subset, boolean build_tree_one_node ) {
_k = k;
_tree = tree;
_leafs= leafs;
_hcs = hcs;
_fr2 = fr2;
_subset = subset;
_build_tree_one_node = build_tree_one_node;
}
@Override public void compute2() {
// Fuse 2 conceptual passes into one:
// Pass 1: Score a prior DHistogram, and make new Node assignments
// to every row. This involves pulling out the current assigned Node,
// "scoring" the row against that Node's decision criteria, and assigning
// the row to a new child Node (and giving it an improved prediction).
// Pass 2: Build new summary DHistograms on the new child Nodes every row
// got assigned into. Collect counts, mean, variance, min, max per bin,
// per column.
new ScoreBuildHistogram(this,_k,_tree,_leafs[_k],_hcs[_k],_subset).dfork(0,_fr2,_build_tree_one_node);
}
@Override public void onCompletion(CountedCompleter caller) {
ScoreBuildHistogram sbh = (ScoreBuildHistogram)caller;
//System.out.println(sbh.profString());
final int leafk = _leafs[_k];
int tmax = _tree.len(); // Number of total splits in tree K
for( int leaf=leafk; leaf<tmax; leaf++ ) { // Visit all the new splits (leaves)
DTree.UndecidedNode udn = _tree.undecided(leaf);
//System.out.println((_nclass==1?"Regression":("Class "+_fr2.vecs()[_ncols]._domain[_k]))+",\n Undecided node:"+udn);
// Replace the Undecided with the Split decision
DTree.DecidedNode dn = makeDecided(udn,sbh._hcs[leaf-leafk]);
//System.out.println("--> Decided node: " + dn +
// " > Split: " + dn._split + " L/R:" + dn._split.rowsLeft()+" + "+dn._split.rowsRight());
if( dn._split.col() == -1 ) udn.do_not_split();
else _did_split = true;
}
_leafs[_k]=tmax; // Setup leafs for next tree level
int new_leafs = _tree.len()-tmax;
_hcs[_k] = new DHistogram[new_leafs][/*ncol*/];
for( int nl = tmax; nl<_tree.len(); nl ++ )
_hcs[_k][nl-tmax] = _tree.undecided(nl)._hs;
if (new_leafs>0) _tree.depth++; // Next layer done but update tree depth only if new leaves are generated
}
}
// Builder-specific decision node
protected abstract DTree.DecidedNode makeDecided( DTree.UndecidedNode udn, DHistogram hs[] );
// --------------------------------------------------------------------------
// Read the 'tree' columns, do model-specific math and put the results in the
// fs[] array, and return the sum. Dividing any fs[] element by the sum
// turns the results into a probability distribution.
protected abstract float score1( Chunk chks[], float fs[/*nclass*/], int row );
// Call builder specific score code and then correct probabilities
// if it is necessary.
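  // (The correction presumably rescales each class probability by priorClassDist[i]/modelClassDist[i]
  // and renormalizes, undoing the over/under-sampling applied when balance_classes is on;
  // see ModelUtils.correctProbabilities for the exact formula.)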
private float score2(Chunk chks[], float fs[/*nclass*/], int row ) {
float sum = score1(chks, fs, row);
if (/*false &&*/ classification && _priorClassDist!=null && _modelClassDist!=null && !Float.isInfinite(sum) && sum>0f) {
Utils.div(fs, sum);
ModelUtils.correctProbabilities(fs, _priorClassDist, _modelClassDist);
sum = 1.0f;
}
return sum;
}
// Score the *tree* columns, and produce a confusion matrix
public class Score extends MRTask2<Score> {
/* @OUT */ long _cm[/*actual*/][/*predicted*/]; // Confusion matrix
/* @OUT */ double _sum; // Sum-squared-error
/* @OUT */ long _snrows; // Count of voted-on rows
/* @OUT */ long _cms[/*threshold*/][/*actual*/][/*predicted*/]; // Compute CM per threshold for binary classifiers
/* @IN */ boolean _oob;
/* @IN */ boolean _validation;
/* @IN */ int _cmlen;
/* @IN */ boolean _cavr; // true if validation response needs to be adapted to CM domain
public double sum() { return _sum; }
public long[][] cm () { return _cm; }
public long nrows() { return _snrows; }
public double mse() { return sum() / nrows(); }
/**
* Compute CM and MSE on either the training or testing dataset.
*
     * It expects an already adapted validation dataset, i.e., one adapted to the model
     * and containing a response adapted to the confusion matrix domain.
*
* @param model a model which is used to perform computation
* @param fr a model training frame
* @param vadaptor an adaptor which helps to adapt model/validation response to confusion matrix domain.
* @param oob perform out-of-bag validation on training frame
* @param build_tree_one_node
* @return this score object
*/
public Score doIt(Model model, Frame fr, Response2CMAdaptor vadaptor, boolean oob, boolean build_tree_one_node) {
assert !oob || vadaptor.getValidation()==null : "Validation frame cannot be specified if oob validation is demanded!"; // oob => validation==null
      assert _nclass == 1 || vadaptor.getCMDomain() != null : "CM domain has to be configured for classification!";
_cmlen = _nclass > 1 ? vadaptor.getCMDomain().length : 1;
_oob = oob;
// Validation frame adapted to a model
Frame adaptedValidation = vadaptor.getValidation();
// No validation frame is specified, so perform computation on training data
if( adaptedValidation == null ) return doAll(fr, build_tree_one_node);
_validation = true;
_cavr = false;
      // Validation: need to score the set, getting a probability distribution for each class.
      // For classification the scored frame has the predicted class in column 0 followed by
      // nclass per-class probability columns; for regression it has a single prediction column.
      Frame res = model.score(adaptedValidation, false); // For classification: predicted values (~ values in res[0]) are in interval 0..domain().length-1, for regression just a single column.
Frame adapValidation = new Frame(adaptedValidation); // adapted validation dataset
// All columns including response of validation frame are already adapted to model
if (_nclass>1) { // Only for Classification
for( int i=0; i<_nclass; i++ ) // Distribution of response classes
adapValidation.add("ClassDist"+i,res.vecs()[i+1]);
if (vadaptor.needsAdaptation2CM()) {
Vec ar = vadaptor.adaptModelResponse2CM(res.vecs()[0]); // perform transformation of model results to be consistent with expected confusion matrix domain
adapValidation.add("Prediction", ar); // add as a prediction
adapValidation.add("ActualValidationResponse", vadaptor.getAdaptedValidationResponse2CM());
_cavr = true; // signal that we have two predictions vectors in the frame.
res.add("__dummyx__", ar); // add the vector to clean up list
} else
adapValidation.add("Prediction",res.vecs()[0]); // Predicted values
} else { // Regression
adapValidation.add("Prediction",res.vecs()[0]);
}
// Compute a CM & MSE
try {
doAll(adapValidation, build_tree_one_node);
} finally {
// Perform clean-up: remove temporary result
res.delete();
}
return this;
}
@Override public void map( Chunk chks[] ) {
Chunk ys = chk_resp(chks); // Response
Chunk ays = _cavr ? chks[_ncols+1+_nclass+1] : ys; // Remember adapted response
_cm = new long[_cmlen][_cmlen];
float fs[] = new float[_nclass+1]; // Array to hold prediction and distribution given by the model.
// For binary classifier allocate cms for individual thresholds
_cms = new long[ModelUtils.DEFAULT_THRESHOLDS.length][2][2];
// Score all Rows
for( int row=0; row<ys._len; row++ ) {
if( ays.isNA0(row) ) continue; // Ignore missing response vars only if it was actual NA
float sum;
if( _validation ) { // Passed in a class distribution from scoring
for( int i=0; i<_nclass; i++ )
fs[i+1] = (float)chk_tree(chks,i).at0(row); // Get the class distros
if (_nclass > 1 ) sum = 1.0f; // Sum of a distribution is 1.0 for classification
else sum = fs[1]; // Sum is the same as prediction for regression.
} else { // Passed in the model-specific columns
sum = score2(chks,fs,row);
}
float err; int yact=0; // actual response from dataset
int yact_orig = 0; // actual response from dataset before potential scaling
if (_oob && inBagRow(chks, row)) continue; // score only on out-of-bag rows
if( _nclass > 1 ) { // Classification
// Compute error
          if( sum == 0 ) { // This tree does not predict this row *at all*! In prediction we would make a random decision, but here we compute the error based on the number of classes
            yact = yact_orig = (int) ys.at80(row); // Pick the actual response, adapted to model values in [0, nclass-1]
            err = 1.0f-1.0f/_nclass;  // Then take ycls=0, i.e., uniform predictive power
} else {
            if (_cavr && ys.isNA0(row)) { // Adapted validation response: the adapted response has a value even though the original response is NA, so count it as an implicit misprediction
err = 1f;
} else { // No adaptation of validation response
              yact = yact_orig = (int) ys.at80(row); // Pick the actual response, adapted to model values in [0, nclass-1]
assert 0 <= yact && yact < _nclass : "weird ycls="+yact+", y="+ys.at0(row);
err = Float.isInfinite(sum)
? (Float.isInfinite(fs[yact+1]) ? 0f : 1f)
: 1.0f-fs[yact+1]/sum; // Error: distance from predicting ycls as 1.0
}
}
assert !Double.isNaN(err) : "fs[cls]="+fs[yact+1] + ", sum=" + sum;
// Overwrite response by adapted value to provide correct CM
if (_cavr) yact = (int) ays.at80(row);
} else { // Regression
err = (float)ys.at0(row) - sum;
}
_sum += err*err; // Squared error
assert !Double.isNaN(_sum);
// Pick highest prob for our prediction.
if (_nclass > 1) { // fill CM only for classification
if(_nclass == 2) { // Binomial classification -> compute AUC, draw ROC
float snd = _validation ? fs[2] : (!Float.isInfinite(sum) ? fs[2] / sum : Float.isInfinite(fs[2]) ? 1 : 0); // for validation dataset sum is always 1
for(int i = 0; i < ModelUtils.DEFAULT_THRESHOLDS.length; i++) {
int p = snd >= ModelUtils.DEFAULT_THRESHOLDS[i] ? 1 : 0; // Compute prediction based on threshold
_cms[i][yact_orig][p]++; // Increase matrix
}
}
int ypred = _validation ? (int) chks[_ncols+1+_nclass].at80(row) : getPrediction(fs, row);
_cm[yact][ypred]++; // actual v. predicted
}
_snrows++;
}
}
@Override public void reduce( Score t ) {
_sum += t._sum;
Utils.add(_cm,t._cm);
_snrows += t._snrows;
if (_cms!=null)
for (int i = 0; i < _cms.length; i++) Utils.add(_cms[i], t._cms[i]);
}
public Score report( Sys tag, int ntree, DTree[] trees ) {
assert !Double.isNaN(_sum);
Log.info(tag,"============================================================== ");
int lcnt=0;
if( trees!=null ) for( DTree t : trees ) if( t != null ) lcnt += t._len;
long err=_snrows;
for( int c=0; c<_nclass; c++ ) err -= _cm[c][c];
Log.info(tag,"Mean Squared Error is "+(_sum/_snrows)+", with "+ntree+"x"+_nclass+" trees (average of "+((float)lcnt/_nclass)+" nodes)");
if( _nclass > 1 )
Log.info(tag,"Total of "+err+" errors on "+_snrows+" rows, CM= "+Arrays.deepToString(_cm));
else
Log.info("Reported on "+_snrows+" rows.");
return this;
}
}
@Override public String speedDescription() { return "time/tree"; }
@Override public long speedValue() {
Value value = DKV.get(dest());
DTree.TreeModel m = value != null ? (DTree.TreeModel) value.get() : null;
long numTreesBuiltSoFar = m == null ? 0 : m.ntrees();
long sv = (numTreesBuiltSoFar <= 0) ? 0 : (runTimeMs() / numTreesBuiltSoFar);
return sv;
}
/** Returns a log tag for a particular model builder (e.g., DRF, GBM) */
protected abstract water.util.Log.Tag.Sys logTag();
/**
* Builds model
* @param initialModel initial model created by makeModel() method.
* @param trainFr training dataset which can contain additional temporary vectors prepared by buildModel() method.
* @param names names of columns in <code>trainFr</code> used for model training
* @param domains domains of columns in <code>trainFr</code> used for model training
* @param t_build timer to measure model building process
* @return resulting model
*/
protected abstract TM buildModel( TM initialModel, Frame trainFr, String names[], String domains[][], Timer t_build );
/**
   * Initialize the algorithm - e.g., allocate algorithm-specific data structures.
*
* @param initialModel
*/
protected abstract void initAlgo( TM initialModel);
/**
* Initialize working frame.
* @param initialModel initial model
* @param fr working frame which contains train data and additional columns prepared by this builder.
*/
protected abstract void initWorkFrame( TM initialModel, Frame fr);
protected abstract TM makeModel( Key outputKey, Key dataKey, Key testKey, int ntrees, String names[], String domains[][], String[] cmDomain, float[] priorClassDist, float[] classDist);
protected abstract TM makeModel( TM model, double err, ConfusionMatrix cm, VarImp varimp, AUCData validAUC);
protected abstract TM makeModel( TM model, DTree ktrees[], DTree.TreeModel.TreeStats tstats);
protected abstract TM updateModel( TM model, TM checkpoint, boolean overwriteCheckpoint);
protected AUCData makeAUC(ConfusionMatrix[] cms, float[] threshold) {
assert _nclass == 2;
return cms != null ? new AUC(cms, threshold, _cmDomain).data() : null;
}
protected boolean inBagRow(Chunk[] chks, int row) { return false; }
protected final boolean isClassification() { return _nclass > 1; }
static public final boolean isOOBRow(int nid) { return nid <= OUT_OF_BAG; }
static public final boolean isDecidedRow(int nid) { return nid == DECIDED_ROW; }
static public final int oob2Nid(int oobNid) { return -oobNid + OUT_OF_BAG; }
static public final int nid2Oob(int nid) { return -nid + OUT_OF_BAG; }
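  // Encoding example: nid2Oob maps node id n to -(n+2), e.g. 0 -> -2, 3 -> -5; oob2Nid inverts
  // the mapping (-5 -> 3). Hence any nid <= OUT_OF_BAG (-2) marks a sampled-out (OOB) row,
  // while DECIDED_ROW (-1) marks a row whose node is already decided.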
// Helper to unify use of M-T RNG
public static Random createRNG(long seed) {
return new MersenneTwisterRNG(new int[] { (int)(seed>>32L),(int)seed });
}
// helper for debugging
static protected void printGenerateTrees(DTree[] trees) {
for( int k=0; k<trees.length; k++ )
if( trees[k] != null )
System.out.println(trees[k].root().toString2(new StringBuilder(),0));
}
protected final void debugPrintTreeColumns(Frame fr) {
new MRTask2() {
@Override public void map(Chunk[] cs) {
for (int r=0; r<cs[0]._len; r++) {
System.err.print("Row "+ r +": ");
for (int i=0; i<_nclass; i++) {
Chunk c = chk_tree(cs, i);
System.err.print(c.at0(r));
System.err.print(',');
}
if (supportsBagging()) {
Chunk c = chk_oobt(cs);
System.err.print(c.at80(r)>0 ? ":OUT" : ":IN");
}
System.err.println();
}
}
}.doAll(fr);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/glm/GLM2.java
|
package hex.glm;
import dontweave.gson.JsonObject;
import hex.FrameTask.DataInfo;
import hex.FrameTask.DataInfo.TransformType;
import hex.GridSearch.GridSearchProgress;
import hex.glm.GLMModel.GLMXValidationTask;
import hex.glm.GLMModel.Submodel;
import hex.glm.GLMParams.Family;
import hex.glm.GLMParams.Link;
import hex.glm.GLMTask.GLMInterceptTask;
import hex.glm.GLMTask.GLMIterationTask;
import hex.glm.GLMTask.YMUTask;
import hex.glm.LSMSolver.ADMMSolver;
import jsr166y.CountedCompleter;
import water.*;
import water.H2O.H2OCallback;
import water.H2O.H2OCountedCompleter;
import water.H2O.H2OEmptyCompleter;
import water.api.DocGen;
import water.api.ParamImportance;
import water.api.RequestServer.API_VERSION;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.Log;
import water.util.ModelUtils;
import water.util.RString;
import water.util.Utils;
import java.text.DecimalFormat;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
public class GLM2 extends Job.ModelJobWithoutClassificationField {
public static final double LS_STEP = .9;
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
public static DocGen.FieldDoc[] DOC_FIELDS;
public static final String DOC_GET = "GLM2";
public final String _jobName;
// API input parameters BEGIN ------------------------------------------------------------
class colsNamesIdxFilter extends MultiVecSelect { public colsNamesIdxFilter() {super("source", MultiVecSelectType.NAMES_THEN_INDEXES); } }
@API(help="Column to be used as an offset, if you have one.", required=false, filter=responseFilter.class, json = true)
public Vec offset = null;
class responseFilter extends SpecialVecSelect { responseFilter() { super("source"); } }
public void setLambda(double l){ lambda = new double []{l};}
public void setTweediePower(double pwr){
tweedie_variance_power = pwr;
tweedie_link_power = 1 - tweedie_variance_power;
_glm = new GLMParams(family,tweedie_variance_power,link,tweedie_link_power);
}
double [] beta_start = null;
@API(help = "max-iterations", filter = Default.class, lmin=1, lmax=1000000, json=true, importance = ParamImportance.CRITICAL)
public int max_iter = 100;
transient public boolean _done = false;
@API(help = "Standardize numeric columns to have zero mean and unit variance.", filter = Default.class, json=true, importance = ParamImportance.CRITICAL)
protected boolean standardize = true;
@API(help = "Include has_intercept term in the model.", filter = Default.class, json=true, importance = ParamImportance.CRITICAL)
protected boolean has_intercept = true;
@API(help = "Restrict coefficients to be non-negative.", filter = Default.class, json=true, importance = ParamImportance.CRITICAL)
protected boolean non_negative = false;
@API(help="lower bounds for coefficients",filter=Default.class,hide=true)
protected Frame beta_constraints = null;
@API(help = "validation folds", filter = Default.class, lmin=0, lmax=100, json=true, importance = ParamImportance.CRITICAL)
protected int n_folds;
@API(help = "Family.", filter = Default.class, json=true, importance = ParamImportance.CRITICAL)
protected Family family = Family.gaussian;
@API(help = "", filter = Default.class, json=true, importance = ParamImportance.SECONDARY)
protected Link link = Link.family_default;
@API(help = "Tweedie variance power", filter = Default.class, json=true, importance = ParamImportance.SECONDARY)
protected double tweedie_variance_power;
@API(help = "distribution of regularization between L1 and L2.", filter = Default.class, json=true, importance = ParamImportance.SECONDARY)
protected double [] alpha = new double[]{0.5};
public final double DEFAULT_LAMBDA = 1e-5;
@API(help = "regularization strength", filter = Default.class, json=true, importance = ParamImportance.SECONDARY)
protected double [] lambda = new double[]{DEFAULT_LAMBDA};
private double _currentLambda = Double.POSITIVE_INFINITY;
@API(help = "beta_eps", filter = Default.class, json=true, importance = ParamImportance.SECONDARY)
protected double beta_epsilon = DEFAULT_BETA_EPS;
@API(help="use line search (slower speed, to be used if glm does not converge otherwise)",filter=Default.class, importance = ParamImportance.SECONDARY)
protected boolean higher_accuracy = false;
@API(help="By default, first factor level is skipped from the possible set of predictors. Set this flag if you want use all of the levels. Needs sufficient regularization to solve!",filter=Default.class, importance = ParamImportance.SECONDARY)
protected boolean use_all_factor_levels = false;
@API(help="use lambda search starting at lambda max, given lambda is then interpreted as lambda min",filter=Default.class, importance = ParamImportance.SECONDARY)
protected boolean lambda_search;
@API(help="use strong rules to filter out inactive columns",filter=Default.class, importance = ParamImportance.SECONDARY)
protected boolean strong_rules_enabled = true;
// intentionally not declared as API now
int sparseCoefThreshold = 1000; // if more than this number of predictors, result vector of coefficients will be stored sparse
@API(help="lambda_Search stop condition: stop training when model has more than than this number of predictors (or don't use this option if -1).",filter=Default.class, importance = ParamImportance.EXPERT)
protected int max_predictors = -1;
@API(help="number of lambdas to be used in a search",filter=Default.class, importance = ParamImportance.EXPERT)
protected int nlambdas = 100;
@API(help="min lambda used in lambda search, specified as a ratio of lambda_max",filter=Default.class, importance = ParamImportance.EXPERT)
protected double lambda_min_ratio = -1;
@API(help="prior probability for y==1. To be used only for logistic regression iff the data has been sampled and the mean of response does not reflect reality.",filter=Default.class, importance = ParamImportance.EXPERT)
protected double prior = -1; // -1 is magic value for default value which is mean(y) computed on the current dataset
private double _iceptAdjust; // adjustment due to the prior
public int MAX_ITERATIONS_PER_LAMBDA = 10;
/**
* Whether to compute variable importances for input features, based on the absolute
   * value of the coefficients. For safety this should only be done when
   * use_all_factor_levels is on, because an important factor level can be skipped
   * (and hence not appear) when it is off.
*/
@API(help = "Compute variable importances for input features. NOTE: If use_all_factor_levels is off the importance of the base level will NOT be shown.", filter = Default.class, json=true, importance = ParamImportance.SECONDARY)
public boolean variable_importances = false;
// API input parameters END ------------------------------------------------------------
// API output parameters BEGIN ------------------------------------------------------------
@API(help = "", json=true, importance = ParamImportance.SECONDARY)
private double [] _wgiven;
@API(help = "", json=true, importance = ParamImportance.SECONDARY)
private double _proximalPenalty;
@API(help = "", json=true, importance = ParamImportance.SECONDARY)
private double [] _beta;
@API(help = "", json=true, importance = ParamImportance.SECONDARY)
private boolean _runAllLambdas = true;
@API(help = "Tweedie link power", json=true, importance = ParamImportance.SECONDARY)
double tweedie_link_power;
@API(help = "lambda_value max", json=true, importance = ParamImportance.SECONDARY)
double lambda_max = Double.NaN;
double lambda_min = Double.NaN;
long _nobs = 0;
private double _nullDeviance;
public static int MAX_PREDICTORS = 7000;
// API output parameters END ------------------------------------------------------------
private static double GLM_GRAD_EPS = 1e-4; // done (converged) if subgrad < this value.
private boolean highAccuracy(){return higher_accuracy;}
public GLM2 setHighAccuracy(){
higher_accuracy = true;
return this;
}
private Key _progressKey;
private DataInfo _srcDinfo;
private int [] _activeCols;
private DataInfo _activeData;
public GLMParams _glm;
private boolean _grid;
  private double ADMM_GRAD_EPS = 1e-4; // default ADMM gradient eps
private static final double MIN_ADMM_GRAD_EPS = 1e-5; // min admm gradient eps
int _lambdaIdx = -1;
private double _addedL2;
private boolean _failedLineSearch;
public static final double DEFAULT_BETA_EPS = 5e-5;
private double _ymu;
private int _iter;
@Override protected void registered(API_VERSION ver) {
super.registered(ver);
Argument c = find("ignored_cols");
Argument r = find("offset");
int ci = _arguments.indexOf(c);
int ri = _arguments.indexOf(r);
_arguments.set(ri, c);
_arguments.set(ci, r);
((FrameKeyMultiVec) c).ignoreVec((FrameKeyVec)r);
}
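  // Penalized objective minimized by GLM2, as computed by objval() below (assuming l1pen() and
  // l2pen() fold in the current lambda and alpha, which are defined elsewhere in this class):
  //   obj(beta) = residual_deviance/nobs + 0.5 * l2pen() * ||beta||_2^2 + l1pen() * ||beta||_1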
private double objval(GLMIterationTask glmt){
return glmt._val.residual_deviance / glmt._nobs + 0.5 * l2pen() * l2norm(glmt._beta) + l1pen() * l1norm(glmt._beta);
}
private IterationInfo makeIterationInfo(int i, GLMIterationTask glmt, final int [] activeCols, double [] gradient){
IterationInfo ii = new IterationInfo(_iter, glmt,activeCols,gradient);
if(ii._glmt._grad == null)
ii._glmt._grad = contractVec(gradient,activeCols);
return ii;
}
private static class IterationInfo extends Iced {
final int _iter;
private double [] _fullGrad;
public double [] fullGrad(double alpha, double lambda){
if(_fullGrad == null)return null;
double [] res = _fullGrad.clone();
double l2 = (1-alpha)*lambda; // no 0.5 mul here since we're adding derivative of 0.5*|b|^2
if(_activeCols != null)
for(int i = 0; i < _glmt._beta.length-1; ++i)
res[_activeCols[i]] += _glmt._beta[i]*l2;
else for(int i = 0; i < _glmt._beta.length; ++i) {
res[i] += _glmt._beta[i]*l2;
}
return res;
}
private final GLMIterationTask _glmt;
final int [] _activeCols;
IterationInfo(int i, GLMIterationTask glmt, final int [] activeCols, double [] gradient){
_iter = i;
_glmt = glmt.clone();
assert _glmt._grad != null;
_activeCols = activeCols;
_fullGrad = gradient;
      // NOTE: _glmt._beta CAN BE NULL (unlikely but possible, if activeCols were empty)
assert _glmt._val != null:"missing validation";
}
}
private IterationInfo _lastResult;
@Override
public JsonObject toJSON() {
JsonObject jo = super.toJSON();
if (lambda == null) jo.addProperty("lambda_value", "automatic"); //better than not printing anything if lambda_value=null
return jo;
}
@Override public Key defaultDestKey(){
return null;
}
@Override public Key defaultJobKey() {return null;}
public GLM2() {_jobName = "";}
public static class Source {
public final Frame fr;
public final Vec response;
public final Vec offset;
public final boolean standardize;
public final boolean intercept;
public Source(Frame fr,Vec response, boolean standardize){ this(fr,response,standardize,true,null);}
public Source(Frame fr,Vec response, boolean standardize, boolean intercept){ this(fr,response,standardize,intercept,null);}
public Source(Frame fr,Vec response, boolean standardize, boolean intercept, Vec offset){
this.fr = fr;
this.response = response;
this.offset = offset;
this.standardize = standardize;
this.intercept = intercept;
}
}
public GLM2(String desc, Key jobKey, Key dest, Source src, Family family){
this(desc,jobKey,dest,src,family,Link.family_default);
}
public GLM2(String desc, Key jobKey, Key dest, Source src, Family family, Link l){
this(desc, jobKey, dest, src, family, l, 0, false);
}
public GLM2(String desc, Key jobKey, Key dest, Source src, Family family, Link l, int nfolds, boolean highAccuracy) {
job_key = jobKey;
description = desc;
destination_key = dest;
this.offset = src.offset;
this.has_intercept = src.intercept;
this.family = family;
this.link = l;
n_folds = nfolds;
source = src.fr;
this.response = src.response;
this.standardize = src.standardize;
_jobName = dest.toString() + ((nfolds > 1)?("[" + 0 + "]"):"");
higher_accuracy = highAccuracy;
}
public GLM2 doInit(){
init();
return this;
}
public GLM2 setNonNegative(boolean val){
non_negative = val;
return this;
}
public GLM2 setRegularization(double [] alpha, double [] lambda){
this.alpha = alpha;
this.lambda = lambda;
return this;
}
public GLM2 setBetaConstraints(Frame f){
beta_constraints = f;
return this;
}
static String arrayToString (double[] arr) {
if (arr == null) {
return "(null)";
}
StringBuffer sb = new StringBuffer();
for (int i = 0; i < arr.length; i++) {
if (i > 0) {
sb.append(", ");
}
sb.append(arr[i]);
}
return sb.toString();
}
public float [] thresholds = ModelUtils.DEFAULT_THRESHOLDS;
/** Return the query link to this page */
public static String link(Key k, String content) {
RString rs = new RString("<a href='GLM2.query?source=%$key'>%content</a>");
rs.replace("key", k.toString());
rs.replace("content", content);
return rs.toString();
}
public GLMGridSearch gridSearch(){
return new GLMGridSearch(4, this, destination_key).fork();
}
private transient AtomicBoolean _jobdone = new AtomicBoolean(false);
@Override public void cancel(String msg){
if(!_grid) {
source.unlock(self());
}
DKV.remove(_progressKey);
Value v = DKV.get(destination_key);
if(v != null){
GLMModel m = v.get();
Key [] xvals = m.xvalModels();
if(xvals != null)
for(Key k:xvals)
DKV.remove(k);
DKV.remove(destination_key);
}
DKV.remove(destination_key);
super.cancel(msg);
}
private boolean sorted(int [] ary){
for(int i = 0; i < ary.length-1; ++i)
if(ary[i+1] < ary[i])return false;
return true;
}
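  // Fixed-point iteration for the intercept when an offset column is present: start from
  // ymu minus the standardized offset mean, then repeatedly re-estimate the intercept via
  // GLMInterceptTask until the change drops below 1e-4 or 10 iterations are reached.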
private double computeIntercept(DataInfo dinfo, double ymu, Vec offset, Vec response){
double mul = 1, sub = 0;
int vecId = dinfo._adaptedFrame.find(offset);
if(dinfo._normMul != null)
mul = dinfo._normMul[vecId-dinfo._cats];
if(dinfo._normSub != null)
sub = dinfo._normSub[vecId-dinfo._cats];
double icpt = ymu - (offset.mean() - sub)*mul;
double icpt2 = new GLMInterceptTask(_glm,sub,mul,icpt).doAll(offset,response)._icpt;
double diff = icpt2 - icpt;
int iter = 0;
while((1e-4 < diff || diff < -1e-4) && ++iter <= 10){
icpt = icpt2;
icpt2 = new GLMInterceptTask(_glm,sub,mul,icpt).doAll(offset,response)._icpt;
diff = icpt2 - icpt;
}
return icpt;
}
private transient Frame source2; // adapted source with reordered (and removed) vecs we do not want to push back into KV
private int _noffsets = 0;
private int _intercept = 1; // 1 or 0
private double [] _lbs;
private double [] _ubs;
private double [] _bgs;
private double [] _rho;
boolean toEnum = false;
private double [] makeAry(int sz, double val){
double [] res = MemoryManager.malloc8d(sz);
Arrays.fill(res,val);
return res;
}
private double [] mapVec(double [] src, double [] tgt, int [] map){
for(int i = 0; i < src.length; ++i)
if(map[i] != -1) tgt[map[i]] = src[i];
return tgt;
}
@Override public void init(){
try {
super.init();
if (family == Family.gamma)
setHighAccuracy();
if (link == Link.family_default)
link = family.defaultLink;
_intercept = has_intercept ? 1 : 0;
tweedie_link_power = 1 - tweedie_variance_power;// TODO
if (tweedie_link_power == 0) link = Link.log;
_glm = new GLMParams(family, tweedie_variance_power, link, tweedie_link_power);
source2 = new Frame(source);
assert sorted(ignored_cols);
if (offset != null) {
if (offset.isEnum())
throw new IllegalArgumentException("Categorical offsets are not supported. Can not use column '" + source2.names()[source2.find(offset)] + "' as offset");
int id = source.find(offset);
int idx = Arrays.binarySearch(ignored_cols, id);
if (idx >= 0) Utils.remove(ignored_cols, idx);
String name = source2.names()[id];
source2.add(name, source2.remove(id));
_noffsets = 1;
}
if (nlambdas == -1)
nlambdas = 100;
if (lambda_search && lambda.length > 1)
throw new IllegalArgumentException("Can not supply both lambda_search and multiple lambdas. If lambda_search is on, GLM expects only one value of lambda_value, representing the lambda_value min (smallest lambda_value in the lambda_value search).");
// check the response
if (response.isEnum() && family != Family.binomial)
throw new IllegalArgumentException("Invalid response variable, trying to run regression with categorical response!");
switch (family) {
case poisson:
case tweedie:
if (response.min() < 0)
throw new IllegalArgumentException("Illegal response column for family='" + family + "', response must be >= 0.");
break;
case gamma:
if (response.min() <= 0)
throw new IllegalArgumentException("Invalid response for family='Gamma', response must be > 0!");
break;
case binomial:
if (response.min() < 0 || response.max() > 1)
throw new IllegalArgumentException("Illegal response column for family='Binomial', response must in <0,1> range!");
break;
default:
//pass
}
toEnum = family == Family.binomial && (!response.isEnum() && (response.min() < 0 || response.max() > 1));
Frame fr = DataInfo.prepareFrame(source2, response, ignored_cols, toEnum, true, true);
TransformType dt = TransformType.NONE;
if (standardize)
dt = has_intercept ? TransformType.STANDARDIZE : TransformType.DESCALE;
_srcDinfo = new DataInfo(fr, 1, has_intercept, use_all_factor_levels || lambda_search, dt, DataInfo.TransformType.NONE);
if (!has_intercept && _srcDinfo._cats > 0)
throw new IllegalArgumentException("Models with no intercept are only supported with all-numeric predictors.");
_activeData = _srcDinfo;
if (higher_accuracy) setHighAccuracy();
if (beta_constraints != null) {
Vec v;
v = beta_constraints.vec("names");
// for now only enums allowed here
String [] dom = v.domain();
String [] names = _srcDinfo.coefNames();
int [] map = Utils.asInts(v);
if(!Arrays.deepEquals(dom,names)) { // need mapping
HashMap<String,Integer> m = new HashMap<String, Integer>();
for(int i = 0; i < names.length; ++i)
m.put(names[i],i);
int [] newMap = MemoryManager.malloc4(dom.length);
for(int i = 0; i < dom.length; ++i) {
Integer I = m.get(dom[map[i]]);
newMap[i] = I == null?-1:I;
}
map = newMap;
}
if((v = beta_constraints.vec("lower_bounds")) != null) {
_lbs = map == null ? Utils.asDoubles(v) : mapVec(Utils.asDoubles(v), makeAry(names.length, Double.NEGATIVE_INFINITY), map);
System.out.println("lower bounds = " + Arrays.toString(_lbs));
for(int i = 0; i < _lbs.length; ++i) {
if(_lbs[i] > 0) throw new IllegalArgumentException("lower bounds must be non-positive");
if(_srcDinfo._normMul != null)
_lbs[i] /= _srcDinfo._normMul[i];
}
}
if((v = beta_constraints.vec("upper_bounds")) != null) {
_ubs = map == null ? Utils.asDoubles(v) : mapVec(Utils.asDoubles(v), makeAry(names.length, Double.POSITIVE_INFINITY), map);
System.out.println("upper bounds = " + Arrays.toString(_ubs));
for(int i = 0; i < _ubs.length; ++i) {
            if (_ubs[i] < 0) throw new IllegalArgumentException("upper bounds must be non-negative");
if (_srcDinfo._normMul != null)
_ubs[i] /= _srcDinfo._normMul[i];
}
} if((v = beta_constraints.vec("beta_given")) != null)
_bgs = map == null?Utils.asDoubles(v):mapVec(Utils.asDoubles(v),makeAry(names.length,0),map);
if((v = beta_constraints.vec("rho")) != null)
_rho = map == null?Utils.asDoubles(v):mapVec(Utils.asDoubles(v),makeAry(names.length,0),map);
}
      if (non_negative) { // make sure lb is >= 0
if (_lbs == null)
_lbs = new double[_srcDinfo.fullN()];
for (int i = 0; i < _lbs.length; ++i)
if (_lbs[i] < 0)
_lbs[i] = 0;
}
} catch(RuntimeException e) {
cleanup();
throw e;
}
}
@Override protected void cleanup(){
super.cleanup();
if(toEnum && _srcDinfo != null){
Futures fs = new Futures();
_srcDinfo._adaptedFrame.lastVec().remove(fs);
fs.blockForPending();
}
}
@Override protected boolean filterNaCols(){return true;}
@Override protected Response serve() {
try {
init();
if (alpha.length > 1) { // grid search
if (destination_key == null) destination_key = Key.make("GLMGridResults_" + Key.make());
if (job_key == null) job_key = Key.make((byte) 0, Key.JOB, H2O.SELF);
GLMGridSearch j = gridSearch();
_fjtask = j._fjtask;
assert _fjtask != null;
return GLMGridView.redirect(this, j.dest());
} else {
if (destination_key == null) destination_key = Key.make("GLMModel_" + Key.make());
if (job_key == null) job_key = Key.make("GLM2Job_" + Key.make());
fork();
assert _fjtask != null;
return GLMProgress.redirect(this, job_key, dest());
}
}catch(Throwable ex){
return Response.error(ex.getMessage());
}
}
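  // Returns the L-infinity norm of b1 - b2, i.e. the largest absolute per-component difference
  // (presumably compared against beta_epsilon elsewhere to decide convergence).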
private static double beta_diff(double[] b1, double[] b2) {
if(b1 == null || b1.length == 0)return Double.MAX_VALUE;
double res = b1[0] >= b2[0]?b1[0] - b2[0]:b2[0] - b1[0];
for( int i = 1; i < b1.length; ++i ) {
double diff = b1[i] - b2[i];
if(diff > res)
res = diff;
else if( -diff > res)
res = -diff;
}
return res;
}
//private static double beta_diff(double[] b1, double[] b2) {
// double res = 0;
// for(int i = 0; i < b1.length; ++i)
// res += (b1[i]-b2[i])*(b1[i]-b2[i]);
// return res;
//}
private static class GLM2_Progress extends Iced{
final long _total;
double _done;
public GLM2_Progress(int total){_total = total;
assert _total > 0:"total = " + _total;
}
public float progess(){
return 0.01f*((int)(100*_done/(double)_total));
}
}
private static class GLM2_ProgressUpdate extends TAtomic<GLM2_Progress> {
final int _i;
public GLM2_ProgressUpdate(){_i = 1;}
public GLM2_ProgressUpdate(int i){_i = i;}
@Override
public GLM2_Progress atomic(GLM2_Progress old) {
if(old == null)return old;
old._done += _i;
return old;
}
}
@Override public float progress(){
if(isDone())return 1.0f;
Value v = DKV.get(_progressKey);
if(v == null)return 0;
float res = v.<GLM2_Progress>get().progess();
if(res > 1f)
res = 1f;
return res;
}
protected double l2norm(double[] beta){
if(_beta == null)return 0;
double l2 = 0;
for (double aBeta : beta) l2 += aBeta * aBeta;
return l2;
}
protected double l1norm(double[] beta){
if(_beta == null)return 0;
double l2 = 0;
for (double aBeta : beta) l2 += Math.abs(aBeta);
return l2;
}
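  // Helpers for moving between the full coefficient space and the "active columns" subspace
  // (the predictors kept by the strong-rules filter): expandVec scatters an active-columns
  // beta back into a full-length vector (intercept kept in the last slot), contractVec
  // gathers the active entries out of a full vector, and resizeVec re-maps a beta from one
  // active set to another by expanding and re-contracting.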
private final double [] expandVec(double [] beta, final int [] activeCols){
assert beta != null;
if (activeCols == null)
return beta;
double[] res = MemoryManager.malloc8d(_srcDinfo.fullN() + _intercept -_noffsets);
int i = 0;
for (int c = 0; c < activeCols.length-_noffsets; ++c)
res[_activeCols[c]] = beta[i++];
if(_intercept == 1)
res[res.length - 1] = beta[beta.length - 1];
for(int j = beta.length-_noffsets; j < beta.length-1; ++j)
beta[j] = 1;
return res;
}
private final double [] contractVec(double [] beta, final int [] activeCols){ return contractVec(beta,activeCols,_intercept);}
private final double [] contractVec(double [] beta, final int [] activeCols, int intercept){
if(beta == null)return null;
if(activeCols == null)return beta.clone();
final int N = activeCols.length - _noffsets;
double [] res = MemoryManager.malloc8d(N+intercept);
for(int i = 0; i < N; ++i)
res[i] = beta[activeCols[i]];
if(intercept == 1)
res[res.length-1] = beta[beta.length-1];
return res;
}
private final double [] resizeVec(double[] beta, final int[] activeCols, final int[] oldActiveCols){
if(beta == null || Arrays.equals(activeCols,oldActiveCols))return beta;
double [] full = expandVec(beta, oldActiveCols);
if(activeCols == null)return full;
return contractVec(full,activeCols,_intercept);
}
// protected boolean needLineSearch(final double [] beta,double objval, double step){
protected boolean needLineSearch(final GLMIterationTask glmt) {
if(_glm.family == Family.gaussian)
return false;
if(glmt._beta == null)
return false;
if (Utils.hasNaNsOrInfs(glmt._xy) || (glmt._grad != null && Utils.hasNaNsOrInfs(glmt._grad)) || (glmt._gram != null && glmt._gram.hasNaNsOrInfs())) {
return true;
}
if(glmt._val != null && Double.isNaN(glmt._val.residualDeviance())){
return true;
}
if(glmt._val == null) // no validation info, no way to decide
return false;
final double [] grad = Arrays.equals(_activeCols,_lastResult._activeCols)
?_lastResult._glmt.gradient(alpha[0],_currentLambda)
:contractVec(_lastResult.fullGrad(alpha[0],_currentLambda),_activeCols);
return needLineSearch(1, objval(_lastResult._glmt),objval(glmt),diff(glmt._beta,_lastResult._glmt._beta),grad);
}
private static double [] diff(double [] x, double [] y){
if(y == null)return x.clone();
double [] res = MemoryManager.malloc8d(x.length);
for(int i = 0; i < x.length; ++i)
res[i] = x[i] - y[i];
return res;
}
public static final double c1 = 1e-2;
// protected boolean needLineSearch(final double [] beta,double objval, double step){
// Armijo line-search rule enhanced with generalized gradient to handle l1 pen
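  // As implemented below (note l2norm() returns the *squared* L2 norm): with step t, update
  // direction pk = beta_new - beta_old and old gradient g, a line search is requested when
  //   objNew > objOld + t * <g, pk> + (1/(2t)) * ||pk||^2
  // i.e. when the new objective fails this sufficient-decrease test on the composite objective.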
protected final boolean needLineSearch(double step, final double objOld, final double objNew, final double [] pk, final double [] gradOld){
// line search
double f_hat = 0;
for(int i = 0; i < pk.length; ++i)
f_hat += gradOld[i] * pk[i];
f_hat = step*f_hat + objOld;
return objNew > (f_hat + 1/(2*step)*l2norm(pk));
}
private class LineSearchIteration extends H2OCallback<GLMTask.GLMLineSearchTask> {
final GLMIterationTask _glmt;
LineSearchIteration(GLMIterationTask glmt, CountedCompleter cmp){super((H2OCountedCompleter)cmp); cmp.addToPendingCount(1); _glmt = glmt;}
@Override public void callback(final GLMTask.GLMLineSearchTask glmt) {
assert getCompleter().getPendingCount() >= 1:"unexpected pending count, expected 1, got " + getCompleter().getPendingCount();
double step = LS_STEP;
for(int i = 0; i < glmt._glmts.length; ++i){
if(!needLineSearch(glmt._glmts[i]) || (i == glmt._glmts.length-1 && objval(glmt._glmts[i]) < objval(_lastResult._glmt))){
LogInfo("line search: found admissible step = " + step + ", objval = " + objval(glmt._glmts[i]));
setHighAccuracy();
new GLMIterationTask(_noffsets,GLM2.this.self(),_activeData,_glm,true,true,true,glmt._glmts[i]._beta,_ymu,1.0/_nobs,thresholds, new Iteration(getCompleter(),false,false)).asyncExec(_activeData._adaptedFrame);
return;
}
step *= LS_STEP;
}
LogInfo("line search: did not find admissible step, smallest step = " + step + ", objval = " + objval(glmt._glmts[glmt._glmts.length-1]) + ", old objval = " + objval(_lastResult._glmt));
// check if objval of smallest step is below the previous step, if so, go on
LogInfo("Line search did not find feasible step, converged.");
_failedLineSearch = true;
GLMIterationTask res = highAccuracy()?_lastResult._glmt:_glmt;
if(_activeCols != _lastResult._activeCols && !Arrays.equals(_activeCols,_lastResult._activeCols)) {
_activeCols = _lastResult._activeCols;
_activeData = _srcDinfo.filterExpandedColumns(_activeCols);
}
checkKKTAndComplete(getCompleter(),res,res._beta,true);
}
}
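// checkGradient: convergence test on the generalized gradient. ADMMSolver.subgrad adjusts the
// plain gradient for the l1 penalty, and the largest absolute component of the result is
// reported; a value near zero means the candidate beta satisfies the first-order optimality
// conditions for the current alpha/lambda.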
protected double checkGradient(final double [] newBeta, final double [] grad){
// check the gradient
ADMMSolver.subgrad(alpha[0], _currentLambda, newBeta, grad);
double err = 0;
for(double d:grad)
if(d > err) err = d;
else if(d < -err) err = -d;
LogInfo("converged with max |subgradient| = " + err);
return err;
}
private String LogInfo(String msg){
msg = "GLM2[dest=" + dest() + ", iteration=" + _iter + ", lambda = " + _currentLambda + "]: " + msg;
Log.info(msg);
return msg;
}
private double [] setSubmodel(final double[] newBeta, GLMValidation val, H2OCountedCompleter cmp){
int intercept = (has_intercept?1:0);
double [] fullBeta = (_activeCols == null || newBeta == null)?newBeta:expandVec(newBeta,_activeCols);
if(val != null) val.null_deviance = _nullDeviance;
if(_noffsets > 0){
fullBeta = Arrays.copyOf(fullBeta,fullBeta.length + _noffsets);
if(has_intercept)
fullBeta[fullBeta.length-1] = fullBeta[fullBeta.length-intercept-_noffsets];
for(int i = fullBeta.length-intercept-_noffsets; i < fullBeta.length-intercept; ++i)
fullBeta[i] = 1;//_srcDinfo.applyTransform(i,1);
}
final double [] newBetaDeNorm;
final int numoff = _srcDinfo.numStart();
if(_srcDinfo._predictor_transform == DataInfo.TransformType.STANDARDIZE) {
assert has_intercept;
newBetaDeNorm = fullBeta.clone();
double norm = 0.0; // Reverse any normalization on the intercept
// denormalize only the numeric coefs (categoricals are not normalized)
for( int i=numoff; i< fullBeta.length-intercept; i++ ) {
double b = newBetaDeNorm[i]* _srcDinfo._normMul[i-numoff];
norm += b* _srcDinfo._normSub[i-numoff]; // Also accumulate the intercept adjustment
newBetaDeNorm[i] = b;
}
if(has_intercept)
newBetaDeNorm[newBetaDeNorm.length-1] -= norm;
} else if (_srcDinfo._predictor_transform == TransformType.DESCALE) {
assert !has_intercept;
newBetaDeNorm = fullBeta.clone();
for( int i=numoff; i< fullBeta.length; i++ )
newBetaDeNorm[i] *= _srcDinfo._normMul[i-numoff];
} else
newBetaDeNorm = null;
GLMModel.setSubmodel(cmp, dest(), _currentLambda, newBetaDeNorm == null ? fullBeta : newBetaDeNorm, newBetaDeNorm == null ? null : fullBeta, _iter, System.currentTimeMillis() - start_time, _srcDinfo.fullN() >= sparseCoefThreshold, val);
return fullBeta;
}
private transient long _callbackStart = 0;
private transient double _rho_mul = 1.0;
private transient double _gradientEps = ADMM_GRAD_EPS;
private double [] lastBeta(int noffsets){
final double [] b;
if(_lastResult == null || _lastResult._glmt._beta == null) {
int bsz = _activeCols == null? _srcDinfo.fullN()+1-noffsets:_activeCols.length+1;
b = MemoryManager.malloc8d(bsz);
b[bsz-1] = _glm.linkInv(_ymu);
} else
b = resizeVec(_lastResult._glmt._beta, _activeCols, _lastResult._activeCols);
return b;
}
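// checkKKTAndComplete: after a lambda is (tentatively) solved on the active columns only,
// recompute the full gradient over all columns with the candidate beta, form the subgradient,
// and look for inactive columns whose subgradient exceeds grad_eps. Such columns were screened
// out incorrectly by the strong rules, so they are added back to _activeCols and another
// iteration is launched; otherwise the submodel for the current lambda is stored and the lambda
// is marked done.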
protected void checkKKTAndComplete(final CountedCompleter cc, final GLMIterationTask glmt, final double [] newBeta, final boolean failedLineSearch){
H2OCountedCompleter cmp = (H2OCountedCompleter)cc;
final double [] fullBeta = newBeta == null?MemoryManager.malloc8d(_srcDinfo.fullN()+_intercept-_noffsets):expandVec(newBeta,_activeCols);
// now we need full gradient (on all columns) using this beta
new GLMIterationTask(_noffsets,GLM2.this.self(), _srcDinfo,_glm,false,true,true,fullBeta,_ymu,1.0/_nobs,thresholds, new H2OCallback<GLMIterationTask>(cmp) {
@Override public String toString(){
return "checkKKTAndComplete.Callback, completer = " + getCompleter() == null?"null":getCompleter().toString();
}
@Override
public void callback(final GLMIterationTask glmt2) {
// first check KKT conditions!
final double [] grad = glmt2.gradient(alpha[0],_currentLambda);
if(Utils.hasNaNsOrInfs(grad)){
_failedLineSearch = true;
if(!failedLineSearch) {
getCompleter().addToPendingCount(1);
checkKKTAndComplete(cc,glmt,glmt._beta,true);
LogInfo("Check KKT got NaNs. Taking previous solution");
return;
} else {
// TODO: add a warning and break the lambda search? Or throw an Exception?
LogInfo("got NaNs/Infs in gradient at lambda " + _currentLambda);
}
}
glmt._val = glmt2._val;
_lastResult = makeIterationInfo(_iter,glmt2,null,glmt2.gradient(alpha[0],0));
// check the KKT conditions and filter data for next lambda_value
// check the gradient
double[] subgrad = grad.clone();
ADMMSolver.subgrad(alpha[0], _currentLambda, fullBeta, subgrad);
double grad_eps = GLM_GRAD_EPS;
if (!failedLineSearch &&_activeCols != null) {
for (int c = 0; c < _activeCols.length-_noffsets; ++c)
if (subgrad[_activeCols[c]] > grad_eps) grad_eps = subgrad[_activeCols[c]];
else if (subgrad[_activeCols[c]] < -grad_eps) grad_eps = -subgrad[_activeCols[c]];
int[] failedCols = new int[64];
int fcnt = 0;
for (int i = 0; i < grad.length - 1; ++i) {
if (Arrays.binarySearch(_activeCols, i) >= 0) continue;
if (subgrad[i] > grad_eps || -subgrad[i] > grad_eps) {
if (fcnt == failedCols.length)
failedCols = Arrays.copyOf(failedCols, failedCols.length << 1);
failedCols[fcnt++] = i;
}
}
if (fcnt > 0) {
final int n = _activeCols.length;
final int[] oldActiveCols = _activeCols;
_activeCols = Arrays.copyOf(_activeCols, _activeCols.length + fcnt);
for (int i = 0; i < fcnt; ++i)
_activeCols[n + i] = failedCols[i];
Arrays.sort(_activeCols);
LogInfo(fcnt + " variables failed KKT conditions check! Adding them to the model and continuing computation.(grad_eps = " + grad_eps + ", activeCols = " + (_activeCols.length > 100?"lost":Arrays.toString(_activeCols)));
_activeData = _srcDinfo.filterExpandedColumns(_activeCols);
// NOTE: tricky completer bookkeeping here.
// We expect a pending count of 0 in this method since it is the end point (it is actually racy and can
// briefly be 1, with the decrement from the original Iteration callback still outstanding, but the end
// result is 0). The Iteration launched below expects a pending count of 1, so we add 1 here: Iteration
// itself adds 1, but 1 is subtracted as soon as we leave this method, because we are inside a callback
// invoked from onCompletion (unlike at the start of a nextLambda call, which is not inside onCompletion).
getCompleter().addToPendingCount(1);
new GLMIterationTask(_noffsets,GLM2.this.self(), _activeData, _glm, true, true, true, resizeVec(newBeta, _activeCols, oldActiveCols), _ymu, glmt._reg, thresholds, new Iteration(getCompleter())).asyncExec(_activeData._adaptedFrame);
return;
}
}
int diff = MAX_ITERATIONS_PER_LAMBDA - _iter + _iter1;
if(diff > 0)
new GLM2_ProgressUpdate(diff).fork(_progressKey); // update progress
GLM2.this.setSubmodel(newBeta, glmt2._val,(H2OCountedCompleter)getCompleter().getCompleter());
_done = true;
LogInfo("computation of current lambda done in " + (System.currentTimeMillis() - GLM2.this.start_time) + "ms");
assert _lastResult._fullGrad != null;
}
}).asyncExec(_srcDinfo._adaptedFrame);
}
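// Iteration: one step of the iterative solver. The GLMIterationTask brings back the Gram matrix
// and X'y for the current beta; this callback first tests gradient convergence and the
// line-search condition, then solves the l1/l2-penalized least-squares subproblem with
// ADMMSolver to get newBeta, and finally either declares convergence (small beta change or
// max_iter reached, followed by the KKT check) or launches the next iteration with newBeta.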
private class Iteration extends H2OCallback<GLMIterationTask> {
public final long _iterationStartTime;
final boolean _countIteration;
final boolean _checkLineSearch;
public Iteration(CountedCompleter cmp){ this(cmp,true,true);}
public Iteration(CountedCompleter cmp, boolean countIteration,boolean checkLineSearch){
super((H2OCountedCompleter)cmp);
cmp.addToPendingCount(1);
_checkLineSearch = checkLineSearch;
_countIteration = countIteration;
_iterationStartTime = System.currentTimeMillis(); }
@Override public void callback(final GLMIterationTask glmt){
if( !isRunning(self()) ) throw new JobCancelledException();
assert _activeCols == null || glmt._beta == null || glmt._beta.length == (_activeCols.length+_intercept-glmt._noffsets):LogInfo("betalen = " + glmt._beta.length + ", activecols = " + _activeCols.length + " noffsets = " + glmt._noffsets);
assert _activeCols == null || _activeCols.length == _activeData.fullN();
assert getCompleter().getPendingCount() >= 1 : LogInfo("unexpected pending count, expected >= 1, got " + getCompleter().getPendingCount()); // will be decreased by 1 after we leave this callback
if (_countIteration) ++_iter;
_callbackStart = System.currentTimeMillis();
double gerr = Double.NaN;
boolean hasNaNs = glmt._gram.hasNaNsOrInfs() || Utils.hasNaNsOrInfs(glmt._xy);
boolean needLineSearch = hasNaNs || _checkLineSearch && needLineSearch(glmt);
if (glmt._val != null && glmt._computeGradient) { // check gradient
final double[] grad = glmt.gradient(alpha[0], _currentLambda);
ADMMSolver.subgrad(alpha[0], _currentLambda, glmt._beta, grad);
gerr = 0;
for (double d : grad)
gerr += d*d;
if(gerr <= GLM_GRAD_EPS*GLM_GRAD_EPS || (needLineSearch && gerr <= 5*ADMM_GRAD_EPS*ADMM_GRAD_EPS)){
LogInfo("converged by reaching small enough gradient, with max |subgradient| = " + gerr );
checkKKTAndComplete(getCompleter(),glmt, glmt._beta,false);
return;
}
}
if(needLineSearch){
if(!_checkLineSearch){ // has to converge here
LogInfo("Line search did not progress, converged.");
checkKKTAndComplete(getCompleter(),glmt, glmt._beta,true);
return;
}
LogInfo("invoking line search");
new GLMTask.GLMLineSearchTask(_noffsets, GLM2.this.self(), _activeData,_glm, lastBeta(_noffsets), glmt._beta, 1e-4, _ymu, _nobs, new LineSearchIteration(glmt,getCompleter())).asyncExec(_activeData._adaptedFrame);
return;
}
if(glmt._grad != null)
_lastResult = makeIterationInfo(_iter,glmt,_activeCols,null);
if(glmt._newThresholds != null) {
thresholds = Utils.join(glmt._newThresholds[0], glmt._newThresholds[1]);
Arrays.sort(thresholds);
}
final double [] newBeta = MemoryManager.malloc8d(glmt._xy.length);
long t1 = System.currentTimeMillis();
ADMMSolver slvr = new ADMMSolver(lambda_max, _currentLambda,alpha[0], _gradientEps, _addedL2);
// Box constraints and proximal terms must be contracted to the active column subset when the
// strong rules filtered columns (contractVec with a null activeCols is just a copy).
if(_lbs != null)
slvr._lb = _activeCols == null?_lbs:contractVec(_lbs,_activeCols,0);
if(_ubs != null)
slvr._ub = _activeCols == null?_ubs:contractVec(_ubs,_activeCols,0);
if(_bgs != null && _rho != null) {
slvr._wgiven = _activeCols == null ? _bgs : contractVec(_bgs, _activeCols, 0);
slvr._proximalPenalties = _activeCols == null ? _rho : contractVec(_rho, _activeCols, 0);
}
}
slvr.solve(glmt._gram,glmt._xy,glmt._yy,newBeta,Math.max(1e-8*lambda_max,_currentLambda*alpha[0]));
// print all info about iteration
LogInfo("Gram computed in " + (_callbackStart - _iterationStartTime) + "ms, " + (Double.isNaN(gerr)?"":"gradient = " + gerr + ",") + ", step = " + 1 + ", ADMM: " + slvr.iterations + " iterations, " + (System.currentTimeMillis() - t1) + "ms (" + slvr.decompTime + "), subgrad_err=" + slvr.gerr);
// int [] iBlocks = new int[]{8,16,32,64,128,256,512,1024};
// int [] rBlocks = new int[]{1,2,4,8,16,32,64,128};
// for(int i:iBlocks)
// for(int r:rBlocks){
// long ttx = System.currentTimeMillis();
// try {
// slvr.gerr = Double.POSITIVE_INFINITY;
// ADMMSolver.ParallelSolver pslvr = slvr.parSolver(glmt._gram, glmt._wy, newBeta, _currentLambda * alpha[0] * _rho_mul, i, r);
// pslvr.invoke();
// System.out.println("iBlock = " + i + ", rBlocsk = " + r + "ms");
// LogInfo("ADMM: " + pslvr._iter + " iterations, " + (System.currentTimeMillis() - ttx) + "ms (" + slvr.decompTime + "), subgrad_err=" + slvr.gerr);
// } catch(Throwable t){
// System.out.println("iBlock = " + i + ", rBlocsk = " + r + " failed! err = " + t);
// }
// }
if (slvr._addedL2 > _addedL2) LogInfo("added " + (slvr._addedL2 - _addedL2) + " L2 penalty");
new GLM2_ProgressUpdate().fork(_progressKey); // update progress
_gradientEps = Math.max(ADMM_GRAD_EPS, Math.min(slvr.gerr, 0.01));
_addedL2 = slvr._addedL2;
if (Utils.hasNaNsOrInfs(newBeta)) {
throw new RuntimeException(LogInfo("got NaNs and/or Infs in beta"));
} else {
final double bdiff = beta_diff(glmt._beta, newBeta);
if(_glm.family == Family.gaussian) {
checkKKTAndComplete(getCompleter(),glmt, newBeta, false);
return;
} else if (bdiff < beta_epsilon || _iter >= max_iter) { // Gaussian is non-iterative and gradient is ADMMSolver's gradient => just validate and move on to the next lambda_value
int diff = (int) Math.log10(bdiff);
int nzs = 0;
for (int i = 0; i < glmt._beta.length; ++i)
if (glmt._beta[i] != 0) ++nzs;
LogInfo("converged (reached a fixed point with ~ 1e" + diff + " precision), got " + nzs + " nzs");
checkKKTAndComplete(getCompleter(),glmt, newBeta, false); // NOTE: do not use newBeta here, it has not been checked and can lead to NaNs in KKT check, redoing line search, coming up with the same beta and so on.
return;
} else { // not done yet, launch next iteration
if (glmt._beta != null)
setSubmodel(glmt._beta, glmt._val, (H2OCountedCompleter) getCompleter().getCompleter()); // update current intermediate result
final boolean validate = higher_accuracy || (_iter % 5) == 0;
new GLMIterationTask(_noffsets,GLM2.this.self(),_activeData,glmt._glm, true, validate, validate, newBeta,_ymu,1.0/_nobs,thresholds, new Iteration(getCompleter(),true,true)).asyncExec(_activeData._adaptedFrame);
}
}
}
}
private static int nzs(double ds[]){
int res = 0;
for(double d:ds)if(d != 0)++res;
return res;
}
private class LambdaIteration extends H2OCallback {
public LambdaIteration(CountedCompleter cmp) {
super((H2OCountedCompleter) cmp);
}
@Override
public void callback(H2OCountedCompleter h2OCountedCompleter) {
// check if we're done otherwise launch next lambda computation
_done = _currentLambda <= lambda_min
|| (max_predictors != -1 && nzs(_lastResult._glmt._beta) > max_predictors); // _iter < max_iter && (improved || _runAllLambdas) && _lambdaIdx < (lambda_value.length-1);;
if(!_done) {
H2OCountedCompleter cmp = (H2OCountedCompleter)getCompleter();
cmp.addToPendingCount(1);
nextLambda(nextLambdaValue(), new LambdaIteration(cmp));
}
}
}
private class GLMJobCompleter extends H2OCountedCompleter {
AtomicReference<CountedCompleter> _cmp = new AtomicReference<CountedCompleter>();
public GLMJobCompleter(H2OCountedCompleter cmp){super(cmp);}
@Override
public void compute2() {
run(true,this);
}
private transient boolean _failed;
@Override public void onCompletion(CountedCompleter cmp){
if(!_grid)source.unlock(self());
if(!_failed) {
assert _cmp.compareAndSet(null, cmp) : "double completion, first from " + _cmp.get().getClass().getName() + ", second from " + cmp.getClass().getName();
_done = true;
// TODO: move these updates to Model into a DKeyTask so that it runs remotely on the model's home
GLMModel model = DKV.get(dest()).get();
model.maybeComputeVariableImportances();
model.stop_training();
if (_addedL2 > 0) {
String warn = "Added L2 penalty (rho = " + _addedL2 + ") due to non-spd matrix. ";
model.addWarning(warn);
}
if(_failedLineSearch && !highAccuracy())
model.addWarning("High accuracy settings recommended.");
state = JobState.DONE;
DKV.remove(_progressKey);
model.get_params().state = state;
model.update(self());
getCompleter().addToPendingCount(1);
new GLMModel.UnlockModelTask(new H2OCallback((H2OCountedCompleter) getCompleter()) {
@Override
public void callback(H2OCountedCompleter h2OCountedCompleter) {
remove(); // Remove/complete job only for top-level, not xval GLM2s
}
}, model._key, self()).forkTask();
cleanup();
}
}
@Override public boolean onExceptionalCompletion(Throwable t, CountedCompleter cmp){
if(_cmp.compareAndSet(null, cmp)) {
_done = true;
GLM2.this.cancel(t);
cleanup();
if(_grid){
_failed = true;
tryComplete();
}
}
return !_grid;
}
}
@Override
public GLM2 fork(){return fork(null);}
public GLM2 fork(H2OCountedCompleter cc){
if(!_grid)source.read_lock(self());
// keep *this* separate from what's stored in K/V as job (will be changing it!)
Futures fs = new Futures();
_progressKey = Key.make(dest().toString() + "_progress", (byte) 1, Key.HIDDEN_USER_KEY, dest().home_node());
int total = max_iter;
if(lambda_search)
total = MAX_ITERATIONS_PER_LAMBDA*nlambdas;
GLM2_Progress progress = new GLM2_Progress(total*(n_folds > 1?(n_folds+1):1));
LogInfo("created progress " + progress);
DKV.put(_progressKey,progress,fs);
fs.blockForPending();
_fjtask = new H2O.H2OEmptyCompleter(cc);
H2OCountedCompleter fjtask = new GLMJobCompleter(_fjtask);
GLM2 j = (GLM2)clone();
j.start(_fjtask); // modifying GLM2 object, don't want job object to be the same instance
H2O.submitTask(fjtask);
return j;
}
transient GLM2 [] _xvals;
private class XvalidationCallback extends H2OCallback {
public XvalidationCallback(H2OCountedCompleter cmp){super(cmp);}
@Override
public void callback(H2OCountedCompleter cc) {
ParallelGLMs pgs = (ParallelGLMs)cc;
_xvals = pgs._glms;
for(int i = 0; i < _xvals.length; ++i){
assert _xvals[i]._lastResult._fullGrad != null:LogInfo("last result missing full gradient!");
assert _xvals[i]._lastResult._glmt._val != null:LogInfo("last result missing validation!");
}
_iter = _xvals[0]._iter;
thresholds = _xvals[0].thresholds;
_lastResult = (IterationInfo)pgs._glms[0]._lastResult.clone();
final GLMModel [] xvalModels = new GLMModel[_xvals.length-1];
final double curentLambda = _currentLambda;
final H2OCountedCompleter mainCmp = (H2OCountedCompleter)getCompleter().getCompleter();
mainCmp.addToPendingCount(1);
final GLMModel.GetScoringModelTask [] tasks = new GLMModel.GetScoringModelTask[pgs._glms.length];
H2OCallback c = new H2OCallback(mainCmp) {
@Override public String toString(){
return "GetScoringModelTask.Callback, completer = " + getCompleter() == null?"null":getCompleter().toString();
}
AtomicReference<CountedCompleter> _cmp = new AtomicReference<CountedCompleter>();
@Override
public void callback(H2OCountedCompleter cc) {
assert _cmp.compareAndSet(null,cc):"Double completion, first " + _cmp.get().getClass().getName() + ", second from " + cc.getClass().getName();
for(int i = 1; i < tasks.length; ++i)
xvalModels[i-1] = tasks[i]._res;
mainCmp.addToPendingCount(1);
new GLMXValidationTask(tasks[0]._res, curentLambda, xvalModels, thresholds,mainCmp).asyncExec(_srcDinfo._adaptedFrame);
}
};
c.addToPendingCount(tasks.length-1);
for(int i = 0; i < tasks.length; ++i)
(tasks[i] = new GLMModel.GetScoringModelTask(c,pgs._glms[i].dest(),curentLambda)).forkTask();
}
}
private GLMModel addLmaxSubmodel(GLMModel m,GLMValidation val, double [] beta){
m.submodels = new GLMModel.Submodel[]{new GLMModel.Submodel(lambda_max,beta,beta,0,0, beta.length >= sparseCoefThreshold)};
m.submodels[0].validation = val;
assert val != null;
return m;
}
public void run(boolean doLog, H2OCountedCompleter cmp){
if(doLog) logStart();
// if this is cross-validated task, don't do actual computation,
// just fork off the nfolds+1 tasks and wait for the results
assert alpha.length == 1;
start_time = System.currentTimeMillis();
if(nlambdas == -1)nlambdas = 100;
if(lambda_search && nlambdas <= 1)
throw new IllegalArgumentException(LogInfo("GLM2: nlambdas must be > 1 when running with lambda search."));
Futures fs = new Futures();
Key dst = dest();
new YMUTask(GLM2.this.self(), _srcDinfo, n_folds,new H2OCallback<YMUTask>(cmp) {
@Override
public String toString(){
return "YMUTask callback. completer = " + getCompleter() != null?"null":getCompleter().toString();
}
@Override
public void callback(final YMUTask ymut) {
if (ymut._ymin == ymut._ymax)
throw new IllegalArgumentException(LogInfo("GLM2: attempted to run with constant response. Response == " + ymut._ymin + " for all rows in the training set."));
if(ymut.nobs() == 0)
throw new IllegalArgumentException(LogInfo("GLM2: got no active rows in the dataset after discarding rows with NAs"));
_ymu = ymut.ymu();
_nobs = ymut.nobs();
if(_glm.family == Family.binomial && prior != -1 && prior != _ymu && !Double.isNaN(prior)) {
double ratio = prior / _ymu;
double pi0 = 1, pi1 = 1;
if (ratio > 1) {
pi1 = 1.0 / ratio;
} else if (ratio < 1) {
pi0 = ratio;
}
_iceptAdjust = Math.log(pi0 / pi1);
} else prior = _ymu;
H2OCountedCompleter cmp = (H2OCountedCompleter)getCompleter();
cmp.addToPendingCount(1);
// public GLMIterationTask(int noff, Key jobKey, DataInfo dinfo, GLMParams glm, boolean computeGram, boolean validate, boolean computeGradient, double [] beta, double ymu, double reg, float [] thresholds, H2OCountedCompleter cmp) {
new GLMIterationTask(_noffsets,GLM2.this.self(), _srcDinfo, _glm, false, true, true, nullModelBeta(_srcDinfo,_ymu), _ymu, 1.0/_nobs, thresholds, new H2OCallback<GLMIterationTask>(cmp){
@Override
public String toString(){
return "LMAXTask callback. completer = " + (getCompleter() != null?"NULL":getCompleter().toString());
}
@Override public void callback(final GLMIterationTask glmt){
double [] beta = glmt._beta;
if(beta_start == null) {
beta_start = beta;
}
_nullDeviance = glmt._val.residualDeviance();
_currentLambda = lambda_max = Math.max(Utils.maxValue(glmt._grad),-Utils.minValue(glmt._grad))/Math.max(1e-3,alpha[0]);
assert lambda_max > 0;
_lastResult = makeIterationInfo(0,glmt,null,glmt.gradient(0,0));
GLMModel model = new GLMModel(GLM2.this, dest(), _srcDinfo, _glm, glmt._val, beta_epsilon, alpha[0], lambda_max, _ymu, prior);
model.start_training(start_time);
if(lambda_search) {
assert !Double.isNaN(lambda_max) : LogInfo("running lambda_value search, but don't know what is the lambda_value max!");
model = addLmaxSubmodel(model, glmt._val, beta);
if (nlambdas == -1) {
lambda = null;
} else {
if (lambda_min_ratio == -1)
lambda_min_ratio = _nobs > 25 * _srcDinfo.fullN() ? 1e-4 : 1e-2;
final double d = Math.pow(lambda_min_ratio, 1.0 / (nlambdas - 1));
if (nlambdas == 0)
throw new IllegalArgumentException("nlambdas must be > 0 when running lambda search.");
lambda = new double[nlambdas];
lambda[0] = lambda_max;
if (nlambdas == 1)
throw new IllegalArgumentException("Number of lambdas must be > 1 when running with lambda_search!");
for (int i = 1; i < lambda.length; ++i)
lambda[i] = lambda[i - 1] * d;
lambda_min = lambda[lambda.length - 1];
max_iter = MAX_ITERATIONS_PER_LAMBDA * nlambdas;
}
_runAllLambdas = false;
} else {
if(lambda == null || lambda.length == 0)
lambda = new double[]{DEFAULT_LAMBDA};
int i = 0;
while(i < lambda.length && lambda[i] >= lambda_max)++i;
if(i == lambda.length)
throw new IllegalArgumentException("Given lambda(s) are all > lambda_max = " + lambda_max + ", have nothing to run with. lambda = " + Arrays.toString(lambda));
if(i > 0) {
model.addWarning("Removed " + i + " lambdas greater than lambda_max.");
lambda = Utils.append(new double[]{lambda_max},Arrays.copyOfRange(lambda,i,lambda.length));
addLmaxSubmodel(model,glmt._val, beta);
}
}
model.delete_and_lock(self());
lambda_min = lambda[lambda.length-1];
if(n_folds > 1){
final H2OCountedCompleter futures = new H2OEmptyCompleter();
final GLM2 [] xvals = new GLM2[n_folds+1];
futures.addToPendingCount(xvals.length-2);
for(int i = 0; i < xvals.length; ++i){
xvals[i] = (GLM2)GLM2.this.clone();
xvals[i].n_folds = 0;
xvals[i].standardize = standardize;
xvals[i].family = family;
xvals[i].link = link;
xvals[i].beta_epsilon = beta_epsilon;
xvals[i].max_iter = max_iter;
xvals[i].variable_importances = variable_importances;
if(i != 0){
xvals[i]._srcDinfo = _srcDinfo.getFold(i-1,n_folds);
xvals[i].destination_key = Key.make(dest().toString() + "_xval_" + i, (byte) 1, Key.HIDDEN_USER_KEY, H2O.SELF);
xvals[i]._nobs = ymut.nobs(i-1);
xvals[i]._ymu = ymut.ymu(i-1);
final int fi = i;
final double ymu = ymut.ymu(fi-1);
// new GLMIterationTask(offset_cols.length,GLM2.this.self(), _srcDinfo, _glm, false, true, true,nullModelBeta(),_ymu,1.0/_nobs, thresholds, new H2OCallback<GLMIterationTask>(cmp){
new GLMIterationTask(_noffsets,self(),xvals[i]._srcDinfo,_glm, false,true,true, nullModelBeta(xvals[fi]._srcDinfo,ymu),ymu,1.0/ymut.nobs(fi-1),thresholds,new H2OCallback<GLMIterationTask>(futures){
@Override
public String toString(){
return "Xval LMAXTask callback., completer = " + getCompleter() == null?"null":getCompleter().toString();
}
@Override
public void callback(GLMIterationTask t) {
xvals[fi].beta_start = t._beta;
xvals[fi]._currentLambda = xvals[fi].lambda_max = Math.max(Utils.maxValue(t._grad),-Utils.minValue(t._grad))/Math.max(1e-3,alpha[0]); // fold-specific lambda_max from this fold's gradient (t), not the outer glmt
assert xvals[fi].lambda_max > 0;
xvals[fi]._lastResult = makeIterationInfo(0,t,null,t.gradient(alpha[0],0));
GLMModel m = new GLMModel(GLM2.this, xvals[fi].destination_key, xvals[fi]._srcDinfo, _glm, t._val, beta_epsilon, alpha[0], xvals[fi].lambda_max, xvals[fi]._ymu, prior);//.delete_and_lock(self());
m.submodels = new Submodel[]{new Submodel(xvals[fi].lambda_max,t._beta,t._beta,0,0, t._beta.length >= sparseCoefThreshold)};
m.submodels[0].validation = t._val;
assert t._val != null;
m.setSubmodelIdx(0);
m.delete_and_lock(self());
if(xvals[fi].lambda_max > lambda_max){
futures.addToPendingCount(1);
new ParallelGLMs(GLM2.this,new GLM2[]{xvals[fi]},lambda_max,1,futures).fork();
}
}
}).asyncExec(xvals[i]._srcDinfo._adaptedFrame);
}
}
_xvals = xvals;
futures.join();
}
getCompleter().addToPendingCount(1);
nextLambda(nextLambdaValue(), new LambdaIteration(getCompleter()));
}
}).asyncExec(_srcDinfo._adaptedFrame);
}
}).asyncExec(_srcDinfo._adaptedFrame);
}
private double [] nullModelBeta(DataInfo dinfo, double ymu){
double[] beta = MemoryManager.malloc8d(_srcDinfo.fullN() + (dinfo._hasIntercept?1:0) - _noffsets);
if(has_intercept) {
double icpt = _noffsets == 0?_glm.link(ymu):computeIntercept(dinfo,ymu,offset,response);
if (dinfo._hasIntercept) beta[beta.length - 1] = icpt;
}
return beta;
}
public double nextLambdaValue(){
assert lambda == null || lambda_min == lambda[lambda.length-1];
return (lambda == null)?pickNextLambda():lambda[++_lambdaIdx];
}
private transient int _iter1 = 0;
void nextLambda(final double currentLambda, final H2OCountedCompleter cmp){
if(currentLambda > lambda_max){
_done = true;
cmp.tryComplete();
return;
}
if(_beta != null)
beta_start = _beta;
_iter1 = _iter;
LogInfo("starting computation of lambda = " + currentLambda + ", previous lambda = " + _currentLambda);
_done = false;
final double previousLambda = _currentLambda;
_currentLambda = currentLambda;
if(n_folds > 1){ // if we're cross-validated tasks, just fork off the parallel glms and wait for result!
for(int i = 0; i < _xvals.length; ++i)
if(_xvals[i]._lastResult._fullGrad == null){
RuntimeException re = new RuntimeException(LogInfo("missing full gradient at lambda = " + previousLambda + " at fold " + i));
Log.err(re);
throw re;
}
ParallelGLMs pgs = new ParallelGLMs(this,_xvals,currentLambda, H2O.CLOUD.size(),new XvalidationCallback(cmp));
pgs.fork();
return;
} else {
if(lambda_search){ // if we are in lambda_search, we want only limited number of iters per lambda!
max_iter = _iter + MAX_ITERATIONS_PER_LAMBDA;
}
final double[] grad = _lastResult.fullGrad(alpha[0],previousLambda);
assert grad != null;
activeCols(_currentLambda, previousLambda, grad);
if(_activeCols != null && _activeCols.length == _noffsets) {
// nothing to do but to store the null model and report back...
setSubmodel(_lastResult._glmt._beta,_lastResult._glmt._val,cmp);
_done = true;
cmp.tryComplete();
return;
}
assert cmp.getPendingCount() == 0;
// expand the beta
// todo make this work again
// if (Arrays.equals(_lastResult._activeCols, _activeCols) && _lastResult._glmt._gram != null) { // set of coefficients did not change
// new Iteration(cmp, false).callback(_lastResult._glmt);
// _lastResult._glmt.tryComplete(); // shortcut to reuse the last gram if same active columns
// } else
new GLMIterationTask(_noffsets,GLM2.this.self(), _activeData, _glm, true, false, false, resizeVec(_lastResult._glmt._beta, _activeCols, _lastResult._activeCols), _ymu, 1.0 / _nobs, thresholds, new Iteration(cmp)).asyncExec(_activeData._adaptedFrame);
}
}
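// Elastic-net penalty used throughout: lambda * ( alpha*||beta||_1 + (1-alpha)/2 * ||beta||_2^2 ),
// split into its l1 coefficient (l1pen) and l2 coefficient (l2pen) below.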
private final double l2pen(){return 0.5*_currentLambda*(1-alpha[0]);}
private final double l1pen(){return _currentLambda*alpha[0];}
// filter the current active columns using the strong rules
// note: the strong rules are updated so that they keep all previously active coefficients in, to prevent issues with line-search
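// pickNextLambda: chooses the next lambda on the regularization path when no explicit lambda
// sequence was given. The gradient components of the not-yet-active columns are sorted by
// magnitude and the new lambda is obtained by inverting the strong-rule threshold
// |grad_j| >= alpha*(2*lambda_new - lambda_old), so that roughly maxNewVars new columns become
// active; if that value would not decrease the lambda, 0.9*oldLambda is used instead.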
private double pickNextLambda(){
final double[] grad = _lastResult.fullGrad(alpha[0],_currentLambda);
return pickNextLambda(_currentLambda, grad, Math.max((int) (Math.min(_srcDinfo.fullN(),_nobs) * 0.05), 1));
}
private double pickNextLambda(final double oldLambda, final double[] grad, int maxNewVars){
double [] g = grad.clone();
for(int i = 0; i < g.length; ++i)
g[i] = g[i] < 0?g[i]:-g[i];
if(_activeCols != null) { // not interested in cols which are already active!
for (int i : _activeCols) g[i] *= -1;
}
Arrays.sort(g);
if(maxNewVars < (g.length-1) && g[maxNewVars] == g[maxNewVars+1]){
double x = g[maxNewVars];
while(maxNewVars > 0 && g[maxNewVars] == x)--maxNewVars;
}
double res = 0.5*(-g[maxNewVars]/Math.max(1e-3,alpha[0]) + oldLambda);
return res < oldLambda?res:oldLambda*0.9;
}
// filter the current active columns using the strong rules
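// A column i stays active when it is already active or when |grad[i]| >= alpha*(2*l1 - l2),
// with l1 the new lambda and l2 the previous one; columns failing this test are screened out
// for the next lambda, and any screening mistakes are caught later by checkKKTAndComplete.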
private int [] activeCols(final double l1, final double l2, final double [] grad){
final double rhs = alpha[0]*(2*l1-l2);
int [] cols = MemoryManager.malloc4(_srcDinfo.fullN());
int selected = 0;
int j = 0;
if(_activeCols == null)_activeCols = new int[]{-1};
for(int i = 0; i < _srcDinfo.fullN() - _noffsets; ++i)
if((j < _activeCols.length && i == _activeCols[j]) || !(grad[i] < rhs && grad[i] > -rhs) /* note negated here to have column included in case its gradient came as NaN */){
cols[selected++] = i;
if(j < _activeCols.length && i == _activeCols[j])++j;
}
for(int c = _srcDinfo.fullN()-_noffsets; c < _srcDinfo.fullN(); ++c)
cols[selected++] = c;
if(!strong_rules_enabled || selected == _srcDinfo.fullN()){
_activeCols = null;
_activeData._adaptedFrame = _srcDinfo._adaptedFrame;
_activeData = _srcDinfo;
} else {
_activeCols = Arrays.copyOf(cols,selected);
_activeData = _srcDinfo.filterExpandedColumns(_activeCols);
}
LogInfo("strong rule at lambda_value=" + l1 + ", got " + (selected - _noffsets) + " active cols out of " + (_srcDinfo.fullN() - _noffsets) + " total.");
assert _activeCols == null || _activeData.fullN() == _activeCols.length:LogInfo("mismatched number of cols, got " + _activeCols.length + " active cols, but data info claims " + _activeData.fullN());
return _activeCols;
}
// Expand grid search related argument sets
@Override protected NanoHTTPD.Response serveGrid(NanoHTTPD server, Properties parms, RequestType type) {
return superServeGrid(server, parms, type);
}
public static final DecimalFormat AUC_DFORMAT = new DecimalFormat("#.###");
public static final String aucStr(double auc){
return AUC_DFORMAT.format(Math.round(1000 * auc) * 0.001);
}
public static final DecimalFormat AIC_DFORMAT = new DecimalFormat("###.###");
public static final String aicStr(double aic){
return AIC_DFORMAT.format(Math.round(1000*aic)*0.001);
}
public static final DecimalFormat DEV_EXPLAINED_DFORMAT = new DecimalFormat("#.###");
public static final String devExplainedStr(double dev){
return DEV_EXPLAINED_DFORMAT.format(Math.round(1000*dev)*0.001);
}
public static class GLMGrid extends Lockable<GLMGrid> {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
final Key _jobKey;
final long _startTime;
@API(help="mean of response in the training dataset")
public final Key [] destination_keys;
final double [] _alphas;
public GLMGrid (Key gridKey,Key jobKey, GLM2 [] jobs){
super(gridKey);
_jobKey = jobKey;
_alphas = new double [jobs.length];
destination_keys = new Key[jobs.length];
for(int i = 0; i < jobs.length; ++i){
destination_keys[i] = jobs[i].destination_key;
_alphas[i] = jobs[i].alpha[0];
}
_startTime = System.currentTimeMillis();
}
public static class UnlockGridTsk extends DTask.DKeyTask<UnlockGridTsk,GLMGrid> {
final Key _jobKey;
public UnlockGridTsk(Key gridKey, Key jobKey, H2OCountedCompleter cc){
super(cc,gridKey);
_jobKey = jobKey;
}
@Override
public void map(GLMGrid g) {
addToPendingCount(g.destination_keys.length);
for(Key k:g.destination_keys)
new GLMModel.UnlockModelTask(this,k,_jobKey).forkTask();
g.unlock(_jobKey);
}
}
public static class DeleteGridTsk extends DTask.DKeyTask<DeleteGridTsk,GLMGrid> {
public DeleteGridTsk(H2OCountedCompleter cc, Key gridKey){
super(cc,gridKey);
}
@Override
public void map(GLMGrid g) {
addToPendingCount(g.destination_keys.length);
for(Key k:g.destination_keys)
new GLMModel.DeleteModelTask(this,k).forkTask();
assert g.is_unlocked():"not unlocked??";
g.delete();
}
}
@Override
protected Futures delete_impl(Futures fs) {return fs;}
@Override
protected String errStr() {
return null;
}
}
public class GLMGridSearch extends Job {
public final int _maxParallelism;
transient private AtomicInteger _idx;
public final GLM2 [] _jobs;
public final GLM2 _glm2;
public GLMGridSearch(int maxP, GLM2 glm2, Key destKey){
super(glm2.self(), destKey);
_glm2 = glm2;
description = "GLM Grid on data " + glm2._srcDinfo.toString() ;
_maxParallelism = maxP;
_jobs = new GLM2[glm2.alpha.length];
_idx = new AtomicInteger(_maxParallelism);
for(int i = 0; i < _jobs.length; ++i) {
_jobs[i] = (GLM2)_glm2.clone();
_jobs[i]._grid = true;
_jobs[i].alpha = new double[]{glm2.alpha[i]};
_jobs[i].destination_key = Key.make(glm2.destination_key + "_" + i);
_jobs[i]._progressKey = Key.make(dest().toString() + "_progress_" + i, (byte) 1, Key.HIDDEN_USER_KEY, dest().home_node());
_jobs[i].job_key = Key.make(glm2.job_key + "_" + i);
}
}
@Override public float progress(){
float sum = 0f;
for(GLM2 g:_jobs)sum += g.progress();
return sum/_jobs.length;
}
private transient boolean _cancelled;
@Override public void cancel(){
_cancelled = true;
for(GLM2 g:_jobs)
g.cancel();
source.unlock(self());
DKV.remove(destination_key);
super.cancel();
}
@Override
public GLMGridSearch fork(){
System.out.println("read-locking " + source._key + " by job " + self());
source.read_lock(self());
Futures fs = new Futures();
new GLMGrid(destination_key,self(),_jobs).delete_and_lock(self());
// keep *this* separate from what's stored in K/V as job (will be changing it!)
assert _maxParallelism >= 1;
final Job job = this;
_fjtask = new H2O.H2OEmptyCompleter();
H2OCountedCompleter fjtask = new H2OCallback<ParallelGLMs>(_fjtask) {
@Override public String toString(){
return "GLMGrid.Job.Callback, completer = " + getCompleter() == null?"null":getCompleter().toString();
}
@Override
public void callback(ParallelGLMs parallelGLMs) {
_glm2._done = true;
// we will still get a success callback after cancelling the forked tasks, since forked GLMs do not propagate exceptions when they are part of a grid search
if(!_cancelled) {
source.unlock(self());
Lockable.unlock_lockable(destination_key, self());
remove();
}
}
@Override public boolean onExceptionalCompletion(Throwable t, CountedCompleter cmp){
if(!(t instanceof JobCancelledException) && (t.getMessage() == null || !t.getMessage().contains("job was cancelled"))) {
job.cancel(t);
}
return true;
}
};
start(_fjtask); // modifying GLM2 object, don't want job object to be the same instance
fs.blockForPending();
H2O.submitTask(new ParallelGLMs(this,_jobs,Double.NaN,H2O.CLOUD.size(),fjtask));
return this;
}
@Override public Response redirect() {
String n = GridSearchProgress.class.getSimpleName();
return Response.redirect( this, n, "job_key", job_key, "destination_key", destination_key);
}
}
private static class GLMT extends DTask<GLMT> {
private final GLM2 _glm;
private final double _lambda;
public GLMT(H2OCountedCompleter cmp, GLM2 glm, double lambda){
super(cmp);
_glm = glm;
_lambda = lambda;
}
@Override
public void compute2() {
assert Double.isNaN(_lambda) || _glm._lastResult._fullGrad != null:_glm.LogInfo("missing full gradient");
if(Double.isNaN(_lambda))
_glm.fork(this);
else {
_glm.nextLambda(_lambda, this);
}
}
@Override public void onCompletion(CountedCompleter cc){
if(!Double.isNaN(_lambda)) {
assert _glm._done : _glm.LogInfo("GLMT hit onCompletion but glm is not done yet!");
assert _glm._lastResult._fullGrad != null : _glm.LogInfo(" GLMT done with missing full gradient");
}
}
}
// class to execute multiple GLM runs in parallel
// (with user-given limit on how many to run in parallel)
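// At most _maxP GLMT tasks are in flight at any time: compute2 forks the first _maxP tasks
// round-robin across the cloud nodes, and each finished task's Callback pulls the next index
// from _nextTask and forks it on the same node, until all _glms have been processed.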
public static class ParallelGLMs extends H2OCountedCompleter {
transient final private GLM2 [] _glms;
transient final private GLMT [] _tasks;
transient final Job _job;
transient final public int _maxP;
transient private AtomicInteger _nextTask;
public final double _lambda;
public ParallelGLMs(Job j, GLM2 [] glms){this(j,glms,Double.NaN);}
public ParallelGLMs(Job j, GLM2 [] glms, double lambda){this(j,glms,lambda, H2O.CLOUD.size());}
public ParallelGLMs(Job j, GLM2 [] glms, double lambda, int maxP){
_job = j; _lambda = lambda; _glms = glms; _maxP = maxP;
_tasks = new GLMT[_glms.length];
addToPendingCount(_glms.length);
}
public ParallelGLMs(Job j, GLM2 [] glms, double lambda, int maxP, H2OCountedCompleter cmp){
super(cmp); _lambda = lambda; _job = j; _glms = glms; _maxP = maxP;
_tasks = new GLMT[_glms.length];
addToPendingCount(_glms.length);
}
private void forkDTask(int i){
int nodeId = i%H2O.CLOUD.size();
forkDTask(i,H2O.CLOUD._memary[nodeId]);
}
private void forkDTask(final int i, H2ONode n){
_tasks[i] = new GLMT(new Callback(n,i),_glms[i],_lambda);
assert Double.isNaN(_lambda) || _tasks[i]._glm._lastResult._fullGrad != null;
if(n == H2O.SELF) H2O.submitTask(_tasks[i]);
else new RPC(n,_tasks[i]).call();
}
class Callback extends H2OCallback<H2OCountedCompleter> {
final int i;
final H2ONode n;
public Callback(H2ONode n, int i){super(ParallelGLMs.this); this.n = n; this.i = i;}
@Override public void callback(H2OCountedCompleter cc){
int i;
if((i = _nextTask.getAndIncrement()) < _glms.length) { // not done yet
forkDTask(i, n);
}
}
@Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller){
_job.cancel(ex);
return true;
}
}
@Override public void compute2(){
final int n = Math.min(_maxP, _glms.length);
_nextTask = new AtomicInteger(n);
for(int i = 0; i < n; ++i)
forkDTask(i);
tryComplete();
}
@Override public void onCompletion(CountedCompleter cc){
if(!Double.isNaN(_lambda))
for(int i= 0; i < _tasks.length; ++i) {
assert _tasks[i]._glm._lastResult._fullGrad != null;
_glms[i] = _tasks[i]._glm;
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/glm/GLMGridProgress.java
|
package hex.glm;
import hex.glm.GLM2.GLMGrid;
import water.*;
import water.api.*;
public class GLMGridProgress extends Progress2 {
@Override public boolean toHTML(StringBuilder sb){
Value v = DKV.get(destination_key);
if(v == null)return true;
GLMGrid g = v.get();
return new GLMGridView(g).toHTML(sb);
}
/** Return {@link Response} for finished job. */
@Override
protected Response jobDone(final Key dst) {
return GLMGridView.redirect(this, dst);
}
/** Return default progress {@link Response}. */
@Override
protected Response jobInProgress(final Job job, final Key dst) {
progress = job.progress();
return Response.poll(this, (int) (100 * progress), 100, "job_key", job_key.toString(), "destination_key",
destination_key.toString());
}
public static Response redirect(Request req, Key jobkey, Key dest) {
return Response.redirect(req, "/2/GLMProgress", "job_key", jobkey, "destination_key", dest);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/glm/GLMGridView.java
|
package hex.glm;
import hex.glm.GLM2.GLMGrid;
import hex.glm.GLMParams.Family;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Collections;
import water.*;
import water.api.DocGen;
import water.api.Request;
public class GLMGridView extends Request2 {
public GLMGridView(){}
public GLMGridView(GLMGrid g){grid = g;}
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="GLM Grid Key", required=true, filter=GLMGridKeyFilter.class)
Key grid_key;
class GLMGridKeyFilter extends H2OKey { public GLMGridKeyFilter() { super("",true); } }
@API(help="GLM Grid object")
GLMGrid grid;
public static String link(String txt, Key grid) {
return "<a href='GLMGridView.html?grid=" + grid + "'>" + txt + "</a>";
}
public static Response redirect(Request req, Key gridKey) {
return Response.redirect(req, "/2/GLMGridView", "grid_key", gridKey);
}
public static Response redirect2(Request req, Key modelKey) {
return Response.redirect(req, "/2/GLMGridView", "grid_key", modelKey);
}
public static final DecimalFormat AUC_DFORMAT = new DecimalFormat("#.###");
public static final String aucStr(double auc){
return AUC_DFORMAT.format(Math.round(1000*auc)*0.001);
}
public static final DecimalFormat AIC_DFORMAT = new DecimalFormat("###.###");
public static final String aicStr(double aic){
return AIC_DFORMAT.format(Math.round(1000*aic)*0.001);
}
public static final DecimalFormat DEV_EXPLAINED_DFORMAT = new DecimalFormat("#.###");
public static final String devExplainedStr(double dev){
return DEV_EXPLAINED_DFORMAT.format(Math.round(1000*dev)*0.001);
}
@Override public boolean toHTML(StringBuilder sb){
// if(title != null && !title.isEmpty())DocGen.HTML.title(sb,title);
ArrayList<GLMModel> models = new ArrayList<GLMModel>(grid.destination_keys.length);
for(int i = 0; i < grid.destination_keys.length; ++i){
Value v = DKV.get(grid.destination_keys[i]);
if(v != null)models.add(v.<GLMModel>get());
}
if(models.isEmpty()){
sb.append("no models computed yet..");
} else {
DocGen.HTML.arrayHead(sb);
sb.append("<tr>");
sb.append("<th>α</th>");
sb.append("<th>λ<sub>max</sub></th>");
sb.append("<th>λ<sub>min</sub></th>");
sb.append("<th>λ<sub>best</sub></th>");
sb.append("<th>nonzeros</th>");
sb.append("<th>iterations</td>");
if(models.get(0).glm.family == Family.binomial)
sb.append("<th>AUC</td>");
if(models.get(0).glm.family != Family.gamma)
sb.append("<th>AIC</td>");
sb.append("<th>Deviance Explained</td>");
sb.append("<th>Model</th>");
// sb.append("<th>Progress</th>");
sb.append("</tr>");
Collections.sort(models);//, _cmp);
for(int i = 0; i < models.size();++i){
GLMModel m = models.get(i);
sb.append("<tr>");
sb.append("<td>" + m.alpha + "</td>");
sb.append("<td>" + m.submodels[0].lambda_value + "</td>");
sb.append("<td>" + m.submodels[m.submodels.length-1].lambda_value + "</td>");
sb.append("<td>" + m.lambda() + "</td>");
sb.append("<td>" + (m.rank()-1) + "</td>");
sb.append("<td>" + m.iteration() + "</td>");
if(m.glm.family == Family.binomial)
sb.append("<td>" + aucStr(m.auc()) + "</td>");
if(m.glm.family != Family.gamma)
sb.append("<td>" + aicStr(m.aic()) + "</td>");
sb.append("<td>" + devExplainedStr(m.devExplained()) + "</td>");
sb.append("<td>" + GLMModelView.link("View Model", m._key) + "</td>");
// if(job != null && !job.isDone())DocGen.HTML.progress(job.progress(), sb.append("<td>")).append("</td>");
// else sb.append("<td class='alert alert-success'>" + "DONE" + "</td>");
sb.append("</tr>");
}
DocGen.HTML.arrayTail(sb);
}
return true;
}
@Override protected Response serve() {
grid = DKV.get(grid_key).get();
Job j = null;
if((j = UKV.get(grid._jobKey)) != null){
switch(j.state){
case DONE: return Response.done(this);
case FAILED: return Response.error(j.exception);
case CANCELLED:return Response.error("Job was cancelled by user!");
case RUNNING: return Response.poll(this, (int) (100 * j.progress()), 100, "grid_key", grid_key.toString());
default: break;
}
}
return Response.poll(this, 0, 100, "grid_key", grid_key.toString());
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/glm/GLMModel.java
|
package hex.glm;
import hex.FrameTask.DataInfo;
import hex.VarImp;
import hex.glm.GLMParams.Family;
import hex.glm.GLMValidation.GLMXValidation;
import water.*;
import water.H2O.H2OCountedCompleter;
import water.api.DocGen;
import water.api.Request.API;
import water.fvec.Chunk;
import water.util.Utils;
import java.util.Arrays;
import java.util.HashMap;
public class GLMModel extends Model implements Comparable<GLMModel> {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="lambda_value max, smallest lambda_value which drives all coefficients to zero")
final double lambda_max;
@API(help="mean of response in the training dataset")
final double ymu;
@API(help="actual expected mean of the response (given by the user before running the model or ymu)")
final double prior;
@API(help="job key assigned to the job building this model")
final Key job_key;
@API(help = "Model parameters", json = true)
final private GLM2 parameters;
@Override public final GLM2 get_params() { return parameters; }
@Override public final Request2 job() { return get_params(); }
@API(help="Input data info")
DataInfo data_info;
@API(help="Decision threshold.")
double threshold;
@API(help="glm params")
final GLMParams glm;
@API(help="beta epsilon - stop iterating when beta diff is below this threshold.")
final double beta_eps;
@API(help="regularization parameter driving proportion of L1/L2 penalty.")
final double alpha;
@API(help="column names including expanded categorical values")
public String [] coefficients_names;
@API(help="index of lambda_value giving best results")
int best_lambda_idx;
public Key [] xvalModels() {
if(submodels == null)return null;
for(Submodel sm:submodels)
if(sm.xvalidation instanceof GLMXValidation){
return ((GLMXValidation)sm.xvalidation).xval_models;
}
return null;
}
public double auc(){
if(glm.family == Family.binomial && submodels != null && submodels[best_lambda_idx].validation != null) {
Submodel sm = submodels[best_lambda_idx];
return sm.xvalidation != null?sm.xvalidation.auc:sm.validation.auc;
}
return -1;
}
public double aic(){
if(submodels != null && submodels[best_lambda_idx].validation != null){
Submodel sm = submodels[best_lambda_idx];
return sm.xvalidation != null?sm.xvalidation.aic:sm.validation.aic;
}
return Double.MAX_VALUE;
}
public double devExplained(){
if(submodels == null || submodels[best_lambda_idx].validation == null)
return 0;
Submodel sm = submodels[best_lambda_idx];
GLMValidation val = sm.xvalidation == null?sm.validation:sm.xvalidation;
return 1.0 - val.residual_deviance/null_validation.residual_deviance;
}
public static class UnlockModelTask extends DTask.DKeyTask<UnlockModelTask,GLMModel>{
final Key _jobKey;
public UnlockModelTask(H2OCountedCompleter cmp, Key modelKey, Key jobKey){
super(cmp,modelKey);
_jobKey = jobKey;
}
@Override
public void map(GLMModel m) {
Key [] xvals = m.xvalModels();
if(xvals != null){
addToPendingCount(xvals.length);
for(int i = 0; i < xvals.length; ++i)
new UnlockModelTask(this,xvals[i],_jobKey).forkTask();
}
m.unlock(_jobKey);
}
}
public static class DeleteModelTask extends DTask.DKeyTask<DeleteModelTask,GLMModel>{
final Key _modelKey;
public DeleteModelTask(H2OCountedCompleter cmp, Key modelKey){
super(cmp,modelKey);
_modelKey = modelKey;
}
@Override
public void map(GLMModel m) {
Key[] xvals = m.xvalModels();
if (xvals != null) {
addToPendingCount(xvals.length);
for (int i = 0; i < xvals.length; ++i)
new DeleteModelTask(this, xvals[i]).forkTask();
}
m.delete();
}
}
@Override public GLMModel clone(){
GLMModel res = (GLMModel)super.clone();
res.submodels = submodels.clone();
if(warnings != null)
res.warnings = warnings.clone();
else
res.warnings = new String[0];
return res;
}
@Override
public int compareTo(GLMModel m){
// assert m._dataKey.equals(_dataKey);
assert m.glm.family == glm.family;
assert m.glm.link == glm.link;
switch(glm.family){
case binomial: // compare by AUC, higher is better
return (int)(1e6*(m.auc()-auc()));
case gamma: // compare by percentage of explained deviance, higher is better
return (int)(100*(m.devExplained()-devExplained()));
default: // compare by AICs by default, lower is better
return (int)(100*(aic()- m.aic()));
}
}
@API(help="Overall run time")
long run_time;
@API(help="computation started at")
long start_time;
// fully expanded beta used for scoring
private double [] global_beta;
public void setBestSubmodel(double lambda){
}
@API(help="Validation of the null model")
public GLMValidation null_validation;
static class Submodel extends Iced {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="lambda_value value used for computation of this submodel")
final double lambda_value;
@API(help="number of iterations computed.")
final int iteration;
@API(help="running time of the algo in ms.")
final long run_time;
@API(help="Validation")
GLMValidation validation;
@API(help="X-Validation")
GLMValidation xvalidation;
@API(help="Beta vector containing model coefficients.") double [] beta;
@API(help="Beta vector containing normalized coefficients (coefficients obtained on normalized data).") double [] norm_beta;
final int rank;
@API(help="Indexes to the coefficient_names array containing names (and order) of the non-zero coefficients in this model.")
final int [] idxs;
@API(help="sparseCoefFlag")
final boolean sparseCoef;
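// When sparseCoef is set, only the non-zero coefficients are stored: idxs holds their positions
// in the full coefficient_names order and beta/norm_beta are compacted to the same length, so a
// full-length vector can be rebuilt by scattering beta[k] into position idxs[k].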
public Submodel(double lambda , double [] beta, double [] norm_beta, long run_time, int iteration, boolean sparseCoef){
this.lambda_value = lambda;
this.run_time = run_time;
this.iteration = iteration;
int r = 0;
if(beta != null){
final double [] b = norm_beta != null?norm_beta:beta;
// grab the indices of the non-zero coefficients
for(double d:beta)if(d != 0)++r;
idxs = MemoryManager.malloc4(sparseCoef?r:beta.length);
int j = 0;
for(int i = 0; i < beta.length; ++i)
if(!sparseCoef || beta[i] != 0)idxs[j++] = i;
j = 0;
this.beta = MemoryManager.malloc8d(idxs.length);
for(int i:idxs)
this.beta[j++] = beta[i];
if(norm_beta != null){
j = 0;
this.norm_beta = MemoryManager.malloc8d(idxs.length);
for(int i:idxs) this.norm_beta[j++] = norm_beta[i];
}
} else idxs = null;
rank = r;
this.sparseCoef = sparseCoef;
}
}
@API(help = "models computed for particular lambda_value values")
Submodel [] submodels;
final boolean useAllFactorLevels;
@API(help = "Variable importances", json=true)
VarImp variable_importances;
public GLMModel(GLM2 job, Key selfKey, DataInfo dinfo, GLMParams glm, GLMValidation nullVal, double beta_eps, double alpha, double lambda_max, double ymu, double prior) {
super(selfKey,job.source._key == null ? dinfo._frameKey : job.source._key,dinfo._adaptedFrame, /* priorClassDistribution */ null);
parameters = Job.hygiene((GLM2) job.clone());
job_key = job.self();
this.ymu = ymu;
this.prior = prior;
this.glm = glm;
threshold = 0.5;
this.data_info = dinfo;
this.warnings = new String[0];
this.alpha = alpha;
this.lambda_max = lambda_max;
this.beta_eps = beta_eps;
submodels = new Submodel[0];
run_time = 0;
start_time = System.currentTimeMillis();
coefficients_names = coefNames();
useAllFactorLevels = dinfo._useAllFactorLevels;
null_validation = nullVal;
null_validation.null_deviance = null_validation.residual_deviance;
}
public void pickBestModel(boolean useAuc){
int bestId = submodels.length-1;
if(submodels.length > 2) {
boolean xval = false;
GLMValidation bestVal = null;
for(Submodel sm:submodels) {
if(sm.xvalidation != null) {
xval = true;
bestVal = sm.xvalidation;
}
}
if(!xval)
bestVal = submodels[0].validation;
for (int i = 1; i < submodels.length; ++i) {
GLMValidation val = xval ? submodels[i].xvalidation : submodels[i].validation;
if (val == null || val == bestVal) continue;
if ((useAuc && val.auc > bestVal.auc)
|| (xval && val.residual_deviance < bestVal.residual_deviance)
|| (((bestVal.residual_deviance - val.residual_deviance) / null_validation.residual_deviance) >= 0.01)) {
bestVal = val;
bestId = i;
}
}
}
best_lambda_idx = bestId;
setSubmodelIdx(bestId);
}
// public static void setSubmodel(H2OCountedCompleter cmp, Key modelKey, final double lambda, double[] beta, double[] norm_beta, int iteration, long runtime, boolean sparseCoef){
public static void setSubmodel(H2OCountedCompleter cmp, Key modelKey, final double lambda, double[] beta, double[] norm_beta, int iteration, long runtime, boolean sparseCoef){
setSubmodel(cmp,modelKey,lambda,beta,norm_beta,iteration,runtime,sparseCoef,null);
}
public static class GetScoringModelTask extends DTask.DKeyTask<GetScoringModelTask,GLMModel> {
final double _lambda;
public GLMModel _res;
public GetScoringModelTask(H2OCountedCompleter cmp, Key modelKey, double lambda){
super(cmp,modelKey);
_lambda = lambda;
}
@Override
public void map(GLMModel m) {
_res = m.clone();
Submodel sm = Double.isNaN(_lambda)?_res.submodels[_res.best_lambda_idx]:_res.submodelForLambda(_lambda);
assert sm != null : "GLM[" + m._key + "]: missing submodel for lambda " + _lambda;
sm = (Submodel) sm.clone();
_res.submodels = new Submodel[]{sm};
_res.setSubmodelIdx(0);
}
}
public static void setXvalidation(H2OCountedCompleter cmp, Key modelKey, final double lambda, final GLMValidation val){
// expected cmp has already set correct pending count
new TAtomic<GLMModel>(cmp){
@Override
public GLMModel atomic(GLMModel old) {
if(old == null)return old; // job could've been cancelled
old.submodels = old.submodels.clone();
int id = old.submodelIdForLambda(lambda);
old.submodels[id] = (Submodel)old.submodels[id].clone();
old.submodels[id].xvalidation = val;
old.pickBestModel(false);
return old;
}
}.fork(modelKey);
}
public static void setSubmodel(H2OCountedCompleter cmp, Key modelKey, final double lambda, double[] beta, double[] norm_beta, final int iteration, long runtime, boolean sparseCoef, final GLMValidation val){
final Submodel sm = new Submodel(lambda,beta, norm_beta, runtime, iteration,sparseCoef);
sm.validation = val;
cmp.addToPendingCount(1);
new TAtomic<GLMModel>(cmp){
@Override
public GLMModel atomic(GLMModel old) {
if(old == null)return old; // job could've been cancelled!
if(old.submodels == null){
old.submodels = new Submodel[]{sm};
} else {
int id = old.submodelIdForLambda(lambda);
if (id < 0) {
id = -id - 1;
old.submodels = Arrays.copyOf(old.submodels, old.submodels.length + 1);
for (int i = old.submodels.length - 1; i > id; --i)
old.submodels[i] = old.submodels[i - 1];
} else if (old.submodels[id].iteration > sm.iteration)
return old;
else
old.submodels = old.submodels.clone();
old.submodels[id] = sm;
old.run_time = Math.max(old.run_time,sm.run_time);
}
old.pickBestModel(false);
return old;
}
}.fork(modelKey);
}
public void addSubmodel(double lambda){
submodels = Arrays.copyOf(submodels,submodels.length+1);
run_time = (System.currentTimeMillis()-start_time);
submodels[submodels.length-1] = new Submodel(lambda,null, null, 0, 0,true);
}
public void dropSubmodel() {
submodels = Arrays.copyOf(submodels,submodels.length-1);
}
public double lambda(){
if(submodels == null)return Double.NaN;
return submodels[best_lambda_idx].lambda_value;
}
public GLMValidation validation(){
return submodels[best_lambda_idx].validation;
}
public int iteration(){
Submodel [] sm = submodels;
for(int i = sm.length-1; i >= 0; --i)
if(sm[i] != null && sm[i].iteration != 0)
return sm[i].iteration;
return 0;
}
public double [] beta(){return global_beta;}
public double [] norm_beta(double lambda){
int i = submodels.length-1;
for(;i>=0;--i)
if(submodels[i].lambda_value == lambda) {
if(submodels[i].norm_beta == null)
return beta(); // not normalized
double [] res = MemoryManager.malloc8d(beta().length);
int k = 0;
for(int j:submodels[i].idxs)
res[j] = submodels[i].norm_beta[k++];
return res;
}
throw new RuntimeException("No submodel for lambda_value = " + lambda);
}
public void addWarning(String w){
final int n = warnings.length;
warnings = Arrays.copyOf(warnings,warnings.length+1);
warnings[n] = w;
}
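// score0: the linear predictor eta is the sum of the coefficients selected by the categorical
// levels (via _catOffsets, optionally skipping level 0 of each factor), plus the dot product of
// the numeric columns with their coefficients, plus the intercept; mu = linkInv(eta) is the
// prediction, and for binomial models preds[0] is thresholded while preds[1]/preds[2] hold the
// class probabilities.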
@Override protected float[] score0(double[] data, float[] preds) {
double eta = 0.0;
final double [] b = beta();
if(!useAllFactorLevels){ // skip level 0 of all factors
for(int i = 0; i < data_info._catOffsets.length-1; ++i) if(data[i] != 0)
eta += b[data_info._catOffsets[i] + (int)(data[i]-1)];
} else { // do not skip any levels!
for(int i = 0; i < data_info._catOffsets.length-1; ++i)
eta += b[data_info._catOffsets[i] + (int)data[i]];
}
final int noff = data_info.numStart() - data_info._cats;
for(int i = data_info._cats; i < data.length; ++i)
eta += b[noff+i]*data[i];
if(data_info._hasIntercept)
eta += b[b.length-1]; // add has_intercept
double mu = glm.linkInv(eta);
preds[0] = (float)mu;
if( glm.family == Family.binomial ) { // threshold for prediction
if(Double.isNaN(mu)){
preds[0] = Float.NaN;
preds[1] = Float.NaN;
preds[2] = Float.NaN;
} else {
preds[0] = (mu >= threshold ? 1 : 0);
preds[1] = 1.0f - (float)mu; // class 0
preds[2] = (float)mu; // class 1
}
}
return preds;
}
public final int ncoefs() {return beta().length;}
@Override public int nclasses(){ return glm.family == Family.binomial?2:1;}
@Override
public String [] classNames(){
String [] res = super.classNames();
if(glm.getFamily() == Family.binomial && res == null)
res = new String[]{"0","1"};
return res;
}
// public static void setAndTestValidation(final H2OCountedCompleter cmp,final Key modelKey, final double lambda, final GLMValidation val){
// if(cmp != null)cmp.addToPendingCount(1);
// new TAtomic<GLMModel>(cmp){
// @Override
// public GLMModel atomic(GLMModel old) {
// if(old == null)return old;
// old.submodels = old.submodels.clone();
// Submodel sm = old.submodelForLambda(lambda);
// if(sm == null)return old;
// if(val instanceof GLMXValidation)
// sm.xvalidation = (GLMXValidation)val;
// else
// sm.validation = val;
// old.pickBestModel(false);
// return old;
// }
// }.fork(modelKey);
// }
public static class GLMValidationTask<T extends GLMValidationTask<T>> extends MRTask2<T> {
protected final GLMModel _model;
protected GLMValidation _res;
public final double _lambda;
public boolean _improved;
Key _jobKey;
public static Key makeKey(){return Key.make("__GLMValidation_" + Key.make().toString());}
public GLMValidationTask(GLMModel model, double lambda){this(model,lambda,null);}
public GLMValidationTask(GLMModel model, double lambda, H2OCountedCompleter completer){super(completer); _lambda = lambda; _model = model;}
@Override public void map(Chunk [] chunks){
_res = new GLMValidation(null,_model.glm,_model.rank(_lambda));
final int nrows = chunks[0]._len;
double [] row = MemoryManager.malloc8d(_model._names.length);
float [] preds = MemoryManager.malloc4f(_model.glm.family == Family.binomial?3:1);
OUTER:
for(int i = 0; i < nrows; ++i){
if(chunks[chunks.length-1].isNA0(i))continue;
for(int j = 0; j < chunks.length-1; ++j){
if(chunks[j].isNA0(i))continue OUTER;
row[j] = chunks[j].at0(i);
}
_model.score0(row, preds);
double response = chunks[chunks.length-1].at0(i);
_res.add(response, _model.glm.family == Family.binomial?preds[2]:preds[0]);
}
}
@Override public void reduce(GLMValidationTask gval){_res.add(gval._res);}
@Override public void postGlobal(){
_res.computeAIC();
_res.computeAUC();
}
}
// use general score to reduce number of possible different code paths
public static class GLMXValidationTask extends GLMValidationTask<GLMXValidationTask>{
protected final GLMModel [] _xmodels;
protected GLMValidation [] _xvals;
long _nobs;
final float [] _thresholds;
public static Key makeKey(){return Key.make("__GLMValidation_" + Key.make().toString());}
public GLMXValidationTask(GLMModel mainModel,double lambda, GLMModel [] xmodels, float [] thresholds){this(mainModel,lambda,xmodels,thresholds,null);}
public GLMXValidationTask(GLMModel mainModel,double lambda, GLMModel [] xmodels, float [] thresholds, final H2OCountedCompleter completer){
super(mainModel, lambda,completer);
_xmodels = xmodels;
_thresholds = thresholds;
}
@Override public void map(Chunk [] chunks){
_xvals = new GLMValidation[_xmodels.length];
for(int i = 0; i < _xmodels.length; ++i)
_xvals[i] = new GLMValidation(null,_xmodels[i].glm,_xmodels[i].rank(),_thresholds);
final int nrows = chunks[0]._len;
double [] row = MemoryManager.malloc8d(_xmodels[0]._names.length);
float [] preds = MemoryManager.malloc4f(_xmodels[0].glm.family == Family.binomial?3:1);
OUTER:
for(int i = 0; i < nrows; ++i){
if(chunks[chunks.length-1].isNA0(i))continue;
for(int j = 0; j < chunks.length-1; ++j){
if(chunks[j].isNA0(i))continue OUTER;
row[j] = chunks[j].at0(i);
}
++_nobs;
final int mid = i % _xmodels.length;
final GLMModel model = _xmodels[mid];
final GLMValidation val = _xvals[mid];
model.score0(row, preds);
double response = chunks[chunks.length-1].at80(i);
val.add(response, model.glm.family == Family.binomial?preds[2]:preds[0]);
}
}
@Override public void reduce(GLMXValidationTask gval){
_nobs += gval._nobs;
for(int i = 0; i < _xvals.length; ++i)
_xvals[i].add(gval._xvals[i]);}
@Override public void postGlobal() {
H2OCountedCompleter cmp = (H2OCountedCompleter)getCompleter();
if(cmp != null)cmp.addToPendingCount(_xvals.length + 1);
for (int i = 0; i < _xvals.length; ++i) {
_xvals[i].computeAIC();
_xvals[i].computeAUC();
_xvals[i].nobs = _nobs - _xvals[i].nobs;
_xvals[i].null_deviance = _xmodels[i].null_validation.residual_deviance;
GLMModel.setXvalidation(cmp, _xmodels[i]._key, _lambda, _xvals[i]);
}
GLMXValidation xval = new GLMXValidation(_model, _xmodels, _xvals, _lambda, _nobs,_thresholds);
xval.null_deviance = _model.null_validation.residual_deviance;
GLMModel.setXvalidation(cmp, _model._key, _lambda, xval);
}
}
public GLMParams getParams() {
return glm;
}
@Override
public String toString(){
return ("GLM Model (key=" + _key + " , trained on " + _dataKey + ", family = " + glm.family + ", link = " + glm.link + ", #iterations = " + iteration() + ")");
}
public int rank() {return rank(submodels[best_lambda_idx].lambda_value);}
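  // Returns the index of the submodel whose lambda matches (exactly, or within 1e-5 relative tolerance),
  // or a negative encoding of the insertion point when there is no match: returned = -(insertion index) - 1,
  // which callers such as setSubmodel decode via id = -id - 1.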
public int submodelIdForLambda(double lambda){
if(lambda > lambda_max)lambda = lambda_max;
int i = submodels.length-1;
for(;i >=0; --i)
      // exact equality needed to cover the lambda == 0 case (0.0/0.0 is NaN in Java, which fails the relative-tolerance test below)
if(lambda == submodels[i].lambda_value || Math.abs(submodels[i].lambda_value - lambda)/lambda < 1e-5)
return i;
else if(submodels[i].lambda_value > lambda)
return -i-2;
return -1;
}
public Submodel submodelForLambda(double lambda){
if(lambda > lambda_max)
return submodels[0];
int i = submodelIdForLambda(lambda);
return i < 0?null:submodels[i];
}
public int rank(double lambda) {
Submodel sm = submodelForLambda(lambda);
if(sm == null)return 0;
return submodelForLambda(lambda).rank;
}
public void setValidation(GLMValidation val ){
submodels[submodels.length-1].validation = val;
}
public void setSubmodelIdx(int l){
best_lambda_idx = l;
threshold = submodels[l].validation == null?0.5:submodels[l].validation.best_threshold;
if(global_beta == null) global_beta = MemoryManager.malloc8d(this.coefficients_names.length);
else Arrays.fill(global_beta,0);
int j = 0;
for(int i:submodels[l].idxs)
global_beta[i] = submodels[l].beta[j++];
}
  /**
   * Get beta coefficients in a map indexed by coefficient name.
   * @return map from coefficient name to coefficient value
   */
public HashMap<String,Double> coefficients(){
HashMap<String, Double> res = new HashMap<String, Double>();
final double [] b = beta();
if(b != null) for(int i = 0; i < b.length; ++i)res.put(coefficients_names[i],b[i]);
return res;
}
private String [] coefNames(){
return Utils.append(data_info.coefNames(),new String[]{"Intercept"});
}
public VarImp varimp() {
return this.variable_importances;
}
protected void maybeComputeVariableImportances() {
GLM2 params = get_params();
this.variable_importances = null;
final double[] b = beta();
if (params.variable_importances && null != b) {
      // Warn if we may be returning results that do not include an important (base) level...
      if (! params.use_all_factor_levels)
        this.addWarning("Variable Importance may be missing important variables: because use_all_factor_levels is off, the importance of base categorical levels will NOT be included.");
float[] coefs_abs_value = new float[b.length - 1]; // Don't include the Intercept
String[] names = new String[b.length - 1];
for (int i = 0; i < b.length - 1; ++i) {
coefs_abs_value[i] = (float)Math.abs(b[i]);
names[i] = coefficients_names[i];
}
this.variable_importances = new VarImp(coefs_abs_value, names);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/glm/GLMModelUpdate.java
|
package hex.glm;
import water.*;
import water.api.DocGen;
import water.api.Request;
public class GLMModelUpdate extends Request2 {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="GLM Model Key", required=true, filter=GLMModelKeyFilter.class)
Key _modelKey;
class GLMModelKeyFilter extends H2OKey { public GLMModelKeyFilter() { super("",true); } }
@API(help="GLM Model")
GLMModel glm_model;
@API(help = "decision threshold",filter=Default.class)
double threshold;
@API(help="lambda_value to be used in scoring",filter=Default.class)
double lambda = Double.NaN;
public static String link(String txt, Key model) {return link(txt,model,Double.NaN);}
public static String link(String txt, Key model, double lambda) {
return "<a href='GLMModelUpdate.html?_modelKey=" + model + "&lambda=" + lambda + "'>" + txt + "</a>";
}
public static Response redirect(Request req, Key modelKey) {
return Response.redirect(req, "/2/GLMModelUpdate", "_modelKey", modelKey);
}
public static Response redirect(Request req, Key modelKey, double threshold, int lambdaId) {
return Response.redirect(req, "/2/GLMModelUpdate", "_modelKey", modelKey,"threshold",threshold, "lambda_id", lambdaId);
}
@Override public boolean toHTML(StringBuilder sb){
new GLMModelView(glm_model).toHTML(sb);
return true;
}
@Override protected Response serve() {
Value v = DKV.get(_modelKey);
if(v != null){
glm_model = v.get();
glm_model.write_lock(null);
int id = 0;
for(; id < glm_model.submodels.length; ++id)
if(glm_model.submodels[id].lambda_value == lambda){
threshold = glm_model.submodels[id].validation.best_threshold;
break;
}
glm_model.setSubmodelIdx(id);
glm_model.update(null);
glm_model.unlock(null);
}
return GLMModelView.redirect(this,_modelKey);
}
// @Override protected Response serve() {
// Value v = DKV.get(_modelKey);
// if(v == null)
// return Response.poll(this, 0, 100, "_modelKey", _modelKey.toString());
// glm_model = v.get();
// if(Double.isNaN(lambda_value))lambda_value = glm_model.lambdas[glm_model.best_lambda_idx];
// Job j;
// if((j = Job.findJob(glm_model.job_key)) != null && j.exception != null)
// return Response.error(j.exception);
// if(DKV.get(glm_model.job_key) != null && j != null)
// return Response.poll(this, (int) (100 * j.progress()), 100, "_modelKey", _modelKey.toString());
// else
// return Response.done(this);
// }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/glm/GLMModelView.java
|
package hex.glm;
import hex.glm.GLMModel.Submodel;
import hex.glm.GLMParams.Family;
import hex.glm.GLMValidation.GLMXValidation;
import water.*;
import water.api.*;
import water.util.RString;
import water.util.UIUtils;
import java.text.DecimalFormat;
public class GLMModelView extends Request2 {
public GLMModelView(){}
public GLMModelView(GLMModel m){glm_model = m;}
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="GLM Model Key", required=true, filter=GLMModelKeyFilter.class)
Key _modelKey;
@API(help="Lambda value which should be displayed as main model", required=false, filter=Default.class)
double lambda = Double.NaN;
class GLMModelKeyFilter extends H2OKey { public GLMModelKeyFilter() { super("",true); } }
@API(help="GLM Model")
GLMModel glm_model;
@API(help="job key",required=false, filter=Default.class)
Key job_key;
public static String link(String txt, Key model) {return link(txt,model,Double.NaN);}
public static String link(String txt, Key model, double lambda) {
return "<a href='GLMModelView.html?_modelKey=" + model + "&lambda=" + lambda + "'>" + txt + "</a>";
}
public static Response redirect(Request req, Key modelKey) {
return Response.redirect(req, "/2/GLMModelView", "_modelKey", modelKey);
}
public static Response redirect(Request req, Key modelKey, Key job_key) {
return Response.redirect(req, "/2/GLMModelView", "_modelKey", modelKey,"job_key",job_key);
}
@Override public boolean toHTML(StringBuilder sb){
// if(title != null && !title.isEmpty())DocGen.HTML.title(sb,title);
if(glm_model == null){
sb.append("No model yet...");
return true;
}
glm_model.get_params().makeJsonBox(sb);
DocGen.HTML.paragraph(sb,"Model Key: "+glm_model._key);
if(glm_model.submodels != null) {
DocGen.HTML.paragraph(sb,water.api.GLMPredict.link(glm_model._key,lambda,"Predict!"));
DocGen.HTML.paragraph(sb,UIUtils.qlink(SaveModel.class, "model", glm_model._key, "Save model"));
}
String succ = (glm_model.warnings == null || glm_model.warnings.length == 0)?"alert-success":"alert-warning";
sb.append("<div class='alert " + succ + "'>");
pprintTime(sb.append(glm_model.iteration() + " iterations computed in "),glm_model.run_time);
if(glm_model.warnings != null && glm_model.warnings.length > 0){
sb.append("<ul>");
for(String w:glm_model.warnings)sb.append("<li><b>Warning:</b>" + w + "</li>");
sb.append("</ul>");
}
sb.append("</div>");
if(!Double.isNaN(lambda) && lambda != glm_model.submodels[glm_model.best_lambda_idx].lambda_value){ // show button to permanently set lambda_value to this value
sb.append("<div class='alert alert-warning'>\n");
sb.append(GLMModelUpdate.link("Set lambda_value to current value!",_modelKey,lambda) + "\n");
sb.append("</div>");
}
sb.append("<h4>Parameters</h4>");
parm(sb,"family",glm_model.glm.family);
parm(sb,"link",glm_model.glm.link);
parm(sb,"ε<sub>β</sub>",glm_model.beta_eps);
parm(sb,"α",glm_model.alpha);
if(!Double.isNaN(glm_model.lambda_max))
parm(sb,"λ<sub>max</sub>",DFORMAT2.format(glm_model.lambda_max));
parm(sb,"λ",DFORMAT2.format(lambda));
if(glm_model.submodels.length > 1){
sb.append("\n<table class='table table-bordered table-condensed'>\n");
StringBuilder firstRow = new StringBuilder("\t<tr><th>λ</th>\n");
StringBuilder secondRow = new StringBuilder("\t<tr><th>nonzeros</th>\n");
StringBuilder thirdRow = new StringBuilder("\t<tr><th>Deviance Explained</th>\n");
StringBuilder fourthRow = new StringBuilder("\t<tr><th>" + (glm_model.glm.family == Family.binomial?"AUC":"AIC") + "</th>\n");
for(int i = 0; i < glm_model.submodels.length; ++i){
final Submodel sm = glm_model.submodels[i];
if(sm.validation == null)break;
if (glm_model.submodels[i].lambda_value == lambda)
firstRow.append("\t\t<td><b>" + DFORMAT2.format(glm_model.submodels[i].lambda_value) + "</b></td>\n");
else
firstRow.append("\t\t<td>" + link(DFORMAT2.format(glm_model.submodels[i].lambda_value), glm_model._key, glm_model.submodels[i].lambda_value) + "</td>\n");
        secondRow.append("\t\t<td>" + Math.max(0,(sm.rank - 1)) + "</td>\n"); // rank counts the intercept, hence the -1; the intercept coefficient can be 0 too, so clamp at 0 to avoid showing -1
if(sm.xvalidation != null){
thirdRow.append("\t\t<td>" + DFORMAT.format(1 - sm.xvalidation.residual_deviance / glm_model.null_validation.residualDeviance()) + "<sub>x</sub>(" + DFORMAT.format(1 - sm.validation.residual_deviance /glm_model.null_validation.residualDeviance()) + ")" + "</td>\n");
fourthRow.append("\t\t<td>" + DFORMAT.format(glm_model.glm.family == Family.binomial ? sm.xvalidation.auc : sm.xvalidation.aic) + "<sub>x</sub>("+ DFORMAT.format(glm_model.glm.family == Family.binomial ? sm.validation.auc : sm.validation.aic) + ")</td>\n");
} else {
thirdRow.append("\t\t<td>" + DFORMAT.format(1 - sm.validation.residual_deviance / glm_model.null_validation.residualDeviance()) + "</td>\n");
fourthRow.append("\t\t<td>" + DFORMAT.format(glm_model.glm.family == Family.binomial ? sm.validation.auc : sm.validation.aic) + "</td>\n");
}
}
sb.append(firstRow.append("\t</tr>\n"));
sb.append(secondRow.append("\t</tr>\n"));
sb.append(thirdRow.append("\t</tr>\n"));
sb.append(fourthRow.append("\t</tr>\n"));
sb.append("</table>\n");
}
if(glm_model.submodels.length == 0)return true;
Submodel sm = glm_model.submodels[glm_model.best_lambda_idx];
if(!Double.isNaN(lambda) && glm_model.submodels[glm_model.best_lambda_idx].lambda_value != lambda){
int ii = 0;
sm = glm_model.submodels[0];
while(glm_model.submodels[ii].lambda_value != lambda && ++ii < glm_model.submodels.length)
sm = glm_model.submodels[ii];
if(ii == glm_model.submodels.length)throw new IllegalArgumentException("Unexpected value of lambda '" + lambda + "'");
}
if(glm_model.submodels != null)
coefs2html(sm,sb);
if(sm.xvalidation != null)
val2HTML(sm,sm.xvalidation,sb);
else if(sm.validation != null)
val2HTML(sm,sm.validation, sb);
// Variable importance
if (glm_model.varimp() != null) {
glm_model.varimp().toHTML(glm_model, sb);
}
return true;
}
public void val2HTML(Submodel sm,GLMValidation val, StringBuilder sb) {
String title = (val instanceof GLMXValidation)?"Cross Validation":"Validation";
sb.append("<h4>" + title + "</h4>");
sb.append("<table class='table table-striped table-bordered table-condensed'>");
final long null_dof = val.nobs-1, res_dof = Math.max(0,val.nobs-sm.rank);
sb.append("<tr><th>Degrees of freedom:</th><td>" + null_dof + " total (i.e. Null); " + res_dof + " Residual</td></tr>");
sb.append("<tr><th>Null Deviance</th><td>" + glm_model.null_validation.residualDeviance() + "</td></tr>");
sb.append("<tr><th>Residual Deviance</th><td>" + val.residual_deviance + "</td></tr>");
sb.append("<tr><th>AIC</th><td>" + val.aic() + "</td></tr>");
if(glm_model.glm.family == Family.binomial)sb.append("<tr><th>AUC</th><td>" + DFORMAT.format(val.auc()) + "</td></tr>");
sb.append("</table>");
if(glm_model.glm.family == Family.binomial)new AUC(val._cms,val.thresholds,glm_model._domains[glm_model._domains.length-1]).toHTML(sb);
if(val instanceof GLMXValidation){
GLMXValidation xval = (GLMXValidation)val;
// add links to the xval models
sb.append("<h4>Cross Validation Models</h4>");
sb.append("<table class='table table-bordered table-condensed'>");
sb.append("<tr><th>Model</th><th>nonzeros</th>");
sb.append("<th>" + ((glm_model.glm.family == Family.binomial)?"AUC":"AIC") + "</th>");
sb.append("<th>Deviance Explained</th>");
sb.append("</tr>");
int i = 0;
for(Key k:xval.xval_models){
Value v = DKV.get(k);
if(v == null)continue;
GLMModel m = v.get();
sb.append("<tr>");
sb.append("<td>" + GLMModelView.link("Model " + ++i, k) + "</td>");
sb.append("<td>" + (m.rank()-1) + "</td>");
sb.append("<td>" + ((glm_model.glm.family == Family.binomial)?format(m.auc()):format(m.aic())) + "</td>");
sb.append("<td>" + format(m.devExplained()) + "</td>");
sb.append("</tr>");
}
sb.append("</table>");
}
}
private static final DecimalFormat DFORMAT3 = new DecimalFormat("##.##");
private static String format(double d){
return DFORMAT3.format(0.01*(int)(100*d));
}
private static void parm( StringBuilder sb, String x, Object... y ) {
sb.append("<span><b>").append(x).append(": </b>").append(y[0]).append("</span> ");
}
private static final DecimalFormat DFORMAT = new DecimalFormat("###.###");
private static final DecimalFormat DFORMAT2 = new DecimalFormat("0.##E0");
private void coefs2html(final Submodel sm,StringBuilder sb){
StringBuilder names = new StringBuilder();
StringBuilder equation = new StringBuilder();
StringBuilder vals = new StringBuilder();
StringBuilder normVals = sm.norm_beta == null?null:new StringBuilder();
int [] sortedIds = new int[sm.beta.length];
for(int i = 0; i < sortedIds.length; ++i)
sortedIds[i] = i;
final double [] b = sm.norm_beta == null?sm.beta:sm.norm_beta;
    // now sort the indices by absolute value from biggest to smallest (but keep the intercept last)
int r = sortedIds.length-1;
for(int i = 1; i < r; ++i){
for(int j = 1; j < r-i;++j){
if(Math.abs(b[sortedIds[j-1]]) < Math.abs(b[sortedIds[j]])){
int jj = sortedIds[j];
sortedIds[j] = sortedIds[j-1];
sortedIds[j-1] = jj;
}
}
}
String [] cNames = glm_model.coefficients_names;
boolean first = true;
int j = 0;
for(int i:sortedIds){
names.append("<th>" + cNames[sm.idxs[i]] + "</th>");
vals.append("<td>" + sm.beta[i] + "</td>");
if(first){
equation.append(DFORMAT.format(sm.beta[i]));
first = false;
} else {
equation.append(sm.beta[i] > 0?" + ":" - ");
equation.append(DFORMAT.format(Math.abs(sm.beta[i])));
}
if(i < (cNames.length-1))
equation.append("*x[" + cNames[i] + "]");
if(sm.norm_beta != null) normVals.append("<td>" + sm.norm_beta[i] + "</td>");
++j;
}
sb.append("<h4>Equation</h4>");
RString eq = null;
switch( glm_model.glm.link ) {
case identity: eq = new RString("y = %equation"); break;
case logit: eq = new RString("y = 1/(1 + Math.exp(-(%equation)))"); break;
      case log: eq = new RString("y = Math.exp(%equation)"); break;
case inverse: eq = new RString("y = 1/(%equation)"); break;
case tweedie: eq = new RString("y = (%equation)^(1 - )"); break;
default: eq = new RString("equation display not implemented"); break;
}
eq.replace("equation",equation.toString());
sb.append("<div style='width:100%;overflow:scroll;'>");
sb.append("<div><code>" + eq + "</code></div>");
sb.append("<h4>Coefficients</h4><table class='table table-bordered table-condensed'>");
sb.append("<tr>" + names.toString() + "</tr>");
sb.append("<tr>" + vals.toString() + "</tr>");
sb.append("</table>");
if(sm.norm_beta != null){
sb.append("<h4>Normalized Coefficients</h4>" +
"<table class='table table-bordered table-condensed'>");
sb.append("<tr>" + names.toString() + "</tr>");
sb.append("<tr>" + normVals.toString() + "</tr>");
sb.append("</table>");
}
sb.append("</div>");
}
private void pprintTime(StringBuilder sb, long t){
long hrs = t / (1000*60*60);
long minutes = (t -= 1000*60*60*hrs)/(1000*60);
long seconds = (t -= 1000*60*minutes)/1000;
t -= 1000*seconds;
if(hrs > 0)sb.append(hrs + "hrs ");
if(hrs > 0 || minutes > 0)sb.append(minutes + "min ");
    if(hrs > 0 || minutes > 0 || seconds > 0)sb.append(seconds + "sec ");
sb.append(t + "msec");
}
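  // Worked example (illustrative comment only): pprintTime(sb, 3723456) appends "1hrs 2min 3sec 456msec",
  // since 3723456 ms = 1 h + 2 min + 3 s + 456 ms.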
// Job jjob = null;
// if( job_key != null )
// jjob = Job.findJob(job_key);
// if( jjob != null && jjob.exception != null )
// return Response.error(jjob.exception == null ? "cancelled" : jjob.exception);
// if( jjob == null || jjob.end_time > 0 || jjob.cancelled() )
// return jobDone(jjob, destination_key);
// return jobInProgress(jjob, destination_key);
@Override protected Response serve() {
Job jjob = ( job_key != null )?Job.findJob(job_key):null;
if( jjob != null && jjob.exception != null )
return Response.error(jjob.exception == null ? "cancelled" : jjob.exception);
Value v = DKV.get(_modelKey);
if(v != null){
glm_model = v.get();
if(Double.isNaN(lambda) && glm_model.submodels.length != 0)
lambda = glm_model.submodels[glm_model.best_lambda_idx].lambda_value;
}
if( jjob == null || jjob.end_time > 0 || jjob.isCancelledOrCrashed() )
return Response.done(this);
return Response.poll(this,(int)(100*jjob.progress()),100,"_modelKey",_modelKey.toString());
}
// @Override protected Response serve() {
// Value v = DKV.get(_modelKey);
// if(v == null)
// return Response.poll(this, 0, 100, "_modelKey", _modelKey.toString());
// glm_model = v.get();
// if(Double.isNaN(lambda_value))lambda_value = glm_model.lambdas[glm_model.best_lambda_idx];
// Job j;
// if((j = Job.findJob(glm_model.job_key)) != null && j.exception != null)
// return Response.error(j.exception);
// if(DKV.get(glm_model.job_key) != null && j != null)
// return Response.poll(this, (int) (100 * j.progress()), 100, "_modelKey", _modelKey.toString());
// else
// return Response.done(this);
// }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/glm/GLMParams.java
|
package hex.glm;
import hex.FrameTask;
import water.H2O;
import water.Iced;
import water.MemoryManager;
import water.api.DocGen;
import water.api.Request.API;
public final class GLMParams extends Iced {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="family")
final Family family;
@API(help="link")
final Link link;
@API(help="tweedie variance power")
final double tweedie_variance_power;
@API(help="tweedie link power")
final double tweedie_link_power;
public GLMParams(Family f){this(f,0,f.defaultLink,0);}
public GLMParams(Family f,Link l){this(f,0,l,0);}
public GLMParams(Family f, double twVar){this(f,twVar,f.defaultLink,1-twVar);}
public GLMParams(Family f, double twVar, Link l, double twLnk){
family = f;
if(l == Link.family_default)
link = family.defaultLink;
else { // check we have compatible link
link = l;
switch(family){
case gaussian:
if(link != Link.identity && link != Link.log && link != Link.inverse)
throw new IllegalArgumentException("Incompatible link function for selected family. Only identity, log and inverse links are allowed for family=gaussian.");
break;
case binomial:
if(link != Link.logit && link != Link.log)
throw new IllegalArgumentException("Incompatible link function for selected family. Only logit and log links are allowed for family=binomial.");
break;
case poisson:
if(link != Link.log && link != Link.identity)
throw new IllegalArgumentException("Incompatible link function for selected family. Only log and identity links are allowed for family=poisson.");
break;
case gamma:
if(link != Link.inverse && link != Link.log && link != Link.identity)
throw new IllegalArgumentException("Incompatible link function for selected family. Only inverse, log and identity links are allowed for family=gamma.");
break;
case tweedie:
if(link != Link.tweedie)
throw new IllegalArgumentException("Incompatible link function for selected family. Only tweedie link allowed for family=tweedie.");
break;
default:
H2O.fail();
}
}
tweedie_variance_power = twVar;
tweedie_link_power = twLnk;
}
public final double variance(double mu){
switch( family ) {
case gaussian:
return 1;
case binomial:
// assert (0 <= mu && mu <= 1) : "mu out of bounds<0,1>:" + mu;
return mu * (1 - mu);
case poisson:
return mu;
case gamma:
return mu * mu;
case tweedie:
return Math.pow(mu, tweedie_variance_power);
default:
throw new RuntimeException("unknown family Id " + this);
}
}
public final boolean canonical(){
switch(family){
case gaussian:
return link == Link.identity;
case binomial:
return link == Link.logit;
case poisson:
return link == Link.log;
case gamma:
return false; //return link == Link.inverse;
case tweedie:
return false;
default:
throw H2O.unimpl();
}
}
public final double mustart(double y, double ymu) {
switch( family ) {
case gaussian:
case binomial:
case poisson:
return ymu;
case gamma:
return y;
case tweedie:
return y + (y==0?0.1:0);
default:
throw new RuntimeException("unimplemented");
}
}
public final double deviance(double yr, double ym){
switch(family){
case gaussian:
return (yr - ym) * (yr - ym);
case binomial:
return 2 * ((y_log_y(yr, ym)) + y_log_y(1 - yr, 1 - ym));
case poisson:
if( yr == 0 ) return 2 * ym;
return 2 * ((yr * Math.log(yr / ym)) - (yr - ym));
case gamma:
if( yr == 0 ) return -2;
return -2 * (Math.log(yr / ym) - (yr - ym) / ym);
case tweedie:
// Theory of Dispersion Models: Jorgensen
// pg49: $$ d(y;\mu) = 2 [ y \cdot \left(\tau^{-1}(y) - \tau^{-1}(\mu) \right) - \kappa \{ \tau^{-1}(y)\} + \kappa \{ \tau^{-1}(\mu)\} ] $$
// pg133: $$ \frac{ y^{2 - p} }{ (1 - p) (2-p) } - \frac{y \cdot \mu^{1-p}}{ 1-p} + \frac{ \mu^{2-p} }{ 2 - p }$$
double one_minus_p = 1 - tweedie_variance_power;
double two_minus_p = 2 - tweedie_variance_power;
return Math.pow(yr, two_minus_p) / (one_minus_p * two_minus_p) - (yr * (Math.pow(ym, one_minus_p)))/one_minus_p + Math.pow(ym, two_minus_p)/two_minus_p;
default:
throw new RuntimeException("unknown family " + family);
}
}
public final double link(double x) {
switch( link ) {
case identity:
return x;
case logit:
assert 0 <= x && x <= 1:"x out of bounds, expected <0,1> range, got " + x;
return Math.log(x / (1 - x));
case log:
return Math.log(x);
case inverse:
double xx = (x < 0) ? Math.min(-1e-5, x) : Math.max(1e-5, x);
return 1.0 / xx;
case tweedie:
return Math.pow(x, tweedie_link_power);
default:
throw new RuntimeException("unknown link function " + this);
}
}
public final double linkDeriv(double x) {
double res = 0;
switch( link ) {
case logit: res = +1.0 / (x * (1 - x)); break;
case identity: res = +1.0 ; break;
case log: res = +1.0 / x ; break;
case inverse: res = -1.0 / (x * x) ; break;
case tweedie:
res = tweedie_link_power * Math.pow(x, tweedie_link_power - 1);
break;
default:
throw H2O.unimpl();
}
if(res > 1e5)
res = 1e5;
else if(res < -1e5)
res = -1e5;
return res;
}
public final double linkInv(double x) {
switch( link ) {
case identity:
return x;
case logit:
return 1.0 / (Math.exp(-x) + 1.0);
case log:
return Math.exp(x);
case inverse:
double xx = (x < 0) ? Math.min(-1e-5, x) : Math.max(1e-5, x);
return 1.0 / xx;
case tweedie:
return Math.pow(x, 1/tweedie_link_power);
default:
throw new RuntimeException("unexpected link function id " + this);
}
}
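  // Example values for the logit case of link/linkInv above (illustrative comment only):
  // linkInv(0) == 0.5 and link(0.5) == 0, i.e. the two functions are inverses of each other on (0,1).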
public final double linkInvDeriv(double x) {
switch( link ) {
case identity:
return 1;
case logit:
double g = Math.exp(-x);
double gg = (g + 1) * (g + 1);
return g / gg;
case log:
//return (x == 0)?MAX_SQRT:1/x;
return Math.max(Math.exp(x), Double.MIN_NORMAL);
case inverse:
double xx = (x < 0) ? Math.min(-1e-5, x) : Math.max(1e-5, x);
return -1 / (xx * xx);
case tweedie:
double vp = (1. - tweedie_link_power) / tweedie_link_power;
return (1/tweedie_link_power) * Math.pow(x, vp);
default:
throw new RuntimeException("unexpected link function id " + this);
}
}
// supported families
public enum Family {
gaussian(Link.identity), binomial(Link.logit), poisson(Link.log),
gamma(Link.inverse), tweedie(Link.tweedie);
public final Link defaultLink;
Family(Link link){defaultLink = link;}
}
public static enum Link {family_default, identity, logit, log,inverse,tweedie;}
// helper function
static final double y_log_y(double y, double mu) {
if(y == 0)return 0;
if(mu < Double.MIN_NORMAL) mu = Double.MIN_NORMAL;
return y * Math.log(y / mu);
}
public Family getFamily() {
return family;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/glm/GLMProgress.java
|
package hex.glm;
import water.*;
import water.api.*;
public class GLMProgress extends Progress2 {
@Override public boolean toHTML(StringBuilder sb){
Value v = DKV.get(destination_key);
if(v == null)return true;
GLMModel m = v.get();
return new GLMModelView(m).toHTML(sb);
}
/** Return {@link Response} for finished job. */
@Override
protected Response jobDone(final Key dst) {
return GLMModelView.redirect(this, dst);
}
/** Return default progress {@link Response}. */
@Override
protected Response jobInProgress(final Job job, final Key dst) {
progress = job.progress();
return Response.poll(this, (int) (100 * progress), 100, "job_key", job_key.toString(), "destination_key",
destination_key.toString());
}
public static Response redirect(Request req, Key jobkey, Key dest) {
return Response.redirect(req, "/2/GLMProgress", "job_key", jobkey, "destination_key", dest);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/glm/GLMTask.java
|
package hex.glm;
import hex.FrameTask;
import hex.glm.GLMParams.Family;
import hex.gram.Gram;
import java.util.Arrays;
import water.H2O.H2OCountedCompleter;
import water.*;
import water.fvec.Chunk;
import water.util.Utils;
/**
* Contains all GLM related tasks.
*
* @author tomasnykodym
*
*/
public abstract class GLMTask<T extends GLMTask<T>> extends FrameTask<T> {
final protected GLMParams _glm;
public GLMTask(Key jobKey, DataInfo dinfo, GLMParams glm){this(jobKey,dinfo,glm,null);}
public GLMTask(Key jobKey, DataInfo dinfo, GLMParams glm,H2OCountedCompleter cmp){super(jobKey,dinfo,cmp);_glm = glm;}
//helper function to compute eta - i.e. beta * row
protected final double computeEta(final int ncats, final int [] cats, final int nnums, final double [] nums, final double [] beta){
double res = 0;
for(int i = 0; i < ncats; ++i)res += beta[cats[i]];
final int numStart = _dinfo.numStart();
for (int i = 0; i < nnums; ++i) res += nums[i] * beta[numStart + i];
if(_dinfo._hasIntercept)
res += beta[beta.length-1]; // has_intercept
return res;
}
/**
* Helper task to compute precise mean of response and number of observations.
   * (We skip rows with NAs, so we can't use Vec's mean in general.)
*
* @author tomasnykodym
*
*/
static class YMUTask extends FrameTask<YMUTask>{
private long [] _nobs;
protected double [] _ymu;
public double [] _ymin;
public double [] _ymax;
final int _nfolds;
public YMUTask(Key jobKey, DataInfo dinfo,int nfolds) {this(jobKey,dinfo, nfolds, null);}
public YMUTask(Key jobKey, DataInfo dinfo,int nfolds, H2OCountedCompleter cmp) {
super(jobKey,dinfo,cmp);
_nfolds = nfolds;
}
@Override public void chunkInit(){
super.chunkInit();
_ymu = new double[_nfolds+1];
_nobs = new long[_nfolds+1];
_ymax = new double[_nfolds+1];
_ymin = new double[_nfolds+1];
Arrays.fill(_ymax,Double.NEGATIVE_INFINITY);
      Arrays.fill(_ymin,Double.POSITIVE_INFINITY);
}
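    // Per-row update: slot 0 accumulates over all rows; slot i (i >= 1) accumulates over all rows NOT
    // assigned to cross-validation fold i-1 (i.e. the training portion for that fold).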
@Override protected void processRow(long gid, double[] nums, int ncats, int[] cats, double [] responses) {
double response = responses[0];
_ymu[0] += response;
++_nobs[0];
if(response < _ymin[0])_ymin[0] = response;
if(response > _ymax[0])_ymax[0] = response;
for(int i = 1; i < _nfolds+1; ++i) {
if(gid % _nfolds == (i-1))
continue;
_ymu[i] += response;
++_nobs[i];
        if(response < _ymin[i])_ymin[i] = response;
if(response > _ymax[i])_ymax[i] = response;
}
}
@Override public void reduce(YMUTask t){
if(t._nobs[0] != 0){
if(_nobs[0] == 0){
_ymu = t._ymu;
_nobs = t._nobs;
_ymin = t._ymin;
_ymax = t._ymax;
} else {
for(int i = 0; i < _nfolds+1; ++i) {
if(_nobs[i] + t._nobs[i] == 0)continue;
_ymu[i] = _ymu[i] * ((double) _nobs[i] / (_nobs[i] + t._nobs[i])) + t._ymu[i] * t._nobs[i] / (_nobs[i] + t._nobs[i]);
_nobs[i] += t._nobs[i];
if(t._ymax[i] > _ymax[i])
_ymax[i] = t._ymax[i];
if(t._ymin[i] < _ymin[i])
_ymin[i] = t._ymin[i];
}
}
}
}
@Override protected void chunkDone(long n){
for(int i = 0; i < _ymu.length; ++i)
if(_nobs[i] != 0)_ymu[i] /= _nobs[i];
}
public double ymu(){return ymu(-1);}
public long nobs(){return nobs(-1);}
public double ymu(int foldId){return _ymu[foldId+1];}
public long nobs(int foldId){return _nobs[foldId+1];}
}
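  // Line-search helper: builds 32 candidate coefficient vectors interpolated between oldBeta and newBeta
  // (step sizes are consecutive powers of GLM2.LS_STEP) and evaluates each candidate with its own
  // GLMIterationTask (validation + gradient only, no Gram) in a single pass over the data.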
public static class GLMLineSearchTask extends GLMTask<GLMLineSearchTask> {
public GLMLineSearchTask(int noff, Key jobKey, DataInfo dinfo, GLMParams glm, double[] oldBeta, double[] newBeta, double betaEps, double ymu, long nobs, H2OCountedCompleter cmp) {
super(jobKey, dinfo, glm, cmp);
double [][] betas = new double[32][];
double step = GLM2.LS_STEP;
for(int i = 0; i < betas.length; ++i){
betas[i] = MemoryManager.malloc8d(newBeta.length);
for(int j = 0; j < oldBeta.length; ++j)
betas[i][j] = oldBeta[j] + step*(newBeta[j] - oldBeta[j]);
step *= GLM2.LS_STEP;
}
// public GLMIterationTask(Key jobKey, DataInfo dinfo, GLMParams glm, boolean computeGram, boolean validate, boolean computeGradient, double [] beta, double ymu, double reg, float [] thresholds, H2OCountedCompleter cmp) {
_glmts = new GLMIterationTask[betas.length];
for(int i = 0; i < _glmts.length; ++i)
_glmts[i] = new GLMIterationTask(noff, jobKey,dinfo,glm,false,true,true,betas[i],ymu,1.0/nobs,new float[]{0} /* don't really want CMs!*/,null);
}
GLMIterationTask [] _glmts;
@Override public void chunkInit(){
_glmts = _glmts.clone();
for(int i = 0; i < _glmts.length; ++i)
(_glmts[i] = _glmts[i].clone()).chunkInit();
}
@Override public void chunkDone(long n){
for(int i = 0; i < _glmts.length; ++i)
_glmts[i].chunkDone(n);
}
@Override public void postGlobal(){
for(int i = 0; i < _glmts.length; ++i)
_glmts[i].postGlobal();
}
@Override public final void processRow(long gid, final double [] nums, final int ncats, final int [] cats, double [] responses){
for(int i = 0; i < _glmts.length; ++i)
_glmts[i].processRow(gid,nums,ncats,cats,responses);
}
@Override
public void reduce(GLMLineSearchTask git){
for(int i = 0; i < _glmts.length; ++i)
_glmts[i].reduce(git._glmts[i]);
}
}
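  // Fits the intercept of an intercept-plus-offset model by one weighted least-squares step: per row it
  // computes the IRLS weight w and working response z from the current intercept and the (shifted/scaled)
  // offset column, accumulates sum(w*z) and sum(w), and sets _icpt = sum(w*z)/sum(w) in postGlobal.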
public static class GLMInterceptTask extends MRTask2<GLMInterceptTask> {
double _xy;
double _xx;
double _icpt;
final double _sub;
final double _mul;
final GLMParams _glm;
public GLMInterceptTask(GLMParams glm, double sub, double mul, double icpt){
_glm = glm;
_sub = sub;
_mul = mul;
_icpt = icpt ;
}
@Override public void map(Chunk offset, Chunk response){
for(int i = 0; i < offset._len; ++i){
if(offset.isNA0(i) || response.isNA0(i)) continue;
double eta = _icpt;
double y = response.at0(i);
double mu = _glm.linkInv(eta+(offset.at0(i)-_sub)*_mul);
double var = _glm.variance(mu);
if(var < 1e-5) var = 1e-5; // avoid numerical problems with 0 variance
double d = _glm.linkDeriv(mu);
double w = 1.0/(var*d*d);
double z = eta + (y-mu)*d;
_xy += w*z;
_xx += w;
}
}
@Override public void reduce(GLMInterceptTask other){
_xx += other._xx;
_xy += other._xy;
}
@Override public void postGlobal(){
_icpt = _xy/_xx;
}
}
/**
   * One iteration of GLM: computes the weighted Gram matrix X'WX, the vector X'Wz and the scalar z'Wz,
   * where z is the IRLS working response and W the diagonal matrix of IRLS weights.
*
* @author tomasnykodym
*/
public static class GLMIterationTask extends GLMTask<GLMIterationTask> {
final double [] _beta;
protected Gram _gram;
double [] _xy;
protected double [] _grad;
double _yy;
GLMValidation _val; // validation of previous model
protected final double _reg;
long _nobs;
final boolean _validate;
final float [] _thresholds;
float [][] _newThresholds;
int [] _ti;
final boolean _computeGradient;
final boolean _computeGram;
public static final int N_THRESHOLDS = 50;
final int _noffsets;
final double _ymu;
public GLMIterationTask(int noff, Key jobKey, DataInfo dinfo, GLMParams glm, boolean computeGram, boolean validate, boolean computeGradient, double [] beta, double ymu, double reg, float [] thresholds, H2OCountedCompleter cmp) {
super(jobKey, dinfo,glm,cmp);
assert beta != null;
_ymu = ymu;
_noffsets = noff;
assert beta == null || beta.length == (dinfo.fullN()+(_dinfo._hasIntercept?1:0)-_noffsets):"beta.length != dinfo.fullN(), beta = " + beta.length + " dinfo = " + dinfo.fullN() + ", noffsets = " + _noffsets;
_beta = beta;
_reg = reg;
_computeGram = computeGram;
_validate = validate;
assert thresholds != null;
_thresholds = _validate?thresholds:null;
_computeGradient = computeGradient;
assert !_computeGradient || validate;
}
private void sampleThresholds(int yi){
_ti[yi] = (_newThresholds[yi].length >> 2);
try{ Arrays.sort(_newThresholds[yi]);} catch(Throwable t){
System.out.println("got AIOOB during sort?! ary = " + Arrays.toString(_newThresholds[yi]));
return;
} // sort throws AIOOB sometimes!
      for (int i = 0; i < _newThresholds[yi].length; i += 4)
_newThresholds[yi][i >> 2] = _newThresholds[yi][i];
}
@Override public void processRow(long gid, final double [] nums, final int ncats, final int [] cats, double [] responses /* time...*/){
++_nobs;
double off = 0;
for(int i = 1; i <= _noffsets; ++i)
off += nums[nums.length-i];
final double y = responses[0];
assert ((_glm.family != Family.gamma) || y > 0) : "illegal response column, y must be > 0 for family = Gamma.";
assert ((_glm.family != Family.binomial) || (0 <= y && y <= 1)) : "illegal response column, y must be <0,1> for family=Binomial. got " + y;
double w, eta, mu, var, z;
final int numStart = _dinfo.numStart();
double d = 1;
if( _glm.family == Family.gaussian){
w = 1;
z = y;
mu = off + ((_validate || _computeGradient)?computeEta(ncats,cats,nums.length-_noffsets,nums,_beta):0);
} else {
eta = computeEta(ncats, cats,nums.length-_noffsets,nums,_beta);
mu = _glm.linkInv(eta+off);
var = _glm.variance(mu);
if(var < 1e-5) var = 1e-5; // avoid numerical problems with 0 variance
d = _glm.linkDeriv(mu);
z = eta + (y-mu)*d;
w = 1.0/(var*d*d);
}
if(_validate) {
_val.add(y, mu);
if(_glm.family == Family.binomial) {
int yi = (int) y;
if (_ti[yi] == _newThresholds[yi].length)
sampleThresholds(yi);
_newThresholds[yi][_ti[yi]++] = (float) mu;
}
}
assert w >= 0|| Double.isNaN(w) : "invalid weight " + w; // allow NaNs - can occur if line-search is needed!
final double wz = w * z;
_yy += wz * z;
if(_computeGradient || _computeGram){
final double grad = _computeGradient?w*d*(mu-y):0;
for(int i = 0; i < ncats; ++i){
final int ii = cats[i];
if(_computeGradient)_grad[ii] += grad;
_xy[ii] += wz;
}
for(int i = 0; i < nums.length - _noffsets; ++i){
_xy[numStart+i] += wz*nums[i];
if(_computeGradient)
_grad[numStart+i] += grad*nums[i];
}
if(_dinfo._hasIntercept){
_xy[_xy.length-1] += wz;
if(_computeGradient)
_grad[numStart + _dinfo._nums - _noffsets] += grad;
}
if(_computeGram)_gram.addRow(nums, ncats, cats, w);
}
}
@Override protected void chunkInit(){
if(_computeGram)_gram = new Gram(_dinfo.fullN()-_noffsets, _dinfo.largestCat(), _dinfo._nums-_noffsets, _dinfo._cats, _dinfo._hasIntercept);
_xy = MemoryManager.malloc8d(_dinfo.fullN()+(_dinfo._hasIntercept?1:0)-_noffsets); // + 1 is for has_intercept
int rank = 0;
if(_beta != null)for(double d:_beta)if(d != 0)++rank;
if(_validate){
_val = new GLMValidation(null, _glm,rank, _thresholds);
if(_glm.family == Family.binomial){
_ti = new int[2];
_newThresholds = new float[2][N_THRESHOLDS << 2];
}
}
if(_computeGradient)
_grad = MemoryManager.malloc8d(_dinfo.fullN()+ (_dinfo._hasIntercept?1:0) - _noffsets);
if(_glm.family == Family.binomial && _validate){
_ti = new int[2];
_newThresholds = new float[2][4*N_THRESHOLDS];
}
}
@Override protected void chunkDone(long n){
if(_computeGram)_gram.mul(_reg);
for(int i = 0; i < _xy.length; ++i)
_xy[i] *= _reg;
if(_grad != null)
for(int i = 0; i < _grad.length; ++i)
_grad[i] *= _reg;
_yy *= _reg;
if(_validate && _glm.family == Family.binomial) {
_newThresholds[0] = Arrays.copyOf(_newThresholds[0],_ti[0]);
_newThresholds[1] = Arrays.copyOf(_newThresholds[1],_ti[1]);
Arrays.sort(_newThresholds[0]);
Arrays.sort(_newThresholds[1]);
}
}
@Override
public void reduce(GLMIterationTask git){
if(_jobKey == null || Job.isRunning(_jobKey)) {
Utils.add(_xy, git._xy);
if (_computeGram) _gram.add(git._gram);
_yy += git._yy;
_nobs += git._nobs;
if (_validate) _val.add(git._val);
if (_computeGradient) Utils.add(_grad, git._grad);
if(_validate && _glm.family == Family.binomial) {
_newThresholds[0] = Utils.join(_newThresholds[0], git._newThresholds[0]);
_newThresholds[1] = Utils.join(_newThresholds[1], git._newThresholds[1]);
if (_newThresholds[0].length >= 2 * N_THRESHOLDS) {
for (int i = 0; i < 2 * N_THRESHOLDS; i += 2)
_newThresholds[0][i >> 1] = _newThresholds[0][i];
}
if (_newThresholds[0].length > N_THRESHOLDS)
_newThresholds[0] = Arrays.copyOf(_newThresholds[0], N_THRESHOLDS);
if (_newThresholds[1].length >= 2 * N_THRESHOLDS) {
for (int i = 0; i < 2 * N_THRESHOLDS; i += 2)
_newThresholds[1][i >> 1] = _newThresholds[1][i];
}
if (_newThresholds[1].length > N_THRESHOLDS)
_newThresholds[1] = Arrays.copyOf(_newThresholds[1], N_THRESHOLDS);
}
super.reduce(git);
}
}
@Override public void postGlobal(){
if(_val != null){
_val.computeAIC();
_val.computeAUC();
}
}
public double [] gradient(double alpha, double lambda){
final double [] res = _grad.clone();
if(_beta != null)
for(int i = 0; i < res.length-1; ++i) res[i] += (1-alpha)*lambda*_beta[i];
return res;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/glm/GLMValidation.java
|
package hex.glm;
import hex.ConfusionMatrix;
import hex.glm.GLMParams.Family;
import water.Iced;
import water.Key;
import water.api.AUC;
import water.api.DocGen;
import water.api.Request.API;
import water.util.ModelUtils;
/**
* Class for GLMValidation.
*
* @author tomasnykodym
*
*/
public class GLMValidation extends Iced {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="")
double null_deviance;
@API(help="")
double residual_deviance;
@API(help="")
long nobs;
@API(help="best decision threshold")
float best_threshold;
@API(help="")
double auc = Double.NaN;
@API(help="corss validation models")
Key [] xval_models;
@API(help="AIC")
double aic;// internal aic used only for poisson family!
@API(help="internal aic used only for poisson family!")
private double _aic2;// internal aic used only for poisson family!
@API(help="")
final Key dataKey;
@API(help="Decision thresholds used to generare confuion matrices, AUC and to find the best thresholds based on user criteria")
public final float [] thresholds;
@API(help="")
ConfusionMatrix [] _cms;
@API(help="")
final GLMParams _glm;
@API(help="")
final private int _rank;
public static class GLMXValidation extends GLMValidation {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
public GLMXValidation(GLMModel mainModel, GLMModel [] xvalModels, GLMValidation [] xvals, double lambda, long nobs, float [] thresholds) {
super(mainModel._dataKey, mainModel.glm, mainModel.rank(lambda),thresholds);
xval_models = new Key[xvalModels.length];
for(int i = 0; i < xval_models.length; ++i)
xval_models[i] = xvalModels[i]._key;
double t = 0;
for(int i = 0; i < xvalModels.length; ++i){
add(xvals[i]);
t += xvals[i].best_threshold;
}
computeAUC();
computeAIC();
best_threshold = (float)(t/xvalModels.length);
this.nobs = nobs;
}
}
public GLMValidation(Key dataKey, GLMParams glm, int rank){
this(dataKey, glm, rank,glm.family == Family.binomial?ModelUtils.DEFAULT_THRESHOLDS:null);
}
public GLMValidation(Key dataKey, GLMParams glm, int rank, float [] thresholds){
_rank = rank;
_glm = glm;
if(_glm.family == Family.binomial){
_cms = new ConfusionMatrix[thresholds.length];
for(int i = 0; i < _cms.length; ++i)
_cms[i] = new ConfusionMatrix(2);
}
this.dataKey = dataKey;
this.thresholds = thresholds;
}
public static Key makeKey(){return Key.make("__GLMValidation_" + Key.make());}
public void add(double yreal, double ymodel){
if(_glm.family == Family.binomial) // classification -> update confusion matrix too
for(int i = 0; i < thresholds.length; ++i)
_cms[i].add((int)yreal, (ymodel >= thresholds[i])?1:0);
residual_deviance += _glm.deviance(yreal, ymodel);
++nobs;
if( _glm.family == Family.poisson ) { // aic for poisson
long y = Math.round(yreal);
double logfactorial = 0;
for( long i = 2; i <= y; ++i )
logfactorial += Math.log(i);
_aic2 += (yreal * Math.log(ymodel) - logfactorial - ymodel);
}
}
public void add(GLMValidation v){
residual_deviance += v.residual_deviance;
nobs += v.nobs;
_aic2 += v._aic2;
if(_cms == null)_cms = v._cms;
else for(int i = 0; i < _cms.length; ++i)_cms[i].add(v._cms[i]);
}
public final double residualDeviance(){return residual_deviance;}
public final long resDOF(){return nobs - _rank -1;}
public double auc(){return auc;}
public double aic(){return aic;}
protected void computeAIC(){
aic = 0;
switch( _glm.family ) {
case gaussian:
aic = nobs * (Math.log(residual_deviance / nobs * 2 * Math.PI) + 1) + 2;
break;
case binomial:
aic = residual_deviance;
break;
case poisson:
aic = -2*_aic2;
break; // aic is set during the validation task
case gamma:
case tweedie:
aic = Double.NaN;
break;
default:
assert false : "missing implementation for family " + _glm.family;
}
aic += 2*_rank;
}
protected void computeAUC(){
if(_glm.family == Family.binomial){
for(ConfusionMatrix cm:_cms)cm.reComputeErrors();
AUC auc = new AUC(_cms,thresholds,/*TODO: add CM domain*/null);
this.auc = auc.data().AUC();
best_threshold = auc.data().threshold();
}
}
@Override
public String toString(){
return " res_dev = " + residual_deviance + ", auc = " + auc();
}
/**
* Computes area under the ROC curve. The ROC curve is computed from the confusion matrices
* (there is one for each computed threshold). Area under this curve is then computed as a sum
   * of the areas of the trapezoids formed by each pair of neighboring points.
*
* @return estimate of the area under ROC curve of this classifier.
*/
double[] tprs;
double[] fprs;
private double trapeziod_area(double x1, double x2, double y1, double y2) {
double base = Math.abs(x1 - x2);
double havg = 0.5 * (y1 + y2);
return base * havg;
}
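  // Minimal sketch (not part of the original class) of the trapezoid-rule AUC described in the javadoc
  // above, assuming tprs/fprs have been filled with the ROC points in threshold order.
  private double rocAreaSketch() {
    if (tprs == null || fprs == null || tprs.length < 2) return Double.NaN;
    double area = 0;
    for (int i = 1; i < tprs.length; ++i) // sum the trapezoids between consecutive ROC points
      area += trapeziod_area(fprs[i - 1], fprs[i], tprs[i - 1], tprs[i]);
    return area;
  }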
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/glm/LSMSolver.java
|
package hex.glm;
import hex.gram.Gram;
import hex.gram.Gram.Cholesky;
import java.util.Arrays;
import jsr166y.CountedCompleter;
import water.H2O;
import water.Iced;
import water.Key;
import water.MemoryManager;
import dontweave.gson.JsonObject;
/**
* Distributed least squares solvers
* @author tomasnykodym
*
*/
public abstract class LSMSolver extends Iced{
public enum LSMSolverType {
AUTO, // AUTO: (len(beta) < 1000)?ADMM:GenGradient
ADMM,
GenGradient
}
double _lambda;
final double _alpha;
public Key _jobKey;
public String _id;
public LSMSolver(double lambda, double alpha){
_lambda = lambda;
_alpha = alpha;
}
public final double [] grad(Gram gram, double [] beta, double [] xy){
double [] grad = gram.mul(beta);
for(int i = 0; i < grad.length; ++i)
grad[i] -= xy[i];
return grad;
}
public static void subgrad(final double alpha, final double lambda, final double [] beta, final double [] grad){
if(beta == null)return;
final double l1pen = lambda*alpha;
for(int i = 0; i < grad.length-1; ++i) {// add l2 reg. term to the gradient
if(beta[i] < 0) grad[i] -= l1pen;
else if(beta[i] > 0) grad[i] += l1pen;
else grad[i] = LSMSolver.shrinkage(grad[i], l1pen);
}
}
/**
   * @param xy - gaussian: -X'y, binomial: -(1/4)X'(XB + (y-p)/(p*(1-p)))
* @param yy - < y,y > /2
* @param newBeta - resulting vector of coefficients
* @return true if converged
*
*/
public abstract boolean solve(Gram gram, double [] xy, double yy, double [] newBeta);
protected boolean _converged;
public final boolean converged(){return _converged;}
public static class LSMSolverException extends RuntimeException {
public LSMSolverException(String msg){super(msg);}
}
public abstract String name();
protected static double shrinkage(double x, double kappa) {
double sign = x < 0?-1:1;
double sx = x*sign;
if(sx <= kappa) return 0;
return sign*(sx - kappa);
// return Math.max(0, x - kappa) - Math.max(0, -x - kappa);
}
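  // Worked examples of the soft-thresholding operator above (illustrative comment only):
  //   shrinkage( 0.7, 0.5) ==  0.2   (shrunk toward zero by kappa)
  //   shrinkage(-0.3, 0.5) ==  0.0   (|x| <= kappa, zeroed out)
  //   shrinkage(-1.2, 0.5) == -0.7   (sign preserved)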
/**
* Compute least squares objective function value:
* lsm_obj(beta) = 0.5*(y - X*b)'*(y - X*b) + l1 + l2
   *                 = 0.5*y'y - (X'y)'*b + 0.5*b'*X'X*b + l1 + l2
* l1 = alpha*lambda_value*l1norm(beta)
* l2 = (1-alpha)*lambda_value*l2norm(beta)/2
* @param xy: X'y
* @param yy: 0.5*y'y
* @param beta: b (vector of coefficients)
* @param xb: X'X*beta
* @return 0.5*(y - X*b)'*(y - X*b) + l1 + l2
*/
protected double objectiveVal(double[] xy, double yy, double[] beta, double [] xb) {
double res = lsm_objectiveVal(xy,yy,beta, xb);
double l1 = 0, l2 = 0;
for(int i = 0; i < beta.length; ++i){
l1 += Math.abs(beta[i]);
l2 += beta[i]*beta[i];
}
return res + _alpha*_lambda*l1 + 0.5*(1-_alpha)*_lambda*l2;
}
  /**
   * Compute the LSM objective (without the regularization terms).
   *
   *   lsm_obj(beta) = 0.5 * (y - X*b)' * (y - X*b)
   *                 = 0.5 * y'y - (X'y)'*b + 0.5*b'*X'X*b
   *                 = 0.5*yy + b'*(0.5*X'X*b - X'y)
   * @param xy X'y
   * @param yy y'y
   * @param beta vector of coefficients
   * @param xb X'X*beta
   * @return 0.5 * (y - X*b)' * (y - X*b)
   */
protected double lsm_objectiveVal(double[] xy, double yy, double[] beta, double [] xb) {
double res = 0.5*yy;
for(int i = 0; i < xb.length; ++i)
res += beta[i]*(0.5*xb[i] - xy[i]);
return res;
}
static final double[] mul(double[][] X, double[] y, double[] z) {
final int M = X.length;
final int N = y.length;
for( int i = 0; i < M; ++i ) {
z[i] = X[i][0] * y[0];
for( int j = 1; j < N; ++j )
z[i] += X[i][j] * y[j];
}
return z;
}
static final double[] mul(double[] x, double a, double[] z) {
for( int i = 0; i < x.length; ++i )
z[i] = a * x[i];
return z;
}
static final double[] plus(double[] x, double[] y, double[] z) {
for( int i = 0; i < x.length; ++i )
z[i] = x[i] + y[i];
return z;
}
static final double[] minus(double[] x, double[] y, double[] z) {
for( int i = 0; i < x.length; ++i )
z[i] = x[i] - y[i];
return z;
}
static final double[] shrink(double[] x, double[] z, double kappa) {
for( int i = 0; i < x.length - 1; ++i )
z[i] = shrinkage(x[i], kappa);
z[x.length - 1] = x[x.length - 1]; // do not penalize intercept!
return z;
}
public static final class ADMMSolver extends LSMSolver {
//public static final double DEFAULT_LAMBDA = 1e-5;
public static final double DEFAULT_ALPHA = 0.5;
public double [] _wgiven;
public double [] _lb;
public double [] _ub;
public double [] _proximalPenalties;
final public double _gradientEps;
private static final double GLM1_RHO = 1.0e-3;
public double gerr = Double.POSITIVE_INFINITY;
public int iterations = 0;
public long decompTime;
public boolean normalize() {return _lambda != 0;}
final double _lambdaMax;
public double _addedL2;
public ADMMSolver (double lmax, double lambda, double alpha, double gradEps) {
super(lambda,alpha);
_gradientEps = gradEps;
_lambdaMax = lmax;
}
public ADMMSolver (double lmax, double lambda, double alpha, double gradEps,double addedL2) {
super(lambda,alpha);
_lambdaMax = lmax;
_addedL2 = addedL2;
_gradientEps = gradEps;
}
public JsonObject toJson(){
JsonObject res = new JsonObject();
res.addProperty("lambda_value",_lambda);
res.addProperty("alpha",_alpha);
return res;
}
public static class NonSPDMatrixException extends LSMSolverException {
public NonSPDMatrixException(){super("Matrix is not SPD, can't solve without regularization\n");}
public NonSPDMatrixException(Gram grm){
super("Matrix is not SPD, can't solve without regularization\n" + grm);
}
}
@Override
public boolean solve(Gram gram, double [] xy, double yy, double[] z) {
return solve(gram, xy, yy, z, Double.POSITIVE_INFINITY);
}
private static double l1_norm(double [] v){
double res = 0;
for(double d:v)res += Math.abs(d);
return res;
}
private static double l2_norm(double [] v){
double res = 0;
for(double d:v)res += d*d;
return res;
}
private double converged(Gram g, double [] beta, double [] xy){
double [] grad = grad(g,beta,xy);
subgrad(_alpha,_lambda,beta,grad);
double err = 0;
for(double d:grad)
if(d > err)err = d;
else if(d < -err)err = -d;
return err;
}
private double getGrad(Gram gram, double [] beta, double [] xy){
double [] g = grad(gram,beta,xy);
double err = 0;
for(double d3:g)
if(d3 > err)err = d3;
else if(d3 < -err)err = -d3;
return err;
}
public ParallelSolver parSolver(Gram gram, double [] xy, double [] res, double rho, int iBlock, int rBlock){
return new ParallelSolver(gram, xy, res, rho,iBlock, rBlock);
}
public final class ParallelSolver extends H2O.H2OCountedCompleter {
final Gram gram;
final double rho;
final double kappa;
double _bestErr = Double.POSITIVE_INFINITY;
double _lastErr = Double.POSITIVE_INFINITY;
final double [] xy;
double [] _xyPrime;
double _orlx;
int _k;
final double [] u;
final double [] z;
Cholesky chol;
final double d;
int _iter;
final int N;
final int max_iter;
final int round;
final int _iBlock;
final int _rBlock;
private ParallelSolver(Gram g, double [] xy, double [] res, double rho, int iBlock, int rBlock){
_iBlock = iBlock;
_rBlock = rBlock;
      gram = g; this.xy = xy; this.z = res;
N = xy.length;
d = gram._diagAdded;
this.rho = rho;
u = MemoryManager.malloc8d(N);
kappa = _lambda*_alpha/rho;
max_iter = (int)(10000*(250.0/(1+xy.length)));
round = Math.max(20,(int)(max_iter*0.01));
_k = round;
}
@Override
public void compute2() {
Arrays.fill(z, 0);
if(_lambda>0 || _addedL2 > 0)
gram.addDiag(_lambda*(1-_alpha) + _addedL2);
if(_alpha > 0 && _lambda > 0)
gram.addDiag(rho);
if(_wgiven != null){
gram.addDiag(_proximalPenalties);
for(int i = 0; i < xy.length; ++i)
xy[i] += _proximalPenalties[i]*_wgiven[i];
}
int attempts = 0;
long t1 = System.currentTimeMillis();
chol = gram.cholesky(null,true,_id);
long t2 = System.currentTimeMillis();
while(!chol.isSPD() && attempts < 10){
if(_addedL2 == 0) _addedL2 = 1e-5;
else _addedL2 *= 10;
++attempts;
        gram.addDiag(_addedL2); // try to add L2 penalty to make the Gram SPD
gram.cholesky(chol);
}
decompTime = (t2-t1);
if(!chol.isSPD())
throw new NonSPDMatrixException(gram);
if(_alpha == 0 || _lambda == 0){ // no l1 penalty
System.arraycopy(xy, 0, z, 0, xy.length);
chol.parSolver(this,z,_iBlock,_rBlock).fork();
return;
}
gerr = Double.POSITIVE_INFINITY;
_xyPrime = xy.clone();
_orlx = 1.8; // over-relaxation
// first compute the x update
// add rho*(z-u) to A'*y
new ADMMIteration(this).fork();
}
@Override public void onCompletion(CountedCompleter caller){
gram.addDiag(-gram._diagAdded + d);
assert gram._diagAdded == d;
}
private final class ADMMIteration extends CountedCompleter {
final long t1;
public ADMMIteration(H2O.H2OCountedCompleter cmp){super(cmp); t1 = System.currentTimeMillis();}
@Override public void compute(){
++_iter;
final double [] xyPrime = _xyPrime;
// first compute the x update
// add rho*(z-u) to A'*y
for( int j = 0; j < N-1; ++j )xyPrime[j] = xy[j] + rho*(z[j] - u[j]);
xyPrime[N-1] = xy[N-1];
// updated x
chol.parSolver(this,xyPrime,_iBlock,_rBlock).fork();
}
@Override
public void onCompletion(CountedCompleter caller) {
final double [] xyPrime = _xyPrime;
final double orlx = _orlx;
// compute u and z updateADMM
for( int j = 0; j < N-1; ++j ) {
double x_hat = xyPrime[j];
x_hat = x_hat * orlx + (1 - orlx) * z[j];
z[j] = shrinkage(x_hat + u[j], kappa);
u[j] += x_hat - z[j];
}
z[N-1] = xyPrime[N-1];
if(_iter == _k) {
double[] grad = grad(gram, z, xy);
subgrad(_alpha, _lambda, z, grad);
          gerr = 0; // track the largest absolute component of the subgradient (mirrors the serial solver below)
          for (int x = 0; x < grad.length - 1; ++x) {
            if (gerr < grad[x]) gerr = grad[x];
            else if (gerr < -grad[x]) gerr = -grad[x];
          }
if (gerr < 9e-4)
return;
// if(grad < bestErr){
// bestErr = err;
// System.arraycopy(z,0,res,0,z.length);
// if(err < _gradientEps)
// break;
// } else {
// boolean allzeros = true;
// for (int x = 0; allzeros && x < z.length - 1; ++x)
// allzeros = z[x] == 0;
// if (!allzeros) { // only want this check if we're past the warm up period (there can be many iterations with all zeros!)
// // did not converge, check if we can converge in reasonable time
// if (diff < 1e-4) // we won't ever converge with this setup (maybe change rho and try again?)
// break;
// orlx = (1 + 15 * orlx) * 0.0625;
// } else
// orlx = 1.8;
// }
// lastErr = err;
_k += round;
}
if(_iter < max_iter){
getCompleter().addToPendingCount(1);
new ADMMIteration((H2O.H2OCountedCompleter)getCompleter()).fork();
}
}
}
}
final static double RELTOL = 1e-4;
public boolean solve(Gram gram, double [] xy, double yy, final double[] z, final double rho) {
if(xy.length == 0) return true; // special case which can happen if we run with offset and no intercept and have 0 active cols
gerr = 0;
boolean bounds = _lb != null || _ub != null;
double d = gram._diagAdded;
final int N = xy.length;
Arrays.fill(z, 0);
if(_lambda>0 || _addedL2 > 0)
gram.addDiag(_lambda*(1-_alpha) + _addedL2);
if(_alpha > 0 && _lambda > 0)
gram.addDiag(rho);
if(_wgiven != null){
gram.addDiag(_proximalPenalties);
xy = xy.clone();
for(int i = 0; i < xy.length; ++i)
xy[i] += _proximalPenalties[i]*_wgiven[i];
}
int attempts = 0;
long t1 = System.currentTimeMillis();
Cholesky chol = gram.cholesky(null,true,_id);
long t2 = System.currentTimeMillis();
double inc = 1e-1*_lambdaMax;
while(!chol.isSPD() && attempts < 10){
_addedL2 += inc;
++attempts;
gram.addDiag(inc); // try to add an L2 penalty to make the Gram SPD
gram.cholesky(chol);
}
decompTime = (t2-t1);
if(!chol.isSPD())
throw new NonSPDMatrixException(gram);
if((_alpha == 0 || _lambda == 0) && !bounds){ // no l1 penalty and no upper/lower bounds
System.arraycopy(xy, 0, z, 0, xy.length);
chol.solve(z);
gram.addDiag(-gram._diagAdded + d);
return true;
}
double[] u = MemoryManager.malloc8d(N);
double [] xyPrime = xy.clone();
double kappa = _lambda*_alpha/rho;
int i;
int max_iter = Math.max(500,(int)(50000.0/(1+(xy.length >> 3))));
double orlx = 1.8; // over-relaxation
double reltol = RELTOL;
for(i = 0; i < max_iter; ++i ) {
long tX = System.currentTimeMillis();
// first compute the x update
// add rho*(z-u) to A'*y
for( int j = 0; j < N-1; ++j )
xyPrime[j] = xy[j] + rho*(z[j] - u[j]);
xyPrime[N-1] = xy[N-1];
// updated x
chol.solve(xyPrime);
// compute u and z update ADMM
double rnorm = 0, snorm = 0, unorm = 0, xnorm = 0;
for( int j = 0; j < N-1; ++j ) {
double x = xyPrime[j];
double zold = z[j];
double x_hat = x * orlx + (1 - orlx) * zold;
z[j] = shrinkage(x_hat + u[j], kappa);
if(_lb != null && z[j] < _lb[j])
z[j] = _lb[j];
if(_ub != null && z[j] > _ub[j])
z[j] = _ub[j];
u[j] += x_hat - z[j];
double r = xyPrime[j] - z[j];
double s = z[j] - zold;
rnorm += r*r;
snorm += s*s;
xnorm += x*x;
unorm += u[j]*u[j];
}
z[N-1] = xyPrime[N-1];
if(rnorm < reltol*xnorm && snorm < reltol*unorm){
gerr = 0;
double [] grad = grad(gram,z,xy);
subgrad(_alpha,_lambda,z,grad);
for(int x = 0; x < grad.length-1; ++x){
if(gerr < grad[x]) gerr = grad[x];
else if(gerr < -grad[x]) gerr = -grad[x];
}
if(gerr < 1e-4 || reltol <= 1e-6)break;
while(rnorm < reltol*xnorm && snorm < reltol*unorm)
reltol *= .1;
}
if(i % 20 == 0)
orlx = (1 + 15 * orlx) * 0.0625;
}
gram.addDiag(-gram._diagAdded + d);
assert gram._diagAdded == d;
iterations = i;
return _converged = (gerr < _gradientEps);
}
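  // Illustration (not part of the solver): the z-update above is the proximal step of ADMM for the
  // elastic-net least-squares objective. Assuming shrinkage() is the usual soft-thresholding
  // operator, a scalar version of the update pair would look like:
  //
  //   static double softThreshold(double x, double kappa) { // prox of kappa*|x|
  //     return x > kappa ? x - kappa : (x < -kappa ? x + kappa : 0);
  //   }
  //   // z_j = softThreshold(x_hat_j + u_j, _lambda*_alpha/rho);  u_j += x_hat_j - z_j;
  //
  // The scaled dual variable u accumulates the disagreement between the unconstrained solve (x)
  // and its thresholded copy (z); rnorm and snorm above are the primal and dual residuals used in
  // the standard ADMM stopping test.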
@Override
public String name() {return "ADMM";}
}
// public static final class ProxSolver extends LSMSolver {
// public ProxSolver(double lambda, double alpha){super(lambda,alpha);}
//
// /**
// * @param newB
// * @param oldObj
// * @param oldB
// * @param
// * @param t
// * @return
// */
// private static final double f_hat(double [] newB,double oldObj, double [] oldB,double [] xb, double [] xy, double t){
// double res = oldObj;
// double l2 = 0;
// for(int i = 0; i < newB.length; ++i){
// double diff = newB[i] - oldB[i];
// res += (xb[i]-xy[i])*diff;
// l2 += diff*diff;
// }
// return res + 0.25*l2/t;
// }
// private double penalty(double [] beta){
// double l1 = 0,l2 = 0;
// for(int i = 0; i < beta.length; ++i){
// l1 += Math.abs(beta[i]);
// l2 += beta[i]*beta[i];
// }
// return _lambda*(_alpha*l1 + (1-_alpha)*l2*0.5);
// }
// private static double betaDiff(double [] b1, double [] b2){
// double res = 0;
// for(int i = 0; i < b1.length; ++i)
// Math.max(res, Math.abs(b1[i] - b2[i]));
// return res;
// }
// @Override
// public boolean solve(Gram gram, double [] xy, double yy, double[] beta) {
// ADMMSolver admm = new ADMMSolver(_lambda,_alpha,1e-2);
// if(gram != null)return admm.solve(gram,xy,yy,beta);
// Arrays.fill(beta,0);
// long t1 = System.currentTimeMillis();
// final double [] xb = gram.mul(beta);
// double objval = objectiveVal(xy,yy,beta,xb);
// final double [] newB = MemoryManager.malloc8d(beta.length);
// final double [] newG = MemoryManager.malloc8d(beta.length);
// double step = 1;
// final double l1pen = _lambda*_alpha;
// final double l2pen = _lambda*(1-_alpha);
// double lsmobjval = lsm_objectiveVal(xy,yy,beta,xb);
// boolean converged = false;
// final int intercept = beta.length-1;
// int iter = 0;
// MAIN:
// while(!converged && iter < 1000) {
// ++iter;
// step = 1;
// while(step > 1e-12){ // line search
// double l2shrink = 1/(1+step*l2pen);
// double l1shrink = l1pen*step;
// for(int i = 0; i < beta.length-1; ++i)
// newB[i] = l2shrink*shrinkage((beta[i]-step*(xb[i]-xy[i])),l1shrink);
// newB[intercept] = beta[intercept] - step*(xb[intercept]-xy[intercept]);
// gram.mul(newB, newG);
// double newlsmobj = lsm_objectiveVal(xy, yy, newB,newG);
// double fhat = f_hat(newB,lsmobjval,beta,xb,xy,step);
// if(newlsmobj <= fhat){
// lsmobjval = newlsmobj;
// converged = betaDiff(beta,newB) < 1e-6;
// System.arraycopy(newB,0,beta,0,newB.length);
// System.arraycopy(newG,0,xb,0,newG.length);
// continue MAIN;
// } else step *= 0.8;
// }
// converged = true;
// }
// return converged;
// }
// public String name(){return "ProximalGradientSolver";}
// }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/gram/Gram.java
|
package hex.gram;
import Jama.CholeskyDecomposition;
import Jama.Matrix;
import hex.FrameTask;
import hex.glm.LSMSolver.ADMMSolver.NonSPDMatrixException;
import jsr166y.CountedCompleter;
import jsr166y.ForkJoinTask;
import jsr166y.RecursiveAction;
import sun.misc.Unsafe;
import water.*;
import water.nbhm.UtilUnsafe;
import water.util.Utils;
import java.util.Arrays;
public final class Gram extends Iced {
final boolean _hasIntercept;
public double[][] _xx;
double[] _diag;
public final int _diagN;
final int _denseN;
final int _fullN;
final static int MIN_TSKSZ=10000;
public Gram() {_diagN = _denseN = _fullN = 0; _hasIntercept = false; }
public Gram(int N, int diag, int dense, int sparse, boolean hasIntercept) {
_hasIntercept = hasIntercept;
_fullN = N + (_hasIntercept?1:0);
_xx = new double[_fullN - diag][];
_diag = MemoryManager.malloc8d(_diagN = diag);
_denseN = dense;
for( int i = 0; i < (_fullN - _diagN); ++i )
_xx[i] = MemoryManager.malloc8d(diag + i + 1);
}
public Gram(Gram g){
_diagN = g._diagN;
_denseN = g._denseN;
_fullN = g._fullN;
_hasIntercept = g._hasIntercept;
if(g._diag != null)_diag = g._diag.clone();
if(g._xx != null){
_xx = g._xx.clone();
for(int i = 0; i < _xx.length; ++i)
_xx[i] = _xx[i].clone();
}
}
public final int fullN(){return _fullN;}
public double _diagAdded;
public void addDiag(double [] ds) {
int i = 0;
for(;i < Math.min(_diagN,ds.length); ++i)
_diag[i] += ds[i];
for(;i < ds.length; ++i)
_xx[i-_diagN][i] += ds[i];
}
public void addDiag(double d) {addDiag(d,false);}
public void addDiag(double d, boolean add2Intercept) {
_diagAdded += d;
for( int i = 0; i < _diag.length; ++i )
_diag[i] += d;
int ii = (!_hasIntercept || add2Intercept)?0:1;
for( int i = 0; i < _xx.length - ii; ++i )
_xx[i][_xx[i].length - 1] += d;
}
public double sparseness(){
double [][] xx = getXX();
double nzs = 0;
for(int i = 0; i < xx.length; ++i)
for(int j = 0; j < xx[i].length; ++j)
if(xx[i][j] != 0) nzs += 1;
return nzs/(xx.length*xx.length);
}
public double diagAvg(){
double res = 0;
int n = 0;
if(_diag != null){
n += _diag.length;
for(double d:_diag) res += d;
}
if(_xx != null){
n += _xx.length;
for(double [] x:_xx)res += x[x.length-1];
}
return res/n;
}
public double diagMin(){
double res = Double.POSITIVE_INFINITY;
if(_diag != null)
for(double d:_diag) if(d < res)res = d;
if(_xx != null)
for(int i = 0; i < _xx.length-1; ++i){
final double [] x = _xx[i];
if(x[x.length-1] < res)res = x[x.length-1];
}
return res;
}
@Override
public Gram clone(){return new Gram(this);}
public String toString(){
if(_fullN >= 1000){
if(_denseN >= 1000) return "Gram(" + _fullN + ")";
else return "diag:\n" + Arrays.toString(_diag) + "\ndense:\n" + Utils.pprint(getDenseXX());
} else return Utils.pprint(getXX());
}
static public class InPlaceCholesky {
final double _xx[][]; // Lower triangle of the symmetric matrix.
private boolean _isSPD;
private InPlaceCholesky(double xx[][], boolean isspd) { _xx = xx; _isSPD = isspd; }
static private class BlockTask extends RecursiveAction {
final double[][] _xx;
final int _i0, _i1, _j0, _j1;
public BlockTask(double xx[][], int ifr, int ito, int jfr, int jto) {
_xx = xx;
_i0 = ifr; _i1 = ito; _j0 = jfr; _j1 = jto;
}
@Override public void compute() {
for (int i=_i0; i < _i1; i++) {
double rowi[] = _xx[i];
for (int k=_j0; k < _j1; k++) {
double rowk[] = _xx[k];
double s = 0.0;
for (int jj = 0; jj < k; jj++) s += rowk[jj]*rowi[jj];
rowi[k] = (rowi[k] - s) / rowk[k];
}
}
}
}
public static InPlaceCholesky decompose_2(double xx[][], int STEP, int P) {
boolean isspd = true;
final int N = xx.length;
P = Math.max(1, P);
for (int j=0; j < N; j+=STEP) {
// update the upper left triangle.
final int tjR = Math.min(j+STEP, N);
for (int i=j; i < tjR; i++) {
double rowi[] = xx[i];
double d = 0.0;
for (int k=j; k < i; k++) {
double rowk[] = xx[k];
double s = 0.0;
for (int jj = 0; jj < k; jj++) s += rowk[jj]*rowi[jj];
rowi[k] = s = (rowi[k] - s) / rowk[k];
d += s*s;
}
for (int jj = 0; jj < j; jj++) { double s = rowi[jj]; d += s*s; }
d = rowi[i] - d;
isspd = isspd && (d > 0.0);
rowi[i] = Math.sqrt(Math.max(0.0, d));
}
if (tjR == N) break;
// update the lower strip
int i = tjR;
Futures fs = new Futures();
int rpb = 0; // rows per block
int p = P; // concurrency
while ( tjR*(rpb=(N - tjR)/p)<Gram.MIN_TSKSZ && p>1) --p;
while (p-- > 1) {
fs.add(new BlockTask(xx,i,i+rpb,j,tjR).fork());
i += rpb;
}
new BlockTask(xx,i,N,j,tjR).compute();
fs.blockForPending();
}
return new InPlaceCholesky(xx, isspd);
}
public double[][] getL() { return _xx; }
public boolean isSPD() { return _isSPD; }
}
public Cholesky cholesky(Cholesky chol) {
return cholesky(chol,true,"");
}
/**
* Compute the cholesky decomposition.
*
* In case our gram starts with diagonal submatrix of dimension N, we exploit this fact to reduce the complexity of the problem.
* We use the standard decomposition of the cholesky factorization into submatrices.
*
* We split the Gram into 3 regions (4 in the full matrix, but only the lower triangle is stored; "sparse" here means the diagonal region):
*   diagonal
*   diagonal*dense
*   dense*dense
* The Cholesky is then computed in 3 steps:
*   1. Solve the diagonal part directly (just take the square roots of the elements).
*   2. Divide the diagonal*dense part by the square roots of the diagonal.
*   3. Compute the Cholesky of (dense*dense - outer product of the diagonal*dense factor from step 2).
*
* @param chol
* @return
*/
public Cholesky cholesky(Cholesky chol, boolean parallelize,String id) {
long start = System.currentTimeMillis();
if( chol == null ) {
double[][] xx = _xx.clone();
for( int i = 0; i < xx.length; ++i )
xx[i] = xx[i].clone();
chol = new Cholesky(xx, _diag.clone());
}
final Cholesky fchol = chol;
final int sparseN = _diag.length;
final int denseN = _fullN - sparseN;
// compute the cholesky of the diagonal and diagonal*dense parts
if( _diag != null ) for( int i = 0; i < sparseN; ++i ) {
double d = 1.0 / (chol._diag[i] = Math.sqrt(_diag[i]));
for( int j = 0; j < denseN; ++j )
chol._xx[j][i] = d*_xx[j][i];
}
ForkJoinTask [] fjts = new ForkJoinTask[denseN];
// compute the outer product of diagonal*dense
//Log.info("SPARSEN = " + sparseN + " DENSEN = " + denseN);
final int[][] nz = new int[denseN][];
for( int i = 0; i < denseN; ++i ) {
final int fi = i;
fjts[i] = new RecursiveAction() {
@Override protected void compute() {
int[] tmp = new int[sparseN];
double[] rowi = fchol._xx[fi];
int n = 0;
for( int k = 0; k < sparseN; ++k )
if (rowi[k] != .0) tmp[n++] = k;
nz[fi] = Arrays.copyOf(tmp, n);
}
};
}
ForkJoinTask.invokeAll(fjts);
for( int i = 0; i < denseN; ++i ) {
final int fi = i;
fjts[i] = new RecursiveAction() {
@Override protected void compute() {
double[] rowi = fchol._xx[fi];
int[] nzi = nz[fi];
for( int j = 0; j <= fi; ++j ) {
double[] rowj = fchol._xx[j];
int[] nzj = nz[j];
double s = 0;
for (int t=0,z=0; t < nzi.length && z < nzj.length; ) {
int k1 = nzi[t];
int k2 = nzj[z];
if (k1 < k2) { t++; continue; }
else if (k1 > k2) { z++; continue; }
else {
s += rowi[k1] * rowj[k1];
t++; z++;
}
}
rowi[j + sparseN] = _xx[fi][j + sparseN] - s;
}
}
};
}
ForkJoinTask.invokeAll(fjts);
// compute the cholesky of dense*dense-outer_product(diagonal*dense)
// TODO we still use Jama, which requires (among other things) copy and expansion of the matrix. Do it here without copy and faster.
double[][] arr = new double[denseN][];
for( int i = 0; i < arr.length; ++i )
arr[i] = Arrays.copyOfRange(fchol._xx[i], sparseN, sparseN + denseN);
// Log.info(id + ": CHOLESKY PRECOMPUTE TIME " + (System.currentTimeMillis() - start));
start = System.currentTimeMillis();
// parallelize cholesky
if (parallelize) {
int p = Runtime.getRuntime().availableProcessors();
InPlaceCholesky d = InPlaceCholesky.decompose_2(arr, 10, p);
fchol.setSPD(d.isSPD());
arr = d.getL();
// Log.info (id + ": H2O CHOLESKY DECOMP TAKES: " + (System.currentTimeMillis()-start));
} else {
// make it symmetric
for( int i = 0; i < arr.length; ++i )
for( int j = 0; j < i; ++j )
arr[j][i] = arr[i][j];
CholeskyDecomposition c = new Matrix(arr).chol();
fchol.setSPD(c.isSPD());
arr = c.getL().getArray();
//Log.info ("JAMA CHOLESKY DECOMPOSE TAKES: " + (System.currentTimeMillis()-start));
}
for( int i = 0; i < arr.length; ++i )
System.arraycopy(arr[i], 0, fchol._xx[i], sparseN, i + 1);
return chol;
}
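  // Sketch of the block decomposition described in the javadoc above (illustration only).
  // Writing the Gram with its diagonal ("sparse") block D and dense blocks B, C,
  //
  //   A = [ D  B' ]      L = [ sqrt(D)          0   ]
  //       [ B  C  ]          [ B*inv(sqrt(D))   L22 ]
  //
  // step 1 takes element-wise square roots of D, step 2 scales the columns of B by 1/sqrt(D_ii)
  // (the loop over sparseN above), and step 3 computes L22 as the Cholesky of the Schur complement
  // C - (B*inv(sqrt(D)))*(B*inv(sqrt(D)))', which is exactly what the outer-product tasks subtract
  // before the dense decomposition (InPlaceCholesky or Jama) runs on the remaining block.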
public double[][] getXX() {
final int N = _fullN;
double[][] xx = new double[N][];
for( int i = 0; i < N; ++i )
xx[i] = MemoryManager.malloc8d(N);
for( int i = 0; i < _diag.length; ++i )
xx[i][i] = _diag[i];
for( int i = 0; i < _xx.length; ++i ) {
for( int j = 0; j < _xx[i].length; ++j ) {
xx[i + _diag.length][j] = _xx[i][j];
xx[j][i + _diag.length] = _xx[i][j];
}
}
return xx;
}
public double[][] getDenseXX() {
final int N = _denseN;
double[][] xx = new double[N][];
for( int i = 0; i < N; ++i )
xx[i] = MemoryManager.malloc8d(N);
for( int i = 0; i < _xx.length; ++i ) {
for( int j = _diagN; j < _xx[i].length; ++j ) {
xx[i][j-_diagN] = _xx[i][j];
xx[j-_diagN][i] = _xx[i][j];
}
}
return xx;
}
public void add(Gram grm) {
Utils.add(_xx,grm._xx);
Utils.add(_diag,grm._diag);
}
public final boolean hasNaNsOrInfs() {
for( int i = 0; i < _xx.length; ++i )
for( int j = 0; j < _xx[i].length; ++j )
if( Double.isInfinite(_xx[i][j]) || Double.isNaN(_xx[i][j]) ) return true;
for( double d : _diag )
if( Double.isInfinite(d) || Double.isNaN(d) ) return true;
return false;
}
public static final class Cholesky {
public final double[][] _xx;
protected final double[] _diag;
private boolean _isSPD;
public Cholesky(double[][] xx, double[] diag) {
_xx = xx;
_diag = diag;
}
public Cholesky(Gram gram) {
_xx = gram._xx.clone();
for( int i = 0; i < _xx.length; ++i )
_xx[i] = gram._xx[i].clone();
_diag = gram._diag.clone();
}
public double[][] getXX() {
final int N = _xx.length+_diag.length;
double[][] xx = new double[N][];
for( int i = 0; i < N; ++i )
xx[i] = MemoryManager.malloc8d(N);
for( int i = 0; i < _diag.length; ++i )
xx[i][i] = _diag[i];
for( int i = 0; i < _xx.length; ++i ) {
for( int j = 0; j < _xx[i].length; ++j ) {
xx[i + _diag.length][j] = _xx[i][j];
xx[j][i + _diag.length] = _xx[i][j];
}
}
return xx;
}
public double sparseness(){
double [][] xx = getXX();
double nzs = 0;
for(int i = 0; i < xx.length; ++i)
for(int j = 0; j < xx[i].length; ++j)
if(xx[i][j] != 0) nzs += 1;
return nzs/(xx.length*xx.length);
}
@Override
public String toString() {
return "";
}
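// DelayedTask: a RecursiveAction guarded by a small dependency counter (set in the constructor).
// Each tryFork() call atomically consumes one unit of the counter; only a call that finds the
// counter already at zero actually forks, so a task created with pending == n is launched by the
// (n+1)-th tryFork(), i.e. once all of its predecessors have reported in.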
public static abstract class DelayedTask extends RecursiveAction {
private static final Unsafe U;
private static final long PENDING;
private int _pending;
static {
try {
U = UtilUnsafe.getUnsafe();
PENDING = U.objectFieldOffset
(CountedCompleter.class.getDeclaredField("pending"));
} catch (Exception e) {
throw new Error(e);
}
}
public DelayedTask(int pending){ _pending = pending;}
public final void tryFork(){
int c = _pending;
while(c != 0 && !U.compareAndSwapInt(this,PENDING,c,c-1))
c = _pending;
// System.out.println(" tryFork of " + this + ". c = " + c);
if(c == 0) fork();
}
}
private final class BackSolver2 extends CountedCompleter {
// private final AtomicIntegerArray _rowPtrs;
// private final int [] _rowPtrs;
final BackSolver2 [] _tasks;
volatile private int _endPtr;
final double [] _y;
final int _row;
private final int _blocksz;
private final int _rblocksz;
private final CountedCompleter _cmp;
public BackSolver2(CountedCompleter cmp, double [] y, int blocksz, int rBlock){
this(cmp,y.length-1,y,new BackSolver2[(y.length-_diag.length)/rBlock],blocksz,rBlock,(y.length-_diag.length)/rBlock-1);
_cmp.addToPendingCount(_tasks.length-1);
int row = _diag.length + (y.length - _diag.length) % _rblocksz + _rblocksz - 1;
for(int i = 0; i < _tasks.length-1; ++i, row += _rblocksz)
_tasks[i] = new BackSolver2(_cmp, row, _y, _tasks, _blocksz,rBlock,i);
assert row == y.length-1;
_tasks[_tasks.length-1] = this;
}
public BackSolver2(CountedCompleter cmp,int row,double [] y, BackSolver2 [] tsks, int iBlock, int rBlock, int tid){
super(cmp);
_cmp = cmp;
_row = row;
_y = y;
_tasks = tsks;
_blocksz = iBlock;
_rblocksz = rBlock;
_endPtr = _row+1;
_tid =tid;
}
final int _tid;
@Override
public void compute() {
int rEnd = _row - _rblocksz;
if(rEnd < _diag.length + _rblocksz)
rEnd = _diag.length;
int bStart = Math.max(0,rEnd - rEnd % _blocksz);
assert _tid == _tasks.length-1 || bStart >= _tasks[_tid+1]._endPtr;
for(int i = 0; i < _rblocksz; ++i) {
final double [] x = _xx[_row-_diag.length-i];
final double yr = _y[_row - i] /= x[_row - i];
for(int j = bStart; j < (_row-i); ++j)
_y[j] -= yr * x[j];
}
boolean first = true;
for(; bStart >= _blocksz; bStart -= _blocksz){
final int bEnd = bStart - _blocksz;
if(_tid != _tasks.length-1)
while(_tasks[_tid+1]._endPtr > bEnd)
Thread.yield(); // synchronization :/
for(int r = _row; r >= rEnd; --r){
final double [] x = _xx[r-_diag.length];
final double yr = _y[r];
for(int i = bStart-1; i >= bEnd; --i)
_y[i] -= _y[r] * x[i];
}
_endPtr = bEnd;
if (first && _tid > 0 && (bEnd <= _row - 2*_rblocksz - _blocksz)) { // first go -> launch next row
_tasks[_tid - 1].fork();
first = false;
}
}
assert bStart == 0;
tryComplete();
}
@Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter cc){
return true;
}
}
private final class BackSolver extends CountedCompleter {
final int _diagLen;
final double[] _y;
final DelayedTask [][] _tasks;
BackSolver(double [] y, int kBlocksz, int iBlocksz){
final int n = y.length;
_y = y;
int kRem = _xx.length % kBlocksz;
int M = _xx.length/kBlocksz + (kRem == 0?0:1);
int N = n / iBlocksz; // iRem is added to the diagonal block
_tasks = new DelayedTask[M][];
int rsz = N;
for(int i = M-1; i >= 0; --i)
_tasks[i] = new DelayedTask[rsz--];
_diagLen = _diag == null?0:_diag.length;
// Solve L'*X = Y;
int kFrom = _diagLen + _xx.length-1;
int kTo = _diagLen + _xx.length;
int iFrom = n;
int pending = 0;
int rem = 0;
if(kRem > 0){
rem = 1;
int k = _tasks.length-1;
int i = _tasks[k].length-1;
iFrom = i*iBlocksz;
kTo = kFrom - kRem + 1;
_tasks[k][i] = new BackSolveDiagTsk(0,k,kFrom,kTo,iFrom);
for(int j = 0; j < _tasks[k].length-1; ++j)
_tasks[k][j] = new BackSolveInnerTsk(pending,M-1,j,kFrom,kTo, j*iBlocksz,(j+1)*iBlocksz);
pending = 1;
}
for( int k = _tasks.length-1-rem; k >= 0; --k) {
kFrom = kTo -1;
kTo -= kBlocksz;
int ii = _tasks[k].length-1;
iFrom = ii*iBlocksz;
_tasks[k][_tasks[k].length-1] = new BackSolveDiagTsk(0,k,kFrom,kTo,iFrom);
for(int i = 0; i < _tasks[k].length-1; ++i)
_tasks[k][i] = new BackSolveInnerTsk(pending,k,i,kFrom,kTo, i*iBlocksz,(i+1)*iBlocksz); // (pending, row, col, kFrom, kTo, iFrom, iTo), matching the remainder-block loop above
pending = 1;
}
addToPendingCount(_tasks[0].length-1);
}
@Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller){
try {
for (ForkJoinTask[] ary : _tasks)
for (ForkJoinTask fjt : ary)
fjt.cancel(true);
} catch(Throwable t){}
return true;
}
@Override
public void compute() {
_tasks[_tasks.length-1][_tasks[_tasks.length-1].length-1].fork(); }
final class BackSolveDiagTsk extends DelayedTask {
final int _kfrom, _kto,_ifrom, _row;
public BackSolveDiagTsk(int pending, int row, int kfrom, int kto, int ifrom) {
super(pending);
_row = row;
_kfrom = kfrom;
_kto = kto;
_ifrom = ifrom;
}
@Override
protected void compute() {
if(BackSolver.this.isCompletedAbnormally())
return;
try {
// same as single threaded solve,
// except we do only a (lower diagonal) square block here
// and we (try to) launch dependents in the end
for (int k = _kfrom; k >= _kto; --k) {
_y[k] /= _xx[k - _diagLen][k];
for (int i = _ifrom; i < k; ++i)
_y[i] -= _y[k] * _xx[k - _diagLen][i];
}
if (_row == 0) tryComplete(); // the last row of task completes the parent
// try to fork the whole row to the left
// (tryFork will fork task t iff all of its dependencies are done)
for (int i = 0; i < _tasks[_row].length - 1; ++i)
_tasks[_row][i].tryFork();
} catch(Throwable t){
t.printStackTrace();
BackSolver.this.completeExceptionally(t);
}
}
@Override public String toString(){
return ("DiagTsk, ifrom = " + _ifrom + ", kto = " + _kto);
}
}
final class BackSolveInnerTsk extends DelayedTask {
final int _kfrom, _kto, _ifrom, _ito, _row, _col;
public BackSolveInnerTsk(int pending,int row, int col, int kfrom, int kto, int ifrom, int ito) {
super(pending);
_kfrom = kfrom;
_kto = kto;
_ifrom = ifrom;
_ito = ito;
_col = col;
_row = row;
}
@Override
public void compute() {
if(BackSolver.this.isCompletedAbnormally())
return;
try {
// same as single threaded solve,
// except we do only a (lower diagonal) square block here
// and we (try to) launch dependents in the end
for (int k = _kfrom; k >= _kto; --k) {
final double yk = _y[k];
final double [] x = _xx[k-_diagLen];
for (int i = _ifrom; i < _ito; ++i)
_y[i] -= yk * x[i];
}
if (_row == 0) tryComplete();
// try to fork task directly above
else _tasks[_row - 1][_col].tryFork();
} catch(Throwable t){
t.printStackTrace();
BackSolver.this.completeExceptionally(t);
}
}
@Override public String toString(){
return ("InnerTsk, ifrom = " + _ifrom + ", kto = " + _kto);
}
}
}
public ParSolver parSolver(CountedCompleter cmp, double[] y, int iBlock, int rBlock){ return new ParSolver(cmp,y, iBlock, rBlock);}
public final class ParSolver extends CountedCompleter {
final double [] y;
final int _iBlock;
final int _rBlock;
private ParSolver(CountedCompleter cmp, double [] y, int iBlock, int rBlock){
super(cmp);
this.y = y;
_iBlock = iBlock;
_rBlock = rBlock;
}
@Override
public void compute() {
// long t = System.currentTimeMillis();
if( !isSPD() ) throw new NonSPDMatrixException();
assert _xx.length + _diag.length == y.length:"" + _xx.length + " + " + _diag.length + " != " + y.length;
// diagonal
for( int k = 0; k < _diag.length; ++k )
y[k] /= _diag[k];
// rest
final int n = y.length;
// Solve L*Y = B;
for( int k = _diag.length; k < n; ++k ) {
double d = 0;
for( int i = 0; i < k; i++ )
d += y[i] * _xx[k - _diag.length][i];
y[k] = (y[k]-d)/_xx[k - _diag.length][k];
}
// System.out.println("st part done in " + (System.currentTimeMillis()-t));
// do the dense bit in parallel
if(y.length >= 0) {
addToPendingCount(1);
new BackSolver2(this, y, _iBlock,_rBlock).fork();
} else { // too small, solve single threaded
// Solve L'*X = Y;
for( int k = n - 1; k >= _diag.length; --k ) {
y[k] /= _xx[k - _diag.length][k];
for( int i = 0; i < k; ++i )
y[i] -= y[k] * _xx[k - _diag.length][i];
}
}
tryComplete();
}
@Override public void onCompletion(CountedCompleter caller){
// diagonal
for( int k = _diag.length - 1; k >= 0; --k )
y[k] /= _diag[k];
}
}
/**
* Find solution to A*x = y.
*
* Result is stored in the y input vector. May throw NonSPDMatrix exception in case Gram is not
* positive definite.
*
* @param y
*/
public final void solve(double[] y) {
if( !isSPD() ) throw new NonSPDMatrixException();
assert _xx.length + _diag.length == y.length:"" + _xx.length + " + " + _diag.length + " != " + y.length;
// diagonal
for( int k = 0; k < _diag.length; ++k )
y[k] /= _diag[k];
// rest
final int n = y.length;
// Solve L*Y = B;
for( int k = _diag.length; k < n; ++k ) {
double d = 0;
for( int i = 0; i < k; i++ )
d += y[i] * _xx[k - _diag.length][i];
y[k] = (y[k]-d)/_xx[k - _diag.length][k];
}
// Solve L'*X = Y;
for( int k = n - 1; k >= _diag.length; --k ) {
y[k] /= _xx[k - _diag.length][k];
for( int i = 0; i < k; ++i )
y[i] -= y[k] * _xx[k - _diag.length][i];
}
// diagonal
for( int k = _diag.length - 1; k >= 0; --k )
y[k] /= _diag[k];
}
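    // Usage sketch (illustrative only): given a Gram g accumulated as X'X/n and a right-hand side
    // xy = X'y/n, least-squares coefficients can be obtained as
    //
    //   Cholesky chol = g.cholesky(null); // factorize A = L*L'
    //   double[] beta = xy.clone();
    //   chol.solve(beta);                 // beta now holds A^-1 * xy (forward + backward substitution)
    //
    // solve() overwrites its argument, so clone the right-hand side if the original is still needed.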
public final boolean isSPD() {return _isSPD;}
public final void setSPD(boolean b) {_isSPD = b;}
}
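  /**
   * Add one observation to the Gram (lower triangle only): x holds the dense numeric values,
   * catIndexes the expanded column indices of the catN active categorical levels (implicit value 1),
   * and w is the observation weight. Updates the num*num, num*cat, cat*cat and intercept blocks in place.
   */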
public final void addRow(final double[] x, final int catN, final int [] catIndexes, final double w) {
final int intercept = _hasIntercept?1:0;
final int denseRowStart = _fullN - _denseN - _diagN - intercept; // we keep dense numbers at the right bottom of the matrix, -1 is for intercept
final int denseColStart = _fullN - _denseN - intercept;
assert _denseN + denseRowStart == _xx.length-intercept;
final double [] interceptRow = _hasIntercept?_xx[_denseN + denseRowStart]:null;
// nums
for(int i = 0; i < _denseN; ++i) if(x[i] != 0) {
final double [] mrow = _xx[i+denseRowStart];
final double d = w*x[i];
for(int j = 0; j <= i; ++j)if(x[j] != 0)
mrow[j+denseColStart] += d*x[j];
if(_hasIntercept)
interceptRow[i+denseColStart] += d; // intercept*x[i]
// nums * cats
for(int j = 0; j < catN; ++j)
mrow[catIndexes[j]] += d;
}
if(_hasIntercept){
// intercept*intercept
interceptRow[_denseN+denseColStart] += w;
// intercept X cat
for(int j = 0; j < catN; ++j)
interceptRow[catIndexes[j]] += w;
}
final boolean hasDiag = (_diagN > 0 && catN > 0 && catIndexes[0] < _diagN);
// cat X cat
for(int i = hasDiag?1:0; i < catN; ++i){
final double [] mrow = _xx[catIndexes[i] - _diagN];
for(int j = 0; j <= i; ++j)
mrow[catIndexes[j]] += w;
}
// DIAG
if(hasDiag && catN > 0)
_diag[catIndexes[0]] += w;
}
public void mul(double x){
if(_diag != null)for(int i = 0; i < _diag.length; ++i)
_diag[i] *= x;
for(int i = 0; i < _xx.length; ++i)
for(int j = 0; j < _xx[i].length; ++j)
_xx[i][j] *= x;
}
public double [] mul(double [] x){
double [] res = MemoryManager.malloc8d(x.length);
mul(x,res);
return res;
}
public void mul(double [] x, double [] res){
Arrays.fill(res,0);
for(int i = 0; i < _diagN; ++i)
res[i] = x[i] * _diag[i];
for(int ii = 0; ii < _xx.length; ++ii){
final int n = _xx[ii].length-1;
final int i = _diagN + ii;
for(int j = 0; j < n; ++j) {
double e = _xx[ii][j]; // we store only lower diagonal, so we have two updates:
res[i] += x[j]*e; // standard matrix mul, row * vec, except short (only up to diag)
res[j] += x[i]*e; // symmetric matrix => each non-diag element adds to 2 places
}
res[i] += _xx[ii][n]*x[n]; // diagonal element
}
}
/**
* Task to compute gram matrix normalized by the number of observations (not counting rows with NAs).
* in R's notation g = t(X)%*%X/nobs, nobs = number of rows of X with no NA.
* @author tomasnykodym
*/
public static class GramTask extends FrameTask<GramTask> {
public Gram _gram;
public double [][] _XY;
public long _nobs;
public final boolean _hasIntercept;
public final boolean _isWeighted; // last response is weight vector?
public GramTask(Key jobKey, DataInfo dinfo, boolean hasIntercept, boolean isWeighted){
super(jobKey,dinfo);
_hasIntercept = hasIntercept;
_isWeighted = isWeighted;
}
@Override protected void chunkInit(){
_gram = new Gram(_dinfo.fullN(), _dinfo.largestCat(), _dinfo._nums, _dinfo._cats,_hasIntercept);
final int responses = _dinfo._responses - (_isWeighted?1:0);
if(responses > 0){
_XY = new double[responses][];
for(int i = 0; i < responses; ++i)
_XY[i] = MemoryManager.malloc8d(_gram._fullN);
}
}
@Override protected void processRow(long gid, double[] nums, int ncats, int[] cats, double [] responses) {
double w = _isWeighted?responses[responses.length-1]:1;
_gram.addRow(nums, ncats, cats, w);
if(_XY != null){
for(int i = 0 ; i < _XY.length; ++i){
final double y = responses[i]*w;
for(int j = 0; j < ncats; ++j)
_XY[i][cats[j]] += y;
int numoff = _dinfo.numStart();
for(int j = 0; j < nums.length; ++j)
_XY[i][numoff+j] += nums[j]*y;
}
}
++_nobs;
}
@Override protected void chunkDone(long n){
double r = 1.0/_nobs;
_gram.mul(r);
if(_XY != null)for(int i = 0; i < _XY.length; ++i)
for(int j = 0; j < _XY[i].length; ++j)
_XY[i][j] *= r;
}
@Override public void reduce(GramTask gt){
double r1 = (double)_nobs/(_nobs+gt._nobs);
_gram.mul(r1);
double r2 = (double)gt._nobs/(_nobs+gt._nobs);
gt._gram.mul(r2);
_gram.add(gt._gram);
if(_XY != null)for(int i = 0; i < _XY.length; ++i)
for(int j = 0; j < _XY[i].length; ++j)
_XY[i][j] = _XY[i][j]*r1 + gt._XY[i][j]*r2;
_nobs += gt._nobs;
}
}
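  // Note on the scaling above (illustration only): partial Grams are normalized by their own row
  // counts, so reduce() merges two partials as a weighted average,
  //
  //   g = (n1/(n1+n2))*g1 + (n2/(n1+n2))*g2,
  //
  // which preserves the invariant g = t(X)%*%X/nobs over all rows seen so far; the same weights
  // are applied to the _XY accumulators.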
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/la/Matrix.java
|
package hex.la;
import water.H2O;
import water.fvec.*;
public final class Matrix {
final Frame _x;
public Matrix(Frame x) { _x = x; }
// Matrix multiplication
public Frame mult(Frame y) {
int xrows = (int)_x.numRows();
int xcols = _x.numCols();
int yrows = (int) y.numRows();
int ycols = y.numCols();
if(xcols != yrows)
throw new IllegalArgumentException("Matrices are not compatible for multiplication: ["+xrows+"x"+xcols+"] * ["+yrows+"x"+ycols+"]. Requires [n x m] * [m x p]");
Vec[] x_vecs = _x.vecs();
Vec[] y_vecs = y.vecs();
for(int k = 0; k < xcols; k++) {
if(x_vecs[k].isEnum())
throw new IllegalArgumentException("Multiplication not meaningful for factor column "+k);
}
for(int j = 0; j < ycols; j++) {
if(y_vecs[j].isEnum())
throw new IllegalArgumentException("Multiplication not meaningful for factor column "+j);
}
Vec[] output = new Vec[ycols];
for(int j = 0; j < ycols; j++)
output[j] = Vec.makeSeq(xrows);
for(int i = 0; i < xrows; i++) {
for(int j = 0; j < ycols; j++) {
Vec yvec = y_vecs[j];
double d = 0;
for(int k = 0; k < xcols; k++)
d += x_vecs[k].at(i) * yvec.at(k);
output[j].set(i, d);
}
}
return new Frame(y._names,output);
}
// Outer product
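  // Computes X %*% t(X): entry (i,j) is the dot product of rows i and j of the input frame,
  // i.e. the sum over k of X[i][k]*X[j][k], giving a symmetric xrows-by-xrows result.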
public Frame outerProd() {
int xrows = (int)_x.numRows();
int xcols = _x.numCols();
Vec[] x_vecs = _x.vecs();
for(int j = 0; j < xcols; j++) {
if(x_vecs[j].isEnum())
throw new IllegalArgumentException("Multiplication not meaningful for factor column "+j);
}
Vec[] output = new Vec[xrows];
String[] names = new String[xrows];
for(int i = 0; i < xrows; i++) {
output[i] = Vec.makeSeq(xrows);
names[i] = "C" + String.valueOf(i+1);
}
for(int i = 0; i < xrows; i++) {
for(int j = 0; j < xrows; j++) {
double d = 0;
for(int k = 0; k < xcols; k++)
d += x_vecs[k].at(i)*x_vecs[k].at(j); // row i dot row j
output[j].set(i, d);
}
}
return new Frame(names, output);
}
// Transpose
public Frame trans() {
int xrows = (int)_x.numRows();
int xcols = _x.numCols();
Vec[] x_vecs = _x.vecs();
// Currently cannot transpose factors due to domain mismatch
for(int j = 0; j < xcols; j++) {
if(x_vecs[j].isEnum())
throw H2O.unimpl();
}
Vec[] output = new Vec[xrows];
String[] names = new String[xrows];
for(int i = 0; i < xrows; i++) {
output[i] = Vec.makeSeq(xcols);
names[i] = "C" + String.valueOf(i+1);
}
for(int i = 0; i < xrows; i++) {
for(int j = 0; j < xcols; j++) {
double d = x_vecs[j].at(i);
output[i].set(j, d);
}
}
return new Frame(names, output);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/nb/NBModel.java
|
package hex.nb;
import hex.FrameTask.DataInfo;
import hex.nb.NaiveBayes.NBTask;
import org.apache.commons.math3.distribution.NormalDistribution;
import water.Key;
import water.Model;
import water.Request2;
import water.api.DocGen;
import water.api.Predict;
import water.api.Request.API;
import water.api.RequestBuilders.ElementBuilder;
/**
* Naive Bayes model: holds the a-priori class distribution and, for each predictor, either the
* conditional probabilities per level (categorical columns) or the per-class mean and standard
* deviation (numeric columns).
*/
public class NBModel extends Model {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help = "Class counts of the dependent variable")
final double[] rescnt;
@API(help = "Class distribution of the dependent variable")
final double[] pprior;
@API(help = "For every predictor variable, a table giving, for each attribute level, the conditional probabilities given the target class")
final double[][][] pcond;
@API(help = "Number of categorical predictor variables")
final int ncats;
@API(help = "Number of numeric predictor variables")
final int nnums;
@API(help = "Laplace smoothing parameter")
final double laplace;
@API(help = "Min. standard deviation to use for observations with not enough data")
final double min_std_dev;
@API(help = "Model parameters", json = true)
private Request2 job;
@Override public final NaiveBayes get_params() { return (NaiveBayes)job; }
@Override public final Request2 job() { return job; }
public NBModel(Key selfKey, Key dataKey, DataInfo dinfo, NBTask tsk, double[] pprior, double[][][] pcond, double laplace, double min_std_dev) {
super(selfKey, dataKey, dinfo._adaptedFrame, /* priorClassDistribution */ null);
this.rescnt = tsk._rescnt;
this.job= tsk._job;
this.pprior = pprior;
this.pcond = pcond;
this.ncats = dinfo._cats;
this.nnums = dinfo._nums;
this.laplace = laplace;
this.min_std_dev = min_std_dev;
}
public double[] pprior() { return pprior; }
public double[][][] pcond() { return pcond; }
// Note: For small probabilities, product may end up zero due to underflow error. Can circumvent by taking logs.
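  // A minimal sketch of the log-space variant hinted at above (not what score0 below does):
  // accumulate log-probabilities and exponentiate only after subtracting the per-row maximum, e.g.
  //
  //   double logNum = Math.log(pprior[rlevel]);
  //   for (int col = 0; col < ncats; col++)
  //     logNum += Math.log(pcond[col][rlevel][(int) data[col]]); // skip NAs as below
  //   // ... add the Gaussian log-densities for numeric columns, then
  //   // preds[rlevel + 1] = (float) Math.exp(logNum - maxLogNum); // normalize afterwards
  //
  // This trades a few Math.log calls for robustness when many conditional probabilities are tiny.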
@Override protected float[] score0(double[] data, float[] preds) {
double denom = 0;
assert preds.length == (pprior.length + 1); // Note: First column of preds is predicted response class
// Compute joint probability of predictors for every response class
for(int rlevel = 0; rlevel < pprior.length; rlevel++) {
double num = 1;
for(int col = 0; col < ncats; col++) {
if(Double.isNaN(data[col])) continue; // Skip predictor in joint x_1,...,x_m if NA
int plevel = (int)data[col];
num *= pcond[col][rlevel][plevel]; // p(x|y) = \Pi_{j = 1}^m p(x_j|y)
}
// For numeric predictors, assume Gaussian distribution with sample mean and variance from model
for(int col = ncats; col < data.length; col++) {
if(Double.isNaN(data[col])) continue;
// Two ways to get non-zero std deviation HEX-1852
// double stddev = pcond[col][rlevel][1] > 0 ? pcond[col][rlevel][1] : min_std_dev; //only use the placeholder for critically low data
double stddev = Math.max(pcond[col][rlevel][1], min_std_dev); // more stable for almost constant data
double mean = pcond[col][rlevel][0];
double x = data[col];
num *= Math.exp(-((x-mean)*(x-mean)/(2.*stddev*stddev)))/stddev/Math.sqrt(2.*Math.PI); // faster
// num *= new NormalDistribution(mean, stddev).density(data[col]); //slower
}
num *= pprior[rlevel]; // p(x,y) = p(x|y)*p(y)
denom += num; // p(x) = \Sum_{levels of y} p(x,y)
preds[rlevel+1] = (float)num;
}
// Select class with highest conditional probability
float max = -1;
for(int i = 1; i < preds.length; i++) {
preds[i] /= denom; // p(y|x) = p(x,y)/p(x)
if(preds[i] > max) {
max = preds[i];
preds[0] = i-1;
}
}
return preds;
}
@Override public String toString(){
StringBuilder sb = new StringBuilder("Naive Bayes Model (key=" + _key + " , trained on " + _dataKey + "):\n");
return sb.toString();
}
public void generateHTML(String title, StringBuilder sb) {
if(title != null && !title.isEmpty()) DocGen.HTML.title(sb, title);
DocGen.HTML.paragraph(sb, "Model Key: " + _key);
sb.append("<div class='alert'>Actions: " + Predict.link(_key, "Predict on dataset") + ", "
+ NaiveBayes.link(_dataKey, "Compute new model") + "</div>");
DocGen.HTML.section(sb, "A-Priori Probabilities");
sb.append("<span style='display: inline-block;'>");
sb.append("<table class='table table-striped table-bordered'>");
// Domain of the response variable
String[] resdom = _domains[_domains.length-1];
sb.append("<tr>");
for(int i = 0; i < resdom.length; i++)
sb.append("<th>").append(resdom[i]).append("</th>");
sb.append("</tr>");
// Display table of a-priori response probabilities
sb.append("<tr>");
for(int i = 0; i < pprior.length; i++)
sb.append("<td>").append(ElementBuilder.format(pprior[i])).append("</td>");
sb.append("</tr>");
sb.append("</table></span>");
DocGen.HTML.section(sb, "Conditional Probabilities");
// Display table of conditional probabilities for categorical predictors
for(int col = 0; col < ncats; col++) {
DocGen.HTML.paragraph(sb, "Column: " + _names[col]);
sb.append("<span style='display: inline-block;'>");
sb.append("<table class='table table-striped table-bordered'>");
// Domain of the predictor variable
sb.append("<tr>");
sb.append("<th>").append("Response/Predictor").append("</th>");
for(int i = 0; i < _domains[col].length; i++)
sb.append("<th>").append(_domains[col][i]).append("</th>");
sb.append("</tr>");
// For each predictor, display table of conditional probabilities
for(int r = 0; r < pcond[col].length; r++) {
sb.append("<tr>");
sb.append("<th>").append(resdom[r]).append("</th>");
for(int c = 0; c < pcond[col][r].length; c++) {
double e = pcond[col][r][c];
sb.append("<td>").append(ElementBuilder.format(e)).append("</td>");
}
sb.append("</tr>");
}
sb.append("</table></span>");
}
// Display table of statistics for numeric predictors
for(int col = ncats; col < ncats + nnums; col++) {
DocGen.HTML.paragraph(sb, "Column: " + _names[col]);
sb.append("<span style='display: inline-block;'>");
sb.append("<table class='table table-striped table-bordered'>");
// Labels for the predictor variable columns
sb.append("<tr>");
sb.append("<th>").append("Response/Predictor").append("</th>");
sb.append("<th>").append("Mean").append("</th>");
sb.append("<th>").append("Standard Deviation").append("</th>");
sb.append("</tr>");
// For each predictor, display mean and standard deviation within every response level
for(int r = 0; r < pcond[col].length; r++) {
sb.append("<tr>");
sb.append("<th>").append(resdom[r]).append("</th>");
double pmean = pcond[col][r][0];
double psdev = pcond[col][r][1];
sb.append("<td>").append(ElementBuilder.format(pmean)).append("</td>");
sb.append("<td>").append(ElementBuilder.format(psdev)).append("</td>");
sb.append("</tr>");
}
sb.append("</table></span>");
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/nb/NBModelView.java
|
package hex.nb;
import water.*;
import water.api.DocGen;
import water.api.Request;
public class NBModelView extends Request2 {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="Naive Bayes Model Key", required = true, filter = NBModelKeyFilter.class)
Key _modelKey;
class NBModelKeyFilter extends H2OKey { public NBModelKeyFilter() { super("",true); } }
@API(help="Naive Bayes Model")
NBModel nb_model;
public static String link(String txt, Key model) {
return "<a href='/2/NBModelView.html?_modelKey=" + model + "'>" + txt + "</a>";
}
public static Response redirect(Request req, Key modelKey) {
return Response.redirect(req, "/2/NBModelView", "_modelKey", modelKey);
}
@Override public boolean toHTML(StringBuilder sb){
nb_model.generateHTML("", sb);
return true;
}
@Override protected Response serve() {
nb_model = DKV.get(_modelKey).get();
return Response.done(this);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/nb/NBProgressPage.java
|
package hex.nb;
import water.*;
import water.api.*;
import water.api.RequestBuilders.Response;
public class NBProgressPage extends Progress2 {
/** Return {@link Response} for finished job. */
@Override protected Response jobDone(final Key dst) {
return NBModelView.redirect(this, dst);
}
public static Response redirect(Request req, Key jobkey, Key dest) {
return Response.redirect(req, "/2/NBProgressPage", JOB_KEY, jobkey, DEST_KEY, dest );
}
@Override public boolean toHTML( StringBuilder sb ) {
Job jjob = Job.findJob(job_key);
if (jjob ==null) return true;
Value v = DKV.get(jjob.dest());
if(v != null){
NBModel m = v.get();
m.generateHTML("Naive Bayes Model", sb);
} else
sb.append("<b>No model yet.</b>");
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/nb/NaiveBayes.java
|
package hex.nb;
import hex.FrameTask.DataInfo;
import water.*;
import water.api.DocGen;
import water.fvec.*;
import water.util.RString;
import water.util.Utils;
/**
* Naive Bayes
* This is an algorithm for computing the conditional a-posteriori probabilities of a categorical
* response from independent predictors using Bayes rule.
* <a href = "http://en.wikipedia.org/wiki/Naive_Bayes_classifier">Naive Bayes on Wikipedia</a>
* <a href = "http://cs229.stanford.edu/notes/cs229-notes2.pdf">Lecture Notes by Andrew Ng</a>
* @author anqi_fu
*
*/
public class NaiveBayes extends Job.ModelJobWithoutClassificationField {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
static final String DOC_GET = "naive bayes";
@API(help = "Laplace smoothing parameter", filter = Default.class, lmin = 0, lmax = 100000, json = true)
public int laplace = 0;
@API(help = "Min. standard deviation to use for observations with not enough data", filter = Default.class, dmin = 1e-10, json = true)
public double min_std_dev = 1e-3;
@API(help = "Drop columns with more than 20% missing values", filter = Default.class)
public boolean drop_na_cols = true;
@Override protected void execImpl() {
long before = System.currentTimeMillis();
Frame fr = DataInfo.prepareFrame(source, response, ignored_cols, false, true /*drop const*/, drop_na_cols);
DataInfo dinfo = new DataInfo(fr, 1, false, true, DataInfo.TransformType.NONE, DataInfo.TransformType.NONE);
NBTask tsk = new NBTask(this, dinfo).doAll(dinfo._adaptedFrame);
NBModel myModel = buildModel(dinfo, tsk, laplace, min_std_dev);
myModel.start_training(before);
myModel.stop_training();
myModel.delete_and_lock(self());
myModel.unlock(self());
}
@Override protected void init() {
super.init();
if(!response.isEnum())
throw new IllegalArgumentException("Response must be a categorical column");
if (laplace < 0) throw new IllegalArgumentException("Laplace smoothing must be an integer >= 0.");
if (min_std_dev <= 1e-10) throw new IllegalArgumentException("Min. standard deviation must be at least 1e-10.");
}
@Override protected Response redirect() {
return NBProgressPage.redirect(this, self(), dest());
}
public static String link(Key src_key, String content) {
RString rs = new RString("<a href='/2/NaiveBayes.query?%key_param=%$key'>%content</a>");
rs.replace("key_param", "source");
rs.replace("key", src_key.toString());
rs.replace("content", content);
return rs.toString();
}
public NBModel buildModel(DataInfo dinfo, NBTask tsk, double laplace, double min_std_dev) {
logStart();
double[] pprior = tsk._rescnt.clone();
double[][][] pcond = tsk._jntcnt.clone();
String[][] domains = dinfo._adaptedFrame.domains();
// A-priori probability of response y
for(int i = 0; i < pprior.length; i++)
pprior[i] = (pprior[i] + laplace)/(tsk._nobs + tsk._nres*laplace);
// pprior[i] = pprior[i]/tsk._nobs; // Note: R doesn't apply laplace smoothing to priors, even though this is textbook definition
// Probability of categorical predictor x_j conditional on response y
for(int col = 0; col < dinfo._cats; col++) {
assert pcond[col].length == tsk._nres;
for(int i = 0; i < pcond[col].length; i++) {
for(int j = 0; j < pcond[col][i].length; j++)
pcond[col][i][j] = (pcond[col][i][j] + laplace)/(tsk._rescnt[i] + domains[col].length*laplace);
}
}
// Mean and standard deviation of numeric predictor x_j for every level of response y
for(int col = 0; col < dinfo._nums; col++) {
for(int i = 0; i < pcond[0].length; i++) {
int cidx = dinfo._cats + col;
double num = tsk._rescnt[i];
double pmean = pcond[cidx][i][0]/num;
pcond[cidx][i][0] = pmean;
// double pvar = pcond[cidx][i][1]/num - pmean*pmean;
double pvar = pcond[cidx][i][1]/(num - 1) - pmean*pmean*num/(num - 1);
pcond[cidx][i][1] = Math.sqrt(pvar);
}
}
Key dataKey = input("source") == null ? null : Key.make(input("source"));
return new NBModel(destination_key, dataKey, dinfo, tsk, pprior, pcond, laplace, min_std_dev);
}
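  // Worked example of the Laplace smoothing above (illustrative numbers): with 3 response levels,
  // class counts {6, 3, 1} (nobs = 10) and laplace = 1, the priors become
  //   pprior = { (6+1)/(10+3), (3+1)/(10+3), (1+1)/(10+3) } = { 7/13, 4/13, 2/13 },
  // so no class ever gets probability exactly zero. The categorical conditionals follow the same
  // (count + laplace) / (class count + K*laplace) pattern, with K the number of predictor levels.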
// Note: NA handling differs from R for efficiency purposes
// R's method: For each predictor x_j, skip counting that row for p(x_j|y) calculation if x_j = NA. If response y = NA, skip counting row entirely in all calculations
// H2O's method: Just skip all rows where any x_j = NA or y = NA. Should be more memory-efficient, but results are not directly comparable with R's.
public static class NBTask extends MRTask2<NBTask> {
final Job _job;
final protected DataInfo _dinfo;
final int _nres; // Number of levels for the response y
public int _nobs; // Number of rows counted in calculation
public double[] _rescnt; // Count of each level in the response
public double[][][] _jntcnt; // For each categorical predictor, joint count of response and predictor levels
// For each numeric predictor, sum of entries for every response level
public NBTask(Job job, DataInfo dinfo) {
_job = job;
_dinfo = dinfo;
_nobs = 0;
String[][] domains = dinfo._adaptedFrame.domains();
int ncol = dinfo._adaptedFrame.numCols();
assert ncol-1 == dinfo._nums + dinfo._cats; // ncol-1 because we drop response col
_nres = domains[ncol-1].length;
_rescnt = new double[_nres];
_jntcnt = new double[ncol-1][][];
for(int i = 0; i < _jntcnt.length; i++) {
int ncnt = domains[i] == null ? 2 : domains[i].length;
_jntcnt[i] = new double[_nres][ncnt];
}
}
@Override public void map(Chunk[] chks) {
int res_idx = chks.length - 1;
Chunk res = chks[res_idx];
OUTER:
for(int row = 0; row < chks[0]._len; row++) {
// Skip row if any entries in it are NA
for(int col = 0; col < chks.length; col++) {
if(chks[col].isNA0(row)) continue OUTER;
}
// Record joint counts of categorical predictors and response
int rlevel = (int)res.at0(row);
for(int col = 0; col < _dinfo._cats; col++) {
int plevel = (int)chks[col].at0(row);
_jntcnt[col][rlevel][plevel]++;
}
// Record sum for each pair of numerical predictors and response
for(int col = 0; col < _dinfo._nums; col++) {
int cidx = _dinfo._cats + col;
double x = chks[cidx].at0(row);
_jntcnt[cidx][rlevel][0] += x;
_jntcnt[cidx][rlevel][1] += x*x;
}
_rescnt[rlevel]++;
_nobs++;
}
}
@Override public void reduce(NBTask nt) {
_nobs += nt._nobs;
Utils.add(_rescnt, nt._rescnt);
for(int col = 0; col < _jntcnt.length; col++)
_jntcnt[col] = Utils.add(_jntcnt[col], nt._jntcnt[col]);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/pca/PCA.java
|
package hex.pca;
import Jama.Matrix;
import Jama.SingularValueDecomposition;
import hex.FrameTask.DataInfo;
import hex.gram.Gram.GramTask;
import water.Job.ColumnsJob;
import water.*;
import water.api.DocGen;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.Log;
import water.util.RString;
import java.util.ArrayList;
/**
* Principal Components Analysis
* This is an algorithm for dimensionality reduction of numerical data.
* <a href = "http://en.wikipedia.org/wiki/Principal_component_analysis">PCA on Wikipedia</a>
* @author anqi_fu
*
*/
public class PCA extends ColumnsJob {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
static final String DOC_GET = "pca";
static final int MAX_COL = 5000;
@API(help = "The PCA Model")
public PCAModel pca_model;
@API(help = "Maximum number of principal components to return.", filter = Default.class, lmin = 1, lmax = 5000, json=true)
int max_pc = 5000;
@API(help = "Omit components with std dev <= tol times std dev of first component.", filter = Default.class, lmin = 0, lmax = 1, json=true)
double tolerance = 0;
@API(help = "If true, data will be standardized on the fly when computing the model.", filter = Default.class, json=true)
boolean standardize = true;
public PCA() {}
public PCA(String desc, Key dest, Frame src, double tolerance, boolean standardize) {
this(desc, dest, src, 5000, tolerance, standardize);
}
public PCA(String desc, Key dest, Frame src, int max_pc, double tolerance, boolean standardize) {
description = desc;
destination_key = dest;
source = src;
this.max_pc = max_pc;
this.tolerance = tolerance;
this.standardize = standardize;
}
@Override public boolean toHTML(StringBuilder sb) { return makeJsonBox(sb); }
@Override protected void execImpl() {
Frame fr = selectFrame(source);
Vec[] vecs = fr.vecs();
// Remove constant cols and cols with too many NAs
ArrayList<Integer> removeCols = new ArrayList<Integer>();
for(int i = 0; i < vecs.length; i++) {
if(vecs[i].min() == vecs[i].max() || vecs[i].naCnt() > vecs[i].length()*0.2)
// if(vecs[i].min() == vecs[i].max() || vecs[i].naCnt() > vecs[i].length()*0.2 || vecs[i].domain() != null)
removeCols.add(i);
}
if(!removeCols.isEmpty()) {
int[] cols = new int[removeCols.size()];
for(int i = 0; i < cols.length; i++)
cols[i] = removeCols.get(i);
fr.remove(cols);
}
if( fr.numCols() < 2 )
throw new IllegalArgumentException("Need more than one column to run PCA");
DataInfo dinfo = new DataInfo(fr, 0, false, false, standardize ? DataInfo.TransformType.STANDARDIZE : DataInfo.TransformType.NONE);
GramTask tsk = new GramTask(self(), dinfo, false,false).doAll(dinfo._adaptedFrame);
PCAModel myModel = buildModel(dinfo, tsk);
myModel.delete_and_lock(self());
myModel.unlock(self());
remove(); // Close/remove job
final JobState state = UKV.<Job>get(self()).state;
new TAtomic<PCAModel>() {
@Override
public PCAModel atomic(PCAModel m) {
if (m != null) m.get_params().state = state;
return m;
}
}.invoke(dest());
}
@Override protected void init() {
super.init();
int num_ecols = selectFrame(source).numExpCols();
Log.info("Running PCA on dataset with " + num_ecols + " expanded columns in Gram matrix");
if(num_ecols > MAX_COL)
throw new IllegalArgumentException("Cannot process more than " + MAX_COL + " columns, taking into account expanded categoricals");
}
@Override protected Response redirect() {
return PCAProgressPage.redirect(this, self(), dest());
}
public PCAModel buildModel(DataInfo dinfo, GramTask tsk) {
logStart();
Matrix myGram = new Matrix(tsk._gram.getXX()); // X'X/n where n = num rows
SingularValueDecomposition mySVD = myGram.svd();
// Extract eigenvalues and eigenvectors
// Note: Singular values ordered in weakly descending order by algorithm
double[] Sval = mySVD.getSingularValues();
double[][] eigVec = mySVD.getV().getArray(); // rows = features, cols = principal components
assert Sval.length == eigVec.length;
// DKV.put(EigenvectorMatrix.makeKey(input("source"), destination_key), new EigenvectorMatrix(eigVec));
// Compute standard deviation
double[] sdev = new double[Sval.length];
double totVar = 0;
double dfcorr = dinfo._adaptedFrame.numRows()/(dinfo._adaptedFrame.numRows() - 1.0);
for(int i = 0; i < Sval.length; i++) {
// if(standardize)
Sval[i] = dfcorr*Sval[i]; // Correct since degrees of freedom = n-1
sdev[i] = Math.sqrt(Sval[i]);
totVar += Sval[i];
}
double[] propVar = new double[Sval.length]; // Proportion of total variance
double[] cumVar = new double[Sval.length]; // Cumulative proportion of total variance
for(int i = 0; i < Sval.length; i++) {
propVar[i] = Sval[i]/totVar;
cumVar[i] = i == 0 ? propVar[0] : cumVar[i-1] + propVar[i];
}
Key dataKey = input("source") == null ? null : Key.make(input("source"));
int ncomp = Math.min(getNumPC(sdev, tolerance), max_pc);
return new PCAModel(this, destination_key, dataKey, dinfo, tsk, sdev, propVar, cumVar, eigVec, mySVD.rank(), ncomp);
}
public static int getNumPC(double[] sdev, double tol) {
if(sdev == null) return 0;
double cutoff = tol*sdev[0];
for( int i=0; i<sdev.length; i++ )
if( sdev[i] < cutoff )
return i;
return sdev.length;
}
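  // Example (illustrative): getNumPC(new double[]{2.0, 1.0, 0.1}, 0.2) uses cutoff 0.2*2.0 = 0.4
  // and returns 2, dropping the third component (sdev 0.1 < 0.4) and everything after it; with
  // tol = 0 the cutoff is 0 and all components are kept.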
public static String link(Key src_key, String content) {
RString rs = new RString("<a href='/2/PCA.query?%key_param=%$key'>%content</a>");
rs.replace("key_param", "source");
rs.replace("key", src_key.toString());
rs.replace("content", content);
return rs.toString();
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/pca/PCAImpute.java
|
package hex.pca;
import water.Job.FrameJob;
import water.api.DocGen;
import water.fvec.Frame;
public class PCAImpute extends FrameJob {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
static final String DOC_GET = "pca_imputation";
@API(help = "Number of principal components to use", filter = Default.class, lmin = 1, lmax = 5000)
int num_pc = 1;
@API(help = "Threshold for convergence", filter = Default.class)
double threshold = 1e-5;
@API(help = "Maximum number of iterations", filter = Default.class, lmin = 1, lmax = 1000000)
int max_iter = 50;
@API(help = "Scale columns by their standard deviations", filter = Default.class)
boolean scale = true;
@Override protected void execImpl() {
Frame fr = source;
new Frame(destination_key,fr._names.clone(),fr.vecs().clone()).delete_and_lock(null).unlock(null);
}
@Override protected void init() {
super.init();
if(source != null && num_pc > source.vecs().length)
throw new IllegalArgumentException("Argument 'num_pc' must be between 1 and " + source.vecs().length);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/pca/PCAModel.java
|
package hex.pca;
import hex.FrameTask.DataInfo;
import hex.gram.Gram.GramTask;
import water.Key;
import water.MemoryManager;
import water.Model;
import water.Request2;
import water.api.DocGen;
import water.api.Request.API;
import water.api.RequestBuilders.ElementBuilder;
public class PCAModel extends Model {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help = "Column names expanded to accommodate categoricals")
final String[] namesExp;
@API(help = "Standard deviation of each principal component")
final double[] sdev;
@API(help = "Proportion of variance explained by each principal component")
final double[] propVar;
@API(help = "Cumulative proportion of variance explained by each principal component")
final double[] cumVar;
@API(help = "Principal components (eigenvector) matrix")
final double[][] eigVec;
@API(help = "If standardized, mean of each numeric data column")
final double[] normSub;
@API(help = "If standardized, one over standard deviation of each numeric data column")
final double[] normMul;
@API(help = "Offsets of categorical columns into the sdev vector. The last value is the offset of the first numerical column.")
final int[] catOffsets;
@API(help = "Rank of eigenvector matrix")
final int rank;
@API(help = "Number of principal components to display")
int num_pc;
@API(help = "Model parameters")
PCA parameters;
public PCAModel(PCA params, Key selfKey, Key dataKey, DataInfo dinfo, GramTask gramt, double[] sdev, double[] propVar, double[] cumVar, double[][] eigVec, int rank, int num_pc) {
super(selfKey, dataKey, dinfo._adaptedFrame, /* priorClassDistribution */ null);
this.sdev = sdev;
this.propVar = propVar;
this.cumVar = cumVar;
this.eigVec = eigVec;
this.parameters = params;
this.catOffsets = dinfo._catOffsets;
this.namesExp = namesExp();
this.rank = rank;
this.num_pc = num_pc;
// TODO: Need to ensure this maps correctly to scored data cols
this.normSub = gramt.normSub();
this.normMul = gramt.normMul();
}
@Override public final PCA get_params() { return parameters; }
@Override public final Request2 job() { return get_params(); }
@Override public int nfeatures() { return _names.length; }
@Override public boolean isSupervised() { return false; }
@Override public String responseName() { throw new IllegalArgumentException("PCA doesn't have a response."); }
public double[] sdev() { return sdev; }
public double[][] eigVec() { return eigVec; }
@Override protected float[] score0(double[] data, float[] preds) {
throw new RuntimeException("TODO Auto-generated method stub");
}
@Override public String toString(){
StringBuilder sb = new StringBuilder("PCA Model (key=" + _key + " , trained on " + _dataKey + "):\n");
return sb.toString();
}
public String[] namesExp(){
final int n = _names.length;
int[] nums = MemoryManager.malloc4(n);
int[] cats = MemoryManager.malloc4(n);
// Store indices of numeric and categorical cols
int nnums = 0, ncats = 0;
for(int i = 0; i < n; ++i){
if(_domains[i] != null)
cats[ncats++] = i;
else
nums[nnums++] = i;
}
// Sort the categoricals in decreasing order according to size
for(int i = 0; i < ncats; ++i)
for(int j = i+1; j < ncats; ++j)
if(_domains[cats[i]].length < _domains[cats[j]].length) {
int x = cats[i];
cats[i] = cats[j];
cats[j] = x;
}
// Construct expanded col names, with categoricals first followed by numerics
int k = 0;
String[] names = new String[sdev.length];
for(int i = 0; i < ncats; ++i){
for(int j = 1; j < _domains[cats[i]].length; ++j)
names[k++] = _names[cats[i]] + "." + _domains[cats[i]][j];
}
for(int i = 0; i < nnums; ++i) {
names[k++] = _names[nums[i]];
}
return names;
}
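// Illustrative example (hypothetical column names): a categorical column "color" with domain
// {"blue","green","red"} expands to "color.green" and "color.red" (the first level is dropped),
// and all expanded categorical names precede the untouched numeric column names.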
public void generateHTML(String title, StringBuilder sb) {
if(title != null && !title.isEmpty()) DocGen.HTML.title(sb, title);
DocGen.HTML.paragraph(sb, "Model Key: " + _key);
job().toHTML(sb);
sb.append("<script type=\"text/javascript\" src='/h2o/js/d3.v3.min.js'></script>");
sb.append("<div class='alert'>Actions: " + PCAScore.link(_key, "Score on dataset") + (_dataKey != null ? (", " + PCA.link(_dataKey, "Compute new model")):"") + "</div>");
screevarString(sb);
sb.append("<span style='display: inline-block;'>");
sb.append("<table class='table table-striped table-bordered'>");
sb.append("<tr>");
sb.append("<th>Feature</th>");
for(int i = 0; i < num_pc; i++)
sb.append("<th>").append("PC" + i).append("</th>");
sb.append("</tr>");
// Row of standard deviation values
sb.append("<tr class='warning'>");
// sb.append("<td>").append("σ").append("</td>");
sb.append("<td>").append("Std Dev").append("</td>");
for(int c = 0; c < num_pc; c++)
sb.append("<td>").append(ElementBuilder.format(sdev[c])).append("</td>");
sb.append("</tr>");
// Row with proportion of variance
sb.append("<tr class='warning'>");
sb.append("<td>").append("Prop Var").append("</td>");
for(int c = 0; c < num_pc; c++)
sb.append("<td>").append(ElementBuilder.format(propVar[c])).append("</td>");
sb.append("</tr>");
// Row with cumulative proportion of variance
sb.append("<tr class='warning'>");
sb.append("<td>").append("Cum Prop Var").append("</td>");
for(int c = 0; c < num_pc; c++)
sb.append("<td>").append(ElementBuilder.format(cumVar[c])).append("</td>");
sb.append("</tr>");
// Each row is component of eigenvector
for(int r = 0; r < eigVec.length; r++) {
sb.append("<tr>");
sb.append("<th>").append(namesExp[r]).append("</th>");
for( int c = 0; c < num_pc; c++ ) {
double e = eigVec[r][c];
sb.append("<td>").append(ElementBuilder.format(e)).append("</td>");
}
sb.append("</tr>");
}
sb.append("</table></span>");
}
public void screevarString(StringBuilder sb) {
sb.append("<div class=\"pull-left\"><a href=\"#\" onclick=\'$(\"#scree_var\").toggleClass(\"hide\");\' class=\'btn btn-inverse btn-mini\'>Scree & Variance Plots</a></div>");
sb.append("<div class=\"hide\" id=\"scree_var\">");
sb.append("<style type=\"text/css\">");
sb.append(".axis path," +
".axis line {\n" +
"fill: none;\n" +
"stroke: black;\n" +
"shape-rendering: crispEdges;\n" +
"}\n" +
".axis text {\n" +
"font-family: sans-serif;\n" +
"font-size: 11px;\n" +
"}\n");
sb.append("</style>");
sb.append("<div id=\"scree\" style=\"display:inline;\">");
sb.append("<script type=\"text/javascript\">");
sb.append("//Width and height\n");
sb.append("var w = 500;\n"+
"var h = 300;\n"+
"var padding = 40;\n"
);
sb.append("var dataset = [");
for(int c = 0; c < num_pc; c++) {
// Emit [component, eigenvalue] pairs; only the first entry omits the leading comma
if (c == 0) {
sb.append("["+String.valueOf(c+1)+",").append(ElementBuilder.format(sdev[c]*sdev[c])).append("]");
} else {
sb.append(", ["+String.valueOf(c+1)+",").append(ElementBuilder.format(sdev[c]*sdev[c])).append("]");
}
}
sb.append("];");
sb.append(
"//Create scale functions\n"+
"var xScale = d3.scale.linear()\n"+
".domain([0, d3.max(dataset, function(d) { return d[0]; })])\n"+
".range([padding, w - padding * 2]);\n"+
"var yScale = d3.scale.linear()"+
".domain([0, d3.max(dataset, function(d) { return d[1]; })])\n"+
".range([h - padding, padding]);\n"+
"var rScale = d3.scale.linear()"+
".domain([0, d3.max(dataset, function(d) { return d[1]; })])\n"+
".range([2, 5]);\n"+
"//Define X axis\n"+
"var xAxis = d3.svg.axis()\n"+
".scale(xScale)\n"+
".orient(\"bottom\")\n"+
".ticks(5);\n"+
"//Define Y axis\n"+
"var yAxis = d3.svg.axis()\n"+
".scale(yScale)\n"+
".orient(\"left\")\n"+
".ticks(5);\n"+
"//Create SVG element\n"+
"var svg = d3.select(\"#scree\")\n"+
".append(\"svg\")\n"+
".attr(\"width\", w)\n"+
".attr(\"height\", h);\n"+
"//Create circles\n"+
"svg.selectAll(\"circle\")\n"+
".data(dataset)\n"+
".enter()\n"+
".append(\"circle\")\n"+
".attr(\"cx\", function(d) {\n"+
"return xScale(d[0]);\n"+
"})\n"+
".attr(\"cy\", function(d) {\n"+
"return yScale(d[1]);\n"+
"})\n"+
".attr(\"r\", function(d) {\n"+
"return 2;\n"+//rScale(d[1]);\n"+
"});\n"+
"/*"+
"//Create labels\n"+
"svg.selectAll(\"text\")"+
".data(dataset)"+
".enter()"+
".append(\"text\")"+
".text(function(d) {"+
"return d[0] + \",\" + d[1];"+
"})"+
".attr(\"x\", function(d) {"+
"return xScale(d[0]);"+
"})"+
".attr(\"y\", function(d) {"+
"return yScale(d[1]);"+
"})"+
".attr(\"font-family\", \"sans-serif\")"+
".attr(\"font-size\", \"11px\")"+
".attr(\"fill\", \"red\");"+
"*/\n"+
"//Create X axis\n"+
"svg.append(\"g\")"+
".attr(\"class\", \"axis\")"+
".attr(\"transform\", \"translate(0,\" + (h - padding) + \")\")"+
".call(xAxis);\n"+
"//X axis label\n"+
"d3.select('#scree svg')"+
".append(\"text\")"+
".attr(\"x\",w/2)"+
".attr(\"y\",h - 5)"+
".attr(\"text-anchor\", \"middle\")"+
".text(\"Principal Component\");\n"+
"//Create Y axis\n"+
"svg.append(\"g\")"+
".attr(\"class\", \"axis\")"+
".attr(\"transform\", \"translate(\" + padding + \",0)\")"+
".call(yAxis);\n"+
"//Y axis label\n"+
"d3.select('#scree svg')"+
".append(\"text\")"+
".attr(\"x\",150)"+
".attr(\"y\",-5)"+
".attr(\"transform\", \"rotate(90)\")"+
//".attr(\"transform\", \"translate(0,\" + (h - padding) + \")\")"+
".attr(\"text-anchor\", \"middle\")"+
".text(\"Eigenvalue\");\n"+
"//Title\n"+
"d3.select('#scree svg')"+
".append(\"text\")"+
".attr(\"x\",w/2)"+
".attr(\"y\",padding - 20)"+
".attr(\"text-anchor\", \"middle\")"+
".text(\"Scree Plot\");\n");
sb.append("</script>");
sb.append("</div>");
///////////////////////////////////
sb.append("<div id=\"var\" style=\"display:inline;\">");
sb.append("<script type=\"text/javascript\">");
sb.append("//Width and height\n");
sb.append("var w = 500;\n"+
"var h = 300;\n"+
"var padding = 50;\n"
);
sb.append("var dataset = [");
for(int c = 0; c < num_pc; c++) {
// Emit [component, cumulative variance] pairs; only the first entry omits the leading comma
if (c == 0) {
sb.append("["+String.valueOf(c+1)+",").append(ElementBuilder.format(cumVar[c])).append("]");
} else {
sb.append(", ["+String.valueOf(c+1)+",").append(ElementBuilder.format(cumVar[c])).append("]");
}
}
sb.append("];");
sb.append(
"//Create scale functions\n"+
"var xScale = d3.scale.linear()\n"+
".domain([0, d3.max(dataset, function(d) { return d[0]; })])\n"+
".range([padding, w - padding * 2]);\n"+
"var yScale = d3.scale.linear()"+
".domain([0, d3.max(dataset, function(d) { return d[1]; })])\n"+
".range([h - padding, padding]);\n"+
"var rScale = d3.scale.linear()"+
".domain([0, d3.max(dataset, function(d) { return d[1]; })])\n"+
".range([2, 5]);\n"+
"//Define X axis\n"+
"var xAxis = d3.svg.axis()\n"+
".scale(xScale)\n"+
".orient(\"bottom\")\n"+
".ticks(5);\n"+
"//Define Y axis\n"+
"var yAxis = d3.svg.axis()\n"+
".scale(yScale)\n"+
".orient(\"left\")\n"+
".ticks(5);\n"+
"//Create SVG element\n"+
"var svg = d3.select(\"#var\")\n"+
".append(\"svg\")\n"+
".attr(\"width\", w)\n"+
".attr(\"height\", h);\n"+
"//Create circles\n"+
"svg.selectAll(\"circle\")\n"+
".data(dataset)\n"+
".enter()\n"+
".append(\"circle\")\n"+
".attr(\"cx\", function(d) {\n"+
"return xScale(d[0]);\n"+
"})\n"+
".attr(\"cy\", function(d) {\n"+
"return yScale(d[1]);\n"+
"})\n"+
".attr(\"r\", function(d) {\n"+
"return 2;\n"+//rScale(d[1]);\n"+
"});\n"+
"/*"+
"//Create labels\n"+
"svg.selectAll(\"text\")"+
".data(dataset)"+
".enter()"+
".append(\"text\")"+
".text(function(d) {"+
"return d[0] + \",\" + d[1];"+
"})"+
".attr(\"x\", function(d) {"+
"return xScale(d[0]);"+
"})"+
".attr(\"y\", function(d) {"+
"return yScale(d[1]);"+
"})"+
".attr(\"font-family\", \"sans-serif\")"+
".attr(\"font-size\", \"11px\")"+
".attr(\"fill\", \"red\");"+
"*/\n"+
"//Create X axis\n"+
"svg.append(\"g\")"+
".attr(\"class\", \"axis\")"+
".attr(\"transform\", \"translate(0,\" + (h - padding) + \")\")"+
".call(xAxis);\n"+
"//X axis label\n"+
"d3.select('#var svg')"+
".append(\"text\")"+
".attr(\"x\",w/2)"+
".attr(\"y\",h - 5)"+
".attr(\"text-anchor\", \"middle\")"+
".text(\"Principal Component\");\n"+
"//Create Y axis\n"+
"svg.append(\"g\")"+
".attr(\"class\", \"axis\")"+
".attr(\"transform\", \"translate(\" + padding + \",0)\")"+
".call(yAxis);\n"+
"//Y axis label\n"+
"d3.select('#var svg')"+
".append(\"text\")"+
".attr(\"x\",150)"+
".attr(\"y\",-5)"+
".attr(\"transform\", \"rotate(90)\")"+
//".attr(\"transform\", \"translate(0,\" + (h - padding) + \")\")"+
".attr(\"text-anchor\", \"middle\")"+
".text(\"Cumulative Proportion of Variance\");\n"+
"//Title\n"+
"d3.select('#var svg')"+
".append(\"text\")"+
".attr(\"x\",w/2)"+
".attr(\"y\",padding-20)"+
".attr(\"text-anchor\", \"middle\")"+
".text(\"Cumulative Variance Plot\");\n");
sb.append("</script>");
sb.append("</div>");
sb.append("</div>");
sb.append("<br />");
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/pca/PCAModelView.java
|
package hex.pca;
import water.DKV;
import water.Key;
import water.Request2;
import water.api.DocGen;
import water.api.Request;
public class PCAModelView extends Request2 {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="PCA Model Key", required = true, filter = PCAModelKeyFilter.class)
Key _modelKey;
class PCAModelKeyFilter extends H2OKey { public PCAModelKeyFilter() { super("",true); } }
@API(help="PCA Model")
public PCAModel pca_model;
public static String link(String txt, Key model) {
return "<a href='/2/PCAModelView.html?_modelKey=" + model + "'>" + txt + "</a>";
}
public static Response redirect(Request req, Key modelKey) {
return Response.redirect(req, "/2/PCAModelView", "_modelKey", modelKey);
}
@Override public boolean toHTML(StringBuilder sb){
pca_model.generateHTML("", sb);
return true;
}
@Override protected Response serve() {
pca_model = DKV.get(_modelKey).get();
return Response.done(this);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/pca/PCAParams.java
|
package hex.pca;
import water.Iced;
import water.api.DocGen;
import water.api.Request.API;
public class PCAParams extends Iced {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help = "maximum number of principal components")
final int max_pc;
@API(help = "tolerance")
final double tolerance;
@API(help = "standardize")
// final boolean standardize;
final int standardize;
public PCAParams(boolean std) {
max_pc = 5000;
tolerance = 0;
// standardize = std;
standardize = std ? 1 : 0;
}
public PCAParams(double tol, boolean std) {
max_pc = 5000;
tolerance = tol;
// standardize = std;
standardize = std ? 1 : 0;
}
public PCAParams(int max, double tol, boolean std) {
max_pc = max;
tolerance = tol;
// standardize = std;
standardize = std ? 1 : 0;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/pca/PCAProgressPage.java
|
package hex.pca;
import water.DKV;
import water.Job;
import water.Key;
import water.Value;
import water.api.Progress2;
import water.api.Request;
public class PCAProgressPage extends Progress2 {
/** Return {@link Response} for finished job. */
@Override protected Response jobDone(final Key dst) {
return PCAModelView.redirect(this, dst);
}
public static Response redirect(Request req, Key jobkey, Key dest) {
return Response.redirect(req, "/2/PCAProgressPage", JOB_KEY, jobkey, DEST_KEY, dest );
}
@Override public boolean toHTML( StringBuilder sb ) {
Job jjob = Job.findJob(job_key);
if (jjob == null) return true;
Value v = DKV.get(jjob.dest());
if(v != null){
PCAModel m = v.get();
m.generateHTML("PCA Model", sb);
} else
sb.append("<b>No model yet.</b>");
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/pca/PCAScore.java
|
package hex.pca;
import hex.FrameTask;
import hex.FrameTask.DataInfo;
import water.Job;
import water.Job.FrameJob;
import water.Key;
import water.api.DocGen;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.util.RString;
/**
* Principal Components Scoring
* This algorithm maps a dataset into the subspace generated by the principal components.
* If A = dataset to be scored, and B = eigenvector matrix (rows = features, cols = components),
* then the score is simply A * B, assuming the column features match up exactly.
* <a href = "http://en.wikipedia.org/wiki/Principal_component_analysis">PCA on Wikipedia</a>
* @author anqi_fu
*
*/
public class PCAScore extends FrameJob {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
static final String DOC_GET = "pca_score";
@API(help = "PCA model to use for scoring", required = true, filter = Default.class)
PCAModel model;
@API(help = "Number of principal components to return", filter = Default.class, lmin = 1, lmax = 5000)
int num_pc = 1;
@Override protected void execImpl() {
// Note: Source data MUST contain all features (matched by name) used to build PCA model!
// If additional columns exist in source, they are automatically ignored in scoring
new Frame(destination_key, new String[0], new Vec[0]).delete_and_lock(self());
Frame fr = model.adapt(source, true)[0];
int nfeat = model._names.length;
DataInfo dinfo = new DataInfo(fr, 0, false, false, model.normSub, model.normMul, DataInfo.TransformType.STANDARDIZE, null, null);
PCAScoreTask tsk = new PCAScoreTask(this, dinfo, nfeat, num_pc, model.eigVec);
tsk.doAll(num_pc, dinfo._adaptedFrame);
String[] names = new String[num_pc];
String[][] domains = new String[num_pc][];
for(int i = 0; i < num_pc; i++) {
names[i] = "PC" + i;
domains[i] = null;
}
tsk.outputFrame(destination_key, names, domains).unlock(self());
}
@Override protected void init() {
super.init();
if(model != null && num_pc > model.num_pc)
throw new IllegalArgumentException("Argument 'num_pc' must be between 1 and " + model.num_pc);
}
/* @Override public float progress() {
ChunkProgress progress = UKV.get(progressKey());
return (progress != null ? progress.progress() : 0);
} */
public static String link(Key modelKey, String content) {
return link("model", modelKey, content);
}
public static String link(String key_param, Key k, String content) {
RString rs = new RString("<a href='/2/PCAScore.query?%key_param=%$key'>%content</a>");
rs.replace("key_param", key_param);
rs.replace("key", k.toString());
rs.replace("content", content);
return rs.toString();
}
// Matrix multiplication A * B, where A is a skinny matrix (# rows >> # cols) and B is a
// small matrix that fits on a single node. For PCA scoring, the cols of A (rows of B) are
// the features of the input dataset, while the cols of B are the principal components.
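// A minimal per-row sketch of that product (illustrative only; "x" stands for one standardized
// input row and "eigvec" for B, with rows = expanded features and cols = components):
//   double[] score = new double[ncomp];
//   for (int c = 0; c < ncomp; c++)
//     for (int j = 0; j < x.length; j++)
//       score[c] += x[j] * eigvec[j][c];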
public static class PCAScoreTask extends FrameTask<PCAScoreTask> {
final int _nfeat; // number of features
final int _ncomp; // number of principal components (<= nfeat)
final double[][] _eigvec; // eigenvector matrix
public PCAScoreTask(Job job, DataInfo dinfo, int nfeat, int ncomp, double[][] eigvec) {
super(job.self(), dinfo);
_nfeat = nfeat;
_ncomp = ncomp;
_eigvec = eigvec;
}
// Note: Rows with NAs (missing values) are automatically skipped!
@Override protected void processRow(long gid, double[] nums, int ncats, int[] cats, double[] response, NewChunk[] outputs) {
for(int c = 0; c < _ncomp; c++) {
double x = 0;
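// Categorical levels are one-hot encoded, so each active level contributes its eigenvector entry directly.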
for(int d = 0; d < ncats; d++)
x += _eigvec[cats[d]][c];
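// Numeric features start at numStart() in the expanded eigenvector rows; accumulate the dot product.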
int k = _dinfo.numStart();
for(int d = 0; d < nums.length; d++)
x += nums[d]*_eigvec[k++][c];
assert k == _eigvec.length;
outputs[c].addNum(x);
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/rng/H2ORandomRNG.java
|
package hex.rng;
import java.util.Random;
public class H2ORandomRNG extends Random {
public H2ORandomRNG(long seed) {
super();
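// If either 32-bit half of the seed is nearly empty, OR in a fixed bit pattern so that both
// halves carry entropy before handing the seed to java.util.Random.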
if ((seed >>> 32) < 0x0000ffffL) seed |= 0x5b93000000000000L;
if (((seed << 32) >>> 32) < 0x0000ffffL) seed |= 0xdb910000L;
setSeed(seed);
}
public enum RNGKind {
DETERMINISTIC("deter", "determ"),
NON_DETERMINISTIC("nondeter", "non-deter", "nondeterm", "non-determ");
String[] shortcuts;
private RNGKind(String... shortcuts) { this.shortcuts = shortcuts; }
public static RNGKind value(String s) {
RNGKind[] kinds = values();
for( RNGKind kind : kinds )
for( String ss : kind.shortcuts )
if( ss.equals(s) ) return kind;
return RNGKind.valueOf(s);
}
}
public enum RNGType {
JavaRNG(RNGKind.DETERMINISTIC),
MersenneTwisterRNG(RNGKind.DETERMINISTIC),
XorShiftRNG(RNGKind.DETERMINISTIC),
SecureRNG(RNGKind.NON_DETERMINISTIC);
RNGKind kind;
private RNGType(RNGKind kind) { this.kind = kind; }
public RNGKind kind() { return this.kind; }
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/rng/MersenneTwisterRNG.java
|
// ============================================================================
// Copyright 2006-2012 Daniel W. Dyer
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ============================================================================
package hex.rng;
import water.util.Utils;
import java.util.Random;
import java.util.concurrent.locks.ReentrantLock;
/**
* <p>
* Random number generator based on the <a
* href="http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html"
* target="_top">Mersenne Twister</a> algorithm developed by Makoto Matsumoto
* and Takuji Nishimura.
* </p>
*
* <p>
* This is a very fast random number generator with good statistical properties
* (it passes the full DIEHARD suite). This is the best RNG for most
* experiments. If a non-linear generator is required, use the slower
* <code>AESCounterRNG</code> RNG.
* </p>
*
* <p>
* This PRNG is deterministic, which can be advantageous for testing purposes
* since the output is repeatable. If multiple instances of this class are
* created with the same seed they will all have identical output.
* </p>
*
* <p>
* This code is translated from the original C version and assumes that we will
* always seed from an array of bytes. I don't pretend to know the meanings of
* the magic numbers or how it works, it just does.
* </p>
*
* <p>
* <em>NOTE: Because instances of this class require 128-bit seeds, it is not
* possible to seed this RNG using the {@link #setSeed(long)} method inherited
* from {@link Random}. Calls to this method will have no effect.
* Instead the seed must be set by a constructor.</em>
* </p>
*
* @author Makoto Matsumoto and Takuji Nishimura (original C version)
* @author Daniel Dyer (Java port)
*/
public class MersenneTwisterRNG extends Random {
// Magic numbers from original C version.
private static final int N = 624;
private static final int M = 397;
private static final int[] MAG01 = { 0, 0x9908b0df };
private static final int UPPER_MASK = 0x80000000;
private static final int LOWER_MASK = 0x7fffffff;
private static final int BOOTSTRAP_SEED = 19650218;
private static final int BOOTSTRAP_FACTOR = 1812433253;
private static final int SEED_FACTOR1 = 1664525;
private static final int SEED_FACTOR2 = 1566083941;
private static final int GENERATE_MASK1 = 0x9d2c5680;
private static final int GENERATE_MASK2 = 0xefc60000;
// Lock to prevent concurrent modification of the RNG's internal state.
private final ReentrantLock lock = new ReentrantLock();
/* State vector */
private final int[] mt = new int[N];
/* Index into state vector */
private int mtIndex = 0;
public MersenneTwisterRNG(long... seeds) {
this(Utils.unpackInts(seeds));
}
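// Minimal usage sketch (illustrative; the seed values below are arbitrary):
//   MersenneTwisterRNG rng = new MersenneTwisterRNG(new int[]{ 0x1234, 0x5678 });
//   int draw = rng.nextInt(100); // identical seeds yield identical sequences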
/**
* Creates an RNG and seeds it with the specified seed data.
*
* @param seedInts The seed data used to initialise the RNG.
*/
public MersenneTwisterRNG(int... seedInts) {
// This section is translated from the init_genrand code in the C version.
mt[0] = BOOTSTRAP_SEED;
for( mtIndex = 1; mtIndex < N; mtIndex++ ) {
mt[mtIndex] = (BOOTSTRAP_FACTOR
* (mt[mtIndex - 1] ^ (mt[mtIndex - 1] >>> 30)) + mtIndex);
}
// This section is translated from the init_by_array code in the C version.
int i = 1;
int j = 0;
for( int k = Math.max(N, SEEDS.length); k > 0; k-- ) {
int jseeds = (j == 0 || j == 1) ? seedInts[j] : SEEDS[j];
mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >>> 30)) * SEED_FACTOR1))
+ jseeds + j;
i++;
j++;
if( i >= N ) {
mt[0] = mt[N - 1];
i = 1;
}
if( j >= SEEDS.length ) {
j = 0;
}
}
for( int k = N - 1; k > 0; k-- ) {
mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >>> 30)) * SEED_FACTOR2)) - i;
i++;
if( i >= N ) {
mt[0] = mt[N - 1];
i = 1;
}
}
mt[0] = UPPER_MASK; // Most significant bit is 1 - guarantees non-zero
// initial array.
}
@Override
protected final int next(int bits) {
int y;
try {
lock.lock();
if( mtIndex >= N ) // Generate N ints at a time.
{
int kk;
for( kk = 0; kk < N - M; kk++ ) {
y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
mt[kk] = mt[kk + M] ^ (y >>> 1) ^ MAG01[y & 0x1];
}
for( ; kk < N - 1; kk++ ) {
y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
mt[kk] = mt[kk + (M - N)] ^ (y >>> 1) ^ MAG01[y & 0x1];
}
y = (mt[N - 1] & UPPER_MASK) | (mt[0] & LOWER_MASK);
mt[N - 1] = mt[M - 1] ^ (y >>> 1) ^ MAG01[y & 0x1];
mtIndex = 0;
}
y = mt[mtIndex++];
} finally {
lock.unlock();
}
// Tempering
y ^= (y >>> 11);
y ^= (y << 7) & GENERATE_MASK1;
y ^= (y << 15) & GENERATE_MASK2;
y ^= (y >>> 18);
return y >>> (32 - bits);
}
/* 624 int seeds generated from /dev/random
*
* SEEDS[0] and SEEDS[1] are reserved for MersenneTwister initialization in hex.rf.Utils.
* They can take any value!
*
* Note: SEEDS are modified in place; the user has to ensure proper locking.
*/
public static final int[] SEEDS = new int[] {
0x00000000, 0x00000000, 0x8a885b28, 0xcb618e3c, 0x6812fe78, 0xca8ca770, 0xf2a19ffd, 0xb6821eaa,
0xd1fa32c7, 0xc6dbee65, 0xd9534b7f, 0xa8e765a6, 0x2da3c864, 0xb5a7766a, 0x2bc7e671, 0xf80571d0,
0xa7174754, 0xf3234de2, 0x4e7cc080, 0x1140d082, 0x5fad93ab, 0x8cce5b9f, 0x1872465a, 0x6b42ecd3,
0x2c8c9653, 0x453a2eef, 0xcc508838, 0x5a85a0e1, 0x3b7a05e9, 0x2ac09cfd, 0x88aa58c6, 0xd9680c83,
0x061c1189, 0xc5ce6f21, 0x0acff61d, 0x3f550f57, 0xfce253ce, 0x72f39c54, 0x1772831b, 0x7f61413f,
0x5971d316, 0x38306f1e, 0xe4102ecc, 0xe64f0fc5, 0x3bc7ba66, 0x739ef534, 0x1379892e, 0x8f608758,
0x4828e965, 0xf4ac7b9a, 0xa8ddaba3, 0x50f8b1cb, 0xfec0f9d0, 0x842537e7, 0x5e6231bf, 0xef3ae390,
0x420f8f3a, 0xeedd75cc, 0xe3c10283, 0x5c38cbd6, 0x662c8b91, 0x2cd589d5, 0xe28522a7, 0xda03a7b4,
0xb29877dc, 0x45a109fb, 0x99c3021e, 0x0af14661, 0xe85d6e6e, 0xbdaa929b, 0x940e053d, 0x861e7d7d,
0x73ae673f, 0x8491c460, 0xc01be6a4, 0x06e0818c, 0x142f7399, 0xc80a6a41, 0x45600653, 0x1c0516d5,
0xd2ff0694, 0xb1cb723d, 0x73f355e0, 0x076cb63a, 0x7db7190f, 0x35ea0b80, 0xa36f646b, 0xb9ebfa2f,
0x3844839b, 0x58d80a19, 0x1f3d8746, 0x229bb12e, 0x0ac3846d, 0xd2f43715, 0x04aaeb46, 0xacc87633,
0x7dd5b268, 0xba3651fc, 0xd76801e6, 0x9e413be6, 0xb31b71c5, 0x5fd36451, 0x4041662e, 0x8e87487b,
0x03126116, 0x6574b757, 0x7717d040, 0x1d15c783, 0x7a167e9c, 0x8e4ec7a0, 0x749bc3e5, 0xfa2ea1b1,
0x25df2c84, 0xf9e7ae19, 0xe071597a, 0x6ae0fb27, 0x12380f69, 0xf672e42f, 0x5425f6f6, 0xed6e16b7,
0x36b29279, 0x24cbd8fb, 0x4d682009, 0x0e17116c, 0x10428b6b, 0xe463f573, 0x2c5ff8d0, 0x1102b138,
0xc544907c, 0xcf403704, 0x2565d0ec, 0x67e3111c, 0xc5097632, 0xe3505d2d, 0xb0a31246, 0x55cbffb3,
0xf2b662cb, 0x944ba74f, 0xf64a1136, 0x67628af5, 0x1d442a18, 0x31c8c7d4, 0x648a701b, 0x563930c4,
0x28ecd115, 0x9959be3f, 0x9afa938d, 0x0c40f581, 0x8ec73f72, 0x20dbf8a1, 0x2c2ca035, 0xb81f414c,
0xfc16c15c, 0xec386121, 0x41d8bd3a, 0x60eab9ce, 0x9f4b093d, 0x56e5bb7c, 0x0d60cd53, 0x3238a405,
0xa159ab87, 0xdadaaed3, 0xc86b574f, 0x9ed3b528, 0x3137e717, 0x028012fc, 0x8477ea87, 0x6477d097,
0x06b6e294, 0x1dd29c4e, 0x5c732920, 0xc760bcec, 0x5d40a29a, 0xc581f784, 0x13b46a5e, 0xf6761ea7,
0x1b4ee8c3, 0x1637d570, 0x0c00569a, 0xd01cb95e, 0x87343e82, 0x17190e4c, 0x357078a3, 0x3b59246c,
0xdf11b5e7, 0x68971c7a, 0xcc3d497e, 0x21659527, 0x2c211ba2, 0xf34aa1ee, 0x4a07f67e, 0x7ae0eacd,
0xe05bdc85, 0xfe2347a7, 0xebc4be3f, 0x1f033044, 0x82e2a46e, 0x75c66f49, 0x56c50b1e, 0xc20f0644,
0x798ec011, 0x9eba0c81, 0x0fe34e70, 0x28061a7f, 0x26536ace, 0x6541a948, 0x305edffe, 0x25eaa0a9,
0xef64db75, 0xe1f4d734, 0xe27e22de, 0x3b68a4b3, 0x8917d09f, 0x402f7e99, 0xe9b3e3e7, 0x9a95e6fb,
0x42a5725c, 0x00d9f288, 0x9e893c59, 0x3771df6d, 0xbfb39333, 0x9039fd17, 0x3d574609, 0xb8a44bc4,
0xe12f34ad, 0x7f165a6c, 0x8e13ec33, 0xa8d935be, 0x00ac09d8, 0x3ffff87b, 0xda94be75, 0x8b1804d5,
0xd1ac4301, 0xc2b4101d, 0xb8dae770, 0x3062dbf0, 0xc5defd8d, 0xa791e2aa, 0x678f3924, 0xec4ea145,
0x457c82b5, 0x6698be3c, 0xfbd4913f, 0xff52ad6d, 0x54c7f66d, 0x7d6ec779, 0x9ce9d1d9, 0x384dd1eb,
0xb4b4d565, 0xa5736588, 0x33ae82b2, 0x051221b0, 0x11a8775f, 0xd2ed52ea, 0xdf99b00b, 0xa0425a1a,
0xd6b32a9b, 0xfa162152, 0x4de98efb, 0xb0d5553e, 0xdd9d7239, 0x05be808d, 0x438f6f74, 0xdf28fc47,
0xb6fcd76d, 0x58375c21, 0x1a88eae6, 0x1ce15ca9, 0x46304120, 0xc2a8c9ee, 0xa2eaf06e, 0xf548a76c,
0xd288b960, 0xec1c7cb5, 0x6e59f189, 0x3424b4eb, 0x521220db, 0x9d2f797d, 0x8561d680, 0x63eda823,
0x7f406b58, 0x31104105, 0x1a457dc1, 0x3a94cec4, 0xed5a24b7, 0xa11766a2, 0xefd011e1, 0x10806e51,
0x5519474f, 0x08d1a66f, 0xc83ac414, 0xf9dad4f5, 0xfa64b469, 0x6cbfd6a3, 0xb2e787ce, 0x63eb2f8e,
0xe0d36a89, 0xe232fe8f, 0xd0d28011, 0xd198ab29, 0x1e5aa524, 0x05ae372d, 0x314fb7fb, 0x7e263de0,
0x61e8d239, 0x2f76e5b6, 0xaf2af828, 0x4146a159, 0x3626bccf, 0x308a82ed, 0x1e5527a3, 0xe540898d,
0xb2e944de, 0x010007fd, 0xaabb40cc, 0xa119fd6b, 0xefca25a8, 0xd1389d26, 0x15b65a4b, 0xf1323150,
0x3798f801, 0xf5787776, 0xcd069f96, 0x91da0117, 0xb603eaa4, 0xb068125e, 0x346216d5, 0xcb0af099,
0xad8131db, 0x1c5ce132, 0x3a094b8a, 0x68d20e3f, 0x6f62b0b9, 0x5b2da8a9, 0x11530b9a, 0x5c340608,
0x9b23c1d9, 0xf175fcba, 0x70fddd5e, 0x9c554ec4, 0xfc0cb505, 0x5249997f, 0xc42f151f, 0xee9f506f,
0x8fb2cd27, 0xb799db4b, 0x4c5c0eeb, 0x37278283, 0x8183b362, 0x928b4cc7, 0x6c895352, 0x9b0a8270,
0xc5cb93da, 0xf8268a31, 0x09fd1af6, 0xbc6e89fc, 0x5a614eb8, 0xe55b1348, 0x992a69ee, 0x55b0ffb7,
0x4eb5db62, 0x5cde9e6b, 0xad9b186d, 0xa5006f43, 0xc82c2c7f, 0x822fa75f, 0xa3a4cb06, 0x6d05edda,
0x5bf76fb7, 0x846a54f8, 0xca7ce73c, 0x43c1a8d1, 0x1b4c79a7, 0x85cb66c7, 0xc541b4ad, 0x07e69a11,
0xffb1e304, 0xe585f233, 0x506773a5, 0xc7adaa3c, 0xf980d0c6, 0xa3d90125, 0xfbce4232, 0xfe6fed8f,
0xe17f437a, 0x29c45214, 0xa0ea1046, 0xc025f727, 0x820202ca, 0x554f4e76, 0x5389096c, 0x7d58de96,
0xe32295b8, 0x689b5fbe, 0xdfefacf1, 0xd4facb70, 0x0cf3703e, 0x78fec105, 0x57b53e14, 0x54bcd2ef,
0x335f4d0d, 0x58552c2e, 0xf64df202, 0x0e5c3565, 0xa4cb22c5, 0xd91c91c1, 0x7827bb3f, 0x37b456e3,
0x84950a9e, 0x273edcd7, 0xddaa5ebd, 0xb1f46855, 0xe0052b20, 0xcfb04082, 0xa449e49b, 0xfd95e21c,
0xa9f477c0, 0xacf0be15, 0x611d1edc, 0xb3dca16a, 0x781efb9a, 0x6480c096, 0x4e545269, 0xbc836952,
0xd511b539, 0xdf6248b4, 0x8ff7da61, 0x0756106d, 0x92f04a17, 0xee649e83, 0x14e35780, 0x6dc76815,
0x0fe032bb, 0x1fd66462, 0x0f4be990, 0x1627c658, 0xb95f902d, 0xa6f9e4e9, 0xb7b9aa16, 0x6a0a31d5,
0x647129e6, 0x071f89b7, 0xe4033ca9, 0xd81b3f59, 0x74f8a887, 0xc44bc880, 0xf1c2d04c, 0xf9e246c9,
0x529f9c45, 0x14d322e7, 0x8c3305b1, 0x8dd9a988, 0x8a92b883, 0x47574eb3, 0x7b5779f4, 0x759a4eb6,
0xc8ed6a11, 0x42a4e0ee, 0xf4603b1d, 0x790d9126, 0xa261034e, 0x94569718, 0x5f57c893, 0xa1c2486a,
0x6727618f, 0xcfb7c5b3, 0xa4c2f232, 0x33b5e051, 0x9ed6c2d0, 0x16f3ec37, 0x5c7c96ba, 0x3a16185f,
0x361d6c17, 0xa179808b, 0xb6751231, 0xc8486729, 0x873fc8ab, 0xe7f78a78, 0x2fd3093b, 0x489efe89,
0x83628cd1, 0x67ad9faa, 0x623cbc2f, 0x3f01e8c4, 0xfdad453f, 0x2ccfb969, 0x5d2a3806, 0x9e3df87a,
0x04700155, 0xab7b57ef, 0x262d746b, 0x737aa3e3, 0x949c724c, 0xa4120c39, 0xb0d6fc26, 0xf627a213,
0xc0a0bc60, 0x24d6564a, 0x34d460dd, 0x785b0656, 0x9376f6a5, 0x25ebee5b, 0x5a0a5018, 0x84d02b01,
0xa2b3658a, 0xad0d1cce, 0x38271683, 0x9f491585, 0x8ba28247, 0x40d5a42e, 0x7780e82e, 0x4211ccc3,
0x99da0844, 0xb85f9474, 0xbdb158b6, 0xf8194c71, 0x6339f3ec, 0x4cd66cf7, 0xb636aa4f, 0x4068c56c,
0xe41080a1, 0x55740173, 0x95903235, 0x90f39f69, 0x3f10a4e2, 0x3192a79b, 0x0590a944, 0xc9058c4f,
0x6f05a8eb, 0xdb326d13, 0xfcefbcee, 0xa699db05, 0xd819d477, 0x610f7e52, 0xfa0a4aca, 0x0e6b3f1d,
0x7a8da290, 0x6d12a9ef, 0xa12642d5, 0xebdedcff, 0x175ed926, 0xa094363a, 0xb3a07e30, 0x34fa8d2c,
0xbc16e646, 0x3e6de94d, 0xd5288754, 0x204e5283, 0xc61106f6, 0x299835e0, 0xe04e7a38, 0x2e2c1e34,
0xc069ea80, 0x5c2117cf, 0xd8fc2947, 0x10a40dc9, 0xb40dacd9, 0xfbdac86b, 0x2a8383cb, 0x46d86dc1,
0x0a1f3958, 0x0f7e59ea, 0x5c10a118, 0xea13bfc8, 0xc82c0da5, 0x4cd40dd7, 0xdaa5dfe9, 0x8c2cc0a3,
0x8dc15a64, 0x241b160c, 0xc44f573b, 0x3eb3155f, 0x284ba3fc, 0x1ece8db4, 0x03eaf07f, 0x7cbd99fb,
0x7d313b45, 0xe7ea83a7, 0x6d339d60, 0x0ef002cb, 0x92a04b40, 0x510d79bc, 0x6440e050, 0x33916596,
0xa11c5df3, 0xb582a3de, 0x031001c1, 0x85951218, 0xbe538ada, 0xe3aec1d2, 0x7fb67836, 0xc2d9ab84,
0xb1841ad9, 0x1e64cc5f, 0xa3fe111d, 0xd081d6bb, 0xf8ae6c3b, 0x3b12ae4c, 0x9ba5eb58, 0x22931b18,
0xf99b2e61, 0x628f1252, 0x2fce9aa0, 0xf99a04fb, 0x21577d22, 0x9d474c81, 0x7350e54a, 0xf88c8ac6,
0x94f38853, 0x0b6333fe, 0x8875045e, 0x90c23689, 0x6b08a34b, 0x3fb742ea, 0xa8a9466a, 0xd543807d,
0xbf12e26e, 0x10211c25, 0x068852e1, 0xf1d8f035, 0x012a5782, 0xe84cbf5f, 0xee35a87a, 0x8bfa2f09,
};
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/rng/XorShiftRNG.java
|
package hex.rng;
import java.util.Random;
import java.util.concurrent.atomic.AtomicLong;
/**
* Simple XorShiftRNG.
*
* Note: According to RF benchmarks it does not provide results as accurate
* as {@link java.util.Random}; however, it can be used as an alternative.
*
*/
public class XorShiftRNG extends Random {
private AtomicLong _seed;
public XorShiftRNG (long seed) {
this._seed = new AtomicLong(seed);
}
@Override
public long nextLong() {
long oldseed, nextseed;
AtomicLong seed = this._seed;
do {
oldseed = seed.get();
nextseed = xorShift(oldseed);
} while (!seed.compareAndSet(oldseed, nextseed));
return nextseed;
}
@Override
public int nextInt() {
return nextInt(Integer.MAX_VALUE);
}
@Override
public int nextInt(int n) {
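// Reduce a 64-bit draw modulo n and fold negative remainders back into [0, n);
// cheap, though it retains a small modulo bias.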
int r = (int) (nextLong() % n);
return r > 0 ? r : -r;
}
@Override
protected int next(int bits) {
long nextseed = nextLong();
return (int) (nextseed & ((1L << bits) - 1));
}
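// One step of a 64-bit xorshift generator (a left/right/left shift triple from Marsaglia's xorshift family).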
private long xorShift(long x) {
x ^= (x << 21);
x ^= (x >>> 35);
x ^= (x << 4);
return x;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/CMTask.java
|
package hex.singlenoderf;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.Log;
import water.util.Log.Tag.Sys;
import water.util.ModelUtils;
import water.util.Utils;
import java.util.Arrays;
import java.util.Random;
import hex.VarImp;
/**
* Confusion Matrix. Incrementally computes a Confusion Matrix for a forest
* of Trees, vs a given input dataset. The set of Trees can grow over time. Each
 * request makes the Confusion compute on any new trees (if any) and report a
 * matrix. Cheap if all trees have already been computed.
*/
public class CMTask extends MRTask2<CMTask> {
public double[] _classWt;
public boolean _computeOOB;
public int _treesUsed;
public Key _modelKey;
public Key _datakey;
public int _classcol;
public CM _matrix;
public float _sum; // sum of squares Sum_ti((f_ti - delta(o_ti,i))^2), a.k.a. Brier score (~ classification MSE)
public CM[] _localMatrices;
public long[] _errorsPerTree;
public SpeeDRFModel _model;
public int[] _modelDataMap;
public Frame _data;
public int _N;
public long _cms[][][];
public VarImp _varimp;
public int[] _oobs;
public Key[][] _remoteChunksKeys;
public float _ss; // Sum of squares
public int _rowcnt; // Rows used in scoring for regression
public boolean _score_new_tree_only;
/** Data to replay the sampling algorithm */
private long[] _chunk_row_mapping;
/** Number of rows at each node */
private int[] _rowsPerNode;
/** Computed mapping of model prediction classes to confusion matrix classes */
private int[] _model_classes_mapping;
/** Computed mapping of data prediction classes to confusion matrix classes */
private int[] _data_classes_mapping;
/** Difference between model cmin and CM cmin */
private int _cmin_model_mapping;
/** Difference between data cmin and CM cmin */
private int _cmin_data_mapping;
transient private Random _rand;
/** Confusion matrix
* @param model the ensemble used to classify
*/
private CMTask(SpeeDRFModel model, int treesToUse, boolean computeOOB, Frame fr, Vec resp) {
_modelKey = model._key;
_datakey = model._dataKey;
_classcol = fr.numCols() - 1; //model.test_frame == null ? (model.fr.numCols() - 1) : (model.test_frame.numCols() - 1);
_treesUsed = treesToUse;
_computeOOB = computeOOB;
_model = model;
_varimp = null;
_ss = 0.f;
_data = fr;
shared_init(resp);
}
public static CMTask scoreTask(SpeeDRFModel model, int treesToUse, boolean computeOOB, Frame fr, Vec resp) {
CMTask tsk = new CMTask(model, treesToUse, computeOOB, fr, resp);
tsk.doAll(fr);
return tsk;
}
/** Shared init: pre-compute local data for new Confusions, for remote Confusions*/
private void shared_init(Vec resp) {
/* For reproducibility we can control the randomness in the computation of the
confusion matrix. The default seed when deserializing is 42. */
// _data = _model.test_frame == null ? _model.fr : _model.test_frame;
if (_model.validation) _computeOOB = false;
_modelDataMap = _model.colMap(_data);
assert !_computeOOB || _model._dataKey.equals(_datakey) : !_computeOOB + " || " + _model._dataKey + " equals " + _datakey;
Vec respModel = resp;
Vec respData = _data.vecs()[_classcol];
int model_max = (int)respModel.max();
int model_min = (int)respModel.min();
int data_max = (int)respData.max();
int data_min = (int)respData.min();
if (respModel._domain!=null) {
assert respData._domain != null;
_model_classes_mapping = new int[respModel._domain.length];
_data_classes_mapping = new int[respData._domain.length];
// compute mapping
_N = alignEnumDomains(respModel._domain, respData._domain, _model_classes_mapping, _data_classes_mapping);
} else {
assert respData._domain == null;
_model_classes_mapping = null;
_data_classes_mapping = null;
// compute mapping
_cmin_model_mapping = model_min - Math.min(model_min, data_min);
_cmin_data_mapping = data_min - Math.min(model_min, data_min);
_N = Math.max(model_max, data_max) - Math.min(model_min, data_min) + 1;
}
assert _N > 0; // You know...it is good to be sure
init();
}
public void init() {
// Make a mapping from chunk# to row# just for chunks on this node
// First compute the number of chunks homed to this node
int total_home = 0;
for (int i = 0; i < _data.anyVec().nChunks(); ++i) {
if (_data.anyVec().chunkKey(i).home()) {
total_home++;
}
}
// Now generate the mapping
_chunk_row_mapping = new long[total_home];
int off=0;
int cidx=0;
for (int i = 0; i < _data.anyVec().nChunks(); ++i) {
if (_data.anyVec().chunkKey(i).home()) {
_chunk_row_mapping[cidx++] = _data.anyVec().chunk2StartElem(i);
}
}
// Initialize number of rows per node
_rowsPerNode = new int[H2O.CLOUD.size()];
long chunksCount = _data.anyVec().nChunks();
for(int ci=0; ci<chunksCount; ci++) {
Key cKey = _data.anyVec().chunkKey(ci);
_rowsPerNode[cKey.home_node().index()] += _data.anyVec().chunkLen(ci);
}
_remoteChunksKeys = new Key[H2O.CLOUD.size()][];
int[] _remoteChunksCounter = new int[H2O.CLOUD.size()];
for (int i = 0; i < _data.anyVec().nChunks(); ++i) {
_remoteChunksCounter[_data.anyVec().chunkKey(i).home(H2O.CLOUD)]++;
}
for (int i = 0; i < H2O.CLOUD.size(); ++i) _remoteChunksKeys[i] = new Key[_remoteChunksCounter[i]];
int[] cnter = new int[H2O.CLOUD.size()];
for (int i = 0; i < _data.anyVec().nChunks(); ++i) {
int node_idx = _data.anyVec().chunkKey(i).home(H2O.CLOUD);
_remoteChunksKeys[node_idx][cnter[node_idx]++] = _data.anyVec().chunkKey(i);
}
}
private int producerRemoteRows(byte treeProducerID, Key chunkKey) {
Key[] remoteCKeys = _remoteChunksKeys[treeProducerID];
int off = 0;
for (int i=0; i<remoteCKeys.length; i++) {
if (chunkKey.equals(remoteCKeys[i])) return off;
off += _data.anyVec().chunkLen(i);
}
return off;
}
@Override public void map(Chunk[] chks) {
final int rows = chks[0]._len;
final int cmin = _model.resp_min;
short numClasses = (short)_model.classes();
_cms = new long[ModelUtils.DEFAULT_THRESHOLDS.length][2][2];
// Votes: we vote each tree on each row, holding on to the votes until the end
int[][] votes = new int[rows][_N];
int[][] localVotes = _computeOOB ? new int[rows][_N] : null;
// Errors per tree
_errorsPerTree = new long[_model.treeCount()];
// Replay the Data.java's "sample_fair" sampling algorithm to exclude data
// we trained on during voting.
for( int ntree = 0; ntree < _model.treeCount(); ntree++ ) {
if (_score_new_tree_only) ntree = _model.treeCount() - 1;
long treeSeed = _model.seed(ntree);
byte producerId = _model.producerId(ntree);
int init_row = (int)chks[0]._start;
boolean isLocalTree = _computeOOB && isLocalTree(producerId); // tree is local
boolean isRemote = true;
for (long a_chunk_row_mapping : _chunk_row_mapping) {
if (chks[0]._start == a_chunk_row_mapping) {
isRemote = false;
break;
}
}
boolean isRemoteTreeChunk = _computeOOB && isRemote; // this is chunk which was used for construction the tree by another node
if (isRemoteTreeChunk) init_row = _rowsPerNode[producerId] + (int)chks[0]._start + producerRemoteRows(producerId, chks[0]._vec.chunkKey(chks[0].cidx()));
/* NOTE: Before changing used generator think about which kind of random generator you need:
* if always deterministic or non-deterministic version - see hex.rf.Utils.get{Deter}RNG */
// DEBUG: if( _computeOOB && (isLocalTree || isRemoteTreeChunk)) System.err.println(treeSeed + " : " + init_row + " (CM) " + isRemoteTreeChunk);
long seed = Sampling.chunkSampleSeed(treeSeed, init_row);
Random rand = Utils.getDeterRNG(seed);
// Now for all rows, classify & vote!
ROWS: for( int row = 0; row < rows; row++ ) {
// ------ THIS CODE is crucial and serve to replay the same sequence
// of random numbers as in the method Data.sampleFair()
// Skip row used during training if OOB is computed
float sampledItem = rand.nextFloat();
// Bail out of broken rows with NA in class column.
// Do not skip yet the rows with NAs in the rest of columns
if( chks[_classcol].isNA0(row)) continue;
if( _computeOOB && (isLocalTree || isRemoteTreeChunk) ) { // if OOBEE is computed then we need to take into account utilized sampling strategy
if (sampledItem < _model.sample) continue;
}
// --- END OF CRUCIAL CODE ---
// Predict with this tree - produce 0-based class index
if (!_model.regression) {
int prediction = (int)_model.classify0(ntree, chks, row, _modelDataMap, numClasses, false /*Not regression*/);
if( prediction >= numClasses ) continue; // Junk row cannot be predicted
// Check tree miss
int alignedPrediction = alignModelIdx(prediction);
int alignedData = alignDataIdx((int) chks[_classcol].at80(row) - cmin);
if (alignedPrediction != alignedData) {
_errorsPerTree[ntree]++;
}
votes[row][alignedPrediction]++; // Vote the row
// if (isLocalTree) localVotes[row][alignedPrediction]++; // Vote
} else {
float pred = _model.classify0(ntree, chks, row, _modelDataMap, (short) 0, true /*regression*/);
float actual = chks[_classcol].at80(row);
float delta = actual - pred;
_ss += delta * delta;
_rowcnt++;
}
}
}
if(!_model.regression) {
// Assemble the votes-per-class into predictions & score each row
_matrix = computeCM(votes, chks, false /*Do the _cms once*/, _model.get_params().balance_classes); // Make a confusion matrix for this chunk
if (localVotes!=null) {
_localMatrices = new CM[H2O.CLOUD.size()];
_localMatrices[H2O.SELF.index()] = computeCM(localVotes, chks, true /*Don't compute the _cms again!*/, _model.get_params().balance_classes);
}
}
}
public static float[] computeVarImpSD(long[][] vote_diffs) {
float[] res = new float[vote_diffs.length];
for (int var = 0; var < vote_diffs.length; ++var) {
float mean_diffs = 0.f;
float r = 0.f;
for (long d: vote_diffs[var]) mean_diffs += (float) d / (float) vote_diffs.length;
for (long d: vote_diffs[var]) {
r += (d - mean_diffs) * (d - mean_diffs);
}
r *= 1.f / (float)vote_diffs[var].length;
res[var] = (float) Math.sqrt(r);
}
return res;
}
/** Returns true if tree was produced by this node.
* Note: chunkKey is key stored at this local node */
private boolean isLocalTree(byte treeProducerId) {
assert _computeOOB : "Calling this method makes sense only for oobee";
int idx = H2O.SELF.index();
return idx == treeProducerId;
}
/** Reduction combines the confusion matrices. */
@Override public void reduce(CMTask drt) {
if (!_model.regression) {
if (_matrix == null) {
_matrix = drt._matrix;
} else {
_matrix = _matrix.add(drt._matrix);
}
_sum += drt._sum;
// Reduce tree errors
long[] ept1 = _errorsPerTree;
long[] ept2 = drt._errorsPerTree;
if (ept1 == null) _errorsPerTree = ept2;
else if (ept2 != null) {
if (ept1.length < ept2.length) ept1 = Arrays.copyOf(ept1, ept2.length);
for (int i = 0; i < ept2.length; i++) ept1[i] += ept2[i];
}
if (_cms!=null)
for (int i = 0; i < _cms.length; i++) Utils.add(_cms[i], drt._cms[i]);
if (_oobs != null)
for (int i = 0; i < _oobs.length; ++i) _oobs[i] += drt._oobs[i];
} else {
_ss += drt._ss;
_rowcnt += drt._rowcnt;
}
}
/** Transforms 0-based class produced by model to CF zero-based */
private int alignModelIdx(int modelClazz) {
if (_model_classes_mapping!=null)
return _model_classes_mapping[modelClazz];
else
return modelClazz + _cmin_model_mapping;
}
/** Transforms 0-based class from input data to CF zero-based */
private int alignDataIdx(int dataClazz) {
if (_data_classes_mapping!=null)
return _data_classes_mapping[dataClazz];
else
return dataClazz + _cmin_data_mapping;
}
/** Merge model and data predictor domain to produce domain for CM.
* The domain is expected to be ordered and containing unique values. */
public static int alignEnumDomains(final String[] modelDomain, final String[] dataDomain, int[] modelMapping, int[] dataMapping) {
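// Worked example (hypothetical domains): modelDomain = {"cat","dog"}, dataDomain = {"dog","fish"}
// merge to {"cat","dog","fish"} (returns 3) with modelMapping = {0,1} and dataMapping = {1,2}.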
assert modelMapping!=null && modelMapping.length == modelDomain.length;
assert dataMapping!=null && dataMapping.length == dataDomain.length;
int idx = 0, idxM = 0, idxD = 0;
while(idxM!=modelDomain.length || idxD!=dataDomain.length) {
if (idxM==modelDomain.length) { dataMapping[idxD++] = idx++; continue; }
if (idxD==dataDomain.length) { modelMapping[idxM++] = idx++; continue; }
int c = modelDomain[idxM].compareTo(dataDomain[idxD]);
if (c < 0) {
modelMapping[idxM] = idx;
idxM++;
} else if (c > 0) {
dataMapping[idxD] = idx;
idxD++;
} else { // strings are identical
modelMapping[idxM] = idx;
dataMapping[idxD] = idx;
idxM++; idxD++;
}
idx++;
}
return idx;
}
public static String[] domain(final Vec modelCol, final Vec dataCol) {
int[] modelEnumMapping = null;
int[] dataEnumMapping = null;
int N;
if (modelCol._domain!=null) {
assert dataCol._domain != null;
modelEnumMapping = new int[modelCol._domain.length];
dataEnumMapping = new int[dataCol._domain.length];
N = alignEnumDomains(modelCol._domain, dataCol._domain, modelEnumMapping, dataEnumMapping);
} else {
assert dataCol._domain == null;
N = (int) (Math.max(modelCol.max(), dataCol.max()) - Math.min(modelCol.min(), dataCol.min()) + 1);
}
return domain(N, modelCol, dataCol, modelEnumMapping, dataEnumMapping);
}
public static String[] domain(int N, final Vec modelCol, final Vec dataCol, int[] modelEnumMapping, int[] dataEnumMapping) {
String[] result = new String[N];
String[] modelDomain = modelCol._domain;
String[] dataDomain = dataCol._domain;
if (modelDomain!=null) {
assert dataDomain!=null;
assert modelEnumMapping!=null && modelEnumMapping.length == modelDomain.length;
assert dataEnumMapping!=null && dataEnumMapping.length == dataDomain.length;
for (int i = 0; i < modelDomain.length; i++) result[modelEnumMapping[i]] = modelDomain[i];
for (int i = 0; i < dataDomain.length; i++) result[dataEnumMapping [i]] = dataDomain[i];
} else {
assert dataDomain==null;
int dmin = (int) Math.min(modelCol.min(), dataCol.min());
int dmax = (int) Math.max(modelCol.max(), dataCol.max());
for (int i = dmin; i <= dmax; i++) result[i-dmin] = String.valueOf(i);
}
return result;
}
/** Compute confusion matrix domain based on model and data key. */
public String[] domain(Vec modelResp) {
return domain(_N, modelResp, _data.vecs()[_classcol], _model_classes_mapping, _data_classes_mapping);
}
/** Return number of classes - in fact dimension of CM. */
public final int dimension() { return _N; }
/** Confusion matrix representation. */
static class CM extends Iced {
/** The Confusion Matrix - an NxN matrix of [actual] -vs- [predicted] classes,
referenced as _matrix[actual][predicted]. Each row in the dataset is
voted on by all trees, and the majority vote is the predicted class for
the row. Each row thus gets 1 entry in the matrix.*/
protected long _matrix[][];
/** Number of mistaken assignments. */
protected long _errors;
/** Number of rows used for building the matrix.*/
protected long _rows;
/** Number of skipped rows. Rows can contain bad data, or can be skipped by selecting only out-of-bag rows */
protected long _skippedRows;
/** Domain - names of columns and rows */
public float classError() { return _errors / (float) _rows; }
/** Return number of rows used for CM computation */
public long rows() { return _rows; }
/** Return number of skipped rows during CM computation
* The number includes in-bag rows if oobee is used. */
public long skippedRows(){ return _skippedRows; }
/** Add a confusion matrix. */
public CM add(final CM cm) {
if (cm!=null) {
if( _matrix == null ) _matrix = cm._matrix; // Take other work straight-up
else Utils.add(_matrix,cm._matrix);
_rows += cm._rows;
_errors += cm._errors;
_skippedRows += cm._skippedRows;
}
return this;
}
/** Text form of the confusion matrix */
@Override public String toString() {
if( _matrix == null ) return "no trees";
int N = _matrix.length;
final int K = N + 1;
double[] e2c = new double[N];
for( int i = 0; i < N; i++ ) {
long err = -_matrix[i][i];
for( int j = 0; j < N; j++ ) err += _matrix[i][j];
e2c[i] = Math.round((err / (double) (err + _matrix[i][i])) * 100) / (double) 100;
}
String[][] cms = new String[K][K + 1];
cms[0][0] = "";
for( int i = 1; i < K; i++ ) cms[0][i] = "" + (i - 1);
cms[0][K] = "err/class";
for( int j = 1; j < K; j++ ) cms[j][0] = "" + (j - 1);
for( int j = 1; j < K; j++ ) cms[j][K] = "" + e2c[j - 1];
for( int i = 1; i < K; i++ )
for( int j = 1; j < K; j++ ) cms[j][i] = "" + _matrix[j - 1][i - 1];
int maxlen = 0;
for( int i = 0; i < K; i++ )
for( int j = 0; j < K + 1; j++ ) maxlen = Math.max(maxlen, cms[i][j].length());
for( int i = 0; i < K; i++ )
for( int j = 0; j < K + 1; j++ ) cms[i][j] = pad(cms[i][j], maxlen);
String s = "";
for( int i = 0; i < K; i++ ) {
for( int j = 0; j < K + 1; j++ ) s += cms[i][j];
s += "\n";
}
return s;
}
/** Pad a string with spaces. */
private String pad(String s, int l){ String p=""; for(int i=0; i<l-s.length();i++)p+=" "; return " "+p+s; }
}
public static class CMFinal extends CM {
final protected Key _SpeeDRFModelKey;
final protected String[] _domain;
final protected long [] _errorsPerTree;
final protected boolean _computedOOB;
final protected long[][][] _cms;
protected boolean _valid;
final protected float _sum;
private CMFinal() {
_valid = false;
_SpeeDRFModelKey = null;
_domain = null;
_errorsPerTree = null;
_computedOOB = false;
_sum = 0.f;
_cms = null;
}
private CMFinal(CM cm, Key SpeeDRFModelKey, String[] domain, long[] errorsPerTree, boolean computedOOB, boolean valid, float sum, long[][][] cms) {
_matrix = cm._matrix;
_errors = cm._errors;
_rows = cm._rows;
_skippedRows = cm._skippedRows;
_SpeeDRFModelKey = SpeeDRFModelKey;
_domain = domain;
_errorsPerTree = errorsPerTree;
_computedOOB = computedOOB;
_valid = valid;
_sum = sum;
_cms = cms;
}
/** Make non-valid confusion matrix */
public static CMFinal make() {
return new CMFinal();
}
/** Create a new confusion matrix. */
public static CMFinal make(CM cm, SpeeDRFModel model, String[] domain, long[] errorsPerTree, boolean computedOOB, float sum, long[][][] cms) {
return new CMFinal(cm, model._key, domain, errorsPerTree, computedOOB, true, sum, cms);
}
public String[] domain() { return _domain; }
public int dimension() { return _matrix.length; }
public long matrix(int i, int j) { return _matrix[i][j]; }
public boolean valid() { return _valid; }
public float mse() { return _sum / (float) _rows; }
/** Output information about this RF. */
public final void report() {
double err = classError();
assert _valid : "Trying to report status of invalid CM!";
SpeeDRFModel model = UKV.get(_SpeeDRFModelKey);
String s =
" Type of random forest: classification\n"
+ " Number of trees: " + model.size() + "\n"
+ "No of variables tried at each split: " + model.mtry + "\n"
+ " Estimate of err. rate: " + Math.round(err * 10000) / 100 + "% (" + err + ")\n"
+ " OOBEE: " + (_computedOOB ? "YES (sampling rate: "+model.sample*100+"%)" : "NO")+ "\n"
+ " Confusion matrix:\n"
+ toString() + "\n"
+ " CM domain: " + Arrays.toString(_domain) + "\n"
+ " Avg tree depth (min, max): " + model.depth() + "\n"
+ " Avg tree leaves (min, max): " + model.leaves() + "\n"
+ " Validated on (rows): " + rows() + "\n"
+ " Rows skipped during validation: " + skippedRows() + "\n"
+ " Mispredictions per tree (in rows): " + Arrays.toString(_errorsPerTree)+"\n";
Log.info(Sys.RANDF,s);
}
/**
* Reports size of dataset and computed classification error.
*/
public final void report(StringBuilder sb) {
double err = _errors / (double) _rows;
sb.append(_rows).append(',');
sb.append(err).append(',');
}
}
/** Compute the sum of squared errors */
static float doSSECalc(int[] votes, float[] preds, int cclass) {
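// Per-row squared error is (1 - votes_for_true_class / total_votes)^2, a Brier-style penalty;
// a row with no votes at all contributes (1 - 1/num_classes)^2.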
float err;
// Get the total number of votes for the row
float sum = doSum(votes);
// No votes for the row
if (sum == 0) {
err = 1f - (1f / (votes.length - 0f));
return err * err;
}
err = Float.isInfinite(sum)
? (Float.isInfinite(preds[cclass + 1]) ? 0f : 1f)
: 1f - preds[cclass + 1] / sum;
return err * err;
}
static float doSum(int[] votes) {
float sum = 0f;
for (int v : votes)
sum += v;
return sum;
}
static float[] toProbs(float[] preds, float s ) {
for (int i = 1; i < preds.length; ++i) {
preds[i] /= s;
}
return preds;
}
/** Produce confusion matrix from given votes. */
final CM computeCM(int[/**/][/**/] votes, Chunk[] chks, boolean local, boolean balance) {
CM cm = new CM();
int rows = votes.length;
int validation_rows = 0;
int cmin = (int) _data.vecs()[_classcol].min();
// Assemble the votes-per-class into predictions & score each row
// Make an empty confusion matrix for this chunk
cm._matrix = new long[_N][_N];
float preds[] = new float[_N+1];
float num_trees = _errorsPerTree.length;
// Loop over the rows
for( int row = 0; row < rows; row++ ) {
// Skip rows with missing response values
if (chks[_classcol].isNA0(row)) continue;
// The class votes for the i-th row
int[] vi = votes[row];
// Fill the predictions with the vote counts, keeping the 0th index unchanged
for( int v=0; v<_N; v++ ) preds[v+1] = vi[v];
float s = doSum(vi);
if (s == 0) {
cm._skippedRows++;
continue;
}
int result;
if (balance) {
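// Undo class balancing: rescale each class probability by (prior fraction / oversampled fraction),
// then renormalize so the corrected probabilities sum to 1 before picking the predicted class.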
float[] scored = toProbs(preds.clone(), doSum(vi));
double probsum=0;
for( int c=1; c<scored.length; c++ ) {
final double original_fraction = _model.priordist()[c-1];
assert(original_fraction > 0) : "original fraction should be > 0, but is " + original_fraction + ": not using enough training data?";
final double oversampled_fraction = _model.modeldist()[c-1];
assert(oversampled_fraction > 0) : "oversampled fraction should be > 0, but is " + oversampled_fraction + ": not using enough training data?";
assert(!Double.isNaN(scored[c]));
scored[c] *= original_fraction / oversampled_fraction;
probsum += scored[c];
}
for (int i=1;i<scored.length;++i) scored[i] /= probsum;
result = ModelUtils.getPrediction(scored, row);
} else {
// `result` is the class with the most votes, accounting for ties in the shared logic in ModelUtils
result = ModelUtils.getPrediction(preds, row);
}
// Get the class value from the response column for the current row
int cclass = alignDataIdx((int) chks[_classcol].at80(row) - cmin);
assert 0 <= cclass && cclass < _N : ("cclass " + cclass + " < " + _N);
// Ignore rows with zero votes, but still update the sum of squared errors
if( vi[result]==0 ) {
cm._skippedRows++;
if (!local) _sum += doSSECalc(vi, preds, cclass);
continue;
}
// Update the confusion matrix
cm._matrix[cclass][result]++;
if( result != cclass ) cm._errors++;
validation_rows++;
// Update the sum of squared errors
if (!local) _sum += doSSECalc(vi, preds, cclass);
float sum = doSum(vi);
// Binomial classification -> compute AUC, draw ROC
if(_N == 2 && !local) {
float snd = preds[2] / sum;
for(int i = 0; i < ModelUtils.DEFAULT_THRESHOLDS.length; i++) {
int p = snd >= ModelUtils.DEFAULT_THRESHOLDS[i] ? 1 : 0;
_cms[i][cclass][p]++; // Increase matrix
}
}
}
// End of loop over rows, return confusion matrix
cm._rows=validation_rows;
return cm;
}
public static class MSETask extends MRTask2<MSETask> {
// Sum of squared errors accumulated over all rows (reduced across chunks)
double _ss;
public static double doTask(Frame fr) {
MSETask tsk = new MSETask();
tsk.doAll(fr);
return tsk._ss / (double) fr.numRows();
}
@Override public void map(Chunk[] cks) {
for (int i = 0; i < cks[0]._len; ++i) {
int cls = (int)cks[cks.length - 1].at0(i);
double err = ( 1 - cks[cls+1].at0(i));
_ss += err * err;
}
}
@Override public void reduce(MSETask tsk) { _ss += tsk._ss; }
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/DABuilder.java
|
package hex.singlenoderf;
import hex.singlenoderf.SpeeDRF.DRFParams;
import jsr166y.ForkJoinTask;
import jsr166y.RecursiveAction;
import water.Job;
import water.Key;
import water.Timer;
import water.UKV;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.util.Log;
import java.util.ArrayList;
public class DABuilder {
protected final DRFParams _rfParams;
protected final Key _rfModel;
static DABuilder create(final DRFParams rfParams, final Key rfModel) {
switch( rfParams.sampling_strategy ) {
case RANDOM :
default : return new DABuilder(rfParams, rfModel);
}
}
DABuilder(final DRFParams rfparams, final Key rfmodel) { _rfParams = rfparams; _rfModel = rfmodel; }
final DataAdapter build(Frame fr, boolean useNonLocal) { return inhaleData(fr, useNonLocal); }
/** Check that we have a proper number of valid columns vs. features selected; if not, cap it. */
private void checkAndLimitFeatureUsedPerSplit(Frame fr) {
int validCols = fr.numCols()-1; // for classIdx column
if (validCols < _rfParams.num_split_features) {
Log.info(Log.Tag.Sys.RANDF, "Limiting features from " + _rfParams.num_split_features +
" to " + validCols + " because there are no more valid columns in the dataset");
_rfParams.num_split_features= validCols;
}
}
/** Return the number of rows on this node. */
private int getRowCount(Frame fr) { return (int)fr.numRows(); }
/** Return chunk index of the first chunk on this node. Used to identify the trees built here.*/
private long getChunkId(final Frame fr) {
Key[] keys = new Key[fr.anyVec().nChunks()];
for(int i = 0; i < fr.anyVec().nChunks(); ++i) {
keys[i] = fr.anyVec().chunkKey(i);
}
for(int i = 0; i < keys.length; ++i) {
if (keys[i].home()) return i;
}
return -99999; //throw new Error("No key on this node");
}
private static int find(String n, String[] names) {
if( n == null ) return -1;
for( int j = 0; j<names.length; j++ )
if( n.equals(names[j]) )
return j;
return -1;
}
public static int[] colMap( String[] frame_names, String[] model_names ) {
int mapping[] = new int[frame_names.length];
for( int i = 0; i<mapping.length; i++ )
mapping[i] = find(frame_names[i],model_names);
return mapping;
}
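// Illustrative example (hypothetical names): colMap(new String[]{"a","b","resp"}, new String[]{"b","resp"})
// returns {-1, 0, 1}; frame column "a" has no counterpart among the model names and maps to -1.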
/** Build data adapter for given frame */
protected DataAdapter inhaleData(Frame fr, boolean useNonLocal) {
Log.info("Prepping for data inhale.");
long id = getChunkId(fr);
if (id == -99999) {
return null;
}
Timer t_inhale = new Timer();
final SpeeDRFModel rfmodel = UKV.get(_rfModel);
boolean[] _isByteCol = new boolean[fr.numCols()];
long[] _naCnts = new long[fr.numCols()];
for (int i = 0; i < _isByteCol.length; ++i) {
_isByteCol[i] = DataAdapter.isByteCol(fr.vecs()[i], (int)fr.numRows(), i == _isByteCol.length - 1, rfmodel.regression);
_naCnts[i] = fr.vecs()[i].naCnt();
}
// The model columns are dense packed - but there will be columns in the
// data being ignored. This is a map from the model's columns to the
// building dataset's columns.
final int[] modelDataMap = colMap(fr._names, rfmodel._names);
final int totalRows = getRowCount(fr);
final DataAdapter dapt = new DataAdapter(fr, rfmodel, modelDataMap,
totalRows,
getChunkId(fr),
_rfParams.seed,
_rfParams.bin_limit,
_rfParams.class_weights);
// Check that we have proper number of valid columns vs. features selected, if not cap.
checkAndLimitFeatureUsedPerSplit(fr);
// Collects jobs loading local chunks
ArrayList<RecursiveAction> dataInhaleJobs = new ArrayList<RecursiveAction>();
Log.info("\n\nTotal Number of Chunks: " + fr.anyVec().nChunks()+"\n\n");
int cnter_local = 0;
int cnter_remote = 0;
for(int i = 0; i < fr.anyVec().nChunks(); ++i) {
if (useNonLocal) {
if (fr.anyVec().chunkKey(i).home()) { cnter_local++; } else { cnter_remote++; }
dataInhaleJobs.add(loadChunkAction(dapt, fr, i, _isByteCol, _naCnts, rfmodel.regression));
} else if (fr.anyVec().chunkKey(i).home()) {
cnter_local++;
dataInhaleJobs.add(loadChunkAction(dapt, fr, i, _isByteCol, _naCnts, rfmodel.regression));
}
}
Log.info("\n\nTotal local chunks to load: "+cnter_local+"\n\nTotal remote chunks to load:" +cnter_remote);
SpeeDRF.DRFTask.updateRFModelStatus(_rfModel, "Inhaling Data.");
Log.info(Log.Tag.Sys.RANDF,"Beginning Random Forest Inhale.");
ForkJoinTask.invokeAll(dataInhaleJobs);
if(dapt._jobKey != null && !Job.isRunning(dapt._jobKey)) throw new Job.JobCancelledException();
// Shrink data
dapt.shrink();
if(dapt._jobKey != null && !Job.isRunning(dapt._jobKey)) throw new Job.JobCancelledException();
Log.info(Log.Tag.Sys.RANDF,"Inhale done in " + t_inhale);
return dapt;
}
static RecursiveAction loadChunkAction(final DataAdapter dapt, final Frame fr, final int cidx, final boolean[] isByteCol, final long[] naCnts, boolean regression) {
return new RecursiveAction() {
@Override protected void compute() {
if(dapt._jobKey != null && !Job.isRunning(dapt._jobKey)) throw new Job.JobCancelledException();
try {
Chunk[] chks = new Chunk[fr.numCols()];
int ncolumns = chks.length;
for(int i = 0; i < chks.length; ++i) {
chks[i] = fr.vecs()[i].chunkForChunkIdx(cidx);
}
for (int j = 0; j < chks[0]._len; ++j) {
if(dapt._jobKey != null && !Job.isRunning(dapt._jobKey)) throw new Job.JobCancelledException();
int rowNum = (int)chks[0]._start + j;
boolean rowIsValid = false;
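// A row is kept only if at least one feature (non-response) column contributes a valid value and the
// response itself is not missing; otherwise the row is marked ignored after this loop.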
for(int c = 0; c < chks.length; ++c) {
if(naCnts[c] > 0) {
if(chks[c].isNA0(j)) {
if (c == ncolumns - 1) rowIsValid = false;
dapt.addBad(rowNum, c); continue;
}
}
if (isByteCol[c]) {
int val = (int)chks[c].at8(rowNum);
dapt.add1(val, rowNum, c);
} else {
float f = (float)chks[c].at(rowNum);
if(!dapt.isValid(c, f)) { dapt.addBad(rowNum, c); continue; }
dapt.add(f, rowNum, c);
}
if (c != ncolumns - 1) {
rowIsValid |= true;
}
}
if (!rowIsValid) dapt.markIgnoredRow(j);
}
} catch (Throwable t) {
// Swallow per-chunk load failures; rows from a failed chunk are simply not inhaled.
}
}
};
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/Data.java
|
package hex.singlenoderf;
import water.MemoryManager;
import water.util.Utils;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Random;
public class Data implements Iterable<Data.Row> {
/** Use stratified sampling */
boolean _stratify;
/** Random generator to make decision about missing data. */
final Random _rng;
public final class Row {
int _index;
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(_index).append(" [").append(classOf()).append("]:");
for( int i = 0; i < _dapt.columns(); ++i ) sb.append(_dapt.hasBadValue(_index, i) ? "NA" : _dapt.getEncodedColumnValue(_index, i)).append(',');
return sb.toString();
}
public int classOf() { return _dapt.classOf(_index); }
public final short getEncodedColumnValue(int colIndex) {
return _dapt.getEncodedColumnValue(_index, colIndex); }
public final short getEncodedClassColumnValue() {
return _dapt.getEncodedClassColumnValue(_index);
}
public final float getRawClassColumnValueFromBin() {
return _dapt.getRawClassColumnValueFromBin(_index);
}
public final boolean hasValidValue(int colIndex) { return !_dapt.hasBadValue(_index, colIndex); }
public final boolean isValid() { return !_dapt.isBadRow(_index); }
public final boolean isValidRaw() { return !_dapt.isBadRowRaw(_index); }
public final double getRawColumnValue(int colIndex) { return _dapt.getRawColumnValue(_index, colIndex); }
}
protected final DataAdapter _dapt;
/** Returns new Data object that stores all adapter's rows unchanged. */
public static Data make(DataAdapter da) { return new Data(da); }
protected Data(DataAdapter dapt) {
_dapt = dapt;
_rng = Utils.getDeterRNG(0x7b85dfe19122f0d5L);
_columnInfo = new ColumnInfo[_dapt.columns()];
for(int i = 0; i<_columnInfo.length; i++)
_columnInfo[i] = new ColumnInfo(i);
}
protected int start() { return 0; }
protected int end() { return _dapt._numRows; }
public final int rows() { return end() - start(); }
public final int columns() { return _dapt.columns(); }
public final int classes() { return _dapt.classes(); }
public final long seed() { return _dapt.seed(); }
public final String colName(int i) { return _dapt.columnName(i); }
public final float unmap(int col, int split) { return _dapt.unmap(col, split); }
public final int columnArity(int colIndex) { return _dapt.columnArity(colIndex); }
public final int columnArityOfClassCol() { return _dapt.columnArityOfClassCol(); }
/** Transforms given binned index (short) into 0..N-1 corresponding to predictor class */
public final int unmapClass(int clazz) {return _dapt.unmapClass(clazz); }
public final boolean isFloat(int col) { return _dapt.isFloat(col); }
public final double[] classWt() { return _dapt._classWt; }
public final boolean isIgnored(int col) { return _dapt.isIgnored(col); }
public final float computeAverage() {
float av = 0.f;
int nobs = 0;
for (Row r: this) {
if (r.isValid()) {
av += r.getRawClassColumnValueFromBin();
}
nobs++;
}
return nobs == 0 ? 0 : av / (float)(nobs);
}
public double[] unpackRow(Row r) {
double[] res = new double[_dapt._c.length-1];
for (int i = 0; i < _dapt._c.length-1; ++i) res[i] = r.getRawColumnValue(i);
return res;
}
public Row at(int i) { Row _r = new Row(); _r._index = permute(i); return _r;}
public final Iterator<Row> iterator() { return new RowIter(start(), end()); }
private class RowIter implements Iterator<Row> {
final Row _r = new Row();
int _pos = 0; final int _end;
public RowIter(int start, int end) { _pos = start; _end = end; }
public boolean hasNext() { return _pos < _end; }
public Row next() { _r._index = permute(_pos++); return _r; }
public void remove() { throw new RuntimeException("Unsupported"); }
}
// ----------------------
private int filterInv(Tree.SplitNode node, int[] permutation, Statistic ls, Statistic rs) {
final Row row = new Row();
int l = start(), r = end() - 1;
while (l <= r) {
int permIdx = row._index = permutation[l];
boolean putToLeft;
if (node.canDecideAbout(row)) { // are we splitting over existing value
putToLeft = node.isIn(row);
} else { // value is missing: make a random left/right choice
putToLeft = _rng.nextBoolean();
}
if (putToLeft) {
ls.addQ(row, ls._regression);
++l;
} else {
rs.addQ(row, rs._regression);
permutation[l] = permutation[r];
permutation[r--] = permIdx;
}
}
return l;
}
public long[] nonOOB() {
ArrayList<Integer> res = new ArrayList<Integer>();
for (Row r : this) res.add(r._index);
long[] rr = new long[res.size()];
for (int i = 0; i < rr.length; ++i) rr[i] = res.get(i);
return rr;
}
// Filter a column whose data are all valid, i.e., skip the invalid-value check
private int filterVal(Tree.SplitNode node, int[] permutation, Statistic ls, Statistic rs) {
final int l =filterVal1(node,permutation);
filterVal3(permutation,ls,start(),l);
filterVal3(permutation,rs,l,end());
return l;
}
// Hand-inlining for performance... CNC
private int filterVal1(Tree.SplitNode node, int[] permutation) {
int cidx = node._column; // Decision column guiding the split
DataAdapter.Col cs[] = _dapt._c;
short bins[] = cs[cidx]._binned; // Bin#'s for each row
byte binb[] = cs[cidx]._rawB; // Bin#'s for each row
int split = node._split; // Value to split on
// Move the data into left/right halves
int l = start(), r = end() - 1;
while (l <= r) {
int permIdx = permutation[l];
int val = bins==null ? (0xFF&binb[permIdx]) : bins[permIdx];
if( val <= split ) {
++l;
} else {
permutation[l] = permutation[r];
permutation[r--] = permIdx;
}
}
return l;
}
// Update the histogram
private void filterVal3(int[] permutation, Statistic s, final int lo, final int hi) {
if (!s._regression) {
DataAdapter.Col cs[] = _dapt._c;
short classs[]= cs[_dapt.classColIdx()]._binned;
int cds[][][] = s._columnDists;
int fs[] = s._features;
// Run this loop by-feature instead of by-row - so that the updates in the
// inner loops do not need to start from loading the feature array.
for (int f : fs) {
if (f == -1) break; // -1 marks the end of a short feature list.
int cdsf[][] = cds[f]; // Histogram per-column (by value & class)
short[] bins = cs[f]._binned; // null if byte col, otherwise bin#
if (bins != null) { // binned?
for (int i = lo; i < hi; i++) { // Binned-loop
int permIdx = permutation[i]; // Get the row
int val = bins[permIdx]; // Bin-for-row
if (val == DataAdapter.BAD) continue; // ignore bad rows
int cls = classs[permIdx]; // Class-for-row
if (cls == DataAdapter.BAD) continue; // ignore rows with NA in response column
cdsf[val][cls]++; // Bump histogram
}
} else { // not binned?
byte[] raw = cs[f]._rawB; // Raw unbinned byte array
for (int i = lo; i < hi; i++) { // not-binned loop
int permIdx = permutation[i]; // Get the row
int val = (0xFF & raw[permIdx]);// raw byte value, has no bad rows
int cls = classs[permIdx] & 0xFF; // Class-for-row
cdsf[val][cls]++; // Bump histogram
}
}
}
} else {
DataAdapter.Col cols[] = _dapt._c;
float[] response;
if (cols[_dapt.classColIdx()]._binned == null) {
response = new float[cols[_dapt.classColIdx()]._rawB.length];
for (int b = 0; b < response.length; ++b)
response[b] = (float)(0xFF & cols[_dapt.classColIdx()]._rawB[b]);
} else {
response = new float[cols[_dapt.classColIdx()]._binned.length];
for (int f = 0; f < response.length; ++f)
response[f] = cols[_dapt.classColIdx()]._binned2raw[cols[_dapt.classColIdx()]._binned[f]];
}
int cds[][][] = s._columnDistsRegression;
int fs[] = s._features;
for (int f: fs) {
if (f == -1) break;
int cdsf[][] = cds[f];
short[] bins = cols[f]._binned;
if (bins != null) {
for (int i = lo; i < hi; i++) {
int permIdx = permutation[i];
int val = bins[permIdx];
if (val == DataAdapter.BAD) continue; // ignore bad rows
float resp = response[permIdx]; // Response value for this row
int response_bin = _dapt.getEncodedClassColumnValue(permIdx); //cols[cols.length-1]._binned[permIdx]; //cols[_dapt.classColIdx()]._binned == null ? (cols[_dapt.classColIdx()]._rawB[permIdx] & 0xFF) : cols[_dapt.classColIdx()]._binned[permIdx];
if (resp == DataAdapter.BAD) continue; // ignore rows with NA in response column
cdsf[val][response_bin]++; // = resp; // Bump histogram
}
} else {
byte[] raw = cols[f]._rawB;
for (int i = lo; i < hi; i++) {
int permIdx = permutation[i];
int val = raw[permIdx]&0xFF;
if (val == DataAdapter.BAD) continue;
short resp = cols[cols.length-1]._binned[permIdx];
if (resp == DataAdapter.BAD) continue;
int response_bin = _dapt.getEncodedClassColumnValue(permIdx); //cols[cols.length-1]._binned[permIdx]; //cols[_dapt.classColIdx()]._binned == null ? (cols[_dapt.classColIdx()]._rawB[permIdx] & 0xFF) : cols[_dapt.classColIdx()]._binned[permIdx];
cdsf[val][response_bin]++; // = resp;
}
}
}
}
}
public void filter(Tree.SplitNode node, Data[] result, Statistic ls, Statistic rs) {
int[] permutation = getPermutationArray();
int cidx = node._column;
int l = _dapt.hasAnyInvalid(cidx) || _dapt.hasAnyInvalid(_dapt.columns()-1)
? filterInv(node,permutation,ls,rs)
: filterVal(node,permutation,ls,rs);
ColumnInfo[] linfo = _columnInfo.clone();
ColumnInfo[] rinfo = _columnInfo.clone();
linfo[node._column]= linfo[node._column].left(node._split);
rinfo[node._column]= rinfo[node._column].right(node._split);
result[0]= new Subset(this, permutation, start(), l);
result[1]= new Subset(this, permutation, l, end());
result[0]._columnInfo = linfo;
result[1]._columnInfo = rinfo;
}
public Data sampleWithReplacement(double bagSizePct, short[] complement) {
// Make sure that values come in order
int size = (int)(rows() * bagSizePct);
/* NOTE: Before changing the generator used here, think about which kind of random generator you need:
* always-deterministic or non-deterministic - see water.util.Utils.get{Deter}RNG */
Random r = Utils.getRNG(seed());
for( int i = 0; i < size; ++i)
complement[permute(r.nextInt(rows()))]++;
int[] sample = MemoryManager.malloc4(size);
for( int i = 0, j = 0; i < sample.length;) {
while(complement[j]==0) j++;
for (int k = 0; k < complement[j]; k++) sample[i++] = j;
j++;
}
return new Subset(this, sample, 0, sample.length);
}
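// The complement[] histogram counts how many times each row was drawn; rows whose count stays at zero
// are the out-of-bag rows that Subset.complement() extracts later.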
public Data complement(Data parent, short[] complement) { throw new RuntimeException("Only for subsets."); }
@Override public Data clone() throws CloneNotSupportedException { return this; }
protected int permute(int idx) { return idx; }
protected int[] getPermutationArray() {
int[] perm = MemoryManager.malloc4(rows());
for( int i = 0; i < perm.length; ++i ) perm[i] = i;
return perm;
}
public int colMinIdx(int i) { return _columnInfo[i].min; }
public int colMaxIdx(int i) { return _columnInfo[i].max; }
class ColumnInfo {
private final int col;
int min, max;
ColumnInfo(int col_) { col=col_; max = _dapt.columnArity(col_) - 1; }
ColumnInfo left(int idx) {
ColumnInfo res = new ColumnInfo(col);
res.max = idx < max ? idx : max;
res.min = min;
return res;
}
ColumnInfo right(int idx) {
ColumnInfo res = new ColumnInfo(col);
res.min = idx >= min ? (idx+1) : min;
res.max = max;
return res;
}
public String toString() { return col + "["+ min +","+ max + "]"; }
}
ColumnInfo[] _columnInfo;
}
class Subset extends Data {
private final int[] _permutation;
private final int _start, _end;
@Override protected int[] getPermutationArray() { return _permutation; }
@Override protected int permute(int idx) { return _permutation[idx]; }
@Override protected int start() { return _start; }
@Override protected int end() { return _end; }
@Override public Subset clone() throws CloneNotSupportedException { return new Subset(this,_permutation.clone(),_start,_end); }
/** Creates new subset of the given data adapter. The permutation is an array
* of original row indices of the DataAdapter object that will be used. */
public Subset(Data data, int[] permutation, int start, int end) {
super(data._dapt);
_start = start;
_end = end;
_permutation = permutation;
}
@Override public Data complement(Data parent, short[] complement) {
int size= 0;
for (short aComplement : complement) if (aComplement == 0) size++;
int[] p = MemoryManager.malloc4(size);
int pos = 0;
for(int i=0;i<complement.length; i++) if (complement[i]==0) p[pos++] = i;
return new Subset(this, p, 0, p.length);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/DataAdapter.java
|
package hex.singlenoderf;
import jsr166y.ForkJoinTask;
import jsr166y.RecursiveAction;
import water.*;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.Log;
import water.util.Log.Tag.Sys;
import java.text.DecimalFormat;
import java.util.Arrays;
/**A DataAdapter maintains an encoding of the original data. Every raw value (of type float)
* is represented by a short value. When the number of unique raw values is larger than binLimit,
* the DataAdapter will perform binning on the data and use the same short encoded value to
* represent several consecutive raw values.
*
* Missing values, NaNs and Infinity are treated as BAD data. */
final class DataAdapter {
/** Placeholder for missing data, NaN, and Inf in the short encoding. */
static final short BAD = Short.MIN_VALUE;
/** Number of classes. */
private final int _numClasses;
/** Columns. */
final Col[] _c;
/** Seed for sampling */
private final long _seed;
/** Number of rows */
public final int _numRows;
/** Class weights */
public final double[] _classWt;
/** Use regression */
public final boolean _regression;
public Key _jobKey;
DataAdapter(Frame fr, SpeeDRFModel model, int[] modelDataMap, int rows,
long unique, long seed, int binLimit, double[] classWt) {
// assert model._dataKey == fr._key;
_seed = seed+(unique<<16); // This is important to preserve sampling selection!!!
/* Maximum arity for a column (not a hard limit) */
_numRows = rows;
_jobKey = model.jobKey;
_numClasses = model.regression ? 1 : model.classes();
_regression = model.regression;
_c = new Col[fr.numCols()];
for( int i = 0; i < _c.length; i++ ) {
if(model.jobKey != null && !Job.isRunning(model.jobKey)) throw new Job.JobCancelledException();
assert fr._names[modelDataMap[i]].equals(fr._names[i]);
Vec v = fr.vecs()[i];
if( isByteCol(v,rows, i == _c.length-1, _regression) ) // we do not bin for small values
_c[i] = new Col(fr._names[i], rows, i == _c.length-1);
else
_c[i] = new Col(fr._names[i], rows, i == _c.length-1, binLimit, !(v.isEnum() || v.isInt()));
}
boolean trivial = true;
if (classWt != null) for(double f: classWt) if (f != 1.0) trivial = false;
_classWt = trivial ? null : classWt;
}
static boolean isByteCol( Vec C, int rows, boolean isClass, boolean regression) {
if (regression) {
return !isClass && (C.isInt() || C.isEnum()) && C.min() >= 0 && C.length() == rows && (C.max() < 255 || C.max() < 256 && C.length() == rows);
}
return (C.isInt() || C.isEnum()) && !isClass && C.min() >= 0 && C.length()==rows &&
(C.max()<255 || C.max() <256 && C.length()==rows);
}
/** Given a value in enum format, returns: the value in the original format if no
* binning was applied, or, if binning was applied, a value that is in between
* the idx and the next value. If the idx is the last value return (2*idx+1)/2. */
public float unmap(int col, int idx){ return _c[col].rawSplit(idx); }
public boolean isFloat(int col) { return _c[col].isFloat(); }
public long seed() { return _seed; }
public int columns() { return _c.length;}
public int classOf(int idx) { return _c[_c.length-1].get(idx); }
/** The number of possible prediction classes. */
public int classes() { return _numClasses; }
/** Transforms given binned index (short) from class column into a value from interval [0..N-1]
* corresponding to a particular predictor class */
public int unmapClass(int clazz) {
Col c = _c[_c.length-1];
if (c._isByte)
return clazz;
else {
// OK, this is not fully correct: it handles corner cases badly. For example, if a dataset uses only
// classes 0 and 3, our API reports that there are 4 classes but in fact there are only 2 classes.
if (clazz >= c._binned2raw.length) clazz = c._binned2raw.length - 1;
return (int) (c.raw(clazz) - c._min);
}
}
/** Returns the number of bins, i.e. the number of distinct values in the column. */
public int columnArity(int col) { return _c[col].arity(); }
public int columnArityOfClassCol() { return _c[_c.length - 1].arity(); }
/** Return a short that represents the binned value of the original row,column value. */
public short getEncodedColumnValue(int row, int col) { return _c[col].get(row); }
public short getEncodedClassColumnValue(int row) { return _c[_c.length-1].get(row); }
public double getRawColumnValue(int row, int col) { return _c[col].getRaw(row); }
public float getRawClassColumnValueFromBin(int row) {
int idx = _c.length-1;
short btor = _c[idx].get(row);
if (_c[idx]._binned == null) {
return (float)(0xFF & _c[idx]._rawB[row]);
}
return _c[_c.length-1]._binned2raw[btor];
}
public void shrink() {
if(_jobKey != null && !Job.isRunning(_jobKey)) throw new Job.JobCancelledException();
// for ( Col c: _c) c.shrink();
// sort columns in parallel: c.shrink() calls single-threaded Arrays.sort()
RecursiveAction [] ras = new RecursiveAction[_c.length];
int i=0;
for ( final Col c: _c) {
ras[i++] = new RecursiveAction() {
@Override public void compute() { c.shrink(); }
};
}
ForkJoinTask.invokeAll(ras);
}
public String columnName(int i) { return _c[i].name(); }
public boolean isValid(int col, float f) {
return !_c[col].isFloat() || !Float.isInfinite(f);
}
public final void add(float v, int row, int col) {
_c[col].add (row,v); }
public final void add1(int v, int row, int col) {
_c[col].add1(row,v); }
public final void addBad(int row, int col) { _c[col].addBad(row); }
public final boolean hasBadValue(int row, int col) { return _c[col].isBad(row); }
public final boolean isBadRow(int row) { return _c[_c.length-1].isBad(row); }
public final boolean isBadRowRaw(int row) { return _c[_c.length-1].isBadRaw(row); }
public final boolean isIgnored(int col) { return _c[col].isIgnored(); }
public final void markIgnoredRow(int row) { _c[_c.length-1].addBad(row); }
public final int classColIdx() { return _c.length - 1; }
public final boolean hasAnyInvalid(int col) { return _c[col]._invalidValues!=0; }
static class Col {
/** Encoded values*/
short[] _binned;
/** Original values, kept only during inhale*/
float[] _raw;
/** Original values which we do not want to bin */
byte[] _rawB;
/** Map from binned to original*/
float[] _binned2raw;
final boolean _isClass, _isFloat, _isByte;
final int _colBinLimit;
final String _name;
/** Total number of bad values in the column. */
int _invalidValues;
float _min, _max;
int _arity;
static final DecimalFormat df = new DecimalFormat ("0.##");
boolean _ignored;
Col(String s, int rows, boolean isClass) {
_name = s; _isClass = isClass;
_rawB = MemoryManager.malloc1(rows);
_isFloat = false;
_isByte = true;
_colBinLimit = 0;
}
Col(String s, int rows, boolean isClass, int binLimit, boolean isFloat) {
_name = s; _isFloat = isFloat; _isClass = isClass; _colBinLimit = binLimit; _isByte = false;
_raw = MemoryManager.malloc4f(rows);
_ignored = false;
}
boolean isFloat() { return _isFloat; }
boolean isIgnored() { return _ignored; }
int arity() { return _ignored ? -1 : _arity; }
String name() { return _name; }
short get(int row) { return (short) (_isByte ? (_rawB[row]&0xFF) : _binned[row]); }
double getRaw(int row) { return (double)(_isByte ? (_rawB[row]&0xFF) : _binned2raw[_binned[row]]);}
void add(int row, float val) {
_raw [row] = val; }
void add1(int row, int val) {
_rawB[row] = (byte)val; }
void addBad(int row) { if (!_isByte) _raw[row] = Float.NaN; else _rawB[row] = (byte)255; }
private boolean isBadRaw(float f) { return Float.isNaN(f); }
boolean isBad(int row) {
return _isByte ? (_rawB[row]&0xFF)==255 : _binned[row] == BAD;
}
/** For all columns - encode all floats as unique shorts. */
void shrink() {
if (_isByte) {
_arity = 256;
return ; // do not shrink byte columns
}
float[] vs = _raw.clone();
Arrays.sort(vs); // Sort puts all Float.NaN at the end of the array (according to the Float.NaN documentation)
int ndups = 0, i = 0, nans = 0; // Counter of all NaNs
while(i < vs.length-1) { // count dups
int j = i+1;
if (isBadRaw(vs[i])) { nans = vs.length - i; break; } // skip all NaNs
if (isBadRaw(vs[j])) { nans = vs.length - j; break; } // there is only one remaining NaN (do not forget about it)
while(j < vs.length && vs[i] == vs[j]){ ++ndups; ++j; }
i = j;
}
_invalidValues = nans;
if ( vs.length <= nans) {
// too many NaNs in the column => ignore it
_ignored = true;
_raw = null;
Log.info(Sys.RANDF, "Ignore column: " + this);
return;
}
int n = vs.length - ndups - nans;
int rem = n % _colBinLimit;
int maxBinSize = (n > _colBinLimit) ? (n / _colBinLimit + Math.min(rem,1)) : 1;
// Assign shorts to floats, with binning.
_binned2raw = MemoryManager.malloc4f(Math.min(n, _colBinLimit)); // if n is smaller than bin limit no need to compact
int smax = 0, cntCurBin = 1;
i = 0;
_binned2raw[0] = vs[i];
for(; i < vs.length; ++i) {
if(isBadRaw(vs[i])) break; // the first NaN, there are only NaN in the rest of vs[] array
if(vs[i] == _binned2raw[smax]) continue; // remove dups
if( ++cntCurBin > maxBinSize ) {
if(rem > 0 && --rem == 0)--maxBinSize; // check if we can reduce the bin size
++smax;
cntCurBin = 1;
}
_binned2raw[smax] = vs[i];
}
++smax;
// for(i = 0; i< vs.length; i++) if (!isBadRaw(vs[i])) break;
// All Float.NaN are at the end of vs => min is stored in vs[0]
_min = vs[0];
for(i = vs.length -1; i>= 0; i--) if (!isBadRaw(vs[i])) break;
_max = vs[i];
vs = null; // GCed
_binned = MemoryManager.malloc2(_raw.length);
// Find the bin value by lookup in bin2raw array which is sorted so we can do binary lookup.
for(i = 0; i < _raw.length; i++)
if (isBadRaw(_raw[i]))
_binned[i] = BAD;
else {
short idx = (short) Arrays.binarySearch(_binned2raw, _raw[i]);
if (idx >= 0) _binned[i] = idx;
else _binned[i] = (short) (-idx - 1); // value was merged into a bin: the insertion point is the bin whose representative is the smallest value >= this raw value.
assert _binned[i] < _binned2raw.length;
}
if( n > _colBinLimit ) Log.info(Sys.RANDF,this+" this column's arity was cut from "+n+" to "+smax);
_arity = _binned2raw.length;
_raw = null; // GCced
}
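// Worked example (illustrative values, not from the original source): with a large enough _colBinLimit,
// raw values {5.0, 1.0, 3.0, 1.0, NaN} shrink to _binned2raw = {1.0, 3.0, 5.0} and _binned = {2, 0, 1, 0, BAD},
// giving arity 3, _min = 1.0 and _max = 5.0. When the number of distinct values exceeds _colBinLimit,
// consecutive values share a bin and _binned2raw keeps the largest raw value placed in each bin.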
/**Given an encoded short value, return the original float*/
public float raw(int idx) { return _binned2raw[idx]; }
/**Given an encoded short value, return the float that splits that value with the next.*/
public float rawSplit(int idx){
if (_isByte) return idx; // treat index as value
if (idx == BAD) return Float.NaN;
float flo = _binned2raw[idx]; // Convert to the original values
float fhi = (idx+1 < _binned2raw.length)? _binned2raw[idx+1] : flo+1.f;
//assert flo < fmid && fmid < fhi : "Values " + flo +","+fhi ; // Assert that the float will properly split
return (flo+fhi)/2.0f;
}
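// E.g., with _binned2raw = {1.0f, 3.0f, 5.0f}: rawSplit(0) == 2.0f, rawSplit(1) == 4.0f, and
// rawSplit(2) == 5.5f (the last bin has no successor, so fhi defaults to flo + 1).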
int rows() { return _isByte ? _rawB.length : _binned.length; }
@Override public String toString() {
String res = "Column("+_name+"){";
if (_ignored) res+="IGNORED";
else {
res+= " ["+df.format(_min) +","+df.format(_max)+"]";
res+=",bad values=" + _invalidValues + "/" + rows();
if (_isClass) res+= " CLASS ";
}
res += "}";
return res;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/EntropyStatistic.java
|
package hex.singlenoderf;
import water.util.Utils;
import java.util.Random;
/**The entropy formula is the classic Shannon entropy gain, which is:
*
* - \sum(p_i * log2(p_i))
*
* where p_i is the probability of i-th class occurring. The entropy is
* calculated for the left and right node after the given split and they are
* combined together weighted on their probability.
*
* ent left * weight left + ent right * weight right
* --------------------------------------------------
* total weight
*
* And to get the gain, this is subtracted from the node's maximum possible entropy
* (log2 of the number of classes), simulating the previous node. The biggest gain is selected as the tree split.
*
* The same is calculated also for exclusion, where left stands for the rows
* where column equals to the split point and right stands for all others.
*/
class EntropyStatistic extends Statistic {
public EntropyStatistic(Data data, int features, long seed, int exclusiveSplitLimit) { super(data, features, seed, exclusiveSplitLimit, false /*classification*/); }
/** Less-than-or-equal splits. */
@Override protected Split ltSplit(int col, Data d, int[] dist, int distWeight, Random rand) {
final int[] distL = new int[d.classes()], distR = dist.clone();
final double upperBoundReduction = upperBoundReduction(d.classes());
double maxReduction = -1;
int bestSplit = -1;
int totL = 0, totR = 0; // Totals in the distribution
int classL = 0, classR = 0; // Count of non-zero classes in the left/right distributions
for (int e: distR) { // All zeros for the left, but need to compute for the right
totR += e;
if( e != 0 ) classR++;
}
// For this one column, look at all its split points and find the one with the best gain.
for (int i = 0; i < _columnDists[col].length - 1; ++i) {
int [] cdis = _columnDists[col][i];
for (int j = 0; j < distL.length; ++j) {
int v = cdis[j];
if( v == 0 ) continue; // No rows with this class
totL += v; totR -= v;
if( distL[j]== 0 ) classL++; // One-time transit from zero to non-zero for class j
distL[j] += v; distR[j] -= v;
if( distR[j]== 0 ) classR--; // One-time transit from non-zero to zero for class j
}
if (totL == 0) continue; // Totals are zero ==> this will not actually split anything
if (totR == 0) continue; // Totals are zero ==> this will not actually split anything
// Compute gain.
// If the distribution has only 1 class, the gain will be zero.
double eL = 0, eR = 0;
if( classL > 1 ) for (int e: distL) eL += gain(e,totL);
if( classR > 1 ) for (int e: distR) eR += gain(e,totR);
double eReduction = upperBoundReduction - ( (eL * totL + eR * totR) / (totL + totR) );
if (eReduction == maxReduction) {
// For now, don't break ties. Most ties are because we have several
// splits with NO GAIN. This happens *billions* of times in a standard
// covtype RF, because we have >100K leaves per tree (and 50 trees and
// 54 columns per leave and however many bins per column), and most
// leaves have no gain at most split points.
//if (rand.nextInt(10)<2) bestSplit=i;
} else if (eReduction > maxReduction) {
bestSplit = i; maxReduction = eReduction;
}
}
return bestSplit == -1
? Split.impossible(Utils.maxIndex(dist,_random))
: Split.split(col,bestSplit,maxReduction);
}
/**Gain function*/
private double gain(int e, int tot) {
if (e == 0) return 0;
double v = e/(double)tot;
double r = v * Math.log(v) / log2;
return -r;
}
/**Maximal entropy*/
private double upperBoundReduction(double classes) {
double p = 1/classes;
double r = p * Math.log(p)/log2 * classes;
return -r;
}
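// Worked example (illustrative counts): with 2 classes, left distribution {3,1} and right distribution {1,3}:
// entropy(left) = entropy(right) = -(0.75*log2(0.75) + 0.25*log2(0.25)) ~= 0.811 bits, the upper bound is
// log2(2) = 1 bit, so the reduction is 1 - (0.811*4 + 0.811*4)/8 ~= 0.19.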
/**Compute an exclusive split (i.e. 'feature' '==' 'val') */
@Override protected Split eqSplit(int col, Data d, int[] dist, int distWeight, Random rand) {
final int[] distR = new int[d.classes()], distL = dist.clone();
final double upperBoundReduction = upperBoundReduction(d.classes());
double maxReduction = -1;
int bestSplit = -1;
int min = d.colMinIdx(col);
int max = d.colMaxIdx(col);
for (int i = min; i < max+1; ++i) {
for (int j = 0; j < distR.length; ++j) {
int v = _columnDists[col][i][j];
distL[j] += distR[j];
distR[j] = v;
distL[j] -= v;
}
int totL = 0, totR = 0;
for (int e: distL) totL += e;
if (totL == 0) continue;
for (int e: distR) totR += e;
if (totR == 0) continue;
double eL = 0, eR = 0;
for (int e: distL) eL += gain(e,totL);
for (int e: distR) eR += gain(e,totR);
double eReduction = upperBoundReduction - ( (eL * totL + eR * totR) / (totL + totR) );
if (eReduction == maxReduction){
if (rand.nextInt(10)<2) bestSplit=i; // randomly pick one out of several
} else if (eReduction > maxReduction) {
bestSplit = i; maxReduction = eReduction;
}
if (i==0 && d.columnArity(col) == 1) break; // for boolean features, only one split needs to be evaluated
}
return bestSplit == -1
? Split.impossible(Utils.maxIndex(dist,_random))
: Split.exclusion(col,bestSplit,maxReduction);
}
@Override
protected Split ltSplit(int colIndex, Data d, float[] dist, float distWeight, Random rand) {
return null; //not called for classification
}
@Override
protected Split eqSplit(int colIndex, Data d, float[] dist, float distWeight, Random rand) {
return null; //not called for classification
}
static final double log2 = Math.log(2);
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/GiniStatistic.java
|
package hex.singlenoderf;
import water.util.Utils;
import java.util.Random;
/** Computes the gini split statistics.
*
* The Gini fitness is calculated as a probability that the element will be
* misclassified, which is:
*
* 1 - \sum(p_i^2)
*
* This is computed for the left and right subtrees and added together:
*
* gini left * weight left + gini right * weight right
* --------------------------------------------------
* weight total
*
* This is subtracted from the ideal worst case of 1 to simulate the gain from the previous node.
* The best gain is then selected. The same is done for exclusions, where again
* left stands for the rows with column value equal to the split value and
* right for all different ones.
*/
public class GiniStatistic extends Statistic {
public GiniStatistic(Data data, int features, long seed, int exclusiveSplitLimit) { super(data, features, seed, exclusiveSplitLimit, false /*classification*/); }
private double gini(int[] dd, int sum) {
double result = 1.0;
double sd = (double)sum;
for (int d : dd) {
double tmp = ((double)d)/sd;
result -= tmp*tmp;
}
return result;
}
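// Worked example (illustrative counts): gini({3,1}, 4) = 1 - (0.75^2 + 0.25^2) = 0.375, so a split producing
// left {3,1} and right {1,3} (each weight 4 of 8) has fitness 1 - (0.375*0.5 + 0.375*0.5) = 0.625.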
@Override protected Split ltSplit(int col, Data d, int[] dist, int distWeight, Random _) {
int[] leftDist = new int[d.classes()];
int[] riteDist = dist.clone();
int lW = 0;
int rW = distWeight;
double totWeight = rW;
// we are not a single class, calculate the best split for the column
int bestSplit = -1;
double bestFitness = 0.0;
assert leftDist.length==_columnDists[col][0].length;
for (int i = 0; i < _columnDists[col].length-1; ++i) {
// first copy the i-th guys from rite to left
for (int j = 0; j < leftDist.length; ++j) {
int t = _columnDists[col][i][j];
lW += t;
rW -= t;
leftDist[j] += t;
riteDist[j] -= t;
}
// now make sure we have something to split
if( lW == 0 || rW == 0 ) continue;
double f = 1.0 -
(gini(leftDist,lW) * ((double)lW / totWeight) +
gini(riteDist,rW) * ((double)rW / totWeight));
if( f>bestFitness ) { // Take split with largest fitness
bestSplit = i;
bestFitness = f;
}
}
return bestSplit == -1
? Split.impossible(Utils.maxIndex(dist, _random))
: Split.split(col, bestSplit, bestFitness);
}
@Override protected Split eqSplit(int colIndex, Data d, int[] dist, int distWeight, Random _) {
int[] inclDist = new int[d.classes()];
int[] exclDist = dist.clone();
// we are not a single class, calculate the best split for the column
int bestSplit = -1;
double bestFitness = 0.0; // Fitness to maximize
for( int i = 0; i < _columnDists[colIndex].length-1; ++i ) {
// compute the i-th bin's included/excluded class counts
int sumt = 0;
for( int j = 0; j < inclDist.length; ++j ) {
int t = _columnDists[colIndex][i][j];
sumt += t;
inclDist[j] = t;
exclDist[j] = dist[j] - t;
}
int inclW = sumt;
int exclW = distWeight - inclW;
// now make sure we have something to split
if( inclW == 0 || exclW == 0 ) continue;
double f = 1.0 -
(gini(inclDist,inclW) * ((double)inclW / distWeight) +
gini(exclDist,exclW) * ((double)exclW / distWeight));
if( f>bestFitness ) { // Take split with largest fitness
bestSplit = i;
bestFitness = f;
}
}
return bestSplit == -1
? Split.impossible(Utils.maxIndex(dist, _random))
: Split.exclusion(colIndex, bestSplit, bestFitness);
}
@Override
protected Split ltSplit(int colIndex, Data d, float[] dist, float distWeight, Random rand) {
return null; //not called for classification
}
@Override
protected Split eqSplit(int colIndex, Data d, float[] dist, float distWeight, Random rand) {
return null; //not called for classification
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/MSEStatistic.java
|
package hex.singlenoderf;
import water.util.Utils;
import java.util.Random;
/** Computes the mse split statistics.
*
* For regression: Try to minimize the squared error at each split.
*/
public class MSEStatistic extends Statistic {
public MSEStatistic(Data data, int features, long seed, int exclusiveSplitLimit) {
super(data, features, seed, exclusiveSplitLimit, true /*regression*/);
}
private float computeAv(float[] dist, Data d, int sum) {
float res = 0f;
for (int i = 0; i < dist.length; ++i) {
int tmp = (int) dist[i];
res += d._dapt._c[d._dapt._c.length - 1]._binned2raw[i] * tmp;
}
return sum == 0 ? Float.POSITIVE_INFINITY : res / (float) sum;
}
private float[] computeDist(Data d, int colIndex) {
float[] res = new float[d.columnArityOfClassCol()];
for (int i = 0; i < _columnDistsRegression[colIndex].length - 1; ++i) {
for (int j = 0; j < _columnDistsRegression[colIndex][i].length - 1; ++j) {
res[j] += _columnDistsRegression[colIndex][i][j];
}
}
return res;
}
@Override
protected Split ltSplit(int colIndex, Data d, float[] dist, float distWeight, Random rand) {
float bestSoFar = Float.POSITIVE_INFINITY;
int bestSplit = -1;
int lW = 0;
int rW = d.rows();
float[] leftDist = new float[d.columnArityOfClassCol()];
float[] riteDist = computeDist(d, colIndex); //dist.clone();
for (int j = 0; j < _columnDistsRegression[colIndex].length - 1; ++j) {
for (int i = 0; i < dist.length; ++i) {
int t = _columnDistsRegression[colIndex][j][i];
lW += t;
rW -= t;
leftDist[i] += t;
riteDist[i] -= t;
}
float Y_R = computeAv(riteDist, d, rW);
float Y_L = computeAv(leftDist, d, lW);
float newSplitValue = Y_R + Y_L;
if (newSplitValue < bestSoFar) {
bestSoFar = newSplitValue;
bestSplit = j;
}
}
return (bestSplit == -1 || bestSoFar == Float.POSITIVE_INFINITY)
? Split.impossible(Utils.maxIndex(computeDist(d, colIndex), _random))
: Split.split(colIndex, bestSplit, bestSoFar);
}
@Override
protected Split eqSplit(int colIndex, Data d, float[] dist, float distWeight, Random rand) {
// we are not a single class, calculate the best split for the column
int bestSplit = -1;
float bestSoFar = 0.f; // Fitness to maximize
for( int i = 0; i < _columnDists[colIndex].length-1; ++i ) {
float Y_incl = 0.f;
float Y_excl = distWeight;
int nobs_incl = 0;
int nobs_excl = d.rows();
for (float aDist : dist) {
Y_incl += aDist;
Y_excl -= aDist;
nobs_incl++;
nobs_excl--;
float newSplitValue = (Y_incl * Y_incl / (float) nobs_incl) + (Y_excl * Y_excl / (float) nobs_excl);
if (newSplitValue > bestSoFar) {
bestSoFar = newSplitValue;
bestSplit = i;
}
}
}
return bestSplit == -1
? Split.impossible(Utils.maxIndex(dist, _random))
: Split.exclusion(colIndex, bestSplit, bestSoFar);
}
@Override protected Split ltSplit(int col, Data d, int[] dist, int distWeight, Random r) {
return null; //not called for regression
}
@Override protected Split eqSplit(int colIndex, Data d, int[] dist, int distWeight, Random r) {
return null; //not called for regression
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/Sampling.java
|
package hex.singlenoderf;
import water.*;
import water.util.Utils;
//import hex.singlenoderf.TreeP;
import java.util.ArrayList;
import java.util.Arrays;
public abstract class Sampling {
/** Available sampling strategies. */
public enum Strategy {
RANDOM(0); //,
int _id; // redundant id
private Strategy(int id) { _id = id; }
}
abstract Data sample(final Data data, long seed, Key modelKey, boolean local_mode);
/** Deterministically sample the Data at the bagSizePct. Toss out
invalid rows (as-if not sampled), but maintain the sampling rate. */
final static class Random extends Sampling {
final double _bagSizePct;
final int[] _rowsPerChunks;
public Random(double bagSizePct, int[] rowsPerChunks) { _bagSizePct = bagSizePct; _rowsPerChunks = rowsPerChunks; }
@Override Data sample(final Data data, long seed, Key modelKey, boolean local_mode) {
SpeeDRFModel m = UKV.get(modelKey);
int [] sample;
sample = sampleFair(data,seed,_rowsPerChunks);
// add the remaining rows
Arrays.sort(sample); // we want an ordered sample
return new Subset(data, sample, 0, sample.length);
}
/** Roll a fair die for sampling, resetting the random die every numrows. */
private int[] sampleFair(final Data data, long seed, int[] rowsPerChunks ) {
// init
java.util.Random rand = null;
int rows = data.rows();
int size = bagSize(rows,_bagSizePct);
int[] sample = MemoryManager.malloc4((int) (size * 1.10));
float f = (float) _bagSizePct;
int cnt = 0; // Counter for resetting Random
int j = 0; // Number of selected samples
int cidx = 0; // Chunks counter
// compute
for( int i=0; i<rows; i++ ) {
if( cnt--==0 ) {
/* NOTE: Before changing the generator used here, think about which kind of random generator you need:
* always-deterministic or non-deterministic - see water.util.Utils.get{Deter}RNG */
long chunkSamplingSeed = chunkSampleSeed(seed, i);
// DEBUG: System.err.println(seed + " : " + i + " (sampling)");
rand = Utils.getDeterRNG(chunkSamplingSeed);
cnt = rowsPerChunks[cidx++]-1;
}
float randFloat = rand.nextFloat();
if( randFloat < f ) {
if( j == sample.length ) sample = Arrays.copyOfRange(sample,0,(int)(1 + sample.length*1.2));
sample[j++] = i;
}
}
return Arrays.copyOf(sample,j); // Trim out bad rows
}
}
/**
* ! CRITICAL code !
* This method returns the correct seed based on initial seed and row index.
* WARNING : this method is crucial for correct replay of sampling.
*/
static final long chunkSampleSeed(long seed, int rowIdx) { return seed + ((long)rowIdx<<16); }
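// E.g., chunkSampleSeed(7L, 3) == 7 + (3L << 16) == 196615, so each chunk start gets a distinct,
// reproducible seed derived from the initial seed and its first row index.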
static final int bagSize( int rows, double bagSizePct ) {
int size = (int)(rows * bagSizePct);
return (size>0 || rows==0) ? size : 1;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/SpeeDRF.java
|
package hex.singlenoderf;
import dontweave.gson.JsonObject;
import hex.ConfusionMatrix;
import hex.FrameTask;
import hex.VarImp;
import hex.drf.DRF;
import water.*;
import water.Timer;
import water.api.AUCData;
import water.api.Constants;
import water.api.DocGen;
import water.api.ParamImportance;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.*;
import java.util.*;
import static water.util.MRUtils.sampleFrameStratified;
public class SpeeDRF extends Job.ValidatedJob {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
public static DocGen.FieldDoc[] DOC_FIELDS;
public static final String DOC_GET = "SpeeDRF";
@API(help = "Number of trees", filter = Default.class, json = true, lmin = 1, lmax = Integer.MAX_VALUE, importance = ParamImportance.CRITICAL)
public int ntrees = 50;
@API(help = "Number of features to randomly select at each split.", filter = Default.class, json = true, lmin = -1, lmax = Integer.MAX_VALUE, importance = ParamImportance.SECONDARY)
public int mtries = -1;
@API(help = "Max Depth", filter = Default.class, json = true, lmin = 0, lmax = Integer.MAX_VALUE, importance = ParamImportance.CRITICAL)
public int max_depth = 20;
@API(help = "Split Criterion Type", filter = Default.class, json=true, importance = ParamImportance.SECONDARY)
public Tree.SelectStatType select_stat_type = Tree.SelectStatType.ENTROPY;
// @API(help = "Use local data. Auto-enabled if data does not fit in a single node.") /*, filter = Default.class, json = true, importance = ParamImportance.EXPERT) */
// public boolean local_mode = false;
/* Legacy parameter: */
public double[] class_weights = null;
@API(help = "Sampling Strategy", filter = Default.class, json = true, importance = ParamImportance.SECONDARY)
public Sampling.Strategy sampling_strategy = Sampling.Strategy.RANDOM;
@API(help = "Sampling Rate at each split.", filter = Default.class, json = true, dmin = 0, dmax = 1, importance = ParamImportance.EXPERT)
public double sample_rate = 0.67;
// @API(help ="Score each iteration", filter = Default.class, json = true, importance = ParamImportance.SECONDARY)
public boolean score_each_iteration = false;
@API(help = "Create the Score POJO", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public boolean score_pojo = true;
/*Imbalanced Classes*/
/**
* For imbalanced data, balance training data class counts via
* over/under-sampling. This can result in improved predictive accuracy.
*/
@API(help = "Balance training data class counts via over/under-sampling (for imbalanced data)", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public boolean balance_classes = false;
/**
* When classes are balanced, limit the resulting dataset size to the
* specified multiple of the original dataset size.
*/
@API(help = "Maximum relative size of the training data after balancing class counts (can be less than 1.0)", filter = Default.class, json = true, dmin=1e-3, importance = ParamImportance.EXPERT)
public float max_after_balance_size = Float.POSITIVE_INFINITY;
@API(help = "Out of bag error estimate", filter = Default.class, json = true, importance = ParamImportance.SECONDARY)
public boolean oobee = true;
@API(help = "Variable Importance", filter = Default.class, json = true)
public boolean importance = false;
public Key _modelKey = dest();
/* Advanced settings */
@API(help = "bin limit", filter = Default.class, json = true, lmin = 0, lmax = 65534, importance = ParamImportance.EXPERT)
public int nbins = 1024;
@API(help = "seed", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public long seed = -1;
@API(help = "Tree splits and extra statistics printed to stdout.", filter = Default.class, json = true, importance = ParamImportance.EXPERT)
public boolean verbose = false;
@API(help = "split limit", importance = ParamImportance.EXPERT)
public int _exclusiveSplitLimit = 0;
private static Random _seedGenerator = Utils.getDeterRNG( new Random().nextLong() );//0xd280524ad7fe0602L);
private boolean regression;
public DRFParams drfParams;
private long use_seed;
Tree.StatType stat_type;
/** Return the query link to this page */
public static String link(Key k, String content) {
RString rs = new RString("<a href='/2/SpeeDRF.query?source=%$key'>%content</a>");
rs.replace("key", k.toString());
rs.replace("content", content);
return rs.toString();
}
protected SpeeDRFModel makeModel( SpeeDRFModel model, double err, ConfusionMatrix cm, VarImp varimp, AUCData validAUC) {
return new SpeeDRFModel(model, err, cm, varimp, validAUC);
}
@Override protected void queryArgumentValueSet(Argument arg, java.util.Properties inputArgs) {
super.queryArgumentValueSet(arg, inputArgs);
if (arg._name.equals("classification")) {
arg._hideInQuery = true;
}
if (arg._name.equals("balance_classes")) {
arg.setRefreshOnChange();
if(regression) {
arg.disable("Class balancing is only for classification.");
}
}
// Regression is selected if classification is false and vice-versa.
if (arg._name.equals("classification")) {
regression = !this.classification;
}
// Regression only accepts the MSE stat type.
if (arg._name.equals("select_stat_type")) {
if(regression) {
arg.disable("Minimize MSE for regression.");
}
}
// Class weights require the source data set and response to be specified, and are invalid for regression
if (arg._name.equals("class_weights")) {
if (source == null || response == null) {
arg.disable("Requires source and response to be specified.");
}
if (regression) {
arg.disable("No class weights for regression.");
}
}
// Prevent Stratified Local sampling when building regression trees.
if (arg._name.equals("sampling_strategy")) {
arg.setRefreshOnChange();
if (regression) {
arg.disable("Random Sampling for regression trees.");
}
}
// Variable Importance disabled in SpeeDRF regression currently
if (arg._name.equals("importance")) {
if (regression) {
arg.disable("Variable Importance not supported in SpeeDRF regression.");
}
}
// max balance size depends on balance_classes to be enabled
if(classification) {
if(arg._name.equals("max_after_balance_size") && !balance_classes) {
arg.disable("Requires balance classes flag to be set.", inputArgs);
}
}
}
// Put here all precondition verification
@Override protected void init() {
super.init();
assert 0 <= ntrees && ntrees < 1000000; // Sanity check
// Not enough rows to run
if (source.numRows() - response.naCnt() <=0)
throw new IllegalArgumentException("Dataset contains too many NAs!");
if( !classification && (!(response.isEnum() || response.isInt())))
throw new IllegalArgumentException("Classification cannot be performed on a float column!");
if(classification) {
if (0.0f > sample_rate || sample_rate > 1.0f)
throw new IllegalArgumentException("Sampling rate must be in [0,1] but found " + sample_rate);
}
if(regression) throw new IllegalArgumentException("SpeeDRF does not currently support regression.");
}
@Override protected void execImpl() {
SpeeDRFModel rf_model;
try {
source.read_lock(self());
if (validation != null && validation != source) validation.read_lock(self());
buildForest();
if (n_folds > 0) CrossValUtils.crossValidate(this);
} catch (JobCancelledException ex){
rf_model = UKV.get(dest());
state = JobState.CANCELLED; //for JSON REST response
rf_model.get_params().state = state; //for parameter JSON on the HTML page
Log.info("Random Forest was cancelled.");
} catch(Exception ex) {
ex.printStackTrace();
throw new RuntimeException(ex);
} finally {
source.unlock(self());
if (validation != null && validation != source) validation.unlock(self());
remove();
state = UKV.<Job>get(self()).state;
// Argh, this is horrible
new TAtomic<SpeeDRFModel>() {
@Override
public SpeeDRFModel atomic(SpeeDRFModel m) {
if (m != null) m.get_params().state = state;
return m;
}
}.invoke(dest());
emptyLTrash();
cleanup();
}
}
@Override protected Response redirect() { return SpeeDRFProgressPage.redirect(this, self(), dest()); }
private void buildForest() {
logStart();
SpeeDRFModel model = null;
try {
Frame train = setTrain();
Frame test = setTest();
Vec resp = regression ? null : train.lastVec().toEnum();
if (resp != null) gtrash(resp);
float[] priorDist = setPriorDist(train);
train = setStrat(train, test, resp);
model = initModel(train, test, priorDist);
model.start_training(null);
model.write_lock(self());
drfParams = DRFParams.create(train.find(resp), model.N, model.max_depth, (int) train.numRows(), model.nbins,
model.statType, use_seed, model.weights, mtries, model.sampling_strategy, (float) sample_rate, model.strata_samples, model.verbose ? 100 : 1, _exclusiveSplitLimit, true, regression);
DRFTask tsk = new DRFTask(self(), train, drfParams, model._key, model.src_key);
tsk.validateInputData(train);
tsk.invokeOnAllNodes();
Log.info("Tree building complete. Scoring...");
model = UKV.get(dest());
model.scoreAllTrees(test == null ? train : test, resp);
// Launch a Variable Importance Task
if (importance && !regression) {
Log.info("Scoring complete. Performing Variable Importance Calculations.");
model.current_status = "Performing Variable Importance Calculation.";
Timer VITimer = new Timer();
model.variableImportanceCalc(train, resp);
Log.info("Variable Importance on "+(train.numCols()-1)+" variables and "+ ntrees +" trees done in " + VITimer);
}
Log.info("Generating Tree Stats");
JsonObject trees = new JsonObject();
trees.addProperty(Constants.TREE_COUNT, model.size());
if( model.size() > 0 ) {
trees.add(Constants.TREE_DEPTH, model.depth().toJson());
trees.add(Constants.TREE_LEAVES, model.leaves().toJson());
}
model.generateHTMLTreeStats(new StringBuilder(), trees);
model.current_status = "Model Complete";
} finally {
if (model != null) {
model.unlock(self());
model.stop_training();
}
}
}
public SpeeDRFModel initModel(Frame train, Frame test, float[] priorDist) {
setStatType();
setSeed(seed);
if (mtries == -1) setMtry(regression, train.numCols() - 1);
Key src_key = source._key;
int src_ncols = source.numCols();
SpeeDRFModel model = new SpeeDRFModel(dest(), src_key, train, regression ? null : train.lastVec().domain(), this, priorDist);
// Model INPUTS
model.src_key = src_key.toString();
model.verbose = verbose; model.verbose_output = new String[]{""};
model.validation = test != null;
model.confusion = null;
model.zeed = use_seed;
model.cmDomain = getCMDomain();
model.nbins = nbins;
model.max_depth = max_depth;
model.oobee = validation == null && oobee;
model.statType = regression ? Tree.StatType.MSE : stat_type;
model.testKey = validation == null ? null : validation._key;
model.importance = importance;
model.regression = regression;
model.features = src_ncols;
model.sampling_strategy = regression ? Sampling.Strategy.RANDOM : sampling_strategy;
model.sample = (float) sample_rate;
model.weights = regression ? null : class_weights;
model.time = 0;
model.N = ntrees;
model.useNonLocal = true;
if (!regression) model.setModelClassDistribution(new MRUtils.ClassDist(train.lastVec()).doAll(train.lastVec()).rel_dist());
model.resp_min = (int) train.lastVec().min();
model.mtry = mtries;
int csize = H2O.CLOUD.size();
model.local_forests = new Key[csize][]; for(int i=0;i<csize;i++) model.local_forests[i] = new Key[0];
model.node_split_features = new int[csize];
model.t_keys = new Key[0];
model.dtreeKeys = new Key[ntrees][regression ? 1 : model.classes()];
model.time = 0;
for( Key tkey : model.t_keys ) assert DKV.get(tkey)!=null;
model.jobKey = self();
model.score_pojo = score_pojo;
model.current_status = "Initializing Model";
// Model OUTPUTS
model.varimp = null;
model.validAUC = null;
model.cms = new ConfusionMatrix[1];
model.errs = new double[]{-1.0};
return model;
}
private void setStatType() { if (regression) { stat_type = Tree.StatType.MSE; return; } stat_type = select_stat_type == Tree.SelectStatType.ENTROPY ? Tree.StatType.ENTROPY : Tree.StatType.GINI; }
private void setSeed(long s) {
if (s == -1) { seed = _seedGenerator.nextLong(); use_seed = seed; }
else {
_seedGenerator = Utils.getDeterRNG(s);
use_seed = _seedGenerator.nextLong();
}
}
private void setMtry(boolean reg, int numCols) { mtries = reg ? (int) Math.floor((float) (numCols) / 3.0f) : (int) Math.floor(Math.sqrt(numCols)); }
private Frame setTrain() { Frame train = FrameTask.DataInfo.prepareFrame(source, response, ignored_cols, !regression /*toEnum is TRUE if regression is FALSE*/, false, false); if (train.lastVec().masterVec() != null && train.lastVec() != response) gtrash(train.lastVec()); return train; }
private Frame setTest() {
if (validation == null) return null;
Frame test = null;
ArrayList<Integer> v_ignored_cols = new ArrayList<Integer>();
for (int ignored_col : ignored_cols) if (validation.find(source.names()[ignored_col]) != -1) v_ignored_cols.add(ignored_col);
int[] v_ignored = new int[v_ignored_cols.size()];
for (int i = 0; i < v_ignored.length; ++i) v_ignored[i] = v_ignored_cols.get(i);
if (validation != null) test = FrameTask.DataInfo.prepareFrame(validation, validation.vecs()[validation.find(source.names()[source.find(response)])], v_ignored, !regression, false, false);
if (test != null && test.lastVec().masterVec() != null) gtrash(test.lastVec());
return test;
}
private Frame setStrat(Frame train, Frame test, Vec resp) {
Frame fr = train;
float[] trainSamplingFactors;
if (classification && balance_classes) {
assert resp != null : "Regression called and stratified sampling was invoked to balance classes!";
// Handle imbalanced classes by stratified over/under-sampling
// initWorkFrame sets the modeled class distribution, and model.score() corrects the probabilities back using the distribution ratios
int response_idx = fr.find(_responseName);
fr.replace(response_idx, resp);
trainSamplingFactors = new float[resp.domain().length]; //leave initialized to 0 -> will be filled up below
Frame stratified = sampleFrameStratified(fr, resp, trainSamplingFactors, (long) (max_after_balance_size * fr.numRows()), use_seed, true, false);
if (stratified != fr) {
fr = stratified;
gtrash(stratified);
}
}
// Check that test/train data are consistent; throw a warning if not
if(classification && validation != null) {
assert resp != null : "Regression called and stratified sampling was invoked to balance classes!";
Vec testresp = test.lastVec().toEnum();
gtrash(testresp);
if (!isSubset(testresp.domain(), resp.domain())) {
Log.warn("Test set domain: " + Arrays.toString(testresp.domain()) + " \nTrain set domain: " + Arrays.toString(resp.domain()));
Log.warn("Train and Validation data have inconsistent response columns! Test data has a response not found in the Train data!");
}
}
return fr;
}
private float[] setPriorDist(Frame train) { return classification ? new MRUtils.ClassDist(train.lastVec()).doAll(train.lastVec()).rel_dist() : null; }
public Frame score( Frame fr ) { return ((SpeeDRFModel)UKV.get(dest())).score(fr); }
private boolean isSubset(String[] sub, String[] container) {
HashSet<String> hs = new HashSet<String>();
Collections.addAll(hs, container);
for (String s: sub) {
if (!hs.contains(s)) return false;
}
return true;
}
public final static class DRFTask extends DRemoteTask {
/** The RF Model. Contains the dataset being worked on, the classification
* column, and the training columns. */
// private final SpeeDRFModel _rfmodel;
private final Key _rfmodel;
/** Job representing this DRF execution. */
private final Key _jobKey;
/** RF parameters. */
private final DRFParams _params;
private final Frame _fr;
private final String _key;
DRFTask(Key jobKey, Frame frameKey, DRFParams params, Key rfmodel, String src_key) {
_jobKey = jobKey; _fr = frameKey; _params = params; _rfmodel = rfmodel; _key = src_key;
}
/**Inhale the data, build a DataAdapter and kick-off the computation.
* */
@Override public final void lcompute() {
final DataAdapter dapt = DABuilder.create(_params, _rfmodel).build(_fr, _params._useNonLocalData);
if (dapt == null) {
tryComplete();
return;
}
Data localData = Data.make(dapt);
int numSplitFeatures = howManySplitFeatures();
int ntrees = howManyTrees();
int[] rowsPerChunks = howManyRPC(_fr);
updateRFModel(_rfmodel, numSplitFeatures);
updateRFModelStatus(_rfmodel, "Building Forest");
updateRFModelLocalForests(_rfmodel, ntrees);
Log.info("Dispalying local forest stats:");
SpeeDRF.build(_jobKey, _rfmodel, _params, localData, ntrees, numSplitFeatures, rowsPerChunks);
tryComplete();
}
static void updateRFModel(Key modelKey, final int numSplitFeatures) {
final int idx = H2O.SELF.index();
new TAtomic<SpeeDRFModel>() {
@Override public SpeeDRFModel atomic(SpeeDRFModel old) {
if(old == null) return null;
SpeeDRFModel newModel = (SpeeDRFModel)old.clone();
newModel.node_split_features[idx] = numSplitFeatures;
return newModel;
}
}.invoke(modelKey);
}
static void updateRFModelLocalForests(Key modelKey, final int num_trees) {
final int selfIdx = H2O.SELF.index();
new TAtomic<SpeeDRFModel>() {
@Override public SpeeDRFModel atomic(SpeeDRFModel old) {
if (old == null) return null;
SpeeDRFModel newModel = (SpeeDRFModel)old.clone();
newModel.local_forests[selfIdx] = new Key[num_trees];
return newModel;
}
}.invoke(modelKey);
}
static void updateRFModelStatus(Key modelKey, final String status) {
new TAtomic<SpeeDRFModel>() {
@Override public SpeeDRFModel atomic(SpeeDRFModel old) {
if(old == null) return null;
SpeeDRFModel newModel = (SpeeDRFModel)old.clone();
newModel.current_status = status;
return newModel;
}
}.invoke(modelKey);
}
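// Note on the three TAtomic helpers above: each atomic() call clones the current
// SpeeDRFModel, mutates only the clone, and returns it, so the DKV swap under
// modelKey is atomic and concurrent readers never observe a partially updated model.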
/** Unless otherwise specified each split looks at sqrt(#features). */
private int howManySplitFeatures() {
return _params.num_split_features;
}
/** Figure the number of trees to make locally, so the total hits ntrees.
* Divide equally amongst all the nodes that actually have data. First:
* compute how many nodes have data. Give each Node ntrees/#nodes worth of
* trees. Round down for later nodes, and round up for earlier nodes.
*/
private int howManyTrees() {
Frame fr = _fr;
final long num_chunks = fr.anyVec().nChunks();
final int num_nodes = H2O.CLOUD.size();
HashSet<H2ONode> nodes = new HashSet<H2ONode>();
for( int i=0; i<num_chunks; i++ ) {
nodes.add(fr.anyVec().chunkKey(i).home_node());
if( nodes.size() == num_nodes ) // All of nodes covered?
break; // That means we are done.
}
H2ONode[] array = nodes.toArray(new H2ONode[nodes.size()]);
Arrays.sort(array);
// Give each H2ONode ntrees/#nodes worth of trees. Round down for later nodes,
// and round up for earlier nodes
int ntrees = _params.num_trees / nodes.size();
if( Arrays.binarySearch(array, H2O.SELF) < _params.num_trees - ntrees*nodes.size() )
++ntrees;
return ntrees;
}
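// Illustrative example (hypothetical values): with num_trees = 50 and 3 nodes
// holding data, the base is 50 / 3 = 16 trees per node and the remainder is
// 50 - 16*3 = 2, so the two nodes that sort first build 17 trees each and the
// last node builds 16 (17 + 17 + 16 = 50).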
private int[] howManyRPC(Frame fr) {
int[] result = new int[fr.anyVec().nChunks()];
for(int i = 0; i < result.length; ++i) {
result[i] = fr.anyVec().chunkLen(i);
}
return result;
}
private void validateInputData(Frame fr) {
Vec[] vecs = fr.vecs();
Vec c = vecs[vecs.length-1];
if (!_params.regression) {
final int classes = c.cardinality();
if (!(2 <= classes && classes <= 254))
throw new IllegalArgumentException("Response contains " + classes + " classes, but algorithm supports only 254 levels");
}
if (_params.num_split_features!=-1 && (_params.num_split_features< 1 || _params.num_split_features>vecs.length-1))
throw new IllegalArgumentException("Number of split features exceeds available data. Should be in [1,"+(vecs.length-1)+"]");
ChunkAllocInfo cai = new ChunkAllocInfo();
boolean can_load_all = canLoadAll(fr, cai);
if (_params._useNonLocalData && !can_load_all) {
String heap_warning = "This algorithm requires loading of all data from remote nodes." +
"\nThe node " + cai.node + " requires " + PrettyPrint.bytes(cai.requiredMemory) + " more memory to load all data and perform computation but there is only " + PrettyPrint.bytes(cai.availableMemory) + " of available memory." +
"\n\nPlease provide more memory for JVMs \n\n-OR-\n\n Try Big Data Random Forest: ";
Log.warn(heap_warning);
throw new IllegalArgumentException(heap_warning + DRF.link(Key.make(_key), "Big Data Random Forest") );
}
if (can_load_all) {
_params._useNonLocalData = true;
Log.info("Enough available free memory to compute on all data. Pulling all data locally and then launching RF.");
}
}
private boolean canLoadAll(final Frame fr, ChunkAllocInfo cai) {
int nchks = fr.anyVec().nChunks();
long localBytes = 0L;
for (int i = 0; i < nchks; ++i) {
Key k = fr.anyVec().chunkKey(i);
if (k.home()) {
localBytes += fr.anyVec().chunkForChunkIdx(i).byteSize();
}
}
long memForNonLocal = fr.byteSize() - localBytes;
// Also must add in the RF internal data structure overhead
memForNonLocal += fr.numRows() * fr.numCols();
for(int i = 0; i < H2O.CLOUD._memary.length; i++) {
HeartBeat hb = H2O.CLOUD._memary[i]._heartbeat;
long nodeFreeMemory = (long)(hb.get_max_mem() * 0.8); // * OVERHEAD_MAGIC;
Log.debug(Log.Tag.Sys.RANDF, i + ": computed available mem: " + PrettyPrint.bytes(nodeFreeMemory));
Log.debug(Log.Tag.Sys.RANDF, i + ": remote chunks require: " + PrettyPrint.bytes(memForNonLocal));
if (nodeFreeMemory - memForNonLocal <= 0 || (nodeFreeMemory <= TWO_HUNDRED_MB && memForNonLocal >= ONE_FIFTY_MB)) {
Log.info("Node free memory raw: "+nodeFreeMemory);
cai.node = H2O.CLOUD._memary[i];
cai.availableMemory = nodeFreeMemory;
cai.requiredMemory = memForNonLocal;
return false;
}
}
return true;
}
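// Sketch of the heuristic above (hypothetical numbers): a node advertising a 10 GB
// max heap gets an 8 GB budget (80%). If the remote chunks plus the rows*cols
// bookkeeping overhead would need 9 GB, or the budget is at or below 200 MB while
// at least 150 MB of remote data is required, loading everything locally is rejected
// (and, when non-local data was requested, validateInputData throws the heap warning).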
/** Helper POJO to store required chunk allocation. */
private static class ChunkAllocInfo {
H2ONode node;
long availableMemory;
long requiredMemory;
}
static final float OVERHEAD_MAGIC = 3/8.f; // memory overhead magic
static final long TWO_HUNDRED_MB = 200 * 1024 * 1024;
static final long ONE_FIFTY_MB = 150 * 1024 * 1024;
@Override
public void reduce(DRemoteTask drt) { }
}
private static final long ROOT_SEED_ADD = 0x026244fd935c5111L;
private static final long TREE_SEED_INIT = 0x1321e74a0192470cL;
/** Build random forest for data stored on this node. */
public static void build(
final Key jobKey,
final Key modelKey,
final DRFParams drfParams,
final Data localData,
int ntrees,
int numSplitFeatures,
int[] rowsPerChunks) {
Timer t_alltrees = new Timer();
Tree[] trees = new Tree[ntrees];
Log.info(Log.Tag.Sys.RANDF,"Building "+ntrees+" trees");
Log.info(Log.Tag.Sys.RANDF,"Number of split features: "+ numSplitFeatures);
Log.info(Log.Tag.Sys.RANDF,"Starting RF computation with "+ localData.rows()+" rows ");
Random rnd = Utils.getRNG(localData.seed() + ROOT_SEED_ADD);
Sampling sampler = createSampler(drfParams, rowsPerChunks);
byte producerId = (byte) H2O.SELF.index();
for (int i = 0; i < ntrees; ++i) {
long treeSeed = rnd.nextLong() + TREE_SEED_INIT; // make sure that enough bits are initialized
trees[i] = new Tree(jobKey, modelKey, localData, producerId, drfParams.max_depth, drfParams.stat_type, numSplitFeatures, treeSeed,
i, drfParams._exclusiveSplitLimit, sampler, drfParams._verbose, drfParams.regression, !drfParams._useNonLocalData, ((SpeeDRFModel)UKV.get(modelKey)).score_pojo);
}
Log.info("Invoking the tree build tasks on all nodes.");
DRemoteTask.invokeAll(trees);
Log.info(Log.Tag.Sys.RANDF,"All trees ("+ntrees+") done in "+ t_alltrees);
}
static Sampling createSampler(final DRFParams params, int[] rowsPerChunks) {
switch(params.sampling_strategy) {
case RANDOM : return new Sampling.Random(params.sample, rowsPerChunks);
default:
assert false : "Unsupported sampling strategy";
return null;
}
}
/** RF execution parameters. */
public final static class DRFParams extends Iced {
/** Total number of trees */
int num_trees;
/** If true, build trees in parallel (default: true) */
boolean parallel;
/** Maximum depth for trees (default MaxInt) */
int max_depth;
/** Split statistic */
Tree.StatType stat_type;
/** Feature holding the classifier (default: #features-1) */
int classcol;
/** Utilized sampling method */
Sampling.Strategy sampling_strategy;
/** Proportion of observations to use for building each individual tree (default: .67)*/
float sample;
/** Limit of the cardinality of a feature before we bin. */
int bin_limit;
/** Weights of the different features (default: 1/features) */
double[] class_weights;
/** Arity under which we may use exclusive splits */
public int _exclusiveSplitLimit;
/** Output warnings and info*/
public int _verbose;
/** Number of features which are tried at each split
* If it is equal to -1 then it is computed as sqrt(num of usable columns) */
int num_split_features;
/** Defined strata sample rates for each class */
float[] strata_samples;
/** Use not only local data, but also try to pull data from other nodes. */
boolean _useNonLocalData;
/** Number of rows per chunk - used to replay sampling */
int _numrows;
/** Pseudo random seed initializing RF algorithm */
long seed;
/** Build regression trees if true */
boolean regression;
public static DRFParams create(int col, int ntrees, int depth, int numrows, int binLimit,
Tree.StatType statType, long seed, double[] classWt,
int numSplitFeatures, Sampling.Strategy samplingStrategy, float sample,
float[] strataSamples, int verbose, int exclusiveSplitLimit,
boolean useNonLocalData, boolean regression) {
DRFParams drfp = new DRFParams();
drfp.num_trees = ntrees;
drfp.max_depth = depth;
drfp.sample = sample;
drfp.bin_limit = binLimit;
drfp.stat_type = statType;
drfp.seed = seed;
drfp.class_weights = classWt;
drfp.num_split_features = numSplitFeatures;
drfp.sampling_strategy = samplingStrategy;
drfp.strata_samples = strataSamples;
drfp._numrows = numrows;
drfp._useNonLocalData = useNonLocalData;
drfp._exclusiveSplitLimit = exclusiveSplitLimit;
drfp._verbose = verbose;
drfp.classcol = col;
drfp.regression = regression;
drfp.parallel = true;
return drfp;
}
}
/**
* Cross-Validate a SpeeDRF model by building new models on N train/test holdout splits
* @param splits Frames containing train/test splits
* @param cv_preds Array of Frames to store the predictions for each cross-validation run
* @param offsets Array to store the offsets of starting row indices for each cross-validation run
* @param i Which fold of cross-validation to perform
*/
@Override public void crossValidate(Frame[] splits, Frame[] cv_preds, long[] offsets, int i) {
// Train a clone with slightly modified parameters (to account for cross-validation)
final SpeeDRF cv = (SpeeDRF) this.clone();
cv.genericCrossValidation(splits, offsets, i);
cv_preds[i] = ((SpeeDRFModel) UKV.get(cv.dest())).score(cv.validation);
new TAtomic<SpeeDRFModel>() {
@Override public SpeeDRFModel atomic(SpeeDRFModel m) {
if (!keep_cross_validation_splits && /*paranoid*/ cv.dest().toString().contains("xval")) {
m.get_params().source = null;
m.get_params().validation=null;
m.get_params().response=null;
}
return m;
}
}.invoke(cv.dest());
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/SpeeDRFModel.java
|
package hex.singlenoderf;
import dontweave.gson.JsonArray;
import dontweave.gson.JsonElement;
import dontweave.gson.JsonObject;
import dontweave.gson.JsonPrimitive;
import hex.ConfusionMatrix;
import hex.VarImp;
import hex.gbm.DTree;
import hex.gbm.DTree.TreeModel.TreeStats;
import water.*;
import water.api.*;
import water.api.Request.API;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.util.Counter;
import water.util.ModelUtils;
import java.util.Arrays;
import java.util.Random;
import static hex.singlenoderf.VariableImportance.asVotes;
public class SpeeDRFModel extends Model implements Job.Progress {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
/**
* Model Parameters
*/
/* Number of features these trees are built for */ int features;
/* Sampling strategy used for model */ Sampling.Strategy sampling_strategy;
/* Key name */ String src_key;
@API(help = " Sampling rate used when building trees.") float sample;
@API(help = "Strata sampling rate used for local-node strata-sampling") float[] strata_samples;
@API(help = "Number of split features defined by user.") int mtry;
/* Number of computed split features per node */ int[] node_split_features;
@API(help = "Number of keys the model expects to be built for it.") int N;
@API(help = "Max depth to grow trees to") int max_depth;
@API(help = "All the trees in the model.") Key[] t_keys;
/* Local forests produced by nodes */ Key[][] local_forests;
/* Errors Per Tree */ long[] errorsPerTree;
/* Total time in seconds to produce the model */ long time;
/* Is there a validation set?*/ boolean validation;
/* Response Min */ int resp_min;
/* Class weights */ double[] weights;
@API(help = "bin limit") int nbins;
/* Raw tree data. for faster classification passes */ transient byte[][] trees;
@API(help = "Job key") Key jobKey;
/* Destination Key */ Key dest_key;
/* Current model status */ String current_status;
@API(help = "MSE by tree") double[] errs;
/* Statistic Type */ Tree.StatType statType;
@API(help = "Test Key") Key testKey;
/* Out of bag error estimate */ boolean oobee;
/* Seed */ protected long zeed;
/* Variable Importance */ boolean importance;
/* Final Confusion Matrix */ CMTask.CMFinal confusion;
@API(help = "Confusion Matrices") ConfusionMatrix[] cms;
/* Confusion Matrix */ long[][] cm;
@API(help = "Tree Statistics") TreeStats treeStats;
@API(help = "cmDomain") String[] cmDomain;
@API(help = "AUC") public AUCData validAUC;
@API(help = "Variable Importance") public VarImp varimp;
/* Regression or Classification */ boolean regression;
/* Score each iteration? */ boolean score_each;
@API(help = "CV Error") public double cv_error;
@API(help = "Verbose Mode") public boolean verbose;
@API(help = "Verbose Output") public String[] verbose_output;
@API(help = "Use non-local data") public boolean useNonLocal;
@API(help = "Dtree keys") public Key[/*ntree*/][/*nclass*/] dtreeKeys;
@API(help = "DTree Model") public SpeeDRFModel_DTree dtreeTreeModel = null;
@API(help = "score_pojo boolean") public boolean score_pojo;
private float _ss; private float _cnt;
/**
* Extra helper variables.
*/
private transient VariableImportance.TreeMeasures[/*features*/] _treeMeasuresOnOOB;
// Tree votes/SSE per individual feature on permuted OOB rows
private transient VariableImportance.TreeMeasures[/*features*/] _treeMeasuresOnSOOB;
public static final String JSON_CONFUSION_KEY = "confusion_key";
public static final String JSON_CM_TYPE = "type";
public static final String JSON_CM_HEADER = "header";
public static final String JSON_CM_MATRIX = "scores";
public static final String JSON_CM_TREES = "used_trees";
public static final String JSON_CM_CLASS_ERR = "classification_error";
public static final String JSON_CM_ROWS = "rows";
public static final String JSON_CM_ROWS_SKIPPED = "rows_skipped";
public static final String JSON_CM_CLASSES_ERRORS = "classes_errors";
@API(help = "Model parameters", json = true)
private final SpeeDRF parameters;
@Override public final SpeeDRF get_params() { return parameters; }
@Override public final Request2 job() { return get_params(); }
@Override public final VarImp varimp() { return varimp; }
public float[] priordist() { return _priorClassDist; }
public float[] modeldist() { return _modelClassDist; }
public SpeeDRFModel(Key selfKey, Key dataKey, Frame fr, String[] domain, SpeeDRF params, float[] priorDist) {
super(selfKey, dataKey, fr, priorDist);
this.dest_key = selfKey;
this.parameters = params;
score_each = params.score_each_iteration;
regression = !(params.classification);
}
protected SpeeDRFModel(SpeeDRFModel model, double err, ConfusionMatrix cm, VarImp varimp, AUCData auc) {
super(model._key,model._dataKey,model._names,model._domains, model._priorClassDist,model._modelClassDist,model.training_start_time,model.training_duration_in_ms);
this.features = model.features;
this.sampling_strategy = model.sampling_strategy;
this.sample = model.sample;
this.strata_samples = model.strata_samples;
this.mtry = model.mtry;
this.node_split_features = model.node_split_features;
this.N = model.N;
this.max_depth = model.max_depth;
this.t_keys = model.t_keys;
this.local_forests = model.local_forests;
this.time = model.time;
this.weights = model.weights;
this.nbins = model.nbins;
this.trees = model.trees;
this.jobKey = model.jobKey;
this.dest_key = model.dest_key;
this.current_status = model.current_status;
this.errs = model.errs;
this.statType = model.statType;
this.testKey = model.testKey;
this.oobee = model.oobee;
this.zeed = model.zeed;
this.importance = model.importance;
this.confusion = model.confusion;
this.cms = Arrays.copyOf(model.cms, model.cms.length+1);
this.cms[this.cms.length-1] = cm;
this.parameters = model.parameters;
this.cm = cm._arr;
this.treeStats = model.treeStats;
this.cmDomain = model.cmDomain;
this.validAUC = auc;
this.varimp = varimp;
this.regression = model.regression;
this.score_each = model.score_each;
this.cv_error = err;
this.verbose = model.verbose;
this.verbose_output = model.verbose_output;
this.useNonLocal = model.useNonLocal;
this.errorsPerTree = model.errorsPerTree;
this.resp_min = model.resp_min;
this.validation = model.validation;
this.src_key = model.src_key;
this.score_pojo = model.score_pojo;
}
public int treeCount() { return t_keys.length; }
public int size() { return t_keys.length; }
public int classes() { return nclasses(); }
@Override public ConfusionMatrix cm() { return validAUC == null ? cms[cms.length-1] : validAUC.CM(); }
private void scoreOnTest(Frame fr, Vec modelResp) {
Frame scored = score(fr);
water.api.ConfusionMatrix cm = new water.api.ConfusionMatrix();
cm.vactual = fr.lastVec();
cm.vpredict = scored.anyVec();
cm.invoke();
// Regression scoring
if (regression) {
float mse = (float) cm.mse;
errs[errs.length - 1] = mse;
cms[cms.length - 1] = null;
// Classification scoring
} else {
Vec lv = scored.lastVec();
double mse = CMTask.MSETask.doTask(scored.add("actual", fr.lastVec()));
this.cm = cm.cm;
errs[errs.length - 1] = (float)mse;
ConfusionMatrix new_cm = new ConfusionMatrix(this.cm);
cms[cms.length - 1] = new_cm;
// Create the ROC Plot
if (classes() == 2) {
Vec v = null;
Frame fa = null;
if (lv.isInt()) {
fa = new MRTask2() {
@Override public void map(Chunk[] cs, NewChunk nchk) {
int rows = cs[0]._len;
int cols = cs.length - 1;
for (int r = 0; r < rows; ++r) {
nchk.addNum(cs[cols].at0(r) == 0 ? 1e-10 : 1.0 - 1e-10);
}
}
}.doAll(1, scored).outputFrame(null,null);
v = fa.anyVec();
}
AUC auc_calc = new AUC();
auc_calc.vactual = cm.vactual;
auc_calc.vpredict = v == null ? lv : v; // lastVec is class1
auc_calc.invoke();
validAUC = auc_calc.data();
if (v != null) UKV.remove(v._key);
if (fa != null) fa.delete();
UKV.remove(lv._key);
}
}
scored.remove("actual");
scored.delete();
}
private void scoreOnTrain(Frame fr, Vec modelResp) {
final CMTask cmTask = CMTask.scoreTask(this, treeCount(), oobee, fr, modelResp);
if (regression) {
float mse = cmTask._ss / ( (float) (cmTask._rowcnt));
errs[errs.length - 1] = mse;
cms[cms.length - 1] = null;
} else {
confusion = CMTask.CMFinal.make(cmTask._matrix, this, classNames(), cmTask._errorsPerTree, oobee, cmTask._sum, cmTask._cms);
this.cm = cmTask._matrix._matrix;
errorsPerTree = cmTask._errorsPerTree;
errs[errs.length - 1] = confusion.mse();
cms[cms.length - 1] = new ConfusionMatrix(confusion._matrix);
if (classes() == 2) validAUC = makeAUC(toCMArray(confusion._cms), ModelUtils.DEFAULT_THRESHOLDS, cmDomain);
}
}
void scoreAllTrees(Frame fr, Vec modelResp) {
if (this.validation) scoreOnTest(fr, modelResp);
else scoreOnTrain(fr, modelResp);
}
void variableImportanceCalc(Frame fr, Vec modelResp) { varimp = doVarImpCalc(fr, this, modelResp); }
public static SpeeDRFModel make(SpeeDRFModel old, Key tkey, Key dtKey, int nodeIdx, String tString, int tree_id) {
// Create a new model for atomic update
SpeeDRFModel m = (SpeeDRFModel)old.clone();
// Update the tree keys with the new one (tkey)
m.t_keys = Arrays.copyOf(old.t_keys, old.t_keys.length + 1);
m.t_keys[m.t_keys.length-1] = tkey;
// Update the dtree keys with the new one (dtkey)
m.dtreeKeys[tree_id][0] = dtKey;
// Update the local_forests
m.local_forests[nodeIdx][tree_id] = tkey;
// Update the treeStrings?
if (old.verbose_output.length < 2) {
m.verbose_output = Arrays.copyOf(old.verbose_output, old.verbose_output.length + 1);
m.verbose_output[m.verbose_output.length - 1] = tString;
}
m.errs = Arrays.copyOf(old.errs, old.errs.length+1);
m.errs[m.errs.length - 1] = -1.0;
m.cms = Arrays.copyOf(old.cms, old.cms.length+1);
m.cms[m.cms.length-1] = null;
return m;
}
public String name(int atree) {
if( atree == -1 ) atree = size();
assert atree <= size();
return _key.toString() + "[" + atree + "]";
}
/** Return the bits for a particular tree */
public byte[] tree(int tree_id) {
byte[][] ts = trees;
if( ts == null ) trees = ts = new byte[tree_id+1][];
if( tree_id >= ts.length ) trees = ts = Arrays.copyOf(ts,tree_id+1);
if( ts[tree_id] == null ) ts[tree_id] = DKV.get(t_keys[tree_id]).memOrLoad();
return ts[tree_id];
}
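// Descriptive note: the byte[][] cache above is transient and filled lazily, so each
// tree's serialized bits are pulled from the DKV on first use on this node and then
// reused by subsequent classify/score calls.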
/** Free all internal tree keys. */
@Override public Futures delete_impl(Futures fs) {
for( Key k : t_keys ) UKV.remove(k,fs);
for (Key[] ka : local_forests) for (Key k : ka) if (k != null) UKV.remove(k, fs);
return fs;
}
/**
* Classify a row according to one particular tree.
* @param tree_id the number of the tree to use
* @param chunks the chunk we are using
* @param row the row number in the chunk
* @param modelDataMap mapping from model/tree columns to data columns
* @return the predicted response class, or class+1 for broken rows
*/
public float classify0(int tree_id, Chunk[] chunks, int row, int modelDataMap[], short badrow, boolean regression) {
return Tree.classify(new AutoBuffer(tree(tree_id)), chunks, row, modelDataMap, badrow, regression);
}
private void vote(Chunk[] chks, int row, int modelDataMap[], int[] votes) {
int numClasses = classes();
assert votes.length == numClasses + 1 /* +1 to catch broken rows */;
for( int i = 0; i < treeCount(); i++ )
votes[(int)classify0(i, chks, row, modelDataMap, (short) numClasses, false)]++;
}
public short classify(Chunk[] chks, int row, int modelDataMap[], int[] votes, double[] classWt, Random rand ) {
// Vote all the trees for the row
vote(chks, row, modelDataMap, votes);
return classify(votes, classWt, rand);
}
public short classify(int[] votes, double[] classWt, Random rand) {
// Scale the votes by class weights: it is as if rows of the weighted classes
// had been replicated many times and so cast many votes.
if( classWt != null )
for( int i=0; i<votes.length-1; i++ )
votes[i] = (int) (votes[i] * classWt[i]);
// Tally results
int result = 0;
int tied = 1;
for( int i = 1; i < votes.length - 1; i++ )
if( votes[i] > votes[result] ) { result=i; tied=1; }
else if( votes[i] == votes[result] ) { tied++; }
if( tied == 1 ) return (short) result;
// Tie-breaker logic
int j = rand == null ? 0 : rand.nextInt(tied); // From zero to number of tied classes-1
int k = 0;
for( int i = 0; i < votes.length - 1; i++ )
if( votes[i]==votes[result] && (k++ >= j) )
return (short)i;
throw H2O.unimpl();
}
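// Illustrative tie-break example (hypothetical votes): with votes = {A:5, B:5, C:2}
// the top count 5 is shared by A and B, so tied == 2 and rand.nextInt(2) selects one
// of the two tied classes uniformly; for a fixed Random the choice is reproducible.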
// The seed for a given tree
long seed(int ntree) { return UDP.get8(tree(ntree), 4); }
// The producer for a given tree
byte producerId(int ntree) { return tree(ntree)[12]; }
// Lazy initialization of tree leaves, depth
private transient Counter _tl, _td;
/** Internal computation of depth and number of leaves. */
public void find_leaves_depth() {
// if( _tl != null ) return;
_td = new Counter();
_tl = new Counter();
for( Key tkey : t_keys ) {
long dl = Tree.depth_leaves(new AutoBuffer(DKV.get(tkey).memOrLoad()), regression);
_td.add((int) (dl >> 32));
_tl.add((int) dl);
}
}
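// Descriptive note: Tree.depth_leaves packs both statistics into one long, with the
// depth decoded from the high 32 bits (dl >> 32) and the leaf count from the low 32
// bits, so a single pass over each serialized tree feeds both counters.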
public Counter leaves() { find_leaves_depth(); return _tl; }
public Counter depth() { find_leaves_depth(); return _td; }
private static int find(String n, String[] names) {
if( n == null ) return -1;
for( int j = 0; j<names.length; j++ )
if( n.equals(names[j]) )
return j;
return -1;
}
public int[] colMap(Frame df) {
int res[] = new int[df._names.length]; //new int[names.length];
for(int i = 0; i < res.length; i++) {
res[i] = find(df.names()[i], _names);
}
return res;
}
@Override protected float[] score0(double[] data, float[] preds) {
int numClasses = classes();
if (numClasses == 1) {
float p = 0.f;
for (int i = 0; i < treeCount(); ++i) {
p += Tree.classify(new AutoBuffer(tree(i)), data, 0.0, true) / (1. * treeCount());
}
return new float[]{p};
} else {
int votes[] = new int[numClasses + 1/* +1 to catch broken rows */];
preds = new float[numClasses + 1];
for( int i = 0; i < treeCount(); i++ ) {
// DTree.TreeModel.CompressedTree t = UKV.get(dtreeKeys[i][0]);
votes[(int) Tree.classify(new AutoBuffer(tree(i)), data, numClasses, false)]++;
}
float s = 0.f;
for (int v : votes) s += (float)v;
if (get_params().balance_classes) {
for (int i = 0; i < votes.length - 1; ++i)
preds[i+1] = ( (float)votes[i] / treeCount());
return preds;
}
for (int i = 0; i < votes.length - 1; ++i)
preds[i+1] = ( (float)votes[i] / (float)treeCount());
// preds[0] = (float) (classify(votes, null, null) + resp_min);
preds[0] = ModelUtils.getPrediction(preds, data);
float[] rawp = new float[preds.length + 1];
for (int i = 0; i < votes.length; ++i) rawp[i+1] = (float)votes[i];
return preds;
}
}
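// Sketch of the scoring above (hypothetical values): for regression the prediction is
// the plain average of the per-tree outputs; for classification with 3 classes and
// 10 trees voting {6, 3, 1}, preds becomes {predicted class, 0.6, 0.3, 0.1} on the
// non-balanced path, where preds[0] is filled in by ModelUtils.getPrediction from
// those vote frequencies.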
@Override public float progress() { return get_params().cv_progress(t_keys.length / (float) N); }
static String[] cfDomain(final CMTask.CMFinal cm, int maxClasses) {
String[] dom = cm.domain();
if (dom.length > maxClasses)
throw new IllegalArgumentException("The column has more than "+maxClasses+" values. Are you sure you have that many classes?");
return dom;
}
private boolean errsNotNull() {
boolean allMinus1 = true;
if (errs == null) return false;
for (double err : errs) {
if (err > -1) allMinus1 = false;
}
return !allMinus1;
}
public void generateHTML(String title, StringBuilder sb) {
String style = "<style>\n"+
"td, th { min-width:60px;}\n"+
"</style>\n";
sb.append(style);
DocGen.HTML.title(sb,title);
sb.append("<div class=\"alert\">").append("Actions: ");
sb.append(Inspect2.link("Inspect training data (" + _dataKey.toString() + ")", _dataKey)).append(", ");
if (validation)
sb.append(Inspect2.link("Inspect testing data (" + testKey.toString() + ")", testKey)).append(", ");
sb.append(Predict.link(_key, "Score on dataset" ));
if (this.size() > 0 && this.size() < N && !Job.findJob(jobKey).isCancelledOrCrashed()) {
sb.append(", ");
sb.append("<i class=\"icon-stop\"></i> ").append(Cancel.link(jobKey, "Cancel training"));
}
sb.append("</div>");
DocGen.HTML.paragraph(sb,"Model Key: "+_key);
DocGen.HTML.paragraph(sb,"Max max_depth: "+max_depth+", Nbins: "+nbins+", Trees: " + this.size());
DocGen.HTML.paragraph(sb, "Sample Rate: "+sample + ", User Seed: "+get_params().seed+ ", Internal Seed: "+zeed+", mtry: "+mtry);
sb.append("</pre>");
if (this.size() > 0 && this.size() < N) sb.append("Current Status: ").append("Building Random Forest");
else {
if (this.size() == N && !this.current_status.equals("Performing Variable Importance Calculation.")) {
sb.append("Current Status: ").append("Complete.");
} else {
if( Job.findJob(jobKey).isCancelledOrCrashed()) {
sb.append("Current Status: ").append("Cancelled.");
} else {
sb.append("Current Status: ").append(this.current_status);
}
}
}
if (_have_cv_results) {
sb.append("<div class=\"alert\">Scoring results reported for ").append(this.parameters.n_folds).append("-fold cross-validated training data ").append(Inspect2.link(_dataKey.toString(), _dataKey)).append("</div>");
} else {
if (testKey != null)
sb.append("<div class=\"alert\">Reported on ").append(Inspect2.link(testKey.toString(), testKey)).append("</div>");
else
sb.append("<div class=\"alert\">Reported on ").append( oobee ? "OOB" : "training" ).append(" data</div>");
}
//build cm
if(!regression) {
// if (confusion != null && confusion.valid() && (this.N * .25 > 0) && classes() >= 2) {
// buildCM(sb);
// } else {
if (this.cms[this.cms.length - 1] != null && (this.N * .25 > 0 && classes() >= 2) ) {
this.cms[this.cms.length - 1].toHTML(sb, this.cmDomain);
// }
}
}
sb.append("<br />");
if( errsNotNull() && this.size() > 0) {
DocGen.HTML.section(sb,"Mean Squared Error by Tree");
DocGen.HTML.arrayHead(sb);
sb.append("<tr style='min-width:60px'><th>Trees</th>");
int last = this.size(); // + 1;
for( int i=last; i>=0; i-- )
sb.append("<td style='min-width:60px'>").append(i).append("</td>");
sb.append("</tr>");
sb.append("<tr style='min-width: 60px;'><th style='min-width: 60px;' class='warning'>MSE</th>");
for( int i=last; i>=0; i-- )
sb.append( (!(Double.isNaN(errs[i]) || errs[i] <= 0.0)) ? String.format("<td style='min-width:60px'>%5.5f</td>",errs[i]) : "<td style='min-width:60px'>---</td>");
sb.append("</tr>");
DocGen.HTML.arrayTail(sb);
}
sb.append("<br/>");
JsonObject trees = new JsonObject();
trees.addProperty(Constants.TREE_COUNT, this.size());
if( this.size() > 0 ) {
trees.add(Constants.TREE_DEPTH, this.depth().toJson());
trees.add(Constants.TREE_LEAVES, this.leaves().toJson());
}
if (validAUC != null) {
generateHTMLAUC(sb);
}
generateHTMLTreeStats(sb, trees);
if (varimp != null) {
generateHTMLVarImp(sb);
}
printCrossValidationModelsHTML(sb);
}
public DTree.TreeModel transform2DTreeModel() {
if (dtreeTreeModel != null) {
dtreeTreeModel = new SpeeDRFModel_DTree(dtreeTreeModel, dtreeKeys, treeStats); //freshen the dtreeTreeModel
return dtreeTreeModel;
}
Key key = Key.make();
Key model_key = _key;
Key dataKey = _dataKey;
Key testKey = null;
String[] names = _names;
String[][] domains = _domains;
String[] cmDomain = this.cmDomain;
int ntrees = treeCount();
int min_rows = 0;
int nbins = this.nbins;
int mtries = this.mtry;
long seed = -1;
int num_folds = 0;
float[] priorClassDist = null;
float[] classDist = null;
// dummy model
dtreeTreeModel = new SpeeDRFModel_DTree(model_key, model_key, dataKey,testKey,names,domains,cmDomain,ntrees, max_depth, min_rows, nbins, mtries, num_folds, priorClassDist, classDist);
// update the model
dtreeTreeModel = new SpeeDRFModel_DTree(dtreeTreeModel, dtreeKeys, treeStats);
dtreeTreeModel.isFromSpeeDRF=true; // tells the toJava method the model is translated from a speedrf model.
return dtreeTreeModel;
}
public static class SpeeDRFModel_DTree extends DTree.TreeModel {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
Key modelKey;
public SpeeDRFModel_DTree(Key key, Key modelKey, Key dataKey, Key testKey, String names[], String domains[][], String[] cmDomain, int ntrees, int max_depth, int min_rows, int nbins, int mtries, int num_folds, float[] priorClassDist, float[] classDist) {
super(key,dataKey,testKey,names,domains,cmDomain,ntrees, max_depth, min_rows, nbins, num_folds, priorClassDist, classDist);
this.modelKey = modelKey;
}
public SpeeDRFModel_DTree(SpeeDRFModel_DTree prior, Key[][] treeKeys, TreeStats tstats) {
super(prior, treeKeys, null, prior.cms, tstats, null, null);
}
@Override
protected void generateModelDescription(StringBuilder sb) { }
}
@Override public ModelAutobufferSerializer getModelSerializer() {
// Return a serializer which knows how to serialize keys
return new ModelAutobufferSerializer() {
@Override protected AutoBuffer postSave(Model m, AutoBuffer ab) {
int ntrees = N;
ab.put4(ntrees);
// must fill out t_keys and dtreeKeys
for (int i = 0; i < ntrees; ++i) {
byte[] bits = tree(i);
ab.putA1(bits);
for (int j = 0; j < nclasses(); ++j) {
if (dtreeKeys[i][j] == null) continue;
Value v = DKV.get(dtreeKeys[i][j]);
if (v == null) continue;
DTree.TreeModel.CompressedTree t = v.get();
ab.put(t);
}
}
return ab;
}
@Override protected AutoBuffer postLoad(Model m, AutoBuffer ab) {
int ntrees = ab.get4();
Futures fs = new Futures();
for (int i = 0; i < ntrees; ++i) {
DKV.put(t_keys[i],new Value(t_keys[i],ab.getA1()), fs);
for (int j = 0; j < nclasses(); ++j) {
if (dtreeKeys[i][j] == null) continue;
UKV.put(dtreeKeys[i][j], new Value(dtreeKeys[i][j], ab.get(DTree.TreeModel.CompressedTree.class)), fs);
}
}
fs.blockForPending();
return ab;
}
};
}
static final String NA = "---";
public void generateHTMLTreeStats(StringBuilder sb, JsonObject trees) {
DocGen.HTML.section(sb,"Tree stats");
DocGen.HTML.arrayHead(sb);
sb.append("<tr><th> </th>").append("<th>Min</th><th>Mean</th><th>Max</th></tr>");
TreeStats treeStats = new TreeStats();
double[] depth_stats = stats(trees.get(Constants.TREE_DEPTH));
double[] leaf_stats = stats(trees.get(Constants.TREE_LEAVES));
sb.append("<tr><th>Depth</th>")
.append("<td>").append(depth_stats != null ? (int)depth_stats[0] : NA).append("</td>")
.append("<td>").append(depth_stats != null ? depth_stats[1] : NA).append("</td>")
.append("<td>").append(depth_stats != null ? (int)depth_stats[2] : NA).append("</td></tr>");
sb.append("<th>Leaves</th>")
.append("<td>").append(leaf_stats != null ? (int)leaf_stats[0] : NA).append("</td>")
.append("<td>").append(leaf_stats != null ? leaf_stats[1] : NA).append("</td>")
.append("<td>").append(leaf_stats != null ? (int)leaf_stats[2] : NA).append("</td></tr>");
DocGen.HTML.arrayTail(sb);
if(depth_stats != null && leaf_stats != null) {
treeStats.minDepth = (int)depth_stats[0];
treeStats.meanDepth = (float)depth_stats[1];
treeStats.maxDepth = (int)depth_stats[2];
treeStats.minLeaves = (int)leaf_stats[0];
treeStats.meanLeaves = (float)leaf_stats[1];
treeStats.maxLeaves = (int)leaf_stats[2];
treeStats.setNumTrees(N);
} else {
treeStats = null;
}
this.treeStats = treeStats;
}
private static double[] stats(JsonElement json) {
if( json == null ) {
return null;
} else {
JsonObject obj = json.getAsJsonObject();
return new double[]{
Math.round(obj.get(Constants.MIN).getAsDouble() * 1000.0) / 1000.0,
Math.round(obj.get(Constants.MEAN).getAsDouble() * 1000.0) / 1000.0,
Math.round(obj.get(Constants.MAX).getAsDouble() * 1000.0) / 1000.0};
}
}
public void buildCM(StringBuilder sb) {
int tasks = this.N;
int finished = this.size();
int modelSize = tasks * 25/100;
modelSize = modelSize == 0 || finished==tasks ? finished : modelSize * (finished/modelSize);
if (confusion!=null && confusion.valid() && modelSize > 0) {
//finished += 1;
JsonObject cm = new JsonObject();
JsonArray cmHeader = new JsonArray();
JsonArray matrix = new JsonArray();
cm.addProperty(JSON_CM_TYPE, oobee ? "OOB" : "training");
cm.addProperty(JSON_CM_CLASS_ERR, confusion.classError());
cm.addProperty(JSON_CM_ROWS_SKIPPED, confusion.skippedRows());
cm.addProperty(JSON_CM_ROWS, confusion.rows());
// create the header
for (String s : cfDomain(confusion, 1024))
cmHeader.add(new JsonPrimitive(s));
cm.add(JSON_CM_HEADER,cmHeader);
// add the matrix
final int nclasses = confusion.dimension();
JsonArray classErrors = new JsonArray();
for (int crow = 0; crow < nclasses; ++crow) {
JsonArray row = new JsonArray();
int classHitScore = 0;
for (int ccol = 0; ccol < nclasses; ++ccol) {
row.add(new JsonPrimitive(confusion.matrix(crow,ccol)));
if (crow!=ccol) classHitScore += confusion.matrix(crow,ccol);
}
// may produce NaN entries when a class has no rows (0.f / 0)
classErrors.add(new JsonPrimitive((float)classHitScore / (classHitScore + confusion.matrix(crow,crow))));
matrix.add(row);
}
cm.add(JSON_CM_CLASSES_ERRORS, classErrors);
cm.add(JSON_CM_MATRIX,matrix);
cm.addProperty(JSON_CM_TREES,modelSize);
// Signal end only and only if all trees were generated and confusion matrix is valid
DocGen.HTML.section(sb, "Confusion Matrix:");
if (cm.has(JSON_CM_MATRIX)) {
sb.append("<dl class='dl-horizontal'>");
sb.append("<dt>classification error</dt><dd>").append(String.format("%5.5f %%", 100*cm.get(JSON_CM_CLASS_ERR).getAsFloat())).append("</dd>");
long rows = cm.get(JSON_CM_ROWS).getAsLong();
long skippedRows = cm.get(JSON_CM_ROWS_SKIPPED).getAsLong();
sb.append("<dt>used / skipped rows </dt><dd>").append(String.format("%d / %d (%3.1f %%)", rows, skippedRows, (double)skippedRows*100/(skippedRows+rows))).append("</dd>");
sb.append("<dt>trees used</dt><dd>").append(cm.get(JSON_CM_TREES).getAsInt()).append("</dd>");
sb.append("</dl>");
sb.append("<table class='table table-striped table-bordered table-condensed'>");
sb.append("<tr style='min-width: 60px;'><th style='min-width: 60px;'>Actual \\ Predicted</th>");
JsonArray header = (JsonArray) cm.get(JSON_CM_HEADER);
for (JsonElement e: header)
sb.append("<th style='min-width: 60px;'>").append(e.getAsString()).append("</th>");
sb.append("<th style='min-width: 60px;'>Error</th></tr>");
int classes = header.size();
long[] totals = new long[classes];
JsonArray matrix2 = (JsonArray) cm.get(JSON_CM_MATRIX);
long sumTotal = 0;
long sumError = 0;
for (int crow = 0; crow < classes; ++crow) {
JsonArray row = (JsonArray) matrix2.get(crow);
long total = 0;
long error = 0;
sb.append("<tr style='min-width: 60px;'><th style='min-width: 60px;'>").append(header.get(crow).getAsString()).append("</th>");
for (int ccol = 0; ccol < classes; ++ccol) {
long num = row.get(ccol).getAsLong();
total += num;
totals[ccol] += num;
if (ccol == crow) {
sb.append("<td style='background-color:LightGreen; min-width: 60px;'>");
} else {
sb.append("<td styile='min-width: 60px;'>");
error += num;
}
sb.append(num);
sb.append("</td>");
}
sb.append("<td style='min-width: 60px;'>");
sb.append(String.format("%.05f = %,d / %d", (double)error/total, error, total));
sb.append("</td></tr>");
sumTotal += total;
sumError += error;
}
sb.append("<tr style='min-width: 60px;'><th style='min-width: 60px;'>Totals</th>");
for (long total : totals) sb.append("<td style='min-width: 60px;'>").append(total).append("</td>");
sb.append("<td style='min-width: 60px;'><b>");
sb.append(String.format("%.05f = %,d / %d", (double)sumError/sumTotal, sumError, sumTotal));
sb.append("</b></td></tr>");
sb.append("</table>");
} else {
sb.append("<div class='alert alert-info'>");
sb.append("Confusion matrix is being computed into the key:</br>");
sb.append(cm.get(JSON_CONFUSION_KEY).getAsString());
sb.append("</div>");
}
}
}
private static ConfusionMatrix[] toCMArray(long[][][] cms) {
int n = cms.length;
ConfusionMatrix[] res = new ConfusionMatrix[n];
for (int i = 0; i < n; i++) res[i] = new ConfusionMatrix(cms[i]);
return res;
}
protected static AUCData makeAUC(ConfusionMatrix[] cms, float[] threshold, String[] cmDomain) {
return cms != null ? new AUC(cms, threshold, cmDomain).data() : null;
}
protected void generateHTMLAUC(StringBuilder sb) {
validAUC.toHTML(sb);
}
protected void generateHTMLVarImp(StringBuilder sb) {
if (varimp!=null) {
// Set up variable names for importance
varimp.setVariables(Arrays.copyOf(_names, _names.length-1));
varimp.toHTML(this, sb);
}
}
protected VarImp doVarImpCalc(final Frame fr, final SpeeDRFModel model, final Vec resp) {
_treeMeasuresOnOOB = new VariableImportance.TreeVotes[fr.numCols() - 1];
_treeMeasuresOnSOOB = new VariableImportance.TreeVotes[fr.numCols() - 1];
for (int i=0; i<fr.numCols() - 1; i++) _treeMeasuresOnOOB[i] = new VariableImportance.TreeVotes(treeCount());
for (int i=0; i<fr.numCols() - 1; i++) _treeMeasuresOnSOOB[i] = new VariableImportance.TreeVotes(treeCount());
final int ncols = fr.numCols();
final int trees = treeCount();
for (int i=0; i<ncols - 1; i++) _treeMeasuresOnSOOB[i] = new VariableImportance.TreeVotes(trees);
Futures fs = new Futures();
for (int var=0; var<ncols - 1; var++) {
final int variable = var;
H2O.H2OCountedCompleter task4var = new H2O.H2OCountedCompleter() {
@Override public void compute2() {
VariableImportance.TreeVotes[] cd = VariableImportance.collectVotes(trees, classes(), fr, ncols - 1, sample, variable, model, resp);
asVotes(_treeMeasuresOnOOB[variable]).append(cd[0]);
asVotes(_treeMeasuresOnSOOB[variable]).append(cd[1]);
tryComplete();
}
};
H2O.submitTask(task4var);
fs.add(task4var);
}
fs.blockForPending();
// Compute varimp for individual features (_ncols)
final float[] varimp = new float[ncols - 1]; // output variable importance
float[] varimpSD = new float[ncols - 1]; // output variable importance sd
for (int var=0; var<ncols - 1; var++) {
long[] votesOOB = asVotes(_treeMeasuresOnOOB[var]).votes();
long[] votesSOOB = asVotes(_treeMeasuresOnSOOB[var]).votes();
float imp = 0.f;
float v = 0.f;
long[] nrows = asVotes(_treeMeasuresOnOOB[var]).nrows();
for (int i = 0; i < votesOOB.length; ++i) {
double delta = ((float) (votesOOB[i] - votesSOOB[i])) / (float) nrows[i];
imp += delta;
v += delta * delta;
}
imp /= model.treeCount();
varimp[var] = imp;
varimpSD[var] = (float)Math.sqrt( (v/model.treeCount() - imp*imp) / model.treeCount() );
}
return new VarImp.VarImpMDA(varimp, varimpSD, model.treeCount());
}
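// Mean-decrease-in-accuracy sketch of the loop above, per feature f over T trees
// (restating the code, not an external reference):
//   delta_t = (votesOOB_t - votesPermutedOOB_t) / nrows_t
//   imp(f)  = (1/T) * sum_t delta_t
//   sd(f)   = sqrt( ((1/T) * sum_t delta_t^2 - imp(f)^2) / T )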
public static float[] computeVarImpSD(float[][] vote_diffs) {
float[] res = new float[vote_diffs.length];
for (int var = 0; var < vote_diffs.length; ++var) {
float mean_diffs = 0.f;
float r = 0.f;
for (float d: vote_diffs[var]) mean_diffs += d / (float) vote_diffs[var].length; // average over this feature's diffs
for (float d: vote_diffs[var]) {
r += (d - mean_diffs) * (d - mean_diffs);
}
r *= 1.f / (float)vote_diffs[var].length;
res[var] = (float) Math.sqrt(r);
}
return res;
}
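// Descriptive note: the helper above computes, per feature, the population standard
// deviation of its per-tree vote differences,
//   sd(f) = sqrt( (1/n) * sum_i (d_i - mean(d))^2 ),  n = vote_diffs[f].length.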
@Override protected void setCrossValidationError(Job.ValidatedJob job, double cv_error, water.api.ConfusionMatrix cm, AUCData auc, HitRatio hr) {
_have_cv_results = true;
SpeeDRFModel drfm = ((SpeeDRF)job).makeModel(this, cv_error, cm.cm == null ? null : new ConfusionMatrix(cm.cm, this.nclasses()), this.varimp, auc);
drfm._have_cv_results = true;
DKV.put(this._key, drfm); //overwrite this model
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/SpeeDRFModelView.java
|
package hex.singlenoderf;
import water.DKV;
import water.UKV;
import water.Key;
import water.Request2;
import water.api.DocGen;
import water.api.Request;
public class SpeeDRFModelView extends Request2 {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="SpeeDRF Model Key", required = true, filter = SpeeDRFKeyFilter.class)
Key _modelKey;
class SpeeDRFKeyFilter extends H2OKey { public SpeeDRFKeyFilter() { super("",true); } }
@API(help="SpeeDRF Model")
SpeeDRFModel speedrf_model;
public static String link(String txt, Key model) {
return "<a href='/2/SpeeDRFModelView.html?_modelKey=" + model + "'>" + txt + "</a>";
}
public static Response redirect(Request req, Key modelKey) {
return Response.redirect(req, "/2/SpeeDRFModelView", "_modelKey", modelKey);
}
@Override public void toJava(StringBuilder sb) { speedrf_model.transform2DTreeModel().toJavaHtml(sb); }
@Override public boolean toHTML(StringBuilder sb){
speedrf_model.generateHTML("", sb);
return true;
}
@Override public String serveJava() {
SpeeDRFModel m = UKV.get(_modelKey);
if (m!=null) {
return m.transform2DTreeModel().toJava();
} else {
return "";
}
}
@Override protected Response serve() {
speedrf_model = DKV.get(_modelKey).get();
return Response.done(this);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/SpeeDRFProgressPage.java
|
package hex.singlenoderf;
import water.DKV;
import water.Job;
import water.Key;
import water.Value;
import water.api.Progress2;
import water.api.Request;
public class SpeeDRFProgressPage extends Progress2 {
/** Return {@link water.api.RequestBuilders.Response} for finished job. */
@Override protected Response jobDone(final Key dst) {
return SpeeDRFModelView.redirect(this, dst);
}
public static Response redirect(Request req, Key jobkey, Key dest) {
return Response.redirect(req, "/2/SpeeDRFProgressPage", JOB_KEY, jobkey, DEST_KEY, dest);
}
@Override public boolean toHTML( StringBuilder sb ) {
Job jjob = Job.findJob(job_key);
if (jjob ==null) return true;
Value v = DKV.get(jjob.dest());
if(v != null){
SpeeDRFModel m = v.get();
m.generateHTML("SpeeDRF", sb);
} else
sb.append("<b>No model yet.</b>");
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/Statistic.java
|
package hex.singlenoderf;
import hex.singlenoderf.Data.Row;
import water.util.Utils;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Random;
/** Keeps track of the column distributions and analyzes the column splits in the
* end producing the single split that will be used for the node. */
abstract class Statistic {
/** Column distributions: column x arity x classes
* Remembers the number of rows of the given column index, encodedValue, class. */
protected final int[][][] _columnDists;
protected final int[] _features; // Columns/features that are currently used.
protected Random _random; // Pseudo random number generator
private long _seed; // Seed for prng
private HashSet<Integer> _remembered; // Features already used
final double[] _classWt; // Class weights
private int _exclusiveSplitLimit;
protected final int[/*num_features*/][/*column_bins*/][/*response_bins*/] _columnDistsRegression;
boolean _regression;
/** Returns the best split for a given column */
protected abstract Split ltSplit(int colIndex, Data d, int[] dist, int distWeight, Random rand);
protected abstract Split eqSplit(int colIndex, Data d, int[] dist, int distWeight, Random rand);
protected abstract Split ltSplit(int colIndex, Data d, float[] dist, float distWeight, Random rand);
protected abstract Split eqSplit(int colIndex, Data d, float[] dist, float distWeight, Random rand);
/** Split descriptor for a particular column.
* Holds the column name and the split point, which is the last column class
* that will go to the left tree. If the column index is -1 then the split
* value indicates the return value of the node.
*/
static class Split {
final int _column, _split;
final float _splitReg;
final double _fitness;
Split(int column, int split, double fitness) {
_column = column; _split = split; _fitness = fitness; _splitReg = -1.f;
}
/** A constant split used for true leaf nodes where all rows are of the same class. */
static Split constant(int result) { return new Split(-1, result, -1); }
/** An impossible split, which behaves like a constant split. However, an impossible
* split occurs when different row classes are present but they all have
* the same column value and therefore no split can be made.
*/
static Split impossible(int result) { return new Split(-2, result, -1); }
/** Classic split. All lower or equal than split value go left, all greater go right. */
static Split split(int column, int split, double fitness) { return new Split(column, split,fitness); }
/** Return an impossible split that has the best fitness */
static Split defaultSplit() { return new Split(-2,0,-Double.MAX_VALUE); }
/** Exclusion split. All equal to split value go left, all different go right. */
static Split exclusion(int column, int split, double fitness) { return new ExclusionSplit(column,split,fitness); }
final boolean isLeafNode() { return _column < 0; }
final boolean isConstant() { return _column == -1; }
final boolean isImpossible() { return _column == -2; }
final boolean betterThan(Split other) { return _fitness > other._fitness; }
final boolean isExclusion() { return this instanceof ExclusionSplit; }
}
/** An exclusion split. */
static class ExclusionSplit extends Split {
ExclusionSplit(int column, int split, double fitness) { super(column, split,fitness); }
}
/** Aggregates the given column's distribution to the provided array and
* returns the sum of weights of that array. */
private int aggregateColumn(int colIndex, int[] dist) {
int sum = 0;
for (int j = 0; j < _columnDists[colIndex].length; ++j) {
for (int i = 0; i < dist.length; ++i) {
int tmp = _columnDists[colIndex][j][i];
sum += tmp;
dist[i] += tmp;
}
}
return sum;
}
/** Sum up the target responses and return that value (this will be the unweighted "mean").
*
* @param colIndex: The column we're summing over
* @param dist: The *raw* response value for each bin.
* @return The unweighted mean.
*/
private float aggregateColumn(int colIndex, float[] dist) {
float sum = 0.f;
for (int j = 0; j < _columnDistsRegression[colIndex].length; ++j) {
for (int i = 0; i < dist.length; ++i) {
float tmp = _columnDistsRegression[colIndex][j][i];
sum += tmp;
dist[i] += tmp;
}
}
return sum;
}
Statistic(Data data, int featuresPerSplit, long seed, int exclusiveSplitLimit, boolean regression) {
_columnDistsRegression = new int[data.columns() - 1][][];
_columnDists = new int[data.columns()-1][][];
_regression = regression;
if (!regression) {
_random = Utils.getRNG(seed);
// first create the column distributions
for (int i = 0; i < _columnDists.length; ++i)
if (!data.isIgnored(i))
_columnDists[i] = new int[data.columnArity(i)+1][data.classes()];
// create the columns themselves
_features = new int[featuresPerSplit];
_remembered = null;
_classWt = data.classWt(); // Class weights
_exclusiveSplitLimit = exclusiveSplitLimit;
} else {
_random = Utils.getRNG(seed);
for (int i = 0; i < _columnDistsRegression.length; ++i)
if(!data.isIgnored(i)) {
DataAdapter.Col c = data._dapt._c[i];
int colBins = c._isByte ? Utils.maxValue(c._rawB) : c._binned.length;
_columnDistsRegression[i] = new int[colBins + 1][ data.columnArityOfClassCol()];
}
_features = new int[featuresPerSplit];
_remembered = null;
_classWt = data.classWt();
_exclusiveSplitLimit = exclusiveSplitLimit;
}
}
/** Remember features used for this split so we can grab different features
* and avoid these useless ones. Returns false if no more features are left. */
boolean rememberFeatures(Data data) {
if( _remembered == null ) _remembered = new HashSet<Integer>();
for(int f : _features) if ( f != -1 ) _remembered.add(f);
for(int i=0;i<data.columns()-1;i++) if(isColumnUsable(data,i)) return true;
return false;
}
/**We are done with this particular split and can forget the features we have
* used to compute it.*/
void forgetFeatures() { _remembered = null; }
/**Features can be used in a split if they are not already used. */
private boolean isColumnUsable(Data d, int i) {
assert i < d.columns()-1; // Last column is class
return !d.isIgnored(i) && (_remembered == null || !_remembered.contains(i)) && d.colMaxIdx(i) != d.colMinIdx(i);
}
/** Resets the statistic for the next split. Pick a subset of the features and zero out
* distributions. Implementation uses reservoir sampling (http://en.wikipedia.org/wiki/Reservoir_sampling)
* to select features. Features that (a) have been marked as ignore, (b) that have already been
* tried at this split, (c) the class feature, will not be selected. */
void reset(Data data, long seed, boolean regression) {
if (!regression) {
_random = Utils.getRNG(_seed = seed);
int i = 0, j = 0, featuresPerSplit = _features.length;
Arrays.fill(_features, -1);
for( ; j < featuresPerSplit && i < data.columns()-1; i++) if (isColumnUsable(data, i)) _features[j++] = i;
for( ; i < data.columns()-1; i++ ) {
if( !isColumnUsable(data, i) ) continue;
int k = _random.nextInt(j+1); // Reservoir sampling: take a random number in the interval [0,index] (inclusive)
if( k < featuresPerSplit ) _features[k] = i;
j++;
}
for( int f : _features) if (f != -1) for( int[] d: _columnDists[f]) Arrays.fill(d,0); // reset the column distributions
} else {
_random = Utils.getRNG(_seed = seed);
int i = 0, j = 0, featuresPerSplit = _features.length;
Arrays.fill(_features, -1);
for( ; j < featuresPerSplit && i < data.columns()-1; i++) if (isColumnUsable(data, i)) _features[j++] = i;
for( ; i < data.columns()-1; i++ ) {
if( !isColumnUsable(data, i) ) continue;
int k = _random.nextInt(j+1); // Reservoir sampling: take a random number in the interval [0,index] (inclusive)
if( k < featuresPerSplit ) _features[k] = i;
j++;
}
for( int f : _features) if (f != -1) for( int[] d: _columnDistsRegression[f]) Arrays.fill(d,0); // reset the column distributions
}
}
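// Reservoir-sampling sketch for the selection above (hypothetical sizes): with
// featuresPerSplit = 3 and usable columns {0,1,2,3,4,5}, the first three usable
// columns fill _features; each later usable column then replaces a random slot with
// probability featuresPerSplit/(j+1), so every usable column is selected with equal
// probability.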
/** Adds the given row to the statistic. Updates the column distributions for
* the analyzed columns. */
void addQ(Row row, boolean regression) {
final int cls = row.classOf(); //regression ? -1 : row.classOf();
for (int f : _features)
if ( f != -1) {
if (row.isValid() && row.hasValidValue(f)) {
if (!regression) {
short val = row.getEncodedColumnValue(f);
_columnDists[f][val][cls]++;
} else {
short val = row.getEncodedColumnValue(f);
if (val == DataAdapter.BAD) continue;
int resp = row.getEncodedClassColumnValue();
if (resp == DataAdapter.BAD) continue;
// short val2 = row.getEncodedClassColumnValue();
_columnDistsRegression[f][val][resp]++; // = row.getRawClassColumnValueFromBin();
}
}
}
}
/** Adds the given row to the statistic. Updates the column distributions for
* the analyzed columns. This version knows the row is always valid (always
* has a valid class), and is hand-inlined. */
// void addQValid( final int cls, final int ridx, final DataAdapter.Col cs[]) {
// for (int f : _features) {
// if (f == -1) break;
// short[] bins = cs[f]._binned; // null if byte col, otherwise bin#
// int val;
// if (bins != null) { // binned?
// val = bins[ridx]; // Grab bin#
// if (val == DataAdapter.BAD) continue; // ignore bad rows
// } else { // not binned?
// val = (0xFF & cs[f]._rawB[ridx]); // raw byte value, has no bad rows
// }
// _columnDists[f][val][cls]++;
// }
// }
/** Apply any class weights to the distributions.*/
void applyClassWeights() {
if( _classWt == null ) return;
if (_regression) return;
for( int f : _features ) // For all columns, get the distribution
if ( f != -1)
for( int[] clss : _columnDists[f] ) // For all distributions, get the class distribution
for( int cls=0; cls<clss.length; cls++ )
clss[cls] = (int)(clss[cls]*_classWt[cls]); // Scale by the class weights
}
/** Calculates the best split and returns it. The split can either be an ordinary
* split, where all rows with a column value smaller than or equal to the split
* value go to the left and all greater values go to the right,
* or an exclusion split, where all rows with a column value equal to the
* split value go to the left and all others go to the right.
*/
Split split(Data d, boolean expectLeaf) {
if(!_regression) {
int[] dist = new int[d.classes()];
boolean valid = false;
for(int f : _features) valid |= f != -1;
if (!valid) return Split.defaultSplit(); // there are no features left...
int distWeight = aggregateColumn(_features[0], dist); // initialize the distribution array
int m = Utils.maxIndex(dist, _random);
if( expectLeaf || (dist[m] == distWeight )) return Split.constant(m); // check if we are leaf node
Split bestSplit = Split.defaultSplit();
for( int f : _features ) { // try the splits
if ( f == -1 ) continue;
Split s = pickAndSplit(d,f, dist, distWeight, _random);
if( s.betterThan(bestSplit) ) bestSplit = s;
}
if( !bestSplit.isImpossible() ) return bestSplit;
if( !rememberFeatures(d) ) return bestSplit; // Enough features to try again?
reset(d,_seed+(1L<<16), _regression); // Reset with new features
for(Row r: d) addQ(r, _regression); // Reload the distributions
applyClassWeights(); // Weight the distributions
return split(d,expectLeaf);
} else {
float[] dist = new float[d.columnArityOfClassCol()];
boolean valid = false;
for(int f: _features) valid |= f != -1;
if(!valid) return Split.defaultSplit();
float unweightedMean = aggregateColumn(_features[0], dist);
int m = Utils.maxIndex(dist, _random);
if(expectLeaf || (dist[m] == unweightedMean)) return Split.constant(m);
Split bestSplit = Split.defaultSplit();
for (int f: _features) {
if (f == -1) continue;
Split s = pickAndSplit(d,f,dist,unweightedMean,_random);
if (s.betterThan(bestSplit)) bestSplit = s;
}
if (!bestSplit.isImpossible()) return bestSplit;
if (!rememberFeatures(d)) return bestSplit;
reset(d, _seed+(1L<<16), _regression);
for(Row r: d) addQ(r,_regression);
return split(d, expectLeaf);
}
}
private Split pickAndSplit(Data d, int col, int[] dist, int distWeight, Random rand) {
boolean isBool = d.columnArity(col) == 1; // API quirk: an arity of 1 means a two-level (boolean) column.
boolean isBig = d.columnArity(col) > _exclusiveSplitLimit;
boolean isFloat = d.isFloat(col);
if (isBool) return eqSplit(col,d,dist,distWeight,_random);
else if (isBig || isFloat) return ltSplit(col,d, dist, distWeight, _random);
else {
Split s1 = eqSplit(col,d,dist,distWeight,_random);
if (s1.isImpossible()) return s1;
Split s2 = ltSplit(col,d, dist, distWeight, _random);
return s1.betterThan(s2) ? s1 : s2;
}
}
private Split pickAndSplit(Data d, int col, float[] dist, float distWeight, Random rand) {
boolean isBool = d.columnArity(col) == 1; // API quirk: an arity of 1 means a two-level (boolean) column.
boolean isBig = d.columnArity(col) > _exclusiveSplitLimit;
boolean isFloat = d.isFloat(col);
if (isBool) return eqSplit(col,d,dist,distWeight,_random);
else if (isBig || isFloat) return ltSplit(col,d, dist, distWeight, _random);
else {
Split s1 = eqSplit(col,d,dist,distWeight,_random);
if (s1.isImpossible()) return s1;
Split s2 = ltSplit(col,d, dist, distWeight, _random);
return s1.betterThan(s2) ? s1 : s2;
}
}
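// Split-strategy selection used by both pickAndSplit overloads above: boolean columns
// (arity 1, i.e. two levels) get only an equality split, float or high-arity columns
// (above _exclusiveSplitLimit) get only a less-than split, and everything else tries
// both and keeps the split with the better fitness.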
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/Tree.java
|
package hex.singlenoderf;
import hex.gbm.DTree.TreeModel;
import hex.singlenoderf.Data.Row;
import hex.singlenoderf.Tree.SplitNode.SplitInfo;
import jsr166y.CountedCompleter;
import jsr166y.RecursiveTask;
import org.apache.commons.lang.NotImplementedException;
import water.*;
import water.H2O.H2OCountedCompleter;
import water.fvec.Chunk;
import water.util.Log;
import water.util.SB;
import water.util.Utils;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class Tree extends H2OCountedCompleter {
static public enum SelectStatType {ENTROPY, GINI};
static public enum StatType { ENTROPY, GINI, MSE};
/** Left and right seed initializer number for statistics */
public static final long LTSS_INIT = 0xe779aef0a6fd0c16L;
public static final long RTSS_INIT = 0x5e63c6377e5297a7L;
/** Left and right seed initializer number for subtrees */
public static final long RTS_INIT = 0xa7a34721109d3708L;
public static final long LTS_INIT = 0x264ccf88cf4dec32L;
/** If the number of rows is higher than the given number, fork-join is used to build
* subtrees, else subtrees are built sequentially
*/
public static final int ROWS_FORK_TRESHOLD = 1<<11;
final Key _jobKey; // DRF job building this tree
final Key _modelKey; // A model key of the forest
final StatType _type; // Flavor of split logic
final Data _data; // Data source
final hex.singlenoderf.Sampling _sampler; // Sampling strategy
final int _data_id; // Data-subset identifier (so trees built on this subset are not validated on it)
final int _maxDepth; // Tree-depth cutoff
final int _numSplitFeatures; // Number of features to check at each splitting (~ split features)
INode _tree; // Root of decision tree
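// Per-thread split statistics for the left (index 0) and right (index 1) side of a split; see getStatistic().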
ThreadLocal<hex.singlenoderf.Statistic>[] _stats = new ThreadLocal[2];
final long _seed; // Pseudo random seed: used to playback sampling
int _exclusiveSplitLimit;
int _verbose;
final byte _producerId; // Id of node producing this tree
final boolean _regression; // If true, will build regression tree.
boolean _local_mode;
boolean _score_pojo;
public TreeModel.CompressedTree compressedTree;
/**
* Constructor used to define the specs when building the tree from the top.
*/
public Tree(final Key jobKey, final Key modelKey, final Data data, byte producerId, int maxDepth, StatType stat,
int numSplitFeatures, long seed, int treeId, int exclusiveSplitLimit,
final hex.singlenoderf.Sampling sampler, int verbose, boolean regression, boolean local_mode, boolean score_pojo) {
_jobKey = jobKey;
_modelKey = modelKey;
_data = data;
_type = stat;
_data_id = treeId;
_maxDepth = maxDepth-1;
_numSplitFeatures = numSplitFeatures;
_seed = seed;
_sampler = sampler;
_exclusiveSplitLimit = exclusiveSplitLimit;
_verbose = verbose;
_producerId = producerId;
_regression = regression;
_local_mode = local_mode;
_score_pojo = score_pojo;
}
// Oops, uncaught exception
@Override public boolean onExceptionalCompletion( Throwable ex, CountedCompleter cc) {
// ex.printStackTrace();
return true;
}
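/** Lazily creates (and caches in a per-thread ThreadLocal) the split statistic for the given side
* (index 0 = left, 1 = right), then resets it with fresh features for the supplied data and seed. */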
private hex.singlenoderf.Statistic getStatistic(int index, Data data, long seed, int exclusiveSplitLimit) {
hex.singlenoderf.Statistic result = _stats[index].get();
if (_type == StatType.MSE) {
if(!_regression) {
throw H2O.unimpl();
}
if (result == null) {
result = new MSEStatistic(data, _numSplitFeatures, _seed, exclusiveSplitLimit);
_stats[index].set(result);
}
result.forgetFeatures();
} else {
if( result==null ) {
result = _type == StatType.GINI ?
new GiniStatistic(data,_numSplitFeatures, _seed, exclusiveSplitLimit) :
new EntropyStatistic(data,_numSplitFeatures, _seed, exclusiveSplitLimit);
_stats[index].set(result);
}
}
result.forgetFeatures(); // All new features
result.reset(data, seed, _regression);
return result;
}
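/** Builds a human-readable per-column summary of the splits used in this tree: for every column,
* how many times it was used and, for each distinct split value, how many leaves it affects. */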
private StringBuffer computeStatistics() {
StringBuffer sb = new StringBuffer();
ArrayList<SplitInfo>[] stats = new ArrayList[_data.columns()];
for (int i = 0; i < _data.columns()-1; i++) stats[i] = new ArrayList<SplitInfo>();
_tree.computeStats(stats);
for (int i = 0; i < _data.columns()-1; i++) {
String colname = _data.colName(i);
ArrayList<SplitInfo> colSplitStats = stats[i];
Collections.sort(colSplitStats);
int usage = 0;
for (SplitInfo si : colSplitStats) {
usage += si._used;
}
sb.append(colname).append(':').append(usage).append("x");
for (SplitInfo si : colSplitStats) {
sb.append(", <=").append(Utils.p2d(si.splitNode().split_value())).append('{').append(si.affectedLeaves()).append("}x"+si._used+" ");
}
sb.append('\n');
}
return sb;
}
// Actually build the tree
@Override public void compute2() {
if(Job.isRunning(_jobKey)) {
Timer timer = new Timer();
_stats[0] = new ThreadLocal<hex.singlenoderf.Statistic>();
_stats[1] = new ThreadLocal<hex.singlenoderf.Statistic>();
Data d = _sampler.sample(_data, _seed, _modelKey, _local_mode);
hex.singlenoderf.Statistic left = getStatistic(0, d, _seed, _exclusiveSplitLimit);
// calculate the split
for( Row r : d ) left.addQ(r, _regression);
if (!_regression)
left.applyClassWeights(); // Weight the distributions
hex.singlenoderf.Statistic.Split spl = left.split(d, false);
if(spl.isLeafNode()) {
if(_regression) {
float av = d.computeAverage();
_tree = new LeafNode(-1, d.rows(), av);
} else {
_tree = new LeafNode(_data.unmapClass(spl._split), d.rows(),-1);
}
} else {
_tree = new FJBuild (spl, d, 0, _seed).compute();
}
_stats = null; // GC
if(_jobKey != null && !Job.isRunning(_jobKey)) throw new Job.JobCancelledException();
// Atomically improve the Model as well
Key tkey = toKey();
Key dtreeKey = null;
if (_score_pojo) dtreeKey = toCompressedKey();
appendKey(_modelKey, tkey, dtreeKey, _verbose > 10 ? _tree.toString(new StringBuilder(""), Integer.MAX_VALUE).toString() : "", _data_id);
// appendKey(_modelKey, tkey, _verbose > 10 ? _tree.toString(new StringBuilder(""), Integer.MAX_VALUE).toString() : "", _data_id);
StringBuilder sb = new StringBuilder("[RF] Tree : ").append(_data_id+1);
sb.append(" d=").append(_tree.depth()).append(" leaves=").append(_tree.leaves()).append(" done in ").append(timer).append('\n');
Log.info(sb.toString());
if (_verbose > 10) {
// Log.info(Sys.RANDF, _tree.toString(sb, Integer.MAX_VALUE).toString());
// Log.info(Sys.RANDF, _tree.toJava(sb, Integer.MAX_VALUE).toString());
}
} else throw new Job.JobCancelledException();
// Wait for completion
tryComplete();
}
// Stupid static method to make a static anonymous inner class
// which serializes "for free".
static void appendKey(Key model, final Key tKey, final Key dtKey, final String tString, final int tree_id) {
final int selfIdx = H2O.SELF.index();
new TAtomic<SpeeDRFModel>() {
@Override public SpeeDRFModel atomic(SpeeDRFModel old) {
if(old == null) return null;
return SpeeDRFModel.make(old, tKey, dtKey, selfIdx, tString, tree_id);
}
}.invoke(model);
}
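/* Serialized (uncompressed) tree layout, as written by toKey()/INode.write():
* 4-byte tree id, 8-byte seed, 1-byte producer id, then a pre-order stream of nodes.
* 'S'/'E' nodes: 1-byte tag, 2-byte column, 4-byte float split value, 1-byte skip over the left
* subtree (or 0 followed by a 3-byte skip); '[' leaves: 1-byte tag followed by a 1-byte class
* (or a 4-byte float for regression). deserialize() below renders this stream as a string. */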
public static String deserialize(byte[] bytes) {
AutoBuffer ab = new AutoBuffer(bytes);
SB sb = new SB();
// skip meta data of the tree
ab.get4(); // Skip tree-id
ab.get8(); // Skip seed
ab.get1(); // Skip producer id
int cap = 0;
String abString = ab.toString();
Pattern pattern = Pattern.compile("<= .* <= (.*?) <=");
Matcher matcher = pattern.matcher(abString);
if (matcher.find()) {
// System.out.println(matcher.group(1));
cap = Integer.valueOf(matcher.group(1));
}
// skip meta data of the tree
ab.get4(); // Skip tree-id
ab.get8(); // Skip seed
ab.get1(); // Skip producer id
while (ab.position() < cap) {
byte currentNodeType = (byte) ab.get1();
if (currentNodeType == 'S') {
int _col = ab.get2();
float splitValue = ab.get4f();
sb.p("C").p(_col).p(" <= ").p(splitValue).p("(");
} else if (currentNodeType == '[') {
int cls = ab.get1();
sb.p("["+cls+"]");
}
}
return sb.toString();
}
private class FJBuild extends RecursiveTask<INode> {
final hex.singlenoderf.Statistic.Split _split;
final Data _data;
final int _depth;
final long _seed;
FJBuild(hex.singlenoderf.Statistic.Split split, Data data, int depth, long seed) {
_split = split; _data = data; _depth = depth; _seed = seed;
}
@Override public INode compute() {
hex.singlenoderf.Statistic left = getStatistic(0,_data, _seed + LTSS_INIT, _exclusiveSplitLimit); // first get the statistics
hex.singlenoderf.Statistic rite = getStatistic(1,_data, _seed + RTSS_INIT, _exclusiveSplitLimit);
if(_jobKey != null && !Job.isRunning(_jobKey)) throw new Job.JobCancelledException();
Data[] res = new Data[2]; // create the data, node and filter the data
int c = _split._column, s = _split._split;
assert c != _data.columns()-1; // Last column is the class column
SplitNode nd = _split.isExclusion() ?
new ExclusionNode(c, s, _data.colName(c), _data.unmap(c,s)) :
new SplitNode (c, s, _data.colName(c), _data.unmap(c,s));
_data.filter(nd,res,left,rite);
FJBuild fj0 = null, fj1 = null;
hex.singlenoderf.Statistic.Split ls = left.split(res[0], _depth >= _maxDepth); // get the splits
hex.singlenoderf.Statistic.Split rs = rite.split(res[1], _depth >= _maxDepth);
if(_jobKey != null && !Job.isRunning(_jobKey)) throw new Job.JobCancelledException();
if (ls.isLeafNode() || ls.isImpossible()) {
if (_regression) {
float av = res[0].computeAverage();
nd._l = new LeafNode(-1, res[0].rows(), av);
} else {
nd._l = new LeafNode(_data.unmapClass(ls._split), res[0].rows(),-1); // create leaf nodes if any
}
}
else fj0 = new FJBuild(ls,res[0],_depth+1, _seed + LTS_INIT);
if (rs.isLeafNode() || rs.isImpossible()) {
if (_regression) {
float av = res[1].computeAverage();
nd._r = new LeafNode(-1, res[1].rows(), av);
} else {
nd._r = new LeafNode(_data.unmapClass(rs._split), res[1].rows(),-1);
}
}
else fj1 = new FJBuild(rs,res[1],_depth+1, _seed - RTS_INIT);
// Recursively build the splits, in parallel
if (_data.rows() > ROWS_FORK_TRESHOLD) {
if( fj0 != null && (fj1!=null ) ) fj0.fork();
if( fj1 != null ) nd._r = fj1.compute();
if( fj0 != null ) nd._l = (fj1!=null ) ? fj0.join() : fj0.compute();
} else {
if( fj1 != null ) nd._r = fj1.compute();
if( fj0 != null ) nd._l = fj0.compute();
}
/* Degenerate trees such as the following can occur when an impossible split was found.
y<=1.1 This is unusual enough to ignore.
/ \
y<=1.0 0
/ \
1 1 */
// FIXME there is still issue with redundant trees!
return nd;
}
}
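/** A node of the (uncompressed) forest tree: either an interior SplitNode/ExclusionNode or a LeafNode.
* Supports classification of a Row, serialization via write(), and conversion into the compressed
* DTree format via compress()/dtreeSize(). */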
public static abstract class INode {
abstract float classify(Row r);
abstract int depth(); // Depth of deepest leaf
abstract int leaves(); // Number of leaves
abstract void computeStats(ArrayList<SplitInfo>[] stats);
abstract StringBuilder toString( StringBuilder sb, int len );
final boolean isLeaf() { return depth() == 0; }
abstract StringBuilder toJava( StringBuilder sb, int len, int... depth );
public abstract void print(TreePrinter treePrinter) throws IOException;
abstract void write( AutoBuffer bs );
int _size; // Byte-size in serialized form
final int size( ) { return _size==0 ? (_size=size_impl()) : _size; }
abstract int size_impl();
AutoBuffer compress(AutoBuffer ab) { return ab;}
int dtreeSize() {return -1;}
}
/** Leaf node that, for any row, returns the data class it belongs to. */
static class LeafNode extends INode {
final float _c; // The continuous response
final int _class; // A category reported by the inner node
final int _rows; // A number of classified rows (only meaningful for training)
/**
* Construct a new leaf node.
* @param c - a particular value of the class predictor from the interval [0,N-1],
* or possibly -1 for regression
* @param rows - number of rows with the predictor value
* @param r - a continuous response (used for regression)
*/
LeafNode(int c, int rows, float r) {
assert -1 <= c && c <= 254; // sanity check
_class = c; // Class from 0 to _N-1
_rows = rows;
_c = r;
}
@Override public int depth() { return 0; }
@Override public int leaves() { return 1; }
@Override public void computeStats(ArrayList<SplitInfo>[] stats) { /* do nothing for leaves */ }
@Override public float classify(Row r) { if (_class == -1) { return _c; } else return (float)_class; }
@Override public StringBuilder toString(StringBuilder sb, int n ) { return sb.append('[').append(_class).append(']').append('{').append(_rows).append('}'); }
@Override public StringBuilder toJava(StringBuilder sb, int n, int... depth ) { return sb.append(_class).append(' '); }
@Override public void print(TreePrinter p) throws IOException { p.printNode(this); }
@Override void write( AutoBuffer bs ) {
bs.put1('['); // Leaf indicator
if (_class == -1) {
bs.put4f(_c);
} else {
bs.put1(_class);
}
}
@Override int size_impl( ) {
if (_class == -1) {
return 5;
}
return 2; } // 2 bytes in serialized form
@Override AutoBuffer compress(AutoBuffer ab) {
assert !Float.isNaN(classify( null ));
// a little hacky here
return ab.put4f(classify( null ));
}
@Override int dtreeSize() { return 4; }
}
/** Interior split node (less-than-or-equal split): rows with encoded column value <= split go left, the rest go right. */
static class SplitNode extends INode {
final int _column;
final int _split;
INode _l, _r;
int _depth, _leaves, _size;
String _name;
float _originalSplit;
public SplitNode(int column, int split, String columnName, float originalSplit) {
_name = columnName;
_column = column;
_split = split;
_originalSplit = originalSplit;
}
static class SplitInfo implements Comparable<SplitInfo> {
/** First node which introduces this split. */
final SplitNode _splitNode;
int _affectedLeaves;
int _used;
SplitInfo (SplitNode splitNode, int affectedLeaves) { _splitNode = splitNode; _affectedLeaves = affectedLeaves; _used = 1; }
final SplitNode splitNode() { return _splitNode; }
final int affectedLeaves() { return _affectedLeaves; }
static SplitInfo info(SplitNode splitNode, int leavesAffected) {
return new SplitInfo(splitNode, leavesAffected);
}
@Override public int compareTo(SplitInfo o) {
if (o._affectedLeaves == _affectedLeaves) return 0;
else if (_affectedLeaves < o._affectedLeaves) return 1;
else return -1;
}
}
@Override float classify(Row r) { return r.getEncodedColumnValue(_column) <= _split ? _l.classify(r) : _r.classify(r); }
@Override public int depth() { return _depth != 0 ? _depth : (_depth = Math.max(_l.depth(), _r.depth()) + 1); }
@Override public int leaves() { return _leaves != 0 ? _leaves : (_leaves=_l.leaves() + _r.leaves()); }
@Override void computeStats(ArrayList<SplitInfo>[] stats) {
SplitInfo splitInfo = null;
// Find the same split
for (SplitInfo si : stats[_column]) {
if (si.splitNode()._split == _split) {
splitInfo = si;
break;
}
}
if (splitInfo == null) {
stats[_column].add(SplitInfo.info(this, _leaves));
} else {
splitInfo._affectedLeaves += leaves();
splitInfo._used += 1;
}
_l.computeStats(stats);
_r.computeStats(stats);
}
// Computes the original split-value, as a float. Returns a float to keep
// the final size small for giant trees.
protected final float split_value() { return _originalSplit; }
@Override public void print(TreePrinter p) throws IOException { p.printNode(this); }
@Override public String toString() { return "S "+_column +"<=" + _originalSplit + " ("+_l+","+_r+")"; }
@Override public StringBuilder toString( StringBuilder sb, int n ) {
sb.append(_name).append("<=").append(Utils.p2d(split_value())).append('@').append(leaves()).append(" (");
if( sb.length() > n ) return sb;
sb = _l.toString(sb,n).append(',');
if( sb.length() > n ) return sb;
sb = _r.toString(sb,n).append(')');
return sb;
}
@Override public StringBuilder toJava( StringBuilder sb, int n, int... depth) {
int d = 0;
if (depth.length==0) {
//root
sb.append(" static final float predict(double[] data) {\n" +
" float pred = ");
} else {d = depth[0];}
// d is the distance from the node to its root.
sb.append("Double.isNaN(data["+Integer.toString(_column)+"]) || (float) data["+Integer.toString(_column)+"] /* "+_name+"*/ ").append("<= ").append(Utils.p2d(split_value())).append("\n");
if( sb.length() > n ) return sb;
for (int i = -3 ; i < d ; i++) {sb.append(" ");}
sb.append(" ? ");
sb = _l.toJava(sb,n,d+1).append("\n");
if( sb.length() > n ) return sb;
for (int i = -3 ; i < d ; i++) {sb.append(" ");}
sb.append(" : ");
sb = _r.toJava(sb,n,d+1);
return sb;
}
@Override void write( AutoBuffer bs ) {
bs.put1('S'); // Node indicator
assert Short.MIN_VALUE <= _column && _column < Short.MAX_VALUE;
bs.put2((short) _column);
bs.put4f(split_value());
int skip = _l.size(); // Drop down the amount to skip over the left column
if( skip <= 254 ) bs.put1(skip);
else { bs.put1(0);
if (! ((-1<<24) <= skip && skip < (1<<24))) throw H2O.fail("Trees have grown too deep. Use BigData RF or limit the tree depth for your model. For more information, contact support: support@0xdata.com");
bs.put3(skip);
}
_l.write(bs);
_r.write(bs);
}
@Override public int size_impl( ) {
// Size is: 1 byte indicator, 2 bytes col, 4 bytes val, the skip, then left, right
return _size=(1+2+4+(( _l.size() <= 254 ) ? 1 : 4)+_l.size()+_r.size());
}
public boolean isIn(final Row row) { return row.getEncodedColumnValue(_column) <= _split; }
public final boolean canDecideAbout(final Row row) { return row.hasValidValue(_column); }
@Override public int dtreeSize() {
int result = 1+2+4;
int skip = _l.dtreeSize();
result += skip;
result += _r.dtreeSize();
if ( _l instanceof LeafNode) { skip=0;}
else {
if (skip < 256) skip=1;
else if (skip < 65535) skip=2;
else if (skip < (1<<24)) skip=3;
else skip=4;
}
result += skip;
return result;
}
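/* Compressed (DTree-format) node: a node-type byte, 2-byte column, 4-byte float split value and,
* unless the left child is a leaf, a skip over the left subtree. As set by this encoder, bit 0x04
* marks an equality (exclusion) split, bits 0x30 are set when the left child is a leaf, bits
* 0x01-0x03 encode the byte width of the left-subtree skip, and bits 0xC0 are set when the right
* child is a leaf. */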
@Override AutoBuffer compress(AutoBuffer ab) {
int pos = ab.position();
int size = 7;
byte _nodeType=0;
// left child type
if (_l instanceof LeafNode) _nodeType |= 0x30; // 00110000 = 0x30
int leftSize = _l.dtreeSize(); // size of the left child
size += leftSize;
if (leftSize < 256) _nodeType |= 0x00;
else if (leftSize < 65535) _nodeType |= 0x01;
else if (leftSize < (1<<24)) _nodeType |= 0x02;
else _nodeType |= 0x03;
// right child type
if (_r instanceof LeafNode) _nodeType |= 0xC0; // 11000000 = 0xC0
ab.put1(_nodeType);
assert _column != -1;
ab.put2((short)_column);
ab.put4f(_originalSplit); // assuming we only have _equal == 0 or 1 which is binary split
if( _l instanceof LeafNode ) { /* don't have skip size if left child is leaf.*/}
else {
if(leftSize < 256) {ab.put1( leftSize); size += 1;}
else if (leftSize < 65535) {ab.put2((short)leftSize); size += 2;}
else if (leftSize < (1<<24)) {ab.put3( leftSize); size += 3;}
else {ab.put4( leftSize); size += 4;}// 1<<31-1
}
size += _r.dtreeSize();
_l.compress(ab);
_r.compress(ab);
assert size == ab.position()-pos:"reported size = " + size + " , real size = " + (ab.position()-pos);
return ab;
}
}
/** Node that classifies one column category to the left and the others to the right. */
static class ExclusionNode extends SplitNode {
public ExclusionNode(int column, int val, String cname, float origSplit) { super(column,val,cname,origSplit); }
@Override float classify(Row r) { return r.getEncodedColumnValue(_column) == _split ? _l.classify(r) : _r.classify(r); }
@Override public void print(TreePrinter p) throws IOException { p.printNode(this); }
@Override public String toString() { return "E "+_column +"==" + _split + " ("+_l+","+_r+")"; }
@Override public StringBuilder toString( StringBuilder sb, int n ) {
sb.append(_name).append("==").append(_split).append('@').append(leaves()).append(" (");
if( sb.length() > n ) return sb;
sb = _l.toString(sb,n).append(',');
if( sb.length() > n ) return sb;
sb = _r.toString(sb,n).append(')');
return sb;
}
public int size_impl( ) {
// Size is: 1 byte indicator, 2 bytes col, 4 bytes val, the skip, then left, right
return _size=(1+2+4+(( _l.size() <= 254 ) ? 1 : 4)+_l.size()+_r.size());
}
@Override void write( AutoBuffer bs ) {
bs.put1('E'); // Node indicator
assert Short.MIN_VALUE <= _column && _column < Short.MAX_VALUE;
bs.put2((short)_column);
bs.put4f(split_value());
int skip = _l.size(); // Drop down the amount to skip over the left column
if( skip <= 254 ) bs.put1(skip);
else { bs.put1(0); bs.put3(skip); }
_l.write(bs);
_r.write(bs);
}
public boolean isIn(Row row) { return row.getEncodedColumnValue(_column) == _split; }
@Override AutoBuffer compress(AutoBuffer ab) {
int pos = ab.position();
int size = 7;
byte _nodeType= 0x04; // 00000100
// left child type
if (_l instanceof LeafNode) _nodeType |= 0x30; // 00110000 = 0x30
int leftSize = _l.dtreeSize(); // size of the left child
size += leftSize;
if (leftSize < 256) _nodeType |= 0x00;
else if (leftSize < 65535) _nodeType |= 0x01;
else if (leftSize < (1<<24)) _nodeType |= 0x02;
else _nodeType |= 0x03;
// right child type
if (_r instanceof LeafNode) _nodeType |= 0xC0; // 11000000 = 0xC0
ab.put1(_nodeType);
assert _column != -1;
ab.put2((short)_column);
ab.put4f(_originalSplit); // assuming we only have _equal == 0 or 1 which is binary split
if( _l instanceof LeafNode ) { /* don't have skip size if left child is leaf.*/}
else {
if(leftSize < 256) {ab.put1( leftSize); size += 1;}
else if (leftSize < 65535) {ab.put2((short)leftSize); size += 2;}
else if (leftSize < (1<<24)) {ab.put3( leftSize); size += 3;}
else {ab.put4( leftSize); size += 4;}// 1<<31-1
}
size += _r.dtreeSize();
_l.compress(ab);
_r.compress(ab);
assert size == ab.position()-pos:"reported size = " + size + " , real size = " + (ab.position()-pos);
return ab;
}
@Override public int dtreeSize() {
int result = 1+2+4;
int skip = _l.dtreeSize();
result += skip;
result += _r.dtreeSize();
if ( _l instanceof LeafNode) { skip=0;}
else {
if (skip < 256) skip=1;
else if (skip < 65535) skip=2;
else if (skip < (1<<24)) skip=3;
else skip=4;
}
result += skip;
return result;
}
}
public float classify(Row r) { return _tree.classify(r); }
public String toString() { return _tree.toString(); }
public int leaves() { return _tree.leaves(); }
public int depth() { return _tree.depth(); }
// Write the Tree to a random Key homed here.
public Key toKey() {
AutoBuffer bs = new AutoBuffer();
bs.put4(_data_id);
bs.put8(_seed);
bs.put1(_producerId);
_tree.write(bs);
Key key = Key.make((byte)1,Key.DFJ_INTERNAL_USER, H2O.SELF);
DKV.put(key,new Value(key, bs.buf()));
return key;
}
public Key toCompressedKey() {
AutoBuffer bs = new AutoBuffer();
TreeModel.CompressedTree compressedTree = compress();
Key key = Key.make((byte)1,Key.DFJ_INTERNAL_USER, H2O.SELF);
UKV.put(key, new Value(key, compressedTree));
return key;
}
/** Classify this serialized tree - withOUT inflating it to a full tree.
Uses row 'row' from the pre-fetched chunks 'chks'.
Returns classes from 0 to N-1 (or the regression value). */
public static float classify( AutoBuffer ts, Chunk[] chks, int row, int modelDataMap[], short badData, boolean regression ) {
ts.get4(); // Skip tree-id
ts.get8(); // Skip seed
ts.get1(); // Skip producer id
byte b;
while( (b = (byte) ts.get1()) != '[' ) { // While not a leaf indicator
assert b == '(' || b == 'S' || b == 'E';
int col = modelDataMap[ts.get2()]; // Column number in model-space mapped to data-space
float fcmp = ts.get4f(); // Float to compare against
if( chks[col].isNA0(row) ) return badData;
float fdat = (float)chks[col].at0(row);
int skip = (ts.get1()&0xFF);
if( skip == 0 ) skip = ts.get3();
if (b == 'E') {
if (fdat != fcmp)
ts.position(ts.position() + skip);
} else {
// Picking right subtree? then skip left subtree
if( fdat > fcmp ) ts.position(ts.position() + skip);
}
}
if (regression) {
return ts.get4f();
}
return (float)((short) ( ts.get1()&0xFF )); // Return the leaf's class
}
// Classify on the compressed tree bytes, from the pre-packed double data
public static double classify( AutoBuffer ts, double[] ds, double badat, boolean regression ) {
ts.get4(); // Skip tree-id
ts.get8(); // Skip seed
ts.get1(); // Skip producer id
byte b;
while( (b = (byte) ts.get1()) != '[' ) { // While not a leaf indicator
assert b == '(' || b == 'S' || b == 'E';
int col = ts.get2(); // Column number in model-space
float fcmp = ts.get4f(); // Float to compare against
float fdat = Double.isNaN(ds[col]) ? fcmp - 1 : (float)ds[col];
int skip = (ts.get1()&0xFF);
if( skip == 0 ) skip = ts.get3();
if (b == 'E') {
if (fdat != fcmp)
ts.position(ts.position() + skip);
} else {
// Picking right subtree? then skip left subtree
if( fdat > fcmp ) ts.position(ts.position() + skip);
}
}
if(regression) return ts.get4f();
return ts.get1()&0xFF; // Return the leaf's class
}
public static int dataId( byte[] bits) { return UDP.get4(bits, 0); }
public static long seed ( byte[] bits) { return UDP.get8(bits, 4); }
public static byte producerId( byte[] bits) { return bits[0+4+8]; }
/** Abstract visitor class for serialized trees.*/
public static abstract class TreeVisitor<T extends Exception> {
protected TreeVisitor<T> leaf( int tclass ) throws T { return this; }
protected TreeVisitor<T> pre( int col, float fcmp, int off0, int offl, int offr ) throws T { return this; }
protected TreeVisitor<T> mid( int col, float fcmp ) throws T { return this; }
protected TreeVisitor<T> post( int col, float fcmp ) throws T { return this; }
protected TreeVisitor<T> leafFloat(float fl) throws T { return this; }
long result( ) { return 0; }
protected final AutoBuffer _ts;
protected final boolean _regression;
public TreeVisitor( AutoBuffer tbits, boolean regression ) {
_ts = tbits;
_ts.get4(); // Skip tree ID
_ts.get8(); // Skip seed
_ts.get1(); // Skip producer id
_regression = regression;
}
public final TreeVisitor<T> visit() throws T {
byte b = (byte) _ts.get1();
if( b == '[' ) {
if (_regression) return leafFloat(_ts.get4f());
return leaf(_ts.get1()&0xFF);
}
assert b == '(' || b == 'S' || b =='E' : b;
int off0 = _ts.position()-1; // Offset to start of *this* node
int col = _ts.get2(); // Column number
float fcmp = _ts.get4f(); // Float to compare against
int skip = (_ts.get1()&0xFF);
if( skip == 0 ) skip = _ts.get3();
int offl = _ts.position(); // Offset to start of *left* node
int offr = _ts.position()+skip; // Offset to start of *right* node
return pre(col,fcmp,off0,offl,offr).visit().mid(col,fcmp).visit().post(col,fcmp);
}
}
/** Return (depth<<32)|(leaves), in 1 pass. */
public static long depth_leaves( AutoBuffer tbits, boolean regression ) {
return new TreeVisitor<RuntimeException>(tbits, regression) {
int _maxdepth, _depth, _leaves;
@Override protected TreeVisitor leafFloat(float fl) { _leaves++; if(_depth > _maxdepth) _maxdepth = _depth; return this; }
@Override protected TreeVisitor leaf(int tclass ) { _leaves++; if( _depth > _maxdepth ) _maxdepth = _depth; return this; }
@Override protected TreeVisitor pre (int col, float fcmp, int off0, int offl, int offr ) { _depth++; return this; }
@Override protected TreeVisitor post(int col, float fcmp ) { _depth--; return this; }
@Override long result( ) {return ((long)_maxdepth<<32) | _leaves; }
}.visit().result();
}
// Build a compressed-tree struct
public TreeModel.CompressedTree compress() {
// Log.info(Sys.RANDF, _tree.toString(new StringBuilder(), Integer.MAX_VALUE).toString());
int size = _tree.dtreeSize();
if (_tree instanceof LeafNode) {
size += 3;
}
AutoBuffer ab = new AutoBuffer(size);
if( _tree instanceof LeafNode)
ab.put1(0).put2((char)65535);
_tree.compress(ab);
assert ab.position() == size: "Actual size doesn't agree with calculated size.";
char _nclass = (char)_data.classes();
return new TreeModel.CompressedTree(ab.buf(),_nclass,_seed);
}
public TreeModel.CompressedTree getCompressedTree() {
if (compressedTree!=null) { return compressedTree; }
else { compressedTree = compress(); }
return compressedTree;
}
/**
* @param tree binary form of a singlenoderf.Tree
* @param regression true if the serialized tree is a regression tree
* @return bytes of the corresponding DTree.CompressedTree form
*/
public static byte[] toDTreeCompressedTreeAB(byte[] tree, boolean regression) {
AutoBuffer ab = new AutoBuffer(tree);
return toDTreeCompressedTree(ab, regression).buf();
}
/**
* @param ab AutoBuffer that contains the remaining tree nodes that we want to serialize.
* @param regression true if the serialized tree is a regression tree
* @return binary form of a DTree.CompressedTree as an AutoBuffer
*/
public static AutoBuffer toDTreeCompressedTree(AutoBuffer ab, boolean regression) {
AutoBuffer result = new AutoBuffer();
// get the length of the buffer
int cap=0;
String abString = ab.toString();
Pattern pattern = Pattern.compile("<= .* <= (.*?) <=");
Matcher matcher = pattern.matcher(abString);
if (matcher.find()) {
// System.out.println(matcher.group(1));
cap = Integer.valueOf(matcher.group(1));
}
// skip meta data of the tree
ab.get4(); // Skip tree-id
ab.get8(); // Skip seed
ab.get1(); // Skip producer id
while (ab.position() < cap) {
byte _nodeType = 0;
byte currentNodeType = (byte) ab.get1();
if (currentNodeType == 'S' || currentNodeType == 'E') {
if (currentNodeType == 'E') {
_nodeType |= 0x04; // 00000100
}
int _col = ab.get2();
float splitValue = ab.get4f();
int skipSize = ab.get1();
int skip;
if (skipSize == 0) {
// 4 bytes total
_nodeType |= 0x02; // 3 bytes to store skip
skip = ab.get3();
} else {/* single byte for left size */ skip=skipSize; /* 1 byte to store skip*/}
int currentPosition = ab.position();
byte leftType = (byte) ab.get1();
ab.position(currentPosition+skip); // jump to the right child.
byte rightType = (byte) ab.get1();
ab.position(currentPosition);
if (leftType == '[') { _nodeType |= 0x30; }
if (rightType == '[') { _nodeType |= 0xC0; }
// int leftLeaves = getNumLeaves(ab, skip, regression); // number of left leaves.
int skipModify = getSkip(ab, skip, regression);
skip += skipModify;
if (skip > 255) { _nodeType |= 0x02; }
result.put1(_nodeType);
result.put2((short) _col);
result.put4f(splitValue);
if (skip <= 255) {
if (leftType == 'S' || leftType == 'E') result.put1(skip); // leaf will have no skip size because its size is fixed.
}
else {
result.put3(skip);
}
}
else if (currentNodeType == '[') {
// result.put1(0).put2((short)65535); // if leaf then over look top level
if (regression) { result.put4f(ab.get4f());}
else { result.put4f((float)ab.get1());}
} else { /* running out of the buffer*/ return result;}
}
return result;
}
public static int getNumLeaves(AutoBuffer ab, int leftSize, boolean regression) {
int result = 0;
int startPos = ab.position();
while (ab.position() < startPos + leftSize) {
byte currentNodeType = (byte) ab.get1();
if (currentNodeType == 'S' || currentNodeType == 'E') {
ab.get2(); ab.get4f(); // skip col and split value.
int skipSize = ab.get1();
if (skipSize == 0) { ab.get3();}
} else if (currentNodeType == '[') {
result ++;
if (regression) ab.get4f();
else ab.get1();
} else {
throw new NotImplementedException();
}
}
ab.position(startPos); // return to the original position so the buffer seems untouched.
return result;
}
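/* Scans the left subtree (restoring the buffer position afterwards) and returns the correction to
* apply to the stored skip distance when re-encoding into the DTree format; the formula below,
* 2*numLeaves - numLeftLeaves, is noted by the original author as intended for regression trees. */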
public static int getSkip(AutoBuffer ab, int leftSize, boolean regression) {
int numLeaves = 0;
int numLeftLeaves = 0;
int startPos = ab.position();
boolean prevIsS = false;
while (ab.position() < startPos + leftSize) {
byte currentNodeType = (byte) ab.get1();
if (currentNodeType == 'S' || currentNodeType == 'E') {
ab.get2(); ab.get4f(); // skip col and split value.
int skipSize = ab.get1();
if (skipSize == 0) { ab.get3();}
prevIsS = true;
} else if (currentNodeType == '[') {
numLeaves ++;
if (regression) ab.get4f();
else ab.get1();
if (prevIsS) numLeftLeaves++;
prevIsS = false;
} else {
throw new NotImplementedException();
}
}
ab.position(startPos);
return 2*numLeaves - numLeftLeaves; // only for regression tree.
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/TreePrinter.java
|
package hex.singlenoderf;
import hex.singlenoderf.Tree.ExclusionNode;
import hex.singlenoderf.Tree.LeafNode;
import hex.singlenoderf.Tree.SplitNode;
import java.io.IOException;
public abstract class TreePrinter {
protected final String[] _classNames;
protected final int[] _colMapping;
public TreePrinter(int[] colMapping, String[] classNames) {
_classNames = classNames;
_colMapping = colMapping;
}
abstract void printNode(LeafNode t) throws IOException;
abstract void printNode(SplitNode t) throws IOException;
abstract void printNode(ExclusionNode t) throws IOException;
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/hex
|
java-sources/ai/h2o/h2o-classic/2.8/hex/singlenoderf/VariableImportance.java
|
package hex.singlenoderf;
import hex.ShuffleTask;
//import hex.gbm.DTree.TreeModel.CompressedTree;
import java.util.ArrayList;
//import java.util.Arrays;
import java.util.Random;
import water.AutoBuffer;
import water.Iced;
//import water.Key;
import water.MRTask2;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
//import water.util.ModelUtils;
import water.util.Utils;
/** Scores the given tree model and preserves per-tree errors in the form of votes (for classification)
* or MSE (for regression).
*
* This is different from the Model.score() function since the MR task
* uses an inverted loop: first over all trees, then over all rows in the chunk.
*/
public class VariableImportance extends MRTask2<VariableImportance> {
/* @IN */ final private float _rate;
// /* @IN */ private int _trees; // FIXME: Pass only tree-keys since serialized trees are passed over wire !!!
/* @IN */ final private int _var;
/* @IN */ final private boolean _oob;
/* @IN */ final private int _ncols;
/* @IN */ final private int _nclasses;
/* @IN */ final private boolean _classification;
/* @IN */ final private SpeeDRFModel _model;
/* @IN */ final private int[] _modelDataMap;
/* @IN */ private Frame _data;
/* @IN */ private int _classcol;
/** Computed mapping of model prediction classes to confusion matrix classes */
/* @IN */ private int[] _model_classes_mapping;
/** Computed mapping of data prediction classes to confusion matrix classes */
/* @IN */ private int[] _data_classes_mapping;
/** Difference between model cmin and CM cmin */
/* @IN */ private int _cmin_model_mapping;
/** Difference between data cmin and CM cmin */
/* @IN */ private int _cmin_data_mapping;
/* @IN */ private int _cmin;
/* @INOUT */ private final int _ntrees;
// /* @OUT */ private long [/*ntrees*/] _votes; // Number of correct votes per tree (for classification only)
/* @OUT */ private long [/*ntrees*/] _nrows; // Number of scored row per tree (for classification/regression)
// /* @OUT */ private float[/*ntrees*/] _sse; // Sum of squared errors per tree (for regression only)
/* @OUT */ private long [/*ntrees*/] _votesSOOB;
/* @OUT */ private long [/*ntrees*/] _votesOOB;
/* @OUT */ private long [/*ntrees*/] _voteDiffs;
/* @OUT */ private float _varimp;
/* @OUT */ private float _varimpSD;
/* @OUT */ private int[] _oobs;
private VariableImportance(int trees, int nclasses, int ncols, float rate, int variable, SpeeDRFModel model, Frame fr, Vec resp) {
_ncols = ncols;
_rate = rate; _var = variable;
_oob = true; _ntrees = trees;
_nclasses = nclasses;
_classification = (nclasses>1);
_classcol = fr.numCols() - 1;
_data = fr;
_cmin = (int) resp.min();
_model = model;
_modelDataMap = _model.colMap(_data);
init(resp);
}
private void init(Vec resp) {
Vec respData = _data.vecs()[_classcol];
int model_min = (int) resp.min();
int data_min = (int)respData.min();
if (resp._domain!=null) {
assert respData._domain != null;
_model_classes_mapping = new int[resp._domain.length];
_data_classes_mapping = new int[respData._domain.length];
// compute mapping
alignEnumDomains(resp._domain, respData._domain, _model_classes_mapping, _data_classes_mapping);
} else {
assert respData._domain == null;
_model_classes_mapping = null;
_data_classes_mapping = null;
// compute mapping
_cmin_model_mapping = model_min - Math.min(model_min, data_min);
_cmin_data_mapping = data_min - Math.min(model_min, data_min);
}
}
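/* Permutation variable importance, per chunk: for each tree, replay the sampling RNG to find this
* tree's out-of-bag rows, score them as-is (_votesOOB), then score them again with column _var
* replaced by the value from a shuffled OOB row (_votesSOOB). The per-tree drop in accuracy between
* the two passes is this tree's importance contribution. */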
@Override public void map(Chunk[] chks) {
_votesOOB = new long[_ntrees];
_votesSOOB = new long[_ntrees];
_voteDiffs = new long[_ntrees];
_varimp = 0.f;
_varimpSD = 0.f;
_nrows = new long[_ntrees];
double[] data = new double[_ncols];
float [] preds = new float[_nclasses+1];
final int rows = chks[0]._len;
int _N = _nclasses;
int[] soob = null; // shuffled oob rows
boolean collectOOB = true;
final int cmin = _cmin;
// Need this chunk of code to score over every tree...
// Doesn't do anything with the first tree; we score it last, manually (after looping over all the trees)
long seedForOob = ShuffleTask.seed(chks[0].cidx());
for( int ntree = 0; ntree < _ntrees; ntree++ ) {
int oobcnt = 0;
ArrayList<Integer> oob = new ArrayList<Integer>(); // oob rows
long treeSeed = _model.seed(ntree);
byte producerId = _model.producerId(ntree);
int init_row = (int)chks[0]._start;
long seed = Sampling.chunkSampleSeed(treeSeed, init_row);
Random rand = Utils.getDeterRNG(seed);
// Now for all rows, classify & vote!
for (int row = 0; row < rows; row++) {
// int row = r + (int)chks[0]._start;
// ------ THIS CODE is crucial and serves to replay the same sequence
// of random numbers as in the method Data.sampleFair()
// Skip row used during training if OOB is computed
float sampledItem = rand.nextFloat();
// Bail out of broken rows with NA in class column.
// Do not yet skip rows with NAs in the remaining columns
if (chks[_ncols - 1].isNA0(row)) continue;
if (sampledItem < _model.sample) continue;
oob.add(row);
oobcnt++;
// Predict with this tree - produce 0-based class index
int prediction = (int) _model.classify0(ntree, chks, row, _modelDataMap, (short) _N, false);
if (prediction >= _nclasses) continue; // Junk row cannot be predicted
// Check tree miss
int alignedPrediction = alignModelIdx(prediction);
int alignedData = alignDataIdx((int) chks[_classcol].at80(row) - cmin);
if (alignedPrediction == alignedData) _votesOOB[ntree]++;
}
_oobs = new int[oob.size()];
for (int i = 0; i < oob.size(); ++i) _oobs[i] = oob.get(i);
//score on shuffled data...
if (soob==null || soob.length < oobcnt) soob = new int[oobcnt];
Utils.shuffleArray(_oobs, oobcnt, soob, seedForOob, 0); // Shuffle array and copy results into <code>soob</code>
for(int j = 0; j < oobcnt; j++) {
int row = _oobs[j];
// Do scoring:
// - prepare a row data
for (int i=0;i<chks.length - 1;i++) {
data[i] = chks[i].at0(row); // copy predictor columns; the response column (last chunk) is excluded
}
// - permute variable
if (_var>=0) data[_var] = chks[_var].at0(soob[j]);
else assert false;
// - score data
// - score only the tree
int prediction = (int) Tree.classify(new AutoBuffer(_model.tree(ntree)), data, (double)_N, false); //.classify0(ntree, _data, chks, row, _modelDataMap, numClasses );
if( prediction >= _nclasses ) continue;
int pred = alignModelIdx(prediction);
int actu = alignDataIdx((int) chks[_classcol].at80(_oobs[j]) - cmin);
if (pred == actu) _votesSOOB[ntree]++;
_nrows[ntree]++;
}
}
}
@Override public void reduce( VariableImportance t ) {
Utils.add(_votesOOB, t._votesOOB);
Utils.add(_votesSOOB, t._votesSOOB);
Utils.add(_nrows, t._nrows);
}
/** Transforms 0-based class produced by model to CF zero-based */
private int alignModelIdx(int modelClazz) {
if (_model_classes_mapping!=null)
return _model_classes_mapping[modelClazz];
else
return modelClazz + _cmin_model_mapping;
}
/** Transforms 0-based class from input data to CF zero-based */
private int alignDataIdx(int dataClazz) {
if (_data_classes_mapping!=null)
return _data_classes_mapping[dataClazz];
else
return dataClazz + _cmin_data_mapping;
}
public static int alignEnumDomains(final String[] modelDomain, final String[] dataDomain, int[] modelMapping, int[] dataMapping) {
assert modelMapping!=null && modelMapping.length == modelDomain.length;
assert dataMapping!=null && dataMapping.length == dataDomain.length;
int idx = 0, idxM = 0, idxD = 0;
while(idxM!=modelDomain.length || idxD!=dataDomain.length) {
if (idxM==modelDomain.length) { dataMapping[idxD++] = idx++; continue; }
if (idxD==dataDomain.length) { modelMapping[idxM++] = idx++; continue; }
int c = modelDomain[idxM].compareTo(dataDomain[idxD]);
if (c < 0) {
modelMapping[idxM] = idx;
idxM++;
} else if (c > 0) {
dataMapping[idxD] = idx;
idxD++;
} else { // strings are identical
modelMapping[idxM] = idx;
dataMapping[idxD] = idx;
idxM++; idxD++;
}
idx++;
}
return idx;
}
public TreeVotes[] resultVotes() {
return new TreeVotes[]{new TreeVotes(_votesOOB, _nrows, _ntrees), new TreeVotes(_votesSOOB, _nrows, _ntrees)};
}
// public TreeSSE resultSSE () { return new TreeSSE (_sse, _nrows, _ntrees); }
/* This is a copy of score0 method from DTree:615 */
// private void score0(double data[], float preds[], CompressedTree[] ts) {
// for( int c=0; c<ts.length; c++ )
// if( ts[c] != null )
// preds[ts.length==1?0:c+1] += ts[c].score(data);
// }
// private Chunk chk_resp( Chunk chks[] ) { return chks[_ncols]; }
//
// private Random rngForTree(CompressedTree[] ts, int cidx) {
// return _oob ? ts[0].rngForChunk(cidx) : new DummyRandom(); // k-class set of trees shares the same random number
// }
/* For bulk scoring
public static TreeVotes collect(TreeModel tmodel, Frame f, int ncols, float rate, int variable) {
CompressedTree[][] trees = new CompressedTree[tmodel.ntrees()][];
for (int tidx = 0; tidx < tmodel.ntrees(); tidx++) trees[tidx] = tmodel.ctree(tidx);
return new TreeVotesCollector(trees, tmodel.nclasses(), ncols, rate, variable).doAll(f).result();
}*/
// VariableImportance(int trees, int nclasses, int ncols, float rate, int variable, SpeeDRFModel model)
public static TreeVotes[] collectVotes(int trees, int nclasses, Frame f, int ncols, float rate, int variable, SpeeDRFModel model, Vec resp) {
return new VariableImportance(trees, nclasses, ncols, rate, variable, model, f, resp).doAll(f).resultVotes();
}
// public static TreeSSE collectSSE(CompressedTree[/*nclass || 1 for regression*/] tree, int nclasses, Frame f, int ncols, float rate, int variable) {
// return new TreeMeasuresCollector(new CompressedTree[][] {tree}, nclasses, ncols, rate, variable).doAll(f).resultSSE();
// }
// private static final class DummyRandom extends Random {
// @Override public final float nextFloat() { return 1.0f; }
// }
/** A simple holder for a set of different tree measurements. */
public static abstract class TreeMeasures<T extends TreeMeasures> extends Iced {
/** Actual number of trees which votes are stored in this object */
protected int _ntrees;
/** Number of processed row per tree. */
protected long[/*ntrees*/] _nrows;
public TreeMeasures(int initialCapacity) { _nrows = new long[initialCapacity]; }
public TreeMeasures(long[] nrows, int ntrees) { _nrows = nrows; _ntrees = ntrees;}
/** Returns number of rows which were used during voting per individual tree. */
public final long[] nrows() { return _nrows; }
/** Returns number of voting predictors */
public final int npredictors() { return _ntrees; }
/** Returns a list of accuracies per tree. */
public abstract double accuracy(int tidx);
public final double[] accuracy() {
double[] r = new double[_ntrees];
// Average of all trees
for (int tidx=0; tidx<_ntrees; tidx++) r[tidx] = accuracy(tidx);
return r;
}
/** Compute variable importance with respect to given votes.
* The given {@link T} object represents correct votes.
* This object represents votes over shuffled data.
*
* @param right individual tree measurements performed over non-shuffled data.
* @return computed importance and standard deviation
*/
public abstract double[/*2*/] imp(T right);
public abstract T append(T t);
}
/** A class holding tree votes. */
public static class TreeVotes extends TreeMeasures<TreeVotes> {
/** Number of correct votes per tree */
private long[/*ntrees*/] _votes;
public TreeVotes(int initialCapacity) {
super(initialCapacity);
_votes = new long[initialCapacity];
}
public TreeVotes(long[] votes, long[] nrows, int ntrees) {
super(nrows, ntrees);
_votes = votes;
}
/** Returns number of positive votes per tree. */
public final long[] votes() { return _votes; }
/** Returns accuracy per individual trees. */
@Override public final double accuracy(int tidx) {
assert tidx < _nrows.length && tidx < _votes.length;
return ((double) _votes[tidx]) / _nrows[tidx];
}
/** Compute variable importance with respect to given votes.
* The given {@link TreeVotes} object represents correct votes.
* This object represents votes over shuffled data.
*
* @param right individual tree votes collected over non-shuffled data.
* @return computed importance and standard deviation
*/
@Override public final double[/*2*/] imp(TreeVotes right) {
assert npredictors() == right.npredictors();
int ntrees = npredictors();
double imp = 0;
double sd = 0;
// Over all trees
for (int tidx = 0; tidx < ntrees; tidx++) {
assert right.nrows()[tidx] == nrows()[tidx];
double delta = ((double) (right.votes()[tidx] - votes()[tidx])) / nrows()[tidx];
imp += delta;
sd += delta * delta;
}
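// av is the mean per-tree drop in accuracy; csd is its standard error (the usual
// permutation-importance estimate of importance and its uncertainty).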
double av = imp / ntrees;
double csd = Math.sqrt( (sd/ntrees - av*av) / ntrees );
return new double[] { av, csd};
}
/** Append a tree votes to a list of trees. */
public TreeVotes append(long rightVotes, long allRows) {
assert _votes.length > _ntrees && _votes.length == _nrows.length : "TreeVotes inconsistency!";
_votes[_ntrees] = rightVotes;
_nrows[_ntrees] = allRows;
_ntrees++;
return this;
}
@Override public TreeVotes append(final TreeVotes tv) {
for (int i=0; i<tv.npredictors(); i++)
append(tv._votes[i], tv._nrows[i]);
return this;
}
}
/** A simple holder serving SSE per tree. */
// public static class TreeSSE extends TreeMeasures<TreeSSE> {
// /** SSE per tree */
// private float[/*ntrees*/] _sse;
//
// public TreeSSE(int initialCapacity) {
// super(initialCapacity);
// _sse = new float[initialCapacity];
// }
// public TreeSSE(float[] sse, long[] nrows, int ntrees) {
// super(nrows, ntrees);
// _sse = sse;
// }
// @Override public double accuracy(int tidx) {
// return _sse[tidx] / _nrows[tidx];
// }
// @Override public double[] imp(TreeSSE right) {
// assert npredictors() == right.npredictors();
// int ntrees = npredictors();
// double imp = 0;
// double sd = 0;
// // Over all trees
// for (int tidx = 0; tidx < ntrees; tidx++) {
// assert right.nrows()[tidx] == nrows()[tidx]; // check that we iterate over same OOB rows
// double delta = ((double) (_sse[tidx] - right._sse[tidx])) / nrows()[tidx];
// imp += delta;
// sd += delta * delta;
// }
// double av = imp / ntrees;
// double csd = Math.sqrt( (sd/ntrees - av*av) / ntrees );
// return new double[] { av, csd };
// }
// @Override public TreeSSE append(TreeSSE t) {
// for (int i=0; i<t.npredictors(); i++)
// append(t._sse[i], t._nrows[i]);
// return this;
// }
// /** Append a tree sse to a list of trees. */
// public TreeSSE append(float sse, long allRows) {
// assert _sse.length > _ntrees && _sse.length == _nrows.length : "TreeVotes inconsistency!";
// _sse [_ntrees] = sse;
// _nrows[_ntrees] = allRows;
// _ntrees++;
// return this;
// }
// }
public static TreeVotes asVotes(TreeMeasures tm) { return (TreeVotes) tm; }
// public static TreeSSE asSSE (TreeMeasures tm) { return (TreeSSE) tm; }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/jsr166y/ConcurrentLinkedDeque.java
|
/*
* Written by Doug Lea and Martin Buchholz with assistance from members of
* JCP JSR-166 Expert Group and released to the public domain, as explained
* at http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166y;
import java.util.AbstractCollection;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Deque;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Queue;
/**
* An unbounded concurrent {@linkplain Deque deque} based on linked nodes.
* Concurrent insertion, removal, and access operations execute safely
* across multiple threads.
* A {@code ConcurrentLinkedDeque} is an appropriate choice when
* many threads will share access to a common collection.
* Like most other concurrent collection implementations, this class
* does not permit the use of {@code null} elements.
*
* <p>Iterators are <i>weakly consistent</i>, returning elements
* reflecting the state of the deque at some point at or since the
* creation of the iterator. They do <em>not</em> throw {@link
* java.util.ConcurrentModificationException
* ConcurrentModificationException}, and may proceed concurrently with
* other operations.
*
* <p>Beware that, unlike in most collections, the {@code size} method
* is <em>NOT</em> a constant-time operation. Because of the
* asynchronous nature of these deques, determining the current number
* of elements requires a traversal of the elements, and so may report
* inaccurate results if this collection is modified during traversal.
* Additionally, the bulk operations {@code addAll},
* {@code removeAll}, {@code retainAll}, {@code containsAll},
* {@code equals}, and {@code toArray} are <em>not</em> guaranteed
* to be performed atomically. For example, an iterator operating
* concurrently with an {@code addAll} operation might view only some
* of the added elements.
*
* <p>This class and its iterator implement all of the <em>optional</em>
* methods of the {@link Deque} and {@link Iterator} interfaces.
*
* <p>Memory consistency effects: As with other concurrent collections,
* actions in a thread prior to placing an object into a
* {@code ConcurrentLinkedDeque}
* <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a>
* actions subsequent to the access or removal of that element from
* the {@code ConcurrentLinkedDeque} in another thread.
*
* <p>This class is a member of the
* <a href="{@docRoot}/../technotes/guides/collections/index.html">
* Java Collections Framework</a>.
*
* @since 1.7
* @author Doug Lea
* @author Martin Buchholz
* @param <E> the type of elements held in this collection
*/
public class ConcurrentLinkedDeque<E>
extends AbstractCollection<E>
implements Deque<E>, java.io.Serializable {
/*
* This is an implementation of a concurrent lock-free deque
* supporting interior removes but not interior insertions, as
* required to support the entire Deque interface.
*
* We extend the techniques developed for ConcurrentLinkedQueue and
* LinkedTransferQueue (see the internal docs for those classes).
* Understanding the ConcurrentLinkedQueue implementation is a
* prerequisite for understanding the implementation of this class.
*
* The data structure is a symmetrical doubly-linked "GC-robust"
* linked list of nodes. We minimize the number of volatile writes
* using two techniques: advancing multiple hops with a single CAS
* and mixing volatile and non-volatile writes of the same memory
* locations.
*
* A node contains the expected E ("item") and links to predecessor
* ("prev") and successor ("next") nodes:
*
* class Node<E> { volatile Node<E> prev, next; volatile E item; }
*
* A node p is considered "live" if it contains a non-null item
* (p.item != null). When an item is CASed to null, the item is
* atomically logically deleted from the collection.
*
* At any time, there is precisely one "first" node with a null
* prev reference that terminates any chain of prev references
* starting at a live node. Similarly there is precisely one
* "last" node terminating any chain of next references starting at
* a live node. The "first" and "last" nodes may or may not be live.
* The "first" and "last" nodes are always mutually reachable.
*
* A new element is added atomically by CASing the null prev or
* next reference in the first or last node to a fresh node
* containing the element. The element's node atomically becomes
* "live" at that point.
*
* A node is considered "active" if it is a live node, or the
* first or last node. Active nodes cannot be unlinked.
*
* A "self-link" is a next or prev reference that is the same node:
* p.prev == p or p.next == p
* Self-links are used in the node unlinking process. Active nodes
* never have self-links.
*
* A node p is active if and only if:
*
* p.item != null ||
* (p.prev == null && p.next != p) ||
* (p.next == null && p.prev != p)
*
* The deque object has two node references, "head" and "tail".
* The head and tail are only approximations to the first and last
* nodes of the deque. The first node can always be found by
* following prev pointers from head; likewise for tail. However,
* it is permissible for head and tail to be referring to deleted
* nodes that have been unlinked and so may not be reachable from
* any live node.
*
* There are 3 stages of node deletion;
* "logical deletion", "unlinking", and "gc-unlinking".
*
* 1. "logical deletion" by CASing item to null atomically removes
* the element from the collection, and makes the containing node
* eligible for unlinking.
*
* 2. "unlinking" makes a deleted node unreachable from active
* nodes, and thus eventually reclaimable by GC. Unlinked nodes
* may remain reachable indefinitely from an iterator.
*
* Physical node unlinking is merely an optimization (albeit a
* critical one), and so can be performed at our convenience. At
* any time, the set of live nodes maintained by prev and next
* links are identical, that is, the live nodes found via next
* links from the first node is equal to the elements found via
* prev links from the last node. However, this is not true for
* nodes that have already been logically deleted - such nodes may
* be reachable in one direction only.
*
* 3. "gc-unlinking" takes unlinking further by making active
* nodes unreachable from deleted nodes, making it easier for the
* GC to reclaim future deleted nodes. This step makes the data
* structure "gc-robust", as first described in detail by Boehm
* (http://portal.acm.org/citation.cfm?doid=503272.503282).
*
* GC-unlinked nodes may remain reachable indefinitely from an
* iterator, but unlike unlinked nodes, are never reachable from
* head or tail.
*
* Making the data structure GC-robust will eliminate the risk of
* unbounded memory retention with conservative GCs and is likely
* to improve performance with generational GCs.
*
* When a node is dequeued at either end, e.g. via poll(), we would
* like to break any references from the node to active nodes. We
* develop further the use of self-links that was very effective in
* other concurrent collection classes. The idea is to replace
* prev and next pointers with special values that are interpreted
* to mean off-the-list-at-one-end. These are approximations, but
* good enough to preserve the properties we want in our
* traversals, e.g. we guarantee that a traversal will never visit
* the same element twice, but we don't guarantee whether a
* traversal that runs out of elements will be able to see more
* elements later after enqueues at that end. Doing gc-unlinking
* safely is particularly tricky, since any node can be in use
* indefinitely (for example by an iterator). We must ensure that
* the nodes pointed at by head/tail never get gc-unlinked, since
* head/tail are needed to get "back on track" by other nodes that
* are gc-unlinked. gc-unlinking accounts for much of the
* implementation complexity.
*
* Since neither unlinking nor gc-unlinking are necessary for
* correctness, there are many implementation choices regarding
* frequency (eagerness) of these operations. Since volatile
* reads are likely to be much cheaper than CASes, saving CASes by
* unlinking multiple adjacent nodes at a time may be a win.
* gc-unlinking can be performed rarely and still be effective,
* since it is most important that long chains of deleted nodes
* are occasionally broken.
*
* The actual representation we use is that p.next == p means to
* goto the first node (which in turn is reached by following prev
* pointers from head), and p.next == null && p.prev == p means
* that the iteration is at an end and that p is a (static final)
* dummy node, NEXT_TERMINATOR, and not the last active node.
* Finishing the iteration when encountering such a TERMINATOR is
* good enough for read-only traversals, so such traversals can use
* p.next == null as the termination condition. When we need to
* find the last (active) node, for enqueueing a new node, we need
* to check whether we have reached a TERMINATOR node; if so,
* restart traversal from tail.
*
* The implementation is completely directionally symmetrical,
* except that most public methods that iterate through the list
* follow next pointers ("forward" direction).
*
* We believe (without full proof) that all single-element deque
* operations (e.g., addFirst, peekLast, pollLast) are linearizable
* (see Herlihy and Shavit's book). However, some combinations of
* operations are known not to be linearizable. In particular,
* when an addFirst(A) is racing with pollFirst() removing B, it is
* possible for an observer iterating over the elements to observe
* A B C and subsequently observe A C, even though no interior
* removes are ever performed. Nevertheless, iterators behave
* reasonably, providing the "weakly consistent" guarantees.
*
* Empirically, microbenchmarks suggest that this class adds about
* 40% overhead relative to ConcurrentLinkedQueue, which feels as
* good as we can hope for.
*/
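/*
* Minimal usage sketch (illustrative only, not part of the original source):
*
* Deque<String> dq = new ConcurrentLinkedDeque<String>();
* dq.addFirst("a"); // linkFirst: CAS the first node's prev reference
* dq.addLast("b"); // linkLast: CAS the last node's next reference
* String s = dq.pollFirst(); // logical deletion by CASing item to null, then opportunistic unlink
*/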
private static final long serialVersionUID = 876323262645176354L;
/**
* A node from which the first node on list (that is, the unique node p
* with p.prev == null && p.next != p) can be reached in O(1) time.
* Invariants:
* - the first node is always O(1) reachable from head via prev links
* - all live nodes are reachable from the first node via succ()
* - head != null
* - (tmp = head).next != tmp || tmp != head
* - head is never gc-unlinked (but may be unlinked)
* Non-invariants:
* - head.item may or may not be null
* - head may not be reachable from the first or last node, or from tail
*/
private transient volatile Node<E> head;
/**
* A node from which the last node on list (that is, the unique node p
* with p.next == null && p.prev != p) can be reached in O(1) time.
* Invariants:
* - the last node is always O(1) reachable from tail via next links
* - all live nodes are reachable from the last node via pred()
* - tail != null
* - tail is never gc-unlinked (but may be unlinked)
* Non-invariants:
* - tail.item may or may not be null
* - tail may not be reachable from the first or last node, or from head
*/
private transient volatile Node<E> tail;
private static final Node<Object> PREV_TERMINATOR, NEXT_TERMINATOR;
@SuppressWarnings("unchecked")
Node<E> prevTerminator() {
return (Node<E>) PREV_TERMINATOR;
}
@SuppressWarnings("unchecked")
Node<E> nextTerminator() {
return (Node<E>) NEXT_TERMINATOR;
}
static final class Node<E> {
volatile Node<E> prev;
volatile E item;
volatile Node<E> next;
Node() { // default constructor for NEXT_TERMINATOR, PREV_TERMINATOR
}
/**
* Constructs a new node. Uses relaxed write because item can
* only be seen after publication via casNext or casPrev.
*/
Node(E item) {
UNSAFE.putObject(this, itemOffset, item);
}
boolean casItem(E cmp, E val) {
return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
}
void lazySetNext(Node<E> val) {
UNSAFE.putOrderedObject(this, nextOffset, val);
}
boolean casNext(Node<E> cmp, Node<E> val) {
return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
}
void lazySetPrev(Node<E> val) {
UNSAFE.putOrderedObject(this, prevOffset, val);
}
boolean casPrev(Node<E> cmp, Node<E> val) {
return UNSAFE.compareAndSwapObject(this, prevOffset, cmp, val);
}
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long prevOffset;
private static final long itemOffset;
private static final long nextOffset;
static {
try {
UNSAFE = getUnsafe();
Class<?> k = Node.class;
prevOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("prev"));
itemOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("item"));
nextOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("next"));
} catch (Exception e) {
throw new Error(e);
}
}
}
/**
* Links e as first element.
*/
private void linkFirst(E e) {
checkNotNull(e);
final Node<E> newNode = new Node<E>(e);
restartFromHead:
for (;;)
for (Node<E> h = head, p = h, q;;) {
if ((q = p.prev) != null &&
(q = (p = q).prev) != null)
// Check for head updates every other hop.
// If p == q, we are sure to follow head instead.
p = (h != (h = head)) ? h : q;
else if (p.next == p) // PREV_TERMINATOR
continue restartFromHead;
else {
// p is first node
newNode.lazySetNext(p); // CAS piggyback
if (p.casPrev(null, newNode)) {
// Successful CAS is the linearization point
// for e to become an element of this deque,
// and for newNode to become "live".
if (p != h) // hop two nodes at a time
casHead(h, newNode); // Failure is OK.
return;
}
// Lost CAS race to another thread; re-read prev
}
}
}
/**
* Links e as last element.
*/
private void linkLast(E e) {
checkNotNull(e);
final Node<E> newNode = new Node<E>(e);
restartFromTail:
for (;;)
for (Node<E> t = tail, p = t, q;;) {
if ((q = p.next) != null &&
(q = (p = q).next) != null)
// Check for tail updates every other hop.
// If p == q, we are sure to follow tail instead.
p = (t != (t = tail)) ? t : q;
else if (p.prev == p) // NEXT_TERMINATOR
continue restartFromTail;
else {
// p is last node
newNode.lazySetPrev(p); // CAS piggyback
if (p.casNext(null, newNode)) {
// Successful CAS is the linearization point
// for e to become an element of this deque,
// and for newNode to become "live".
if (p != t) // hop two nodes at a time
casTail(t, newNode); // Failure is OK.
return;
}
// Lost CAS race to another thread; re-read next
}
}
}
private static final int HOPS = 2;
/**
* Unlinks non-null node x.
*/
void unlink(Node<E> x) {
// assert x != null;
// assert x.item == null;
// assert x != PREV_TERMINATOR;
// assert x != NEXT_TERMINATOR;
final Node<E> prev = x.prev;
final Node<E> next = x.next;
if (prev == null) {
unlinkFirst(x, next);
} else if (next == null) {
unlinkLast(x, prev);
} else {
// Unlink interior node.
//
// This is the common case, since a series of polls at the
// same end will be "interior" removes, except perhaps for
// the first one, since end nodes cannot be unlinked.
//
// At any time, all active nodes are mutually reachable by
// following a sequence of either next or prev pointers.
//
// Our strategy is to find the unique active predecessor
// and successor of x. Try to fix up their links so that
// they point to each other, leaving x unreachable from
// active nodes. If successful, and if x has no live
// predecessor/successor, we additionally try to gc-unlink,
// leaving active nodes unreachable from x, by rechecking
// that the status of predecessor and successor are
// unchanged and ensuring that x is not reachable from
// tail/head, before setting x's prev/next links to their
// logical approximate replacements, self/TERMINATOR.
Node<E> activePred, activeSucc;
boolean isFirst, isLast;
int hops = 1;
// Find active predecessor
for (Node<E> p = prev; ; ++hops) {
if (p.item != null) {
activePred = p;
isFirst = false;
break;
}
Node<E> q = p.prev;
if (q == null) {
if (p.next == p)
return;
activePred = p;
isFirst = true;
break;
}
else if (p == q)
return;
else
p = q;
}
// Find active successor
for (Node<E> p = next; ; ++hops) {
if (p.item != null) {
activeSucc = p;
isLast = false;
break;
}
Node<E> q = p.next;
if (q == null) {
if (p.prev == p)
return;
activeSucc = p;
isLast = true;
break;
}
else if (p == q)
return;
else
p = q;
}
// TODO: better HOP heuristics
if (hops < HOPS
// always squeeze out interior deleted nodes
&& (isFirst | isLast))
return;
// Squeeze out deleted nodes between activePred and
// activeSucc, including x.
skipDeletedSuccessors(activePred);
skipDeletedPredecessors(activeSucc);
// Try to gc-unlink, if possible
if ((isFirst | isLast) &&
// Recheck expected state of predecessor and successor
(activePred.next == activeSucc) &&
(activeSucc.prev == activePred) &&
(isFirst ? activePred.prev == null : activePred.item != null) &&
(isLast ? activeSucc.next == null : activeSucc.item != null)) {
updateHead(); // Ensure x is not reachable from head
updateTail(); // Ensure x is not reachable from tail
// Finally, actually gc-unlink
x.lazySetPrev(isFirst ? prevTerminator() : x);
x.lazySetNext(isLast ? nextTerminator() : x);
}
}
}
/**
* Unlinks non-null first node.
*/
private void unlinkFirst(Node<E> first, Node<E> next) {
// assert first != null;
// assert next != null;
// assert first.item == null;
for (Node<E> o = null, p = next, q;;) {
if (p.item != null || (q = p.next) == null) {
if (o != null && p.prev != p && first.casNext(next, p)) {
skipDeletedPredecessors(p);
if (first.prev == null &&
(p.next == null || p.item != null) &&
p.prev == first) {
updateHead(); // Ensure o is not reachable from head
updateTail(); // Ensure o is not reachable from tail
// Finally, actually gc-unlink
o.lazySetNext(o);
o.lazySetPrev(prevTerminator());
}
}
return;
}
else if (p == q)
return;
else {
o = p;
p = q;
}
}
}
/**
* Unlinks non-null last node.
*/
private void unlinkLast(Node<E> last, Node<E> prev) {
// assert last != null;
// assert prev != null;
// assert last.item == null;
for (Node<E> o = null, p = prev, q;;) {
if (p.item != null || (q = p.prev) == null) {
if (o != null && p.next != p && last.casPrev(prev, p)) {
skipDeletedSuccessors(p);
if (last.next == null &&
(p.prev == null || p.item != null) &&
p.next == last) {
updateHead(); // Ensure o is not reachable from head
updateTail(); // Ensure o is not reachable from tail
// Finally, actually gc-unlink
o.lazySetPrev(o);
o.lazySetNext(nextTerminator());
}
}
return;
}
else if (p == q)
return;
else {
o = p;
p = q;
}
}
}
/**
* Guarantees that any node which was unlinked before a call to
* this method will be unreachable from head after it returns.
* Does not guarantee to eliminate slack, only that head will
* point to a node that was active while this method was running.
*/
private final void updateHead() {
// Either head already points to an active node, or we keep
// trying to cas it to the first node until it does.
Node<E> h, p, q;
restartFromHead:
while ((h = head).item == null && (p = h.prev) != null) {
for (;;) {
if ((q = p.prev) == null ||
(q = (p = q).prev) == null) {
// It is possible that p is PREV_TERMINATOR,
// but if so, the CAS is guaranteed to fail.
if (casHead(h, p))
return;
else
continue restartFromHead;
}
else if (h != head)
continue restartFromHead;
else
p = q;
}
}
}
/**
* Guarantees that any node which was unlinked before a call to
* this method will be unreachable from tail after it returns.
* Does not guarantee to eliminate slack, only that tail will
* point to a node that was active while this method was running.
*/
private final void updateTail() {
// Either tail already points to an active node, or we keep
// trying to cas it to the last node until it does.
Node<E> t, p, q;
restartFromTail:
while ((t = tail).item == null && (p = t.next) != null) {
for (;;) {
if ((q = p.next) == null ||
(q = (p = q).next) == null) {
// It is possible that p is NEXT_TERMINATOR,
// but if so, the CAS is guaranteed to fail.
if (casTail(t, p))
return;
else
continue restartFromTail;
}
else if (t != tail)
continue restartFromTail;
else
p = q;
}
}
}
private void skipDeletedPredecessors(Node<E> x) {
whileActive:
do {
Node<E> prev = x.prev;
// assert prev != null;
// assert x != NEXT_TERMINATOR;
// assert x != PREV_TERMINATOR;
Node<E> p = prev;
findActive:
for (;;) {
if (p.item != null)
break findActive;
Node<E> q = p.prev;
if (q == null) {
if (p.next == p)
continue whileActive;
break findActive;
}
else if (p == q)
continue whileActive;
else
p = q;
}
// found active CAS target
if (prev == p || x.casPrev(prev, p))
return;
} while (x.item != null || x.next == null);
}
private void skipDeletedSuccessors(Node<E> x) {
whileActive:
do {
Node<E> next = x.next;
// assert next != null;
// assert x != NEXT_TERMINATOR;
// assert x != PREV_TERMINATOR;
Node<E> p = next;
findActive:
for (;;) {
if (p.item != null)
break findActive;
Node<E> q = p.next;
if (q == null) {
if (p.prev == p)
continue whileActive;
break findActive;
}
else if (p == q)
continue whileActive;
else
p = q;
}
// found active CAS target
if (next == p || x.casNext(next, p))
return;
} while (x.item != null || x.prev == null);
}
/**
* Returns the successor of p, or the first node if p.next has been
* linked to self, which will only be true if traversing with a
* stale pointer that is now off the list.
*/
final Node<E> succ(Node<E> p) {
// TODO: should we skip deleted nodes here?
Node<E> q = p.next;
return (p == q) ? first() : q;
}
/**
* Returns the predecessor of p, or the last node if p.prev has been
* linked to self, which will only be true if traversing with a
* stale pointer that is now off the list.
*/
final Node<E> pred(Node<E> p) {
Node<E> q = p.prev;
return (p == q) ? last() : q;
}
/**
* Returns the first node, the unique node p for which:
* p.prev == null && p.next != p
* The returned node may or may not be logically deleted.
* Guarantees that head is set to the returned node.
*/
Node<E> first() {
restartFromHead:
for (;;)
for (Node<E> h = head, p = h, q;;) {
if ((q = p.prev) != null &&
(q = (p = q).prev) != null)
// Check for head updates every other hop.
// If p == q, we are sure to follow head instead.
p = (h != (h = head)) ? h : q;
else if (p == h
// It is possible that p is PREV_TERMINATOR,
// but if so, the CAS is guaranteed to fail.
|| casHead(h, p))
return p;
else
continue restartFromHead;
}
}
/**
* Returns the last node, the unique node p for which:
* p.next == null && p.prev != p
* The returned node may or may not be logically deleted.
* Guarantees that tail is set to the returned node.
*/
Node<E> last() {
restartFromTail:
for (;;)
for (Node<E> t = tail, p = t, q;;) {
if ((q = p.next) != null &&
(q = (p = q).next) != null)
// Check for tail updates every other hop.
// If p == q, we are sure to follow tail instead.
p = (t != (t = tail)) ? t : q;
else if (p == t
// It is possible that p is NEXT_TERMINATOR,
// but if so, the CAS is guaranteed to fail.
|| casTail(t, p))
return p;
else
continue restartFromTail;
}
}
// Minor convenience utilities
/**
* Throws NullPointerException if argument is null.
*
* @param v the element
*/
private static void checkNotNull(Object v) {
if (v == null)
throw new NullPointerException();
}
/**
* Returns element unless it is null, in which case throws
* NoSuchElementException.
*
* @param v the element
* @return the element
*/
private E screenNullResult(E v) {
if (v == null)
throw new NoSuchElementException();
return v;
}
/**
* Creates an array list and fills it with elements of this list.
* Used by toArray.
*
* @return the arrayList
*/
private ArrayList<E> toArrayList() {
ArrayList<E> list = new ArrayList<E>();
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null)
list.add(item);
}
return list;
}
/**
* Constructs an empty deque.
*/
public ConcurrentLinkedDeque() {
head = tail = new Node<E>(null);
}
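/*
* A minimal usage sketch, relying only on the standard Deque API (the
* values are arbitrary):
*
*   Deque<Integer> d = new ConcurrentLinkedDeque<Integer>();
*   d.addLast(1);                  // queue-style insertion at the tail
*   d.addFirst(0);                 // stack-style insertion at the head
*   Integer head = d.pollFirst();  // retrieves and removes 0
*/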
/**
* Constructs a deque initially containing the elements of
* the given collection, added in traversal order of the
* collection's iterator.
*
* @param c the collection of elements to initially contain
* @throws NullPointerException if the specified collection or any
* of its elements are null
*/
public ConcurrentLinkedDeque(Collection<? extends E> c) {
// Copy c into a private chain of Nodes
Node<E> h = null, t = null;
for (E e : c) {
checkNotNull(e);
Node<E> newNode = new Node<E>(e);
if (h == null)
h = t = newNode;
else {
t.lazySetNext(newNode);
newNode.lazySetPrev(t);
t = newNode;
}
}
initHeadTail(h, t);
}
/**
* Initializes head and tail, ensuring invariants hold.
*/
private void initHeadTail(Node<E> h, Node<E> t) {
if (h == t) {
if (h == null)
h = t = new Node<E>(null);
else {
// Avoid edge case of a single Node with non-null item.
Node<E> newNode = new Node<E>(null);
t.lazySetNext(newNode);
newNode.lazySetPrev(t);
t = newNode;
}
}
head = h;
tail = t;
}
/**
* Inserts the specified element at the front of this deque.
* As the deque is unbounded, this method will never throw
* {@link IllegalStateException}.
*
* @throws NullPointerException if the specified element is null
*/
public void addFirst(E e) {
linkFirst(e);
}
/**
* Inserts the specified element at the end of this deque.
* As the deque is unbounded, this method will never throw
* {@link IllegalStateException}.
*
* <p>This method is equivalent to {@link #add}.
*
* @throws NullPointerException if the specified element is null
*/
public void addLast(E e) {
linkLast(e);
}
/**
* Inserts the specified element at the front of this deque.
* As the deque is unbounded, this method will never return {@code false}.
*
* @return {@code true} (as specified by {@link Deque#offerFirst})
* @throws NullPointerException if the specified element is null
*/
public boolean offerFirst(E e) {
linkFirst(e);
return true;
}
/**
* Inserts the specified element at the end of this deque.
* As the deque is unbounded, this method will never return {@code false}.
*
* <p>This method is equivalent to {@link #add}.
*
* @return {@code true} (as specified by {@link Deque#offerLast})
* @throws NullPointerException if the specified element is null
*/
public boolean offerLast(E e) {
linkLast(e);
return true;
}
public E peekFirst() {
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null)
return item;
}
return null;
}
public E peekLast() {
for (Node<E> p = last(); p != null; p = pred(p)) {
E item = p.item;
if (item != null)
return item;
}
return null;
}
/**
* @throws NoSuchElementException {@inheritDoc}
*/
public E getFirst() {
return screenNullResult(peekFirst());
}
/**
* @throws NoSuchElementException {@inheritDoc}
*/
public E getLast() {
return screenNullResult(peekLast());
}
public E pollFirst() {
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null && p.casItem(item, null)) {
unlink(p);
return item;
}
}
return null;
}
public E pollLast() {
for (Node<E> p = last(); p != null; p = pred(p)) {
E item = p.item;
if (item != null && p.casItem(item, null)) {
unlink(p);
return item;
}
}
return null;
}
/**
* @throws NoSuchElementException {@inheritDoc}
*/
public E removeFirst() {
return screenNullResult(pollFirst());
}
/**
* @throws NoSuchElementException {@inheritDoc}
*/
public E removeLast() {
return screenNullResult(pollLast());
}
// *** Queue and stack methods ***
/**
* Inserts the specified element at the tail of this deque.
* As the deque is unbounded, this method will never return {@code false}.
*
* @return {@code true} (as specified by {@link Queue#offer})
* @throws NullPointerException if the specified element is null
*/
public boolean offer(E e) {
return offerLast(e);
}
/**
* Inserts the specified element at the tail of this deque.
* As the deque is unbounded, this method will never throw
* {@link IllegalStateException} or return {@code false}.
*
* @return {@code true} (as specified by {@link Collection#add})
* @throws NullPointerException if the specified element is null
*/
public boolean add(E e) {
return offerLast(e);
}
public E poll() { return pollFirst(); }
public E remove() { return removeFirst(); }
public E peek() { return peekFirst(); }
public E element() { return getFirst(); }
public void push(E e) { addFirst(e); }
public E pop() { return removeFirst(); }
/**
* Removes the first element {@code e} such that
* {@code o.equals(e)}, if such an element exists in this deque.
* If the deque does not contain the element, it is unchanged.
*
* @param o element to be removed from this deque, if present
* @return {@code true} if the deque contained the specified element
* @throws NullPointerException if the specified element is null
*/
public boolean removeFirstOccurrence(Object o) {
checkNotNull(o);
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null && o.equals(item) && p.casItem(item, null)) {
unlink(p);
return true;
}
}
return false;
}
/**
* Removes the last element {@code e} such that
* {@code o.equals(e)}, if such an element exists in this deque.
* If the deque does not contain the element, it is unchanged.
*
* @param o element to be removed from this deque, if present
* @return {@code true} if the deque contained the specified element
* @throws NullPointerException if the specified element is null
*/
public boolean removeLastOccurrence(Object o) {
checkNotNull(o);
for (Node<E> p = last(); p != null; p = pred(p)) {
E item = p.item;
if (item != null && o.equals(item) && p.casItem(item, null)) {
unlink(p);
return true;
}
}
return false;
}
/**
* Returns {@code true} if this deque contains at least one
* element {@code e} such that {@code o.equals(e)}.
*
* @param o element whose presence in this deque is to be tested
* @return {@code true} if this deque contains the specified element
*/
public boolean contains(Object o) {
if (o == null) return false;
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null && o.equals(item))
return true;
}
return false;
}
/**
* Returns {@code true} if this collection contains no elements.
*
* @return {@code true} if this collection contains no elements
*/
public boolean isEmpty() {
return peekFirst() == null;
}
/**
* Returns the number of elements in this deque. If this deque
* contains more than {@code Integer.MAX_VALUE} elements, it
* returns {@code Integer.MAX_VALUE}.
*
* <p>Beware that, unlike in most collections, this method is
* <em>NOT</em> a constant-time operation. Because of the
* asynchronous nature of these deques, determining the current
* number of elements requires traversing them all to count them.
* Additionally, it is possible for the size to change during
* execution of this method, in which case the returned result
* will be inaccurate. Thus, this method is typically not very
* useful in concurrent applications.
*
* @return the number of elements in this deque
*/
public int size() {
int count = 0;
for (Node<E> p = first(); p != null; p = succ(p))
if (p.item != null)
// Collection.size() spec says to max out
if (++count == Integer.MAX_VALUE)
break;
return count;
}
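// Illustrative note: callers that only need an emptiness check should
// prefer isEmpty() over size() == 0, since isEmpty() stops at the first
// live element instead of traversing the whole deque:
//
//   if (!deque.isEmpty()) { ... }   // deque is a hypothetical instance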
/**
* Removes the first element {@code e} such that
* {@code o.equals(e)}, if such an element exists in this deque.
* If the deque does not contain the element, it is unchanged.
*
* @param o element to be removed from this deque, if present
* @return {@code true} if the deque contained the specified element
* @throws NullPointerException if the specified element is null
*/
public boolean remove(Object o) {
return removeFirstOccurrence(o);
}
/**
* Appends all of the elements in the specified collection to the end of
* this deque, in the order that they are returned by the specified
* collection's iterator. Attempts to {@code addAll} of a deque to
* itself result in {@code IllegalArgumentException}.
*
* @param c the elements to be inserted into this deque
* @return {@code true} if this deque changed as a result of the call
* @throws NullPointerException if the specified collection or any
* of its elements are null
* @throws IllegalArgumentException if the collection is this deque
*/
public boolean addAll(Collection<? extends E> c) {
if (c == this)
// As historically specified in AbstractQueue#addAll
throw new IllegalArgumentException();
// Copy c into a private chain of Nodes
Node<E> beginningOfTheEnd = null, last = null;
for (E e : c) {
checkNotNull(e);
Node<E> newNode = new Node<E>(e);
if (beginningOfTheEnd == null)
beginningOfTheEnd = last = newNode;
else {
last.lazySetNext(newNode);
newNode.lazySetPrev(last);
last = newNode;
}
}
if (beginningOfTheEnd == null)
return false;
// Atomically append the chain at the tail of this collection
restartFromTail:
for (;;)
for (Node<E> t = tail, p = t, q;;) {
if ((q = p.next) != null &&
(q = (p = q).next) != null)
// Check for tail updates every other hop.
// If p == q, we are sure to follow tail instead.
p = (t != (t = tail)) ? t : q;
else if (p.prev == p) // NEXT_TERMINATOR
continue restartFromTail;
else {
// p is last node
beginningOfTheEnd.lazySetPrev(p); // CAS piggyback
if (p.casNext(null, beginningOfTheEnd)) {
// Successful CAS is the linearization point
// for all elements to be added to this deque.
if (!casTail(t, last)) {
// Try a little harder to update tail,
// since we may be adding many elements.
t = tail;
if (last.next == null)
casTail(t, last);
}
return true;
}
// Lost CAS race to another thread; re-read next
}
}
}
/**
* Removes all of the elements from this deque.
*/
public void clear() {
while (pollFirst() != null)
;
}
/**
* Returns an array containing all of the elements in this deque, in
* proper sequence (from first to last element).
*
* <p>The returned array will be "safe" in that no references to it are
* maintained by this deque. (In other words, this method must allocate
* a new array). The caller is thus free to modify the returned array.
*
* <p>This method acts as bridge between array-based and collection-based
* APIs.
*
* @return an array containing all of the elements in this deque
*/
public Object[] toArray() {
return toArrayList().toArray();
}
/**
* Returns an array containing all of the elements in this deque,
* in proper sequence (from first to last element); the runtime
* type of the returned array is that of the specified array. If
* the deque fits in the specified array, it is returned therein.
* Otherwise, a new array is allocated with the runtime type of
* the specified array and the size of this deque.
*
* <p>If this deque fits in the specified array with room to spare
* (i.e., the array has more elements than this deque), the element in
* the array immediately following the end of the deque is set to
* {@code null}.
*
* <p>Like the {@link #toArray()} method, this method acts as
* bridge between array-based and collection-based APIs. Further,
* this method allows precise control over the runtime type of the
* output array, and may, under certain circumstances, be used to
* save allocation costs.
*
* <p>Suppose {@code x} is a deque known to contain only strings.
* The following code can be used to dump the deque into a newly
* allocated array of {@code String}:
*
* <pre> {@code String[] y = x.toArray(new String[0]);}</pre>
*
* Note that {@code toArray(new Object[0])} is identical in function to
* {@code toArray()}.
*
* @param a the array into which the elements of the deque are to
* be stored, if it is big enough; otherwise, a new array of the
* same runtime type is allocated for this purpose
* @return an array containing all of the elements in this deque
* @throws ArrayStoreException if the runtime type of the specified array
* is not a supertype of the runtime type of every element in
* this deque
* @throws NullPointerException if the specified array is null
*/
public <T> T[] toArray(T[] a) {
return toArrayList().toArray(a);
}
/**
* Returns an iterator over the elements in this deque in proper sequence.
* The elements will be returned in order from first (head) to last (tail).
*
* <p>The returned iterator is a "weakly consistent" iterator that
* will never throw {@link java.util.ConcurrentModificationException
* ConcurrentModificationException}, and guarantees to traverse
* elements as they existed upon construction of the iterator, and
* may (but is not guaranteed to) reflect any modifications
* subsequent to construction.
*
* @return an iterator over the elements in this deque in proper sequence
*/
public Iterator<E> iterator() {
return new Itr();
}
/**
* Returns an iterator over the elements in this deque in reverse
* sequential order. The elements will be returned in order from
* last (tail) to first (head).
*
* <p>The returned iterator is a "weakly consistent" iterator that
* will never throw {@link java.util.ConcurrentModificationException
* ConcurrentModificationException}, and guarantees to traverse
* elements as they existed upon construction of the iterator, and
* may (but is not guaranteed to) reflect any modifications
* subsequent to construction.
*
* @return an iterator over the elements in this deque in reverse order
*/
public Iterator<E> descendingIterator() {
return new DescendingItr();
}
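/*
* Illustrative sketch of the weakly consistent iteration described above;
* the "stale" literal is only a hypothetical filter criterion:
*
*   ConcurrentLinkedDeque<String> deque = new ConcurrentLinkedDeque<String>();
*   // ... other threads may add and remove elements concurrently ...
*   for (Iterator<String> it = deque.iterator(); it.hasNext(); ) {
*       if ("stale".equals(it.next()))
*           it.remove();   // never throws ConcurrentModificationException
*   }
*/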
private abstract class AbstractItr implements Iterator<E> {
/**
* Next node to return item for.
*/
private Node<E> nextNode;
/**
* nextItem holds on to item fields because once we claim
* that an element exists in hasNext(), we must return it in
* the following next() call even if it was in the process of
* being removed when hasNext() was called.
*/
private E nextItem;
/**
* Node returned by most recent call to next. Needed by remove.
* Reset to null if this element is deleted by a call to remove.
*/
private Node<E> lastRet;
abstract Node<E> startNode();
abstract Node<E> nextNode(Node<E> p);
AbstractItr() {
advance();
}
/**
* Sets nextNode and nextItem to next valid node, or to null
* if no such.
*/
private void advance() {
lastRet = nextNode;
Node<E> p = (nextNode == null) ? startNode() : nextNode(nextNode);
for (;; p = nextNode(p)) {
if (p == null) {
// p might be active end or TERMINATOR node; both are OK
nextNode = null;
nextItem = null;
break;
}
E item = p.item;
if (item != null) {
nextNode = p;
nextItem = item;
break;
}
}
}
public boolean hasNext() {
return nextItem != null;
}
public E next() {
E item = nextItem;
if (item == null) throw new NoSuchElementException();
advance();
return item;
}
public void remove() {
Node<E> l = lastRet;
if (l == null) throw new IllegalStateException();
l.item = null;
unlink(l);
lastRet = null;
}
}
/** Forward iterator */
private class Itr extends AbstractItr {
Node<E> startNode() { return first(); }
Node<E> nextNode(Node<E> p) { return succ(p); }
}
/** Descending iterator */
private class DescendingItr extends AbstractItr {
Node<E> startNode() { return last(); }
Node<E> nextNode(Node<E> p) { return pred(p); }
}
/**
* Saves the state to a stream (that is, serializes it).
*
* @serialData All of the elements (each an {@code E}) in
* the proper order, followed by a null
* @param s the stream
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
// Write out any hidden stuff
s.defaultWriteObject();
// Write out all elements in the proper order.
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null)
s.writeObject(item);
}
// Use trailing null as sentinel
s.writeObject(null);
}
/**
* Reconstitutes the instance from a stream (that is, deserializes it).
* @param s the stream
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
// Read in elements until trailing null sentinel found
Node<E> h = null, t = null;
Object item;
while ((item = s.readObject()) != null) {
@SuppressWarnings("unchecked")
Node<E> newNode = new Node<E>((E) item);
if (h == null)
h = t = newNode;
else {
t.lazySetNext(newNode);
newNode.lazySetPrev(t);
t = newNode;
}
}
initHeadTail(h, t);
}
private boolean casHead(Node<E> cmp, Node<E> val) {
return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
}
private boolean casTail(Node<E> cmp, Node<E> val) {
return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
}
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long headOffset;
private static final long tailOffset;
static {
PREV_TERMINATOR = new Node<Object>();
PREV_TERMINATOR.next = PREV_TERMINATOR;
NEXT_TERMINATOR = new Node<Object>();
NEXT_TERMINATOR.prev = NEXT_TERMINATOR;
try {
UNSAFE = getUnsafe();
Class<?> k = ConcurrentLinkedDeque.class;
headOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("head"));
tailOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("tail"));
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException se) {
try {
return java.security.AccessController.doPrivileged
(new java.security
.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
java.lang.reflect.Field f = sun.misc
.Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
return (sun.misc.Unsafe) f.get(null);
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/jsr166y/CountedCompleter.java
|
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166y;
/**
* A resultless {@link ForkJoinTask} with a completion action
* performed when triggered and there are no remaining pending
* actions. Uses of CountedCompleter are similar to those of other
* completion based components (such as {@link
* java.nio.channels.CompletionHandler}) except that multiple
* <em>pending</em> completions may be necessary to trigger the {@link
* #onCompletion} action, not just one. Unless initialized otherwise,
* the {@link #getPendingCount pending count} starts at zero, but may
* be (atomically) changed using methods {@link #setPendingCount},
* {@link #addToPendingCount}, and {@link
* #compareAndSetPendingCount}. Upon invocation of {@link
* #tryComplete}, if the pending action count is nonzero, it is
* decremented; otherwise, the completion action is performed, and if
* this completer itself has a completer, the process is continued
* with its completer. As is the case with related synchronization
* components such as {@link Phaser} and {@link
* java.util.concurrent.Semaphore}, these methods affect only internal
* counts; they do not establish any further internal bookkeeping. In
* particular, the identities of pending tasks are not maintained. As
* illustrated below, you can create subclasses that do record some or
* all pended tasks or their results when needed.
*
* <p>A concrete CountedCompleter class must define method {@link
* #compute}, that should, in almost all use cases, invoke {@code
* tryComplete()} once before returning. The class may also optionally
* override method {@link #onCompletion} to perform an action upon
* normal completion, and method {@link #onExceptionalCompletion} to
* perform an action upon any exception.
*
* <p>A CountedCompleter that does not itself have a completer (i.e.,
* one for which {@link #getCompleter} returns {@code null}) can be
* used as a regular ForkJoinTask with this added functionality.
* However, any completer that in turn has another completer serves
* only as an internal helper for other computations, so its own task
* status (as reported in methods such as {@link ForkJoinTask#isDone})
* is arbitrary; this status changes only upon explicit invocations of
* {@link #complete}, {@link ForkJoinTask#cancel}, {@link
* ForkJoinTask#completeExceptionally} or upon exceptional completion
* of method {@code compute}. Upon any exceptional completion, the
* exception may be relayed to a task's completer (and its completer,
* and so on), if one exists and it has not otherwise already
* completed.
*
* <p><b>Sample Usages.</b>
*
* <p><b>Parallel recursive decomposition.</b> CountedCompleters may
* be arranged in trees similar to those often used with {@link
* RecursiveAction}s, although the constructions involved in setting
* them up typically vary. Even though they entail a bit more
* bookkeeping, CountedCompleters may be better choices when applying
* a possibly time-consuming operation (that cannot be further
* subdivided) to each element of an array or collection; especially
* when the operation takes a significantly different amount of time
* to complete for some elements than others, either because of
* intrinsic variation (for example IO) or auxiliary effects such as
* garbage collection. Because CountedCompleters provide their own
* continuations, other threads need not block waiting to perform
* them.
*
* <p> For example, here is an initial version of a class that uses
* divide-by-two recursive decomposition to divide work into single
* pieces (leaf tasks). Even when work is split into individual calls,
* tree-based techniques are usually preferable to directly forking
* leaf tasks, because they reduce inter-thread communication and
* improve load balancing. In the recursive case, the second of each
* pair of subtasks to finish triggers completion of its parent
* (because no result combination is performed, the default no-op
* implementation of method {@code onCompletion} is not overridden). A
* static utility method sets up the base task and invokes it:
*
* <pre> {@code
* class MyOperation<E> { void apply(E e) { ... } }
*
* class ForEach<E> extends CountedCompleter {
*
* public static <E> void forEach(ForkJoinPool pool, E[] array, MyOperation<E> op) {
* pool.invoke(new ForEach<E>(null, array, op, 0, array.length));
* }
*
* final E[] array; final MyOperation<E> op; final int lo, hi;
* ForEach(CountedCompleter p, E[] array, MyOperation<E> op, int lo, int hi) {
* super(p);
* this.array = array; this.op = op; this.lo = lo; this.hi = hi;
* }
*
* public void compute() { // version 1
* if (hi - lo >= 2) {
* int mid = (lo + hi) >>> 1;
* setPendingCount(2); // must set pending count before fork
* new ForEach(this, array, op, mid, hi).fork(); // right child
* new ForEach(this, array, op, lo, mid).fork(); // left child
* }
* else if (hi > lo)
* op.apply(array[lo]);
* tryComplete();
* }
* } }</pre>
*
* This design can be improved by noticing that in the recursive case,
* the task has nothing to do after forking its right task, so can
* directly invoke its left task before returning. (This is an analog
* of tail recursion removal.) Also, because the task returns upon
* executing its left task (rather than falling through to invoke
* tryComplete) the pending count is set to one:
*
* <pre> {@code
* class ForEach<E> ...
* public void compute() { // version 2
* if (hi - lo >= 2) {
* int mid = (lo + hi) >>> 1;
* setPendingCount(1); // only one pending
* new ForEach(this, array, op, mid, hi).fork(); // right child
* new ForEach(this, array, op, lo, mid).compute(); // direct invoke
* }
* else {
* if (hi > lo)
* op.apply(array[lo]);
* tryComplete();
* }
* }
* }</pre>
*
* As a further improvement, notice that the left task need not even
* exist. Instead of creating a new one, we can iterate using the
* original task, and add a pending count for each fork:
*
* <pre> {@code
* class ForEach<E> ...
* public void compute() { // version 3
* int l = lo, h = hi;
* while (h - l >= 2) {
* int mid = (l + h) >>> 1;
* addToPendingCount(1);
* new ForEach(this, array, op, mid, h).fork(); // right child
* h = mid;
* }
* if (h > l)
* op.apply(array[l]);
* tryComplete();
* }
* }</pre>
*
* Additional improvements of such classes might entail precomputing
* pending counts so that they can be established in constructors,
* specializing classes for leaf steps, subdividing by say, four,
* instead of two per iteration, and using an adaptive threshold
* instead of always subdividing down to single elements.
*
* <p><b>Recording subtasks.</b> CountedCompleter tasks that combine
* results of multiple subtasks usually need to access these results
* in method {@link #onCompletion}. As illustrated in the following
* class (that performs a simplified form of map-reduce where mappings
* and reductions are all of type {@code E}), one way to do this in
* divide and conquer designs is to have each subtask record its
* sibling, so that it can be accessed in method {@code onCompletion}.
* For clarity, this class uses explicit left and right subtasks, but
* variants of other streamlinings seen in the above example may also
* apply.
*
* <pre> {@code
* class MyMapper<E> { E apply(E v) { ... } }
* class MyReducer<E> { E apply(E x, E y) { ... } }
* class MapReducer<E> extends CountedCompleter {
* final E[] array; final MyMapper<E> mapper;
* final MyReducer<E> reducer; final int lo, hi;
* MapReducer sibling;
* E result;
* MapReducer(CountedCompleter p, E[] array, MyMapper<E> mapper,
* MyReducer<E> reducer, int lo, int hi) {
* super(p);
* this.array = array; this.mapper = mapper;
* this.reducer = reducer; this.lo = lo; this.hi = hi;
* }
* public void compute() {
* if (hi - lo >= 2) {
* int mid = (lo + hi) >>> 1;
* MapReducer<E> left = new MapReducer(this, array, mapper, reducer, lo, mid);
* MapReducer<E> right = new MapReducer(this, array, mapper, reducer, mid, hi);
* left.sibling = right;
* right.sibling = left;
* setPendingCount(1); // only right is pending
* right.fork();
* left.compute(); // directly execute left
* }
* else {
* if (hi > lo)
* result = mapper.apply(array[lo]);
* tryComplete();
* }
* }
* public void onCompletion(CountedCompleter caller) {
* if (caller != this) {
* MapReducer<E> child = (MapReducer<E>)caller;
* MapReducer<E> sib = child.sibling;
* if (sib == null || sib.result == null)
* result = child.result;
* else
* result = reducer.apply(child.result, sib.result);
* }
* }
*
* public static <E> E mapReduce(ForkJoinPool pool, E[] array,
* MyMapper<E> mapper, MyReducer<E> reducer) {
* MapReducer<E> mr = new MapReducer<E>(null, array, mapper,
* reducer, 0, array.length);
* pool.invoke(mr);
* return mr.result;
* }
* } }</pre>
*
* <p><b>Triggers.</b> Some CountedCompleters are themselves never
* forked, but instead serve as bits of plumbing in other designs;
* including those in which the completion of one or more async tasks
* triggers another async task. For example:
*
* <pre> {@code
* class HeaderBuilder extends CountedCompleter { ... }
* class BodyBuilder extends CountedCompleter { ... }
* class PacketSender extends CountedCompleter {
* PacketSender(...) { super(null, 1); ... } // trigger on second completion
* public void compute() { } // never called
* public void onCompletion(CountedCompleter caller) { sendPacket(); }
* }
* // sample use:
* PacketSender p = new PacketSender();
* new HeaderBuilder(p, ...).fork();
* new BodyBuilder(p, ...).fork();
* }</pre>
*
* @since 1.8
* @author Doug Lea
*/
public abstract class CountedCompleter extends ForkJoinTask<Void> {
private static final long serialVersionUID = 5232453752276485070L;
/** This task's completer, or null if none */
/*final*/ CountedCompleter completer;
/** The number of pending tasks until completion */
volatile int pending;
/**
* Creates a new CountedCompleter with the given completer
* and initial pending count.
*
* @param completer this task's completer, or {@code null} if none
* @param initialPendingCount the initial pending count
*/
protected CountedCompleter(CountedCompleter completer,
int initialPendingCount) {
this.completer = completer;
this.pending = initialPendingCount;
}
/**
* Creates a new CountedCompleter with the given completer
* and an initial pending count of zero.
*
* @param completer this task's completer, or {@code null} if none
*/
protected CountedCompleter(CountedCompleter completer) {
this.completer = completer;
}
/**
* Creates a new CountedCompleter with no completer
* and an initial pending count of zero.
*/
protected CountedCompleter() {
this.completer = null;
}
/**
* The main computation performed by this task.
*/
public abstract void compute();
/**
* Performs an action when method {@link #tryComplete} is invoked
* and the pending count is zero, or when the unconditional
* method {@link #complete} is invoked. By default, this method
* does nothing.
*
* @param caller the task invoking this method (which may
* be this task itself).
*/
public void onCompletion(CountedCompleter caller) {
}
/**
* Performs an action when method {@link #completeExceptionally}
* is invoked or method {@link #compute} throws an exception, and
* this task has not otherwise already completed normally. On
* entry to this method, this task {@link
* ForkJoinTask#isCompletedAbnormally}. The return value of this
* method controls further propagation: If {@code true} and this
* task has a completer, then this completer is also completed
* exceptionally. The default implementation of this method does
* nothing except return {@code true}.
*
* @param ex the exception
* @param caller the task invoking this method (which may
* be this task itself).
* @return true if this exception should be propagated to this
* task's completer, if one exists.
*/
public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) {
return true;
}
/**
* Returns the completer established in this task's constructor,
* or {@code null} if none.
*
* @return the completer
*/
public final CountedCompleter getCompleter() {
return completer;
}
// Cliff Click's Horrible Hack
// I must 'clone' or 'newInstance' these things... so to avoid a
// reflective Constructor call to set the parent I made the 'completer'
// field non-final. The completer is set immediately after clone/newInstance,
// and a following volatile write of the pending field makes the change
// visible to other threads. Example: old.clone().setCompleter(completer)
public final void setCompleter( CountedCompleter x ) { completer = x; }
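// Illustrative sketch of the pattern described above (Foo is a hypothetical
// CountedCompleter subclass that supports clone()):
//
//   Foo copy = (Foo) old.clone();
//   copy.setCompleter(parent);   // legal only because 'completer' is non-final
//   copy.setPendingCount(n);     // volatile write publishes the new completer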
/**
* Returns the current pending count.
*
* @return the current pending count
*/
public final int getPendingCount() {
return pending;
}
/**
* Sets the pending count to the given value.
*
* @param count the count
*/
public final void setPendingCount(int count) {
pending = count;
}
/**
* Adds (atomically) the given value to the pending count.
*
* @param delta the value to add
*/
public final void addToPendingCount(int delta) {
int c; // note: can replace with intrinsic in jdk8
do {} while (!U.compareAndSwapInt(this, PENDING, c = pending, c+delta));
}
/**
* Sets (atomically) the pending count to the given count only if
* it currently holds the given expected value.
*
* @param expected the expected value
* @param count the new value
* @return {@code true} if successful
*/
public final boolean compareAndSetPendingCount(int expected, int count) {
return U.compareAndSwapInt(this, PENDING, expected, count);
}
/**
* If the pending count is nonzero, decrements the count;
* otherwise invokes {@link #onCompletion} and then similarly
* tries to complete this task's completer, if one exists,
* else marks this task as complete.
*/
public final void tryComplete() {
CountedCompleter a = this, s = a;
for (int c;;) {
if ((c = a.pending) == 0) {
a.onCompletion(s);
if ((a = (s = a).completer) == null) {
s.quietlyComplete();
return;
}
}
else if (U.compareAndSwapInt(a, PENDING, c, c - 1))
return;
}
}
/**
* Regardless of pending count, invokes {@link #onCompletion},
* marks this task as complete with a {@code null} return value,
* and further triggers {@link #tryComplete} on this task's
* completer, if one exists. This method may be useful when
* forcing completion as soon as any one (versus all) of several
* subtask results are obtained.
*
* @param mustBeNull the {@code null} completion value
*/
public void complete(Void mustBeNull) {
CountedCompleter p;
onCompletion(this);
quietlyComplete();
if ((p = completer) != null)
p.tryComplete();
}
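/*
* Illustrative sketch of the "any one of several" use mentioned above:
* a hypothetical child task that finds a usable result can force its
* parent to complete without waiting for its siblings.
*
*   // inside a hypothetical child's compute():
*   if (haveResult())                  // hypothetical test for a usable result
*       getCompleter().complete(null); // parent completes immediately
*   else
*       tryComplete();                 // otherwise count down as usual
*/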
/**
* Support for FJT exception propagation
*/
void internalPropagateException(Throwable ex) {
CountedCompleter a = this, s = a;
while (a.onExceptionalCompletion(ex, s) &&
(a = (s = a).completer) != null && a.status >= 0)
a.recordExceptionalCompletion(ex);
}
/**
* Implements execution conventions for CountedCompleters
*/
protected final boolean exec() {
compute();
return false;
}
/**
* Always returns {@code null}.
*
* @return {@code null} always
*/
public final Void getRawResult() { return null; }
/**
* Requires null completion value.
*/
protected final void setRawResult(Void mustBeNull) { }
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long PENDING;
static {
try {
U = getUnsafe();
PENDING = U.objectFieldOffset
(CountedCompleter.class.getDeclaredField("pending"));
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException se) {
try {
return java.security.AccessController.doPrivileged
(new java.security
.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
java.lang.reflect.Field f = sun.misc
.Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
return (sun.misc.Unsafe) f.get(null);
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/jsr166y/ForkJoinPool.java
|
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166y;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.AbstractExecutorService;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RunnableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.AbstractQueuedSynchronizer;
import java.util.concurrent.locks.Condition;
/**
* An {@link ExecutorService} for running {@link ForkJoinTask}s.
* A {@code ForkJoinPool} provides the entry point for submissions
* from non-{@code ForkJoinTask} clients, as well as management and
* monitoring operations.
*
* <p>A {@code ForkJoinPool} differs from other kinds of {@link
* ExecutorService} mainly by virtue of employing
* <em>work-stealing</em>: all threads in the pool attempt to find and
* execute tasks submitted to the pool and/or created by other active
* tasks (eventually blocking waiting for work if none exist). This
* enables efficient processing when most tasks spawn other subtasks
* (as do most {@code ForkJoinTask}s), as well as when many small
* tasks are submitted to the pool from external clients. Especially
* when setting <em>asyncMode</em> to true in constructors, {@code
* ForkJoinPool}s may also be appropriate for use with event-style
* tasks that are never joined.
*
* <p>A {@code ForkJoinPool} is constructed with a given target
* parallelism level; by default, equal to the number of available
* processors. The pool attempts to maintain enough active (or
* available) threads by dynamically adding, suspending, or resuming
* internal worker threads, even if some tasks are stalled waiting to
* join others. However, no such adjustments are guaranteed in the
* face of blocked IO or other unmanaged synchronization. The nested
* {@link ManagedBlocker} interface enables extension of the kinds of
* synchronization accommodated.
*
* <p>In addition to execution and lifecycle control methods, this
* class provides status check methods (for example
* {@link #getStealCount}) that are intended to aid in developing,
* tuning, and monitoring fork/join applications. Also, method
* {@link #toString} returns indications of pool state in a
* convenient form for informal monitoring.
*
* <p> As is the case with other ExecutorServices, there are three
* main task execution methods summarized in the following table.
* These are designed to be used primarily by clients not already
* engaged in fork/join computations in the current pool. The main
* forms of these methods accept instances of {@code ForkJoinTask},
* but overloaded forms also allow mixed execution of plain {@code
* Runnable}- or {@code Callable}- based activities as well. However,
* tasks that are already executing in a pool should normally instead
* use the within-computation forms listed in the table unless using
* async event-style tasks that are not usually joined, in which case
* there is little difference among choice of methods.
*
* <table BORDER CELLPADDING=3 CELLSPACING=1>
* <tr>
* <td></td>
* <td ALIGN=CENTER> <b>Call from non-fork/join clients</b></td>
* <td ALIGN=CENTER> <b>Call from within fork/join computations</b></td>
* </tr>
* <tr>
* <td> <b>Arrange async execution</td>
* <td> {@link #execute(ForkJoinTask)}</td>
* <td> {@link ForkJoinTask#fork}</td>
* </tr>
* <tr>
* <td> <b>Await and obtain result</td>
* <td> {@link #invoke(ForkJoinTask)}</td>
* <td> {@link ForkJoinTask#invoke}</td>
* </tr>
* <tr>
* <td> <b>Arrange exec and obtain Future</td>
* <td> {@link #submit(ForkJoinTask)}</td>
* <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
* </tr>
* </table>
*
* <p><b>Sample Usage.</b> Normally a single {@code ForkJoinPool} is
* used for all parallel task execution in a program or subsystem.
* Otherwise, use would not usually outweigh the construction and
* bookkeeping overhead of creating a large set of threads. For
* example, a common pool could be used for the {@code SortTasks}
* illustrated in {@link RecursiveAction}. Because {@code
* ForkJoinPool} uses threads in {@linkplain java.lang.Thread#isDaemon
* daemon} mode, there is typically no need to explicitly {@link
* #shutdown} such a pool upon program exit.
*
* <pre> {@code
* static final ForkJoinPool mainPool = new ForkJoinPool();
* ...
* public void sort(long[] array) {
* mainPool.invoke(new SortTask(array, 0, array.length));
* }}</pre>
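*
* <p>An async-mode pool for event-style tasks (as mentioned above) can be
* constructed with the four-argument constructor; the parallelism value
* below is only illustrative:
*
* <pre> {@code
* ForkJoinPool asyncPool = new ForkJoinPool(
*     Runtime.getRuntime().availableProcessors(),
*     ForkJoinPool.defaultForkJoinWorkerThreadFactory,
*     null,    // no UncaughtExceptionHandler
*     true);   // asyncMode: FIFO local task processing
* }</pre>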
*
* <p><b>Implementation notes</b>: This implementation restricts the
* maximum number of running threads to 32767. Attempts to create
* pools with greater than the maximum number result in
* {@code IllegalArgumentException}.
*
* <p>This implementation rejects submitted tasks (that is, by throwing
* {@link RejectedExecutionException}) only when the pool is shut down
* or internal resources have been exhausted.
*
* @since 1.7
* @author Doug Lea
*/
public class ForkJoinPool extends AbstractExecutorService {
/*
* Implementation Overview
*
* This class and its nested classes provide the main
* functionality and control for a set of worker threads:
* Submissions from non-FJ threads enter into submission queues.
* Workers take these tasks and typically split them into subtasks
* that may be stolen by other workers. Preference rules give
* first priority to processing tasks from their own queues (LIFO
* or FIFO, depending on mode), then to randomized FIFO steals of
* tasks in other queues.
*
* WorkQueues
* ==========
*
* Most operations occur within work-stealing queues (in nested
* class WorkQueue). These are special forms of Deques that
* support only three of the four possible end-operations -- push,
* pop, and poll (aka steal), under the further constraints that
* push and pop are called only from the owning thread (or, as
* extended here, under a lock), while poll may be called from
* other threads. (If you are unfamiliar with them, you probably
* want to read Herlihy and Shavit's book "The Art of
* Multiprocessor programming", chapter 16 describing these in
* more detail before proceeding.) The main work-stealing queue
* design is roughly similar to those in the papers "Dynamic
* Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
* (http://research.sun.com/scalable/pubs/index.html) and
* "Idempotent work stealing" by Michael, Saraswat, and Vechev,
* PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
* The main differences ultimately stem from GC requirements that
* we null out taken slots as soon as we can, to maintain as small
* a footprint as possible even in programs generating huge
* numbers of tasks. To accomplish this, we shift the CAS
* arbitrating pop vs poll (steal) from being on the indices
* ("base" and "top") to the slots themselves. So, both a
* successful pop and poll mainly entail a CAS of a slot from
* non-null to null. Because we rely on CASes of references, we
* do not need tag bits on base or top. They are simple ints as
* used in any circular array-based queue (see for example
* ArrayDeque). Updates to the indices must still be ordered in a
* way that guarantees that top == base means the queue is empty,
* but otherwise may err on the side of possibly making the queue
* appear nonempty when a push, pop, or poll have not fully
* committed. Note that this means that the poll operation,
* considered individually, is not wait-free. One thief cannot
* successfully continue until another in-progress one (or, if
* previously empty, a push) completes. However, in the
* aggregate, we ensure at least probabilistic non-blockingness.
* If an attempted steal fails, a thief always chooses a different
* random victim target to try next. So, in order for one thief to
* progress, it suffices for any in-progress poll or new push on
* any empty queue to complete. (This is why we normally use
* method pollAt and its variants that try once at the apparent
* base index, else consider alternative actions, rather than
* method poll.)
*
* This approach also enables support of a user mode in which local
* task processing is in FIFO, not LIFO order, simply by using
* poll rather than pop. This can be useful in message-passing
* frameworks in which tasks are never joined. However, neither
* mode considers affinities, loads, cache localities, etc., so it
* rarely provides the best possible performance on a given
* machine, but portably provides good throughput by averaging over
* these factors. (Further, even if we did try to use such
* information, we do not usually have a basis for exploiting it.
* For example, some sets of tasks profit from cache affinities,
* but others are harmed by cache pollution effects.)
*
* WorkQueues are also used in a similar way for tasks submitted
* to the pool. We cannot mix these tasks in the same queues used
* for work-stealing (this would contaminate lifo/fifo
* processing). Instead, we loosely associate submission queues
* with submitting threads, using a form of hashing. The
* ThreadLocal Submitter class contains a value initially used as
* a hash code for choosing existing queues, but may be randomly
* repositioned upon contention with other submitters. In
* essence, submitters act like workers except that they never
* take tasks, and they are multiplexed on to a finite number of
* shared work queues. However, classes are set up so that future
* extensions could allow submitters to optionally help perform
* tasks as well. Insertion of tasks in shared mode requires a
* lock (mainly to protect in the case of resizing) but we use
* only a simple spinlock (using bits in field runState), because
* submitters encountering a busy queue move on to try or create
* other queues -- they block only when creating and registering
* new queues.
*
* Management
* ==========
*
* The main throughput advantages of work-stealing stem from
* decentralized control -- workers mostly take tasks from
* themselves or each other. We cannot negate this in the
* implementation of other management responsibilities. The main
* tactic for avoiding bottlenecks is packing nearly all
* essentially atomic control state into two volatile variables
* that are by far most often read (not written) as status and
* consistency checks.
*
* Field "ctl" contains 64 bits holding all the information needed
* to atomically decide to add, inactivate, enqueue (on an event
* queue), dequeue, and/or re-activate workers. To enable this
* packing, we restrict maximum parallelism to (1<<15)-1 (which is
* far in excess of normal operating range) to allow ids, counts,
* and their negations (used for thresholding) to fit into 16bit
* fields.
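     *
     * For example, using the shift and mask constants declared further
     * below, the packed fields are typically read as:
     *
     *   int u  = (int)(ctl >>> 32);                        // upper half: AC and TC
     *   int e  = (int)ctl;                                 // lower half: wait stack; < 0 => terminating
     *   int ac = (int)(ctl >> AC_SHIFT) + parallelism;     // active worker count
     *   int tc = (short)(ctl >>> TC_SHIFT) + parallelism;  // total worker count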
*
* Field "runState" contains 32 bits needed to register and
* deregister WorkQueues, as well as to enable shutdown. It is
* only modified under a lock (normally briefly held, but
* occasionally protecting allocations and resizings) but even
* when locked remains available to check consistency.
*
* Recording WorkQueues. WorkQueues are recorded in the
* "workQueues" array that is created upon pool construction and
* expanded if necessary. Updates to the array while recording
* new workers and unrecording terminated ones are protected from
* each other by a lock but the array is otherwise concurrently
* readable, and accessed directly. To simplify index-based
* operations, the array size is always a power of two, and all
* readers must tolerate null slots. Shared (submission) queues
* are at even indices, worker queues at odd indices. Grouping
* them together in this way simplifies and speeds up task
* scanning.
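     *
     * So, for example, a scan restricted to worker queues simply steps
     * by two from an odd start (as in tryPollForAndExec below), while
     * submission routing masks indices with SQMASK to stay on even
     * slots:
     *
     *   for (int j = 1; ws != null && j < ws.length; j += 2) {
     *       WorkQueue q = ws[j];              // odd slot: worker-owned queue
     *       if (q != null && !q.isEmpty())
     *           return q;                     // e.g. a candidate to steal from
     *   }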
*
* All worker thread creation is on-demand, triggered by task
* submissions, replacement of terminated workers, and/or
* compensation for blocked workers. However, all other support
* code is set up to work with other policies. To ensure that we
* do not hold on to worker references that would prevent GC, ALL
* accesses to workQueues are via indices into the workQueues
* array (which is one source of some of the messy code
* constructions here). In essence, the workQueues array serves as
* a weak reference mechanism. Thus for example the wait queue
* field of ctl stores indices, not references. Access to the
* workQueues in associated methods (for example signalWork) must
* both index-check and null-check the IDs. All such accesses
* ignore bad IDs by returning out early from what they are doing,
* since this can only be associated with termination, in which
* case it is OK to give up. All uses of the workQueues array
* also check that it is non-null (even if previously
* non-null). This allows nulling during termination, which is
* currently not necessary, but remains an option for
* resource-revocation-based shutdown schemes. It also helps
* reduce JIT issuance of uncommon-trap code, which tends to
* unnecessarily complicate control flow in some methods.
*
* Event Queuing. Unlike HPC work-stealing frameworks, we cannot
* let workers spin indefinitely scanning for tasks when none can
* be found immediately, and we cannot start/resume workers unless
* there appear to be tasks available. On the other hand, we must
* quickly prod them into action when new tasks are submitted or
* generated. In many usages, ramp-up time to activate workers is
* the main limiting factor in overall performance (this is
* compounded at program start-up by JIT compilation and
* allocation). So we try to streamline this as much as possible.
* We park/unpark workers after placing in an event wait queue
* when they cannot find work. This "queue" is actually a simple
* Treiber stack, headed by the "id" field of ctl, plus a 15bit
* counter value (that reflects the number of times a worker has
* been inactivated) to avoid ABA effects (we need only as many
* version numbers as worker threads). Successors are held in
* field WorkQueue.nextWait. Queuing deals with several intrinsic
* races, mainly that a task-producing thread can miss seeing (and
* signalling) another thread that gave up looking for work but
* has not yet entered the wait queue. We solve this by requiring
* a full sweep of all workers (via repeated calls to method
* scan()) both before and after a newly waiting worker is added
* to the wait queue. During a rescan, the worker might release
* some other queued worker rather than itself, which has the same
* net effect. Because enqueued workers may actually be rescanning
* rather than waiting, we set and clear the "parker" field of
* WorkQueues to reduce unnecessary calls to unpark. (This
* requires a secondary recheck to avoid missed signals.) Note
* the unusual conventions about Thread.interrupts surrounding
* parking and other blocking: Because interrupts are used solely
* to alert threads to check termination, which is checked anyway
* upon blocking, we clear status (using Thread.interrupted)
* before any call to park, so that park does not immediately
* return due to status being set via some other unrelated call to
* interrupt in user code.
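     *
     * In outline (mirroring scan() and signalWork() below), pushing a
     * worker w onto this Treiber stack and later releasing the top
     * waiter v look like:
     *
     *   // enqueue: link w to the old top (e = (int)ctl) and CAS in a new ctl
     *   w.nextWait = e;
     *   w.eventCount = ec | INT_SIGN;            // mark inactive
     *   long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK));
     *   // ... CAS ctl from c to nc, undoing the marks on failure
     *
     *   // release: pop the top waiter and wake it
     *   long rc = ((long)(v.nextWait & E_MASK)) | ((long)(u + UAC_UNIT) << 32);
     *   // ... CAS ctl from c to rc; then
     *   v.eventCount = (e + E_SEQ) & E_MASK;     // bump version, reactivate
     *   if (v.parker != null) U.unpark(v.parker);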
*
* Signalling. We create or wake up workers only when there
* appears to be at least one task they might be able to find and
* execute. When a submission is added or another worker adds a
* task to a queue that previously had fewer than two tasks, they
* signal waiting workers (or trigger creation of new ones if
* fewer than the given parallelism level -- see signalWork).
* These primary signals are buttressed by signals during rescans;
* together these cover the signals needed in cases when more
* tasks are pushed but untaken, and improve performance compared
* to having one thread wake up all workers.
*
* Trimming workers. To release resources after periods of lack of
* use, a worker starting to wait when the pool is quiescent will
* time out and terminate if the pool has remained quiescent for
* SHRINK_RATE nanosecs. This will slowly propagate, eventually
* terminating all workers after long periods of non-use.
*
* Shutdown and Termination. A call to shutdownNow atomically sets
* a runState bit and then (non-atomically) sets each worker's
* runState status, cancels all unprocessed tasks, and wakes up
* all waiting workers. Detecting whether termination should
* commence after a non-abrupt shutdown() call requires more work
* and bookkeeping. We need consensus about quiescence (i.e., that
* there is no more work). The active count provides a primary
* indication but non-abrupt shutdown still requires a rechecking
* scan for any workers that are inactive but not queued.
*
* Joining Tasks
* =============
*
* Any of several actions may be taken when one worker is waiting
* to join a task stolen (or always held) by another. Because we
* are multiplexing many tasks on to a pool of workers, we can't
* just let them block (as in Thread.join). We also cannot just
* reassign the joiner's run-time stack with another and replace
* it later, which would be a form of "continuation", that even if
* possible is not necessarily a good idea since we sometimes need
* both an unblocked task and its continuation to progress.
* Instead we combine two tactics:
*
* Helping: Arranging for the joiner to execute some task that it
* would be running if the steal had not occurred.
*
* Compensating: Unless there are already enough live threads,
* method tryCompensate() may create or re-activate a spare
* thread to compensate for blocked joiners until they unblock.
*
* A third form (implemented in tryRemoveAndExec and
* tryPollForAndExec) amounts to helping a hypothetical
* compensator: If we can readily tell that a possible action of a
* compensator is to steal and execute the task being joined, the
* joining thread can do so directly, without the need for a
* compensation thread (although at the expense of larger run-time
* stacks, but the tradeoff is typically worthwhile).
*
* The ManagedBlocker extension API can't use helping so relies
* only on compensation in method awaitBlocker.
*
* The algorithm in tryHelpStealer entails a form of "linear"
* helping: Each worker records (in field currentSteal) the most
* recent task it stole from some other worker. Plus, it records
* (in field currentJoin) the task it is currently actively
* joining. Method tryHelpStealer uses these markers to try to
* find a worker to help (i.e., steal back a task from and execute
* it) that could hasten completion of the actively joined task.
* In essence, the joiner executes a task that would be on its own
* local deque had the to-be-joined task not been stolen. This may
* be seen as a conservative variant of the approach in Wagner &
* Calder "Leapfrogging: a portable technique for implementing
* efficient futures" SIGPLAN Notices, 1993
* (http://portal.acm.org/citation.cfm?id=155354). It differs in
* that: (1) We only maintain dependency links across workers upon
* steals, rather than use per-task bookkeeping. This sometimes
* requires a linear scan of workQueues array to locate stealers,
* but often doesn't because stealers leave hints (that may become
* stale/wrong) of where to locate them. A stealHint is only a
* hint because a worker might have had multiple steals and the
* hint records only one of them (usually the most current).
* Hinting isolates cost to when it is needed, rather than adding
* to per-task overhead. (2) It is "shallow", ignoring nesting
* and potentially cyclic mutual steals. (3) It is intentionally
* racy: field currentJoin is updated only while actively joining,
* which means that we miss links in the chain during long-lived
* tasks, GC stalls etc (which is OK since blocking in such cases
* is usually a good idea). (4) We bound the number of attempts
* to find work (see MAX_HELP) and fall back to suspending the
* worker and if necessary replacing it with another.
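     *
     * A minimal sketch of the chase (editorial; the real loop in
     * tryHelpStealer below adds staleness checks, the stealHint
     * shortcut, and the MAX_HELP bound, and findStealerOf is a
     * hypothetical stand-in for its scan of workQueues):
     *
     *   ForkJoinTask<?> subtask = task;
     *   while (subtask != null && subtask.status >= 0) {
     *       WorkQueue v = findStealerOf(subtask); // v.currentSteal == subtask
     *       if (v == null)
     *           break;                            // stealer unknown or finished
     *       ForkJoinTask<?> t = v.pollAt(v.base);
     *       if (t != null)
     *           joiner.runSubtask(t);             // help drain v's queue
     *       else
     *           subtask = v.currentJoin;          // descend to v's own join
     *   }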
*
* It is impossible to keep exactly the target parallelism number
* of threads running at any given time. Determining the
* existence of conservatively safe helping targets, the
* availability of already-created spares, and the apparent need
* to create new spares are all racy, so we rely on multiple
* retries of each. Compensation in the apparent absence of
* helping opportunities is challenging to control on JVMs, where
* GC and other activities can stall progress of tasks that in
* turn stall out many other dependent tasks, without us being
* able to determine whether they will ever require compensation.
* Even though work-stealing otherwise encounters little
* degradation in the presence of more threads than cores,
* aggressively adding new threads in such cases entails risk of
* unwanted positive feedback control loops in which more threads
* cause more dependent stalls (as well as delayed progress of
* unblocked threads to the point that we know they are available)
* leading to more situations requiring more threads, and so
* on. This aspect of control can be seen as an (analytically
* intractable) game with an opponent that may choose the worst
* (for us) active thread to stall at any time. We take several
* precautions to bound losses (and thus bound gains), mainly in
* methods tryCompensate and awaitJoin: (1) We only try
* compensation after attempting enough helping steps (measured
* via counting and timing) that we have already consumed the
* estimated cost of creating and activating a new thread. (2) We
* allow up to 50% of threads to be blocked before initially
* adding any others, and unless completely saturated, check that
* some work is available for a new worker before adding. Also, we
* create up to only 50% more threads until entering a mode that
* only adds a thread if all others are possibly blocked. All
* together, this means that we might be half as fast to react,
* and create half as many threads as possible in the ideal case,
* but present vastly fewer anomalies in all other cases compared
* to both more aggressive and more conservative alternatives.
*
* Style notes: There is a lot of representation-level coupling
* among classes ForkJoinPool, ForkJoinWorkerThread, and
* ForkJoinTask. The fields of WorkQueue maintain data structures
* managed by ForkJoinPool, so are directly accessed. There is
* little point trying to reduce this, since any associated future
* changes in representations will need to be accompanied by
* algorithmic changes anyway. Several methods intrinsically
* sprawl because they must accumulate sets of consistent reads of
* volatiles held in local variables. Methods signalWork() and
* scan() are the main bottlenecks, so are especially heavily
* micro-optimized/mangled. There are lots of inline assignments
* (of form "while ((local = field) != 0)") which are usually the
* simplest way to ensure the required read orderings (which are
* sometimes critical). This leads to a "C"-like style of listing
* declarations of these locals at the heads of methods or blocks.
* There are several occurrences of the unusual "do {} while
* (!cas...)" which is the simplest way to force an update of a
* CAS'ed variable. There are also other coding oddities that help
* some methods perform reasonably even when interpreted (not
* compiled).
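     *
     * For example, the unconditional-update idiom appears below in
     * incrementActiveCount as:
     *
     *   long c;
     *   do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));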
*
* The order of declarations in this file is:
* (1) Static utility functions
* (2) Nested (static) classes
* (3) Static fields
* (4) Fields, along with constants used when unpacking some of them
* (5) Internal control methods
* (6) Callbacks and other support for ForkJoinTask methods
* (7) Exported methods
* (8) Static block initializing statics in minimally dependent order
*/
// Static utilities
/**
* If there is a security manager, makes sure caller has
* permission to modify threads.
*/
private static void checkPermission() {
SecurityManager security = System.getSecurityManager();
if (security != null)
security.checkPermission(modifyThreadPermission);
}
// Nested classes
/**
* Factory for creating new {@link ForkJoinWorkerThread}s.
* A {@code ForkJoinWorkerThreadFactory} must be defined and used
* for {@code ForkJoinWorkerThread} subclasses that extend base
* functionality or initialize threads with different contexts.
*/
public static interface ForkJoinWorkerThreadFactory {
/**
* Returns a new worker thread operating in the given pool.
*
* @param pool the pool this thread works in
* @throws NullPointerException if the pool is null
*/
public ForkJoinWorkerThread newThread(ForkJoinPool pool);
}
/**
* Default ForkJoinWorkerThreadFactory implementation; creates a
* new ForkJoinWorkerThread.
*/
static class DefaultForkJoinWorkerThreadFactory
implements ForkJoinWorkerThreadFactory {
public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
return new ForkJoinWorkerThread(pool);
}
}
/**
* A simple non-reentrant lock used for exclusion when managing
* queues and workers. We use a custom lock so that we can readily
* probe lock state in constructions that check among alternative
* actions. The lock is normally only very briefly held, and
* sometimes treated as a spinlock, but other usages block to
* reduce overall contention in those cases where locked code
* bodies perform allocation/resizing.
*/
static final class Mutex extends AbstractQueuedSynchronizer {
public final boolean tryAcquire(int ignore) {
return compareAndSetState(0, 1);
}
public final boolean tryRelease(int ignore) {
setState(0);
return true;
}
public final void lock() { acquire(0); }
public final void unlock() { release(0); }
public final boolean isHeldExclusively() { return getState() == 1; }
public final Condition newCondition() { return new ConditionObject(); }
}
/**
* Class for artificial tasks that are used to replace the target
* of local joins if they are removed from an interior queue slot
* in WorkQueue.tryRemoveAndExec. We don't need the proxy to
* actually do anything beyond having a unique identity.
*/
static final class EmptyTask extends ForkJoinTask<Void> {
EmptyTask() { status = ForkJoinTask.NORMAL; } // force done
public final Void getRawResult() { return null; }
public final void setRawResult(Void x) {}
public final boolean exec() { return true; }
}
/**
* Queues supporting work-stealing as well as external task
* submission. See above for main rationale and algorithms.
* Implementation relies heavily on "Unsafe" intrinsics
* and selective use of "volatile":
*
* Field "base" is the index (mod array.length) of the least valid
* queue slot, which is always the next position to steal (poll)
* from if nonempty. Reads and writes require volatile orderings
* but not CAS, because updates are only performed after slot
* CASes.
*
* Field "top" is the index (mod array.length) of the next queue
* slot to push to or pop from. It is written only by owner thread
* for push, or under lock for trySharedPush, and accessed by
* other threads only after reading (volatile) base. Both top and
* base are allowed to wrap around on overflow, but (top - base)
* (or more commonly -(base - top) to force volatile read of base
* before top) still estimates size.
*
* The array slots are read and written using the emulation of
* volatiles/atomics provided by Unsafe. Insertions must in
* general use putOrderedObject as a form of releasing store to
* ensure that all writes to the task object are ordered before
* its publication in the queue. (Although we can avoid one case
* of this when locked in trySharedPush.) All removals entail a
* CAS to null. The array is always a power of two. To ensure
* safety of Unsafe array operations, all accesses perform
* explicit null checks and implicit bounds checks via
* power-of-two masking.
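     *
     * The slot addressing used throughout is (ABASE and ASHIFT are
     * computed in the static initializer below):
     *
     *   int j = (((a.length - 1) & i) << ASHIFT) + ABASE;        // offset of slot i
     *   ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
     *   // removal:  U.compareAndSwapObject(a, j, t, null)
     *   // insertion (owner push): U.putOrderedObject(a, j, task)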
*
* In addition to basic queuing support, this class contains
* fields described elsewhere to control execution. It turns out
* to work better memory-layout-wise to include them in this
* class rather than a separate class.
*
* Performance on most platforms is very sensitive to placement of
* instances of both WorkQueues and their arrays -- we absolutely
* do not want multiple WorkQueue instances or multiple queue
* arrays sharing cache lines. (It would be best for queue objects
* and their arrays to share, but there is nothing available to
* help arrange that). Unfortunately, because they are recorded
* in a common array, WorkQueue instances are often moved to be
* adjacent by garbage collectors. To reduce impact, we use field
* padding that works OK on common platforms; this effectively
* trades off slightly slower average field access for the sake of
* avoiding really bad worst-case access. (Until better JVM
* support is in place, this padding is dependent on transient
* properties of JVM field layout rules.) We also take care in
* allocating, sizing and resizing the array. Non-shared queue
* arrays are initialized (via method growArray) by workers before
* use. Others are allocated on first use.
*/
static final class WorkQueue {
/**
* Capacity of work-stealing queue array upon initialization.
* Must be a power of two; at least 4, but should be larger to
* reduce or eliminate cacheline sharing among queues.
* Currently, it is much larger, as a partial workaround for
* the fact that JVMs often place arrays in locations that
* share GC bookkeeping (especially cardmarks) such that
* per-write accesses encounter serious memory contention.
*/
static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
/**
* Maximum size for queue arrays. Must be a power of two less
* than or equal to 1 << (31 - width of array entry) to ensure
* lack of wraparound of index calculations, but defined to a
* value a bit less than this to help users trap runaway
* programs before saturating systems.
*/
static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
volatile long totalSteals; // cumulative number of steals
int seed; // for random scanning; initialize nonzero
volatile int eventCount; // encoded inactivation count; < 0 if inactive
int nextWait; // encoded record of next event waiter
int rescans; // remaining scans until block
int nsteals; // top-level task executions since last idle
final int mode; // lifo, fifo, or shared
int poolIndex; // index of this queue in pool (or 0)
int stealHint; // index of most recent known stealer
volatile int runState; // 1: locked, -1: terminate; else 0
volatile int base; // index of next slot for poll
int top; // index of next slot for push
ForkJoinTask<?>[] array; // the elements (initially unallocated)
final ForkJoinPool pool; // the containing pool (may be null)
final ForkJoinWorkerThread owner; // owning thread or null if shared
volatile Thread parker; // == owner during call to park; else null
volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin
ForkJoinTask<?> currentSteal; // current non-local task being executed
// Heuristic padding to ameliorate unfortunate memory placements
Object p00, p01, p02, p03, p04, p05, p06, p07;
Object p08, p09, p0a, p0b, p0c, p0d, p0e;
WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode) {
this.mode = mode;
this.pool = pool;
this.owner = owner;
// Place indices in the center of array (that is not yet allocated)
base = top = INITIAL_QUEUE_CAPACITY >>> 1;
}
/**
* Returns the approximate number of tasks in the queue.
*/
final int queueSize() {
int n = base - top; // non-owner callers must read base first
return (n >= 0) ? 0 : -n; // ignore transient negative
}
/**
* Provides a more accurate estimate of whether this queue has
* any tasks than does queueSize, by checking whether a
* near-empty queue has at least one unclaimed task.
*/
final boolean isEmpty() {
ForkJoinTask<?>[] a; int m, s;
int n = base - (s = top);
return (n >= 0 ||
(n == -1 &&
((a = array) == null ||
(m = a.length - 1) < 0 ||
U.getObjectVolatile
(a, ((m & (s - 1)) << ASHIFT) + ABASE) == null)));
}
/**
* Pushes a task. Call only by owner in unshared queues.
*
* @param task the task. Caller must ensure non-null.
         * @throws RejectedExecutionException if array cannot be resized
*/
final void push(ForkJoinTask<?> task) {
ForkJoinTask<?>[] a; ForkJoinPool p;
int s = top, m, n;
if ((a = array) != null) { // ignore if queue removed
U.putOrderedObject
(a, (((m = a.length - 1) & s) << ASHIFT) + ABASE, task);
if ((n = (top = s + 1) - base) <= 2) {
if ((p = pool) != null)
p.signalWork();
}
else if (n >= m)
growArray(true);
}
}
/**
* Pushes a task if lock is free and array is either big
* enough or can be resized to be big enough.
*
* @param task the task. Caller must ensure non-null.
* @return true if submitted
*/
final boolean trySharedPush(ForkJoinTask<?> task) {
boolean submitted = false;
if (runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) {
ForkJoinTask<?>[] a = array;
int s = top;
try {
if ((a != null && a.length > s + 1 - base) ||
(a = growArray(false)) != null) { // must presize
int j = (((a.length - 1) & s) << ASHIFT) + ABASE;
U.putObject(a, (long)j, task); // don't need "ordered"
top = s + 1;
submitted = true;
}
} finally {
runState = 0; // unlock
}
}
return submitted;
}
/**
* Takes next task, if one exists, in LIFO order. Call only
* by owner in unshared queues. (We do not have a shared
* version of this method because it is never needed.)
*/
final ForkJoinTask<?> pop() {
ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m;
if ((a = array) != null && (m = a.length - 1) >= 0) {
for (int s; (s = top - 1) - base >= 0;) {
long j = ((m & s) << ASHIFT) + ABASE;
if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null)
break;
if (U.compareAndSwapObject(a, j, t, null)) {
top = s;
return t;
}
}
}
return null;
}
/**
* Takes a task in FIFO order if b is base of queue and a task
* can be claimed without contention. Specialized versions
* appear in ForkJoinPool methods scan and tryHelpStealer.
*/
final ForkJoinTask<?> pollAt(int b) {
ForkJoinTask<?> t; ForkJoinTask<?>[] a;
if ((a = array) != null) {
int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null &&
base == b &&
U.compareAndSwapObject(a, j, t, null)) {
base = b + 1;
return t;
}
}
return null;
}
/**
* Takes next task, if one exists, in FIFO order.
*/
final ForkJoinTask<?> poll() {
ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t;
while ((b = base) - top < 0 && (a = array) != null) {
int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
if (t != null) {
if (base == b &&
U.compareAndSwapObject(a, j, t, null)) {
base = b + 1;
return t;
}
}
else if (base == b) {
if (b + 1 == top)
break;
Thread.yield(); // wait for lagging update
}
}
return null;
}
/**
* Takes next task, if one exists, in order specified by mode.
*/
final ForkJoinTask<?> nextLocalTask() {
return mode == 0 ? pop() : poll();
}
/**
* Returns next task, if one exists, in order specified by mode.
*/
final ForkJoinTask<?> peek() {
ForkJoinTask<?>[] a = array; int m;
if (a == null || (m = a.length - 1) < 0)
return null;
int i = mode == 0 ? top - 1 : base;
int j = ((i & m) << ASHIFT) + ABASE;
return (ForkJoinTask<?>)U.getObjectVolatile(a, j);
}
/**
* Pops the given task only if it is at the current top.
*/
final boolean tryUnpush(ForkJoinTask<?> t) {
ForkJoinTask<?>[] a; int s;
if ((a = array) != null && (s = top) != base &&
U.compareAndSwapObject
(a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
top = s;
return true;
}
return false;
}
/**
* Polls the given task only if it is at the current base.
*/
final boolean pollFor(ForkJoinTask<?> task) {
ForkJoinTask<?>[] a; int b;
if ((b = base) - top < 0 && (a = array) != null) {
int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
if (U.getObjectVolatile(a, j) == task && base == b &&
U.compareAndSwapObject(a, j, task, null)) {
base = b + 1;
return true;
}
}
return false;
}
/**
* Initializes or doubles the capacity of array. Call either
* by owner or with lock held -- it is OK for base, but not
* top, to move while resizings are in progress.
*
* @param rejectOnFailure if true, throw exception if capacity
* exceeded (relayed ultimately to user); else return null.
*/
final ForkJoinTask<?>[] growArray(boolean rejectOnFailure) {
ForkJoinTask<?>[] oldA = array;
int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY;
if (size <= MAXIMUM_QUEUE_CAPACITY) {
int oldMask, t, b;
ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size];
if (oldA != null && (oldMask = oldA.length - 1) >= 0 &&
(t = top) - (b = base) > 0) {
int mask = size - 1;
do {
ForkJoinTask<?> x;
int oldj = ((b & oldMask) << ASHIFT) + ABASE;
int j = ((b & mask) << ASHIFT) + ABASE;
x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj);
if (x != null &&
U.compareAndSwapObject(oldA, oldj, x, null))
U.putObjectVolatile(a, j, x);
} while (++b != t);
}
return a;
}
else if (!rejectOnFailure)
return null;
else
throw new RejectedExecutionException("Queue capacity exceeded");
}
/**
* Removes and cancels all known tasks, ignoring any exceptions.
*/
final void cancelAll() {
ForkJoinTask.cancelIgnoringExceptions(currentJoin);
ForkJoinTask.cancelIgnoringExceptions(currentSteal);
for (ForkJoinTask<?> t; (t = poll()) != null; )
ForkJoinTask.cancelIgnoringExceptions(t);
}
/**
* Computes next value for random probes. Scans don't require
* a very high quality generator, but also not a crummy one.
* Marsaglia xor-shift is cheap and works well enough. Note:
* This is manually inlined in its usages in ForkJoinPool to
* avoid writes inside busy scan loops.
*/
final int nextSeed() {
int r = seed;
r ^= r << 13;
r ^= r >>> 17;
return seed = r ^= r << 5;
}
// Execution methods
/**
* Pops and runs tasks until empty.
*/
private void popAndExecAll() {
// A bit faster than repeated pop calls
ForkJoinTask<?>[] a; int m, s; long j; ForkJoinTask<?> t;
while ((a = array) != null && (m = a.length - 1) >= 0 &&
(s = top - 1) - base >= 0 &&
(t = ((ForkJoinTask<?>)
U.getObject(a, j = ((m & s) << ASHIFT) + ABASE)))
!= null) {
if (U.compareAndSwapObject(a, j, t, null)) {
top = s;
t.doExec();
}
}
}
/**
* Polls and runs tasks until empty.
*/
private void pollAndExecAll() {
for (ForkJoinTask<?> t; (t = poll()) != null;)
t.doExec();
}
/**
         * If present, removes from queue and executes the given task, or
         * any other cancelled task. Returns immediately (with a positive
         * status) on any CAS or consistency check failure so the caller
         * can retry.
*
* @return 0 if no progress can be made, else positive
* (this unusual convention simplifies use with tryHelpStealer.)
*/
final int tryRemoveAndExec(ForkJoinTask<?> task) {
int stat = 1;
boolean removed = false, empty = true;
ForkJoinTask<?>[] a; int m, s, b, n;
if ((a = array) != null && (m = a.length - 1) >= 0 &&
(n = (s = top) - (b = base)) > 0) {
for (ForkJoinTask<?> t;;) { // traverse from s to b
int j = ((--s & m) << ASHIFT) + ABASE;
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
if (t == null) // inconsistent length
break;
else if (t == task) {
if (s + 1 == top) { // pop
if (!U.compareAndSwapObject(a, j, task, null))
break;
top = s;
removed = true;
}
else if (base == b) // replace with proxy
removed = U.compareAndSwapObject(a, j, task,
new EmptyTask());
break;
}
else if (t.status >= 0)
empty = false;
else if (s + 1 == top) { // pop and throw away
if (U.compareAndSwapObject(a, j, t, null))
top = s;
break;
}
if (--n == 0) {
if (!empty && base == b)
stat = 0;
break;
}
}
}
if (removed)
task.doExec();
return stat;
}
/**
* Executes a top-level task and any local tasks remaining
* after execution.
*/
final void runTask(ForkJoinTask<?> t) {
if (t != null) {
currentSteal = t;
t.doExec();
if (top != base) { // process remaining local tasks
if (mode == 0)
popAndExecAll();
else
pollAndExecAll();
}
++nsteals;
currentSteal = null;
}
}
/**
* Executes a non-top-level (stolen) task.
*/
final void runSubtask(ForkJoinTask<?> t) {
if (t != null) {
ForkJoinTask<?> ps = currentSteal;
currentSteal = t;
t.doExec();
currentSteal = ps;
}
}
/**
* Returns true if owned and not known to be blocked.
*/
final boolean isApparentlyUnblocked() {
Thread wt; Thread.State s;
return (eventCount >= 0 &&
(wt = owner) != null &&
(s = wt.getState()) != Thread.State.BLOCKED &&
s != Thread.State.WAITING &&
s != Thread.State.TIMED_WAITING);
}
/**
         * If this queue is owned and its owner is not already interrupted,
         * tries to interrupt and/or unpark the owner, ignoring exceptions.
*/
final void interruptOwner() {
Thread wt, p;
if ((wt = owner) != null && !wt.isInterrupted()) {
try {
wt.interrupt();
} catch (SecurityException ignore) {
}
}
if ((p = parker) != null)
U.unpark(p);
}
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long RUNSTATE;
private static final int ABASE;
private static final int ASHIFT;
static {
int s;
try {
U = getUnsafe();
Class<?> k = WorkQueue.class;
Class<?> ak = ForkJoinTask[].class;
RUNSTATE = U.objectFieldOffset
(k.getDeclaredField("runState"));
ABASE = U.arrayBaseOffset(ak);
s = U.arrayIndexScale(ak);
} catch (Exception e) {
throw new Error(e);
}
if ((s & (s-1)) != 0)
throw new Error("data type scale not a power of two");
ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
}
}
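    /*
     * Editorial sketch of how the pool drives a worker-owned WorkQueue
     * (the real call sites are runWorker/scan and the task execution
     * paths below): the owner pushes and pops at the LIFO end, thieves
     * take from the base, and runTask drains remaining local tasks
     * after executing a stolen one.
     *
     *   w.push(task);                               // owner only
     *   ForkJoinTask<?> mine   = w.pop();           // owner, LIFO end (top)
     *   ForkJoinTask<?> stolen = q.pollAt(q.base);  // thief, FIFO end (base)
     *   w.runTask(stolen);                          // execute, then drain local queue
     */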
/**
* Per-thread records for threads that submit to pools. Currently
* holds only pseudo-random seed / index that is used to choose
* submission queues in method doSubmit. In the future, this may
* also incorporate a means to implement different task rejection
* and resubmission policies.
*
* Seeds for submitters and workers/workQueues work in basically
* the same way but are initialized and updated using slightly
* different mechanics. Both are initialized using the same
* approach as in class ThreadLocal, where successive values are
* unlikely to collide with previous values. This is done during
* registration for workers, but requires a separate AtomicInteger
* for submitters. Seeds are then randomly modified upon
* collisions using xorshifts, which requires a non-zero seed.
*/
static final class Submitter {
int seed;
Submitter() {
int s = nextSubmitterSeed.getAndAdd(SEED_INCREMENT);
seed = (s == 0) ? 1 : s; // ensure non-zero
}
}
/** ThreadLocal class for Submitters */
static final class ThreadSubmitter extends ThreadLocal<Submitter> {
public Submitter initialValue() { return new Submitter(); }
}
// static fields (initialized in static initializer below)
/**
* Creates a new ForkJoinWorkerThread. This factory is used unless
* overridden in ForkJoinPool constructors.
*/
public static final ForkJoinWorkerThreadFactory
defaultForkJoinWorkerThreadFactory;
/**
* Generator for assigning sequence numbers as pool names.
*/
private static final AtomicInteger poolNumberGenerator;
/**
* Generator for initial hashes/seeds for submitters. Accessed by
* Submitter class constructor.
*/
static final AtomicInteger nextSubmitterSeed;
/**
* Permission required for callers of methods that may start or
* kill threads.
*/
private static final RuntimePermission modifyThreadPermission;
/**
     * Per-thread submission bookkeeping. Shared across all pools
* to reduce ThreadLocal pollution and because random motion
* to avoid contention in one pool is likely to hold for others.
*/
private static final ThreadSubmitter submitters;
// static constants
/**
* The wakeup interval (in nanoseconds) for a worker waiting for a
* task when the pool is quiescent to instead try to shrink the
* number of workers. The exact value does not matter too
* much. It must be short enough to release resources during
* sustained periods of idleness, but not so short that threads
* are continually re-created.
*/
private static final long SHRINK_RATE =
        1L * 1000L * 1000L * 1000L; // 1 second
/**
* The timeout value for attempted shrinkage, includes
* some slop to cope with system timer imprecision.
*/
private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10);
/**
* The maximum stolen->joining link depth allowed in method
* tryHelpStealer. Must be a power of two. This value also
* controls the maximum number of times to try to help join a task
* without any apparent progress or change in pool state before
* giving up and blocking (see awaitJoin). Depths for legitimate
* chains are unbounded, but we use a fixed constant to avoid
* (otherwise unchecked) cycles and to bound staleness of
* traversal parameters at the expense of sometimes blocking when
* we could be helping.
*/
private static final int MAX_HELP = 64;
/**
* Secondary time-based bound (in nanosecs) for helping attempts
* before trying compensated blocking in awaitJoin. Used in
* conjunction with MAX_HELP to reduce variance due to different
* polling rates associated with different helping options. The
* value should roughly approximate the time required to create
* and/or activate a worker thread.
*/
private static final long COMPENSATION_DELAY = 1L << 18; // ~0.25 millisec
/**
* Increment for seed generators. See class ThreadLocal for
* explanation.
*/
private static final int SEED_INCREMENT = 0x61c88647;
/**
* Bits and masks for control variables
*
* Field ctl is a long packed with:
* AC: Number of active running workers minus target parallelism (16 bits)
* TC: Number of total workers minus target parallelism (16 bits)
* ST: true if pool is terminating (1 bit)
* EC: the wait count of top waiting thread (15 bits)
* ID: poolIndex of top of Treiber stack of waiters (16 bits)
*
* When convenient, we can extract the upper 32 bits of counts and
* the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e =
* (int)ctl. The ec field is never accessed alone, but always
* together with id and st. The offsets of counts by the target
     * parallelism and the positionings of fields make it possible to
* perform the most common checks via sign tests of fields: When
* ac is negative, there are not enough active workers, when tc is
* negative, there are not enough total workers, and when e is
* negative, the pool is terminating. To deal with these possibly
* negative fields, we use casts in and out of "short" and/or
* signed shifts to maintain signedness.
*
* When a thread is queued (inactivated), its eventCount field is
* set negative, which is the only way to tell if a worker is
* prevented from executing tasks, even though it must continue to
* scan for them to avoid queuing races. Note however that
* eventCount updates lag releases so usage requires care.
*
* Field runState is an int packed with:
* SHUTDOWN: true if shutdown is enabled (1 bit)
* SEQ: a sequence number updated upon (de)registering workers (30 bits)
* INIT: set true after workQueues array construction (1 bit)
*
* The sequence number enables simple consistency checks:
* Staleness of read-only operations on the workQueues array can
* be checked by comparing runState before vs after the reads.
*/
// bit positions/shifts for fields
private static final int AC_SHIFT = 48;
private static final int TC_SHIFT = 32;
private static final int ST_SHIFT = 31;
private static final int EC_SHIFT = 16;
// bounds
private static final int SMASK = 0xffff; // short bits
private static final int MAX_CAP = 0x7fff; // max #workers - 1
private static final int SQMASK = 0xfffe; // even short bits
private static final int SHORT_SIGN = 1 << 15;
private static final int INT_SIGN = 1 << 31;
// masks
private static final long STOP_BIT = 0x0001L << ST_SHIFT;
private static final long AC_MASK = ((long)SMASK) << AC_SHIFT;
private static final long TC_MASK = ((long)SMASK) << TC_SHIFT;
// units for incrementing and decrementing
private static final long TC_UNIT = 1L << TC_SHIFT;
private static final long AC_UNIT = 1L << AC_SHIFT;
// masks and units for dealing with u = (int)(ctl >>> 32)
private static final int UAC_SHIFT = AC_SHIFT - 32;
private static final int UTC_SHIFT = TC_SHIFT - 32;
private static final int UAC_MASK = SMASK << UAC_SHIFT;
private static final int UTC_MASK = SMASK << UTC_SHIFT;
private static final int UAC_UNIT = 1 << UAC_SHIFT;
private static final int UTC_UNIT = 1 << UTC_SHIFT;
// masks and units for dealing with e = (int)ctl
private static final int E_MASK = 0x7fffffff; // no STOP_BIT
private static final int E_SEQ = 1 << EC_SHIFT;
// runState bits
private static final int SHUTDOWN = 1 << 31;
// access mode for WorkQueue
static final int LIFO_QUEUE = 0;
static final int FIFO_QUEUE = 1;
static final int SHARED_QUEUE = -1;
// Instance fields
/*
* Field layout order in this class tends to matter more than one
* would like. Runtime layout order is only loosely related to
* declaration order and may differ across JVMs, but the following
* empirically works OK on current JVMs.
*/
volatile long ctl; // main pool control
final int parallelism; // parallelism level
final int localMode; // per-worker scheduling mode
final int submitMask; // submit queue index bound
int nextSeed; // for initializing worker seeds
volatile int runState; // shutdown status and seq
WorkQueue[] workQueues; // main registry
final Mutex lock; // for registration
final Condition termination; // for awaitTermination
final ForkJoinWorkerThreadFactory factory; // factory for new workers
final Thread.UncaughtExceptionHandler ueh; // per-worker UEH
final AtomicLong stealCount; // collect counts when terminated
final AtomicInteger nextWorkerNumber; // to create worker name string
final String workerNamePrefix; // to create worker name string
// Creating, registering, and deregistering workers
/**
* Tries to create and start a worker
*/
private void addWorker() {
Throwable ex = null;
ForkJoinWorkerThread wt = null;
try {
if ((wt = factory.newThread(this)) != null) {
wt.start();
return;
}
} catch (Throwable e) {
ex = e;
}
deregisterWorker(wt, ex); // adjust counts etc on failure
}
/**
* Callback from ForkJoinWorkerThread constructor to assign a
* public name. This must be separate from registerWorker because
* it is called during the "super" constructor call in
* ForkJoinWorkerThread.
*/
final String nextWorkerName() {
return workerNamePrefix.concat
(Integer.toString(nextWorkerNumber.addAndGet(1)));
}
/**
* Callback from ForkJoinWorkerThread constructor to establish its
* poolIndex and record its WorkQueue. To avoid scanning bias due
* to packing entries in front of the workQueues array, we treat
* the array as a simple power-of-two hash table using per-thread
* seed as hash, expanding as needed.
*
* @param w the worker's queue
*/
final void registerWorker(WorkQueue w) {
Mutex lock = this.lock;
lock.lock();
try {
WorkQueue[] ws = workQueues;
if (w != null && ws != null) { // skip on shutdown/failure
int rs, n = ws.length, m = n - 1;
int s = nextSeed += SEED_INCREMENT; // rarely-colliding sequence
w.seed = (s == 0) ? 1 : s; // ensure non-zero seed
int r = (s << 1) | 1; // use odd-numbered indices
if (ws[r &= m] != null) { // collision
int probes = 0; // step by approx half size
int step = (n <= 4) ? 2 : ((n >>> 1) & SQMASK) + 2;
while (ws[r = (r + step) & m] != null) {
if (++probes >= n) {
workQueues = ws = Arrays.copyOf(ws, n <<= 1);
m = n - 1;
probes = 0;
}
}
}
w.eventCount = w.poolIndex = r; // establish before recording
ws[r] = w; // also update seq
runState = ((rs = runState) & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN);
}
} finally {
lock.unlock();
}
}
/**
* Final callback from terminating worker, as well as upon failure
* to construct or start a worker in addWorker. Removes record of
* worker from array, and adjusts counts. If pool is shutting
* down, tries to complete termination.
*
* @param wt the worker thread or null if addWorker failed
* @param ex the exception causing failure, or null if none
*/
final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
Mutex lock = this.lock;
WorkQueue w = null;
if (wt != null && (w = wt.workQueue) != null) {
w.runState = -1; // ensure runState is set
stealCount.getAndAdd(w.totalSteals + w.nsteals);
int idx = w.poolIndex;
lock.lock();
try { // remove record from array
WorkQueue[] ws = workQueues;
if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w)
ws[idx] = null;
} finally {
lock.unlock();
}
}
long c; // adjust ctl counts
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) |
((c - TC_UNIT) & TC_MASK) |
(c & ~(AC_MASK|TC_MASK)))));
if (!tryTerminate(false, false) && w != null) {
w.cancelAll(); // cancel remaining tasks
if (w.array != null) // suppress signal if never ran
signalWork(); // wake up or create replacement
if (ex == null) // help clean refs on way out
ForkJoinTask.helpExpungeStaleExceptions();
}
if (ex != null) // rethrow
U.throwException(ex);
}
// Submissions
/**
* Unless shutting down, adds the given task to a submission queue
* at submitter's current queue index (modulo submission
* range). If no queue exists at the index, one is created. If
* the queue is busy, another index is randomly chosen. The
* submitMask bounds the effective number of queues to the
* (nearest power of two for) parallelism level.
*
* @param task the task. Caller must ensure non-null.
*/
private void doSubmit(ForkJoinTask<?> task) {
Submitter s = submitters.get();
for (int r = s.seed, m = submitMask;;) {
WorkQueue[] ws; WorkQueue q;
int k = r & m & SQMASK; // use only even indices
if (runState < 0 || (ws = workQueues) == null || ws.length <= k)
throw new RejectedExecutionException(); // shutting down
else if ((q = ws[k]) == null) { // create new queue
WorkQueue nq = new WorkQueue(this, null, SHARED_QUEUE);
Mutex lock = this.lock; // construct outside lock
lock.lock();
try { // recheck under lock
int rs = runState; // to update seq
if (ws == workQueues && ws[k] == null) {
ws[k] = nq;
runState = ((rs & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN));
}
} finally {
lock.unlock();
}
}
else if (q.trySharedPush(task)) {
signalWork();
return;
}
else if (m > 1) { // move to a different index
r ^= r << 13; // same xorshift as WorkQueues
r ^= r >>> 17;
s.seed = r ^= r << 5;
}
else
Thread.yield(); // yield if no alternatives
}
}
// Maintaining ctl counts
/**
* Increments active count; mainly called upon return from blocking.
*/
final void incrementActiveCount() {
long c;
do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));
}
/**
* Tries to activate or create a worker if too few are active.
*/
final void signalWork() {
long c; int u;
while ((u = (int)((c = ctl) >>> 32)) < 0) { // too few active
WorkQueue[] ws = workQueues; int e, i; WorkQueue w; Thread p;
if ((e = (int)c) > 0) { // at least one waiting
if (ws != null && (i = e & SMASK) < ws.length &&
(w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
long nc = (((long)(w.nextWait & E_MASK)) |
((long)(u + UAC_UNIT) << 32));
if (U.compareAndSwapLong(this, CTL, c, nc)) {
w.eventCount = (e + E_SEQ) & E_MASK;
if ((p = w.parker) != null)
U.unpark(p); // activate and release
break;
}
}
else
break;
}
else if (e == 0 && (u & SHORT_SIGN) != 0) { // too few total
long nc = (long)(((u + UTC_UNIT) & UTC_MASK) |
((u + UAC_UNIT) & UAC_MASK)) << 32;
if (U.compareAndSwapLong(this, CTL, c, nc)) {
addWorker();
break;
}
}
else
break;
}
}
// Scanning for tasks
/**
* Top-level runloop for workers, called by ForkJoinWorkerThread.run.
*/
final void runWorker(WorkQueue w) {
w.growArray(false); // initialize queue array in this thread
do { w.runTask(scan(w)); } while (w.runState >= 0);
}
/**
* Scans for and, if found, returns one task, else possibly
* inactivates the worker. This method operates on single reads of
* volatile state and is designed to be re-invoked continuously,
* in part because it returns upon detecting inconsistencies,
* contention, or state changes that indicate possible success on
* re-invocation.
*
* The scan searches for tasks across a random permutation of
* queues (starting at a random index and stepping by a random
* relative prime, checking each at least once). The scan
* terminates upon either finding a non-empty queue, or completing
* the sweep. If the worker is not inactivated, it takes and
* returns a task from this queue. On failure to find a task, we
* take one of the following actions, after which the caller will
* retry calling this method unless terminated.
*
* * If pool is terminating, terminate the worker.
*
* * If not a complete sweep, try to release a waiting worker. If
* the scan terminated because the worker is inactivated, then the
* released worker will often be the calling worker, and it can
* succeed obtaining a task on the next call. Or maybe it is
     * another worker, but with the same net effect. Releasing in other
* cases as well ensures that we have enough workers running.
*
* * If not already enqueued, try to inactivate and enqueue the
* worker on wait queue. Or, if inactivating has caused the pool
* to be quiescent, relay to idleAwaitWork to check for
* termination and possibly shrink pool.
*
* * If already inactive, and the caller has run a task since the
* last empty scan, return (to allow rescan) unless others are
* also inactivated. Field WorkQueue.rescans counts down on each
* scan to ensure eventual inactivation and blocking.
*
* * If already enqueued and none of the above apply, park
     * awaiting signal.
*
* @param w the worker (via its WorkQueue)
     * @return a task, or null if none found
*/
private final ForkJoinTask<?> scan(WorkQueue w) {
WorkQueue[] ws; // first update random seed
int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5;
int rs = runState, m; // volatile read order matters
if ((ws = workQueues) != null && (m = ws.length - 1) > 0) {
int ec = w.eventCount; // ec is negative if inactive
int step = (r >>> 16) | 1; // relative prime
for (int j = (m + 1) << 2; ; r += step) {
WorkQueue q; ForkJoinTask<?> t; ForkJoinTask<?>[] a; int b;
if ((q = ws[r & m]) != null && (b = q.base) - q.top < 0 &&
(a = q.array) != null) { // probably nonempty
int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
t = (ForkJoinTask<?>)U.getObjectVolatile(a, i);
if (q.base == b && ec >= 0 && t != null &&
U.compareAndSwapObject(a, i, t, null)) {
if (q.top - (q.base = b + 1) > 1)
signalWork(); // help pushes signal
return t;
}
else if (ec < 0 || j <= m) {
                        rs = 0;            // mark scan as incomplete
break; // caller can retry after release
}
}
if (--j < 0)
break;
}
long c = ctl; int e = (int)c, a = (int)(c >> AC_SHIFT), nr, ns;
if (e < 0) // decode ctl on empty scan
w.runState = -1; // pool is terminating
else if (rs == 0 || rs != runState) { // incomplete scan
WorkQueue v; Thread p; // try to release a waiter
if (e > 0 && a < 0 && w.eventCount == ec &&
(v = ws[e & m]) != null && v.eventCount == (e | INT_SIGN)) {
long nc = ((long)(v.nextWait & E_MASK) |
((c + AC_UNIT) & (AC_MASK|TC_MASK)));
if (ctl == c && U.compareAndSwapLong(this, CTL, c, nc)) {
v.eventCount = (e + E_SEQ) & E_MASK;
if ((p = v.parker) != null)
U.unpark(p);
}
}
}
else if (ec >= 0) { // try to enqueue/inactivate
long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK));
w.nextWait = e;
w.eventCount = ec | INT_SIGN; // mark as inactive
if (ctl != c || !U.compareAndSwapLong(this, CTL, c, nc))
w.eventCount = ec; // unmark on CAS failure
else {
if ((ns = w.nsteals) != 0) {
w.nsteals = 0; // set rescans if ran task
w.rescans = (a > 0) ? 0 : a + parallelism;
w.totalSteals += ns;
}
if (a == 1 - parallelism) // quiescent
idleAwaitWork(w, nc, c);
}
}
else if (w.eventCount < 0) { // already queued
if ((nr = w.rescans) > 0) { // continue rescanning
int ac = a + parallelism;
if (((w.rescans = (ac < nr) ? ac : nr - 1) & 3) == 0)
Thread.yield(); // yield before block
}
else {
Thread.interrupted(); // clear status
Thread wt = Thread.currentThread();
U.putObject(wt, PARKBLOCKER, this);
w.parker = wt; // emulate LockSupport.park
if (w.eventCount < 0) // recheck
U.park(false, 0L);
w.parker = null;
U.putObject(wt, PARKBLOCKER, null);
}
}
}
return null;
}
/**
* If inactivating worker w has caused the pool to become
* quiescent, checks for pool termination, and, so long as this is
     * not the only worker, waits for an event for up to SHRINK_RATE
* nanosecs. On timeout, if ctl has not changed, terminates the
* worker, which will in turn wake up another worker to possibly
* repeat this process.
*
* @param w the calling worker
* @param currentCtl the ctl value triggering possible quiescence
* @param prevCtl the ctl value to restore if thread is terminated
*/
private void idleAwaitWork(WorkQueue w, long currentCtl, long prevCtl) {
if (w.eventCount < 0 && !tryTerminate(false, false) &&
(int)prevCtl != 0 && !hasQueuedSubmissions() && ctl == currentCtl) {
Thread wt = Thread.currentThread();
Thread.yield(); // yield before block
while (ctl == currentCtl) {
long startTime = System.nanoTime();
Thread.interrupted(); // timed variant of version in scan()
U.putObject(wt, PARKBLOCKER, this);
w.parker = wt;
if (ctl == currentCtl)
U.park(false, SHRINK_RATE);
w.parker = null;
U.putObject(wt, PARKBLOCKER, null);
if (ctl != currentCtl)
break;
if (System.nanoTime() - startTime >= SHRINK_TIMEOUT &&
U.compareAndSwapLong(this, CTL, currentCtl, prevCtl)) {
w.eventCount = (w.eventCount + E_SEQ) | E_MASK;
w.runState = -1; // shrink
break;
}
}
}
}
/**
* Tries to locate and execute tasks for a stealer of the given
     * task, or in turn one of its stealers. Traces currentSteal ->
* currentJoin links looking for a thread working on a descendant
* of the given task and with a non-empty queue to steal back and
* execute tasks from. The first call to this method upon a
     * waiting join will often entail scanning/search (which is OK
* because the joiner has nothing better to do), but this method
* leaves hints in workers to speed up subsequent calls. The
* implementation is very branchy to cope with potential
* inconsistencies or loops encountering chains that are stale,
* unknown, or so long that they are likely cyclic.
*
* @param joiner the joining worker
* @param task the task to join
* @return 0 if no progress can be made, negative if task
* known complete, else positive
*/
private int tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) {
int stat = 0, steps = 0; // bound to avoid cycles
if (joiner != null && task != null) { // hoist null checks
restart: for (;;) {
ForkJoinTask<?> subtask = task; // current target
for (WorkQueue j = joiner, v;;) { // v is stealer of subtask
WorkQueue[] ws; int m, s, h;
if ((s = task.status) < 0) {
stat = s;
break restart;
}
if ((ws = workQueues) == null || (m = ws.length - 1) <= 0)
break restart; // shutting down
if ((v = ws[h = (j.stealHint | 1) & m]) == null ||
v.currentSteal != subtask) {
for (int origin = h;;) { // find stealer
if (((h = (h + 2) & m) & 15) == 1 &&
(subtask.status < 0 || j.currentJoin != subtask))
continue restart; // occasional staleness check
if ((v = ws[h]) != null &&
v.currentSteal == subtask) {
j.stealHint = h; // save hint
break;
}
if (h == origin)
break restart; // cannot find stealer
}
}
for (;;) { // help stealer or descend to its stealer
ForkJoinTask[] a; int b;
if (subtask.status < 0) // surround probes with
continue restart; // consistency checks
if ((b = v.base) - v.top < 0 && (a = v.array) != null) {
int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
ForkJoinTask<?> t =
(ForkJoinTask<?>)U.getObjectVolatile(a, i);
if (subtask.status < 0 || j.currentJoin != subtask ||
v.currentSteal != subtask)
continue restart; // stale
stat = 1; // apparent progress
if (t != null && v.base == b &&
U.compareAndSwapObject(a, i, t, null)) {
v.base = b + 1; // help stealer
joiner.runSubtask(t);
}
else if (v.base == b && ++steps == MAX_HELP)
break restart; // v apparently stalled
}
else { // empty -- try to descend
ForkJoinTask<?> next = v.currentJoin;
if (subtask.status < 0 || j.currentJoin != subtask ||
v.currentSteal != subtask)
continue restart; // stale
else if (next == null || ++steps == MAX_HELP)
break restart; // dead-end or maybe cyclic
else {
subtask = next;
j = v;
break;
}
}
}
}
}
}
return stat;
}
/**
* If task is at base of some steal queue, steals and executes it.
*
* @param joiner the joining worker
* @param task the task
*/
private void tryPollForAndExec(WorkQueue joiner, ForkJoinTask<?> task) {
WorkQueue[] ws;
if ((ws = workQueues) != null) {
for (int j = 1; j < ws.length && task.status >= 0; j += 2) {
WorkQueue q = ws[j];
if (q != null && q.pollFor(task)) {
joiner.runSubtask(task);
break;
}
}
}
}
/**
* Tries to decrement active count (sometimes implicitly) and
* possibly release or create a compensating worker in preparation
* for blocking. Fails on contention or termination. Otherwise,
     * adds a new thread if no idle workers are available and either the
     * pool would become completely starved or all of the following hold:
     * it is at least half starved, fewer than 50% spares exist, and there
     * is at least one task apparently available. Even though the availability
* check requires a full scan, it is worthwhile in reducing false
* alarms.
*
* @param task if non-null, a task being waited for
* @param blocker if non-null, a blocker being waited for
* @return true if the caller can block, else should recheck and retry
*/
final boolean tryCompensate(ForkJoinTask<?> task, ManagedBlocker blocker) {
int pc = parallelism, e;
long c = ctl;
WorkQueue[] ws = workQueues;
if ((e = (int)c) >= 0 && ws != null) {
int u, a, ac, hc;
int tc = (short)((u = (int)(c >>> 32)) >>> UTC_SHIFT) + pc;
boolean replace = false;
if ((a = u >> UAC_SHIFT) <= 0) {
if ((ac = a + pc) <= 1)
replace = true;
else if ((e > 0 || (task != null &&
ac <= (hc = pc >>> 1) && tc < pc + hc))) {
WorkQueue w;
for (int j = 0; j < ws.length; ++j) {
if ((w = ws[j]) != null && !w.isEmpty()) {
replace = true;
break; // in compensation range and tasks available
}
}
}
}
if ((task == null || task.status >= 0) && // recheck need to block
(blocker == null || !blocker.isReleasable()) && ctl == c) {
if (!replace) { // no compensation
long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
if (U.compareAndSwapLong(this, CTL, c, nc))
return true;
}
else if (e != 0) { // release an idle worker
WorkQueue w; Thread p; int i;
if ((i = e & SMASK) < ws.length && (w = ws[i]) != null) {
long nc = ((long)(w.nextWait & E_MASK) |
(c & (AC_MASK|TC_MASK)));
if (w.eventCount == (e | INT_SIGN) &&
U.compareAndSwapLong(this, CTL, c, nc)) {
w.eventCount = (e + E_SEQ) & E_MASK;
if ((p = w.parker) != null)
U.unpark(p);
return true;
}
}
}
else if (tc < MAX_CAP) { // create replacement
long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
if (U.compareAndSwapLong(this, CTL, c, nc)) {
addWorker();
return true;
}
}
}
}
return false;
}
/**
* Helps and/or blocks until the given task is done.
*
* @param joiner the joining worker
* @param task the task
* @return task status on exit
*/
final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) {
int s;
if ((s = task.status) >= 0) {
ForkJoinTask<?> prevJoin = joiner.currentJoin;
joiner.currentJoin = task;
long startTime = 0L;
for (int k = 0;;) {
if ((s = (joiner.isEmpty() ? // try to help
tryHelpStealer(joiner, task) :
joiner.tryRemoveAndExec(task))) == 0 &&
(s = task.status) >= 0) {
if (k == 0) {
startTime = System.nanoTime();
tryPollForAndExec(joiner, task); // check uncommon case
}
else if ((k & (MAX_HELP - 1)) == 0 &&
System.nanoTime() - startTime >=
COMPENSATION_DELAY &&
tryCompensate(task, null)) {
if (task.trySetSignal()) {
synchronized (task) {
if (task.status >= 0) {
try { // see ForkJoinTask
task.wait(); // for explanation
} catch (InterruptedException ie) {
}
}
else
task.notifyAll();
}
}
long c; // re-activate
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl, c + AC_UNIT));
}
}
if (s < 0 || (s = task.status) < 0) {
joiner.currentJoin = prevJoin;
break;
}
else if ((k++ & (MAX_HELP - 1)) == MAX_HELP >>> 1)
Thread.yield(); // for politeness
}
}
return s;
}
/**
* Stripped-down variant of awaitJoin used by timed joins. Tries
* to help join only while there is continuous progress. (Caller
* will then enter a timed wait.)
*
* @param joiner the joining worker
* @param task the task
* @return task status on exit
*/
final int helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task) {
int s;
while ((s = task.status) >= 0 &&
(joiner.isEmpty() ?
tryHelpStealer(joiner, task) :
joiner.tryRemoveAndExec(task)) != 0)
;
return s;
}
/**
* Returns a (probably) non-empty steal queue, if one is found
* during a random, then cyclic scan, else null. This method must
* be retried by caller if, by the time it tries to use the queue,
* it is empty.
*/
private WorkQueue findNonEmptyStealQueue(WorkQueue w) {
// Similar to loop in scan(), but ignoring submissions
int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5;
int step = (r >>> 16) | 1;
for (WorkQueue[] ws;;) {
int rs = runState, m;
if ((ws = workQueues) == null || (m = ws.length - 1) < 1)
return null;
for (int j = (m + 1) << 2; ; r += step) {
WorkQueue q = ws[((r << 1) | 1) & m];
if (q != null && !q.isEmpty())
return q;
else if (--j < 0) {
if (runState == rs)
return null;
break;
}
}
}
}
/**
* Runs tasks until {@code isQuiescent()}. We piggyback on
* active count ctl maintenance, but rather than blocking
* when tasks cannot be found, we rescan until all others cannot
* find tasks either.
*/
final void helpQuiescePool(WorkQueue w) {
for (boolean active = true;;) {
ForkJoinTask<?> localTask; // exhaust local queue
while ((localTask = w.nextLocalTask()) != null)
localTask.doExec();
WorkQueue q = findNonEmptyStealQueue(w);
if (q != null) {
ForkJoinTask<?> t; int b;
if (!active) { // re-establish active count
long c;
active = true;
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl, c + AC_UNIT));
}
if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null)
w.runSubtask(t);
}
else {
long c;
if (active) { // decrement active count without queuing
active = false;
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl, c -= AC_UNIT));
}
else
c = ctl; // re-increment on exit
if ((int)(c >> AC_SHIFT) + parallelism == 0) {
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl, c + AC_UNIT));
break;
}
}
}
}
/**
* Gets and removes a local or stolen task for the given worker.
*
* @return a task, if available
*/
final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
for (ForkJoinTask<?> t;;) {
WorkQueue q; int b;
if ((t = w.nextLocalTask()) != null)
return t;
if ((q = findNonEmptyStealQueue(w)) == null)
return null;
if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null)
return t;
}
}
/**
* Returns the approximate (non-atomic) number of idle threads per
* active thread to offset steal queue size for method
* ForkJoinTask.getSurplusQueuedTaskCount().
*/
final int idlePerActive() {
// Approximate at powers of two for small values, saturate past 4
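        // e.g. parallelism 8 with 3 active threads: 3 > 4 fails, 3 > 2 holds,
        // so roughly one idle thread per active thread is reported.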
int p = parallelism;
int a = p + (int)(ctl >> AC_SHIFT);
return (a > (p >>>= 1) ? 0 :
a > (p >>>= 1) ? 1 :
a > (p >>>= 1) ? 2 :
a > (p >>>= 1) ? 4 :
8);
}
// Termination
/**
* Possibly initiates and/or completes termination. The caller
* triggering termination runs three passes through workQueues:
* (0) Setting termination status, followed by wakeups of queued
* workers; (1) cancelling all tasks; (2) interrupting lagging
* threads (likely in external tasks, but possibly also blocked in
* joins). Each pass repeats previous steps because of potential
* lagging thread creation.
*
* @param now if true, unconditionally terminate, else only
* if no work and no active workers
* @param enable if true, enable shutdown when next possible
* @return true if now terminating or terminated
*/
private boolean tryTerminate(boolean now, boolean enable) {
Mutex lock = this.lock;
for (long c;;) {
if (((c = ctl) & STOP_BIT) != 0) { // already terminating
if ((short)(c >>> TC_SHIFT) == -parallelism) {
lock.lock(); // don't need try/finally
termination.signalAll(); // signal when 0 workers
lock.unlock();
}
return true;
}
if (runState >= 0) { // not yet enabled
if (!enable)
return false;
lock.lock();
runState |= SHUTDOWN;
lock.unlock();
}
if (!now) { // check if idle & no tasks
if ((int)(c >> AC_SHIFT) != -parallelism ||
hasQueuedSubmissions())
return false;
// Check for unqueued inactive workers. One pass suffices.
WorkQueue[] ws = workQueues; WorkQueue w;
if (ws != null) {
for (int i = 1; i < ws.length; i += 2) {
if ((w = ws[i]) != null && w.eventCount >= 0)
return false;
}
}
}
if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) {
for (int pass = 0; pass < 3; ++pass) {
WorkQueue[] ws = workQueues;
if (ws != null) {
WorkQueue w;
int n = ws.length;
for (int i = 0; i < n; ++i) {
if ((w = ws[i]) != null) {
w.runState = -1;
if (pass > 0) {
w.cancelAll();
if (pass > 1)
w.interruptOwner();
}
}
}
// Wake up workers parked on event queue
int i, e; long cc; Thread p;
while ((e = (int)(cc = ctl) & E_MASK) != 0 &&
(i = e & SMASK) < n &&
(w = ws[i]) != null) {
long nc = ((long)(w.nextWait & E_MASK) |
((cc + AC_UNIT) & AC_MASK) |
(cc & (TC_MASK|STOP_BIT)));
if (w.eventCount == (e | INT_SIGN) &&
U.compareAndSwapLong(this, CTL, cc, nc)) {
w.eventCount = (e + E_SEQ) & E_MASK;
w.runState = -1;
if ((p = w.parker) != null)
U.unpark(p);
}
}
}
}
}
}
}
// Exported methods
// Constructors
/**
* Creates a {@code ForkJoinPool} with parallelism equal to {@link
* java.lang.Runtime#availableProcessors}, using the {@linkplain
* #defaultForkJoinWorkerThreadFactory default thread factory},
* no UncaughtExceptionHandler, and non-async LIFO processing mode.
*
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool() {
this(Runtime.getRuntime().availableProcessors(),
defaultForkJoinWorkerThreadFactory, null, false);
}
/**
* Creates a {@code ForkJoinPool} with the indicated parallelism
* level, the {@linkplain
* #defaultForkJoinWorkerThreadFactory default thread factory},
* no UncaughtExceptionHandler, and non-async LIFO processing mode.
*
* @param parallelism the parallelism level
* @throws IllegalArgumentException if parallelism less than or
* equal to zero, or greater than implementation limit
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool(int parallelism) {
this(parallelism, defaultForkJoinWorkerThreadFactory, null, false);
}
/**
* Creates a {@code ForkJoinPool} with the given parameters.
*
* @param parallelism the parallelism level. For default value,
* use {@link java.lang.Runtime#availableProcessors}.
* @param factory the factory for creating new threads. For default value,
* use {@link #defaultForkJoinWorkerThreadFactory}.
* @param handler the handler for internal worker threads that
* terminate due to unrecoverable errors encountered while executing
* tasks. For default value, use {@code null}.
* @param asyncMode if true,
* establishes local first-in-first-out scheduling mode for forked
* tasks that are never joined. This mode may be more appropriate
* than default locally stack-based mode in applications in which
* worker threads only process event-style asynchronous tasks.
* For default value, use {@code false}.
* @throws IllegalArgumentException if parallelism less than or
* equal to zero, or greater than implementation limit
* @throws NullPointerException if the factory is null
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool(int parallelism,
ForkJoinWorkerThreadFactory factory,
Thread.UncaughtExceptionHandler handler,
boolean asyncMode) {
checkPermission();
if (factory == null)
throw new NullPointerException();
if (parallelism <= 0 || parallelism > MAX_CAP)
throw new IllegalArgumentException();
this.parallelism = parallelism;
this.factory = factory;
this.ueh = handler;
this.localMode = asyncMode ? FIFO_QUEUE : LIFO_QUEUE;
long np = (long)(-parallelism); // offset ctl counts
this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
// Use nearest power 2 for workQueues size. See Hackers Delight sec 3.2.
int n = parallelism - 1;
n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
int size = (n + 1) << 1; // #slots = 2*#workers
this.submitMask = size - 1; // room for max # of submit queues
this.workQueues = new WorkQueue[size];
this.termination = (this.lock = new Mutex()).newCondition();
this.stealCount = new AtomicLong();
this.nextWorkerNumber = new AtomicInteger();
int pn = poolNumberGenerator.incrementAndGet();
StringBuilder sb = new StringBuilder("FJ-");
sb.append(Integer.toString(pn));
sb.append("-");
this.workerNamePrefix = sb.toString();
lock.lock();
this.runState = 1; // set init flag
lock.unlock();
}
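    /*
     * A minimal construction sketch (pool names below are illustrative only):
     *
     *   ForkJoinPool cpuPool   = new ForkJoinPool();   // parallelism = #processors
     *   ForkJoinPool fixedPool = new ForkJoinPool(4);  // explicit parallelism
     *   ForkJoinPool eventPool = new ForkJoinPool(4,
     *       ForkJoinPool.defaultForkJoinWorkerThreadFactory, null, true); // async FIFO mode
     *
     * The async form suits event-style tasks that are forked but never
     * joined, as described in the constructor javadoc above.
     */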
// Execution methods
/**
* Performs the given task, returning its result upon completion.
* If the computation encounters an unchecked Exception or Error,
* it is rethrown as the outcome of this invocation. Rethrown
* exceptions behave in the same way as regular exceptions, but,
* when possible, contain stack traces (as displayed for example
* using {@code ex.printStackTrace()}) of both the current thread
* as well as the thread actually encountering the exception;
* minimally only the latter.
*
* @param task the task
* @return the task's result
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> T invoke(ForkJoinTask<T> task) {
if (task == null)
throw new NullPointerException();
doSubmit(task);
return task.join();
}
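    /*
     * A minimal blocking-invocation sketch, assuming a hypothetical
     * SumTask extends RecursiveTask<Long> whose compute() splits and joins:
     *
     *   ForkJoinPool pool = new ForkJoinPool();
     *   long total = pool.invoke(new SumTask(array, 0, array.length));
     *
     * invoke() submits the task and then joins it, so exceptions thrown by
     * the computation are rethrown to the caller, as documented above.
     */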
/**
* Arranges for (asynchronous) execution of the given task.
*
* @param task the task
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public void execute(ForkJoinTask<?> task) {
if (task == null)
throw new NullPointerException();
doSubmit(task);
}
// AbstractExecutorService methods
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public void execute(Runnable task) {
if (task == null)
throw new NullPointerException();
ForkJoinTask<?> job;
if (task instanceof ForkJoinTask<?>) // avoid re-wrap
job = (ForkJoinTask<?>) task;
else
job = new ForkJoinTask.AdaptedRunnableAction(task);
doSubmit(job);
}
/**
* Submits a ForkJoinTask for execution.
*
* @param task the task to submit
* @return the task
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
if (task == null)
throw new NullPointerException();
doSubmit(task);
return task;
}
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> ForkJoinTask<T> submit(Callable<T> task) {
ForkJoinTask<T> job = new ForkJoinTask.AdaptedCallable<T>(task);
doSubmit(job);
return job;
}
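    /*
     * A Future-style usage sketch; readFile and path are assumed to exist
     * in the caller:
     *
     *   ForkJoinTask<String> f = pool.submit(new Callable<String>() {
     *       public String call() throws Exception { return readFile(path); }
     *   });
     *   // ... do other work ...
     *   String contents = f.join();  // or f.get() for checked Future semantics
     *
     * join() rethrows failures as unchecked exceptions, while get() wraps
     * them in ExecutionException, per the ForkJoinTask contract.
     */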
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> ForkJoinTask<T> submit(Runnable task, T result) {
ForkJoinTask<T> job = new ForkJoinTask.AdaptedRunnable<T>(task, result);
doSubmit(job);
return job;
}
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public ForkJoinTask<?> submit(Runnable task) {
if (task == null)
throw new NullPointerException();
ForkJoinTask<?> job;
if (task instanceof ForkJoinTask<?>) // avoid re-wrap
job = (ForkJoinTask<?>) task;
else
job = new ForkJoinTask.AdaptedRunnableAction(task);
doSubmit(job);
return job;
}
/**
* @throws NullPointerException {@inheritDoc}
* @throws RejectedExecutionException {@inheritDoc}
*/
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
// In previous versions of this class, this method constructed
// a task to run ForkJoinTask.invokeAll, but now external
// invocation of multiple tasks is at least as efficient.
List<ForkJoinTask<T>> fs = new ArrayList<ForkJoinTask<T>>(tasks.size());
// Workaround needed because method wasn't declared with
// wildcards in return type but should have been.
@SuppressWarnings({"unchecked", "rawtypes"})
List<Future<T>> futures = (List<Future<T>>) (List) fs;
boolean done = false;
try {
for (Callable<T> t : tasks) {
ForkJoinTask<T> f = new ForkJoinTask.AdaptedCallable<T>(t);
doSubmit(f);
fs.add(f);
}
for (ForkJoinTask<T> f : fs)
f.quietlyJoin();
done = true;
return futures;
} finally {
if (!done)
for (ForkJoinTask<T> f : fs)
f.cancel(false);
}
}
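    /*
     * A bulk-submission sketch, assuming a caller-supplied
     * List<Callable<Integer>> jobs:
     *
     *   List<Future<Integer>> results = pool.invokeAll(jobs);
     *   int sum = 0;
     *   for (Future<Integer> r : results)
     *       sum += r.get();  // get() may still throw checked exceptions
     *
     * Each Callable is wrapped and submitted individually, and the returned
     * futures have already been quietly joined before this method returns.
     */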
/**
* Returns the factory used for constructing new workers.
*
* @return the factory used for constructing new workers
*/
public ForkJoinWorkerThreadFactory getFactory() {
return factory;
}
/**
* Returns the handler for internal worker threads that terminate
* due to unrecoverable errors encountered while executing tasks.
*
* @return the handler, or {@code null} if none
*/
public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() {
return ueh;
}
/**
* Returns the targeted parallelism level of this pool.
*
* @return the targeted parallelism level of this pool
*/
public int getParallelism() {
return parallelism;
}
/**
* Returns the number of worker threads that have started but not
* yet terminated. The result returned by this method may differ
* from {@link #getParallelism} when threads are created to
* maintain parallelism when others are cooperatively blocked.
*
* @return the number of worker threads
*/
public int getPoolSize() {
return parallelism + (short)(ctl >>> TC_SHIFT);
}
/**
* Returns {@code true} if this pool uses local first-in-first-out
* scheduling mode for forked tasks that are never joined.
*
* @return {@code true} if this pool uses async mode
*/
public boolean getAsyncMode() {
return localMode != 0;
}
/**
* Returns an estimate of the number of worker threads that are
* not blocked waiting to join tasks or for other managed
* synchronization. This method may overestimate the
* number of running threads.
*
* @return the number of worker threads
*/
public int getRunningThreadCount() {
int rc = 0;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 1; i < ws.length; i += 2) {
if ((w = ws[i]) != null && w.isApparentlyUnblocked())
++rc;
}
}
return rc;
}
/**
* Returns an estimate of the number of threads that are currently
* stealing or executing tasks. This method may overestimate the
* number of active threads.
*
* @return the number of active threads
*/
public int getActiveThreadCount() {
int r = parallelism + (int)(ctl >> AC_SHIFT);
return (r <= 0) ? 0 : r; // suppress momentarily negative values
}
/**
* Returns {@code true} if all worker threads are currently idle.
* An idle worker is one that cannot obtain a task to execute
* because none are available to steal from other threads, and
* there are no pending submissions to the pool. This method is
* conservative; it might not return {@code true} immediately upon
* idleness of all threads, but will eventually become true if
* threads remain inactive.
*
* @return {@code true} if all threads are currently idle
*/
public boolean isQuiescent() {
return (int)(ctl >> AC_SHIFT) + parallelism == 0;
}
/**
* Returns an estimate of the total number of tasks stolen from
* one thread's work queue by another. The reported value
* underestimates the actual total number of steals when the pool
* is not quiescent. This value may be useful for monitoring and
* tuning fork/join programs: in general, steal counts should be
* high enough to keep threads busy, but low enough to avoid
* overhead and contention across threads.
*
* @return the number of steals
*/
public long getStealCount() {
long count = stealCount.get();
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 1; i < ws.length; i += 2) {
if ((w = ws[i]) != null)
count += w.totalSteals;
}
}
return count;
}
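    /*
     * A rough monitoring sketch while tuning task granularity (all methods
     * used here are defined in this class):
     *
     *   System.out.println("steals=" + pool.getStealCount() +
     *                      " running=" + pool.getRunningThreadCount() +
     *                      " queued=" + pool.getQueuedTaskCount());
     *
     * Interpret the counts per the javadoc above: steals should be high
     * enough to keep threads busy but low enough to limit contention.
     */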
/**
* Returns an estimate of the total number of tasks currently held
* in queues by worker threads (but not including tasks submitted
* to the pool that have not begun executing). This value is only
* an approximation, obtained by iterating across all threads in
* the pool. This method may be useful for tuning task
* granularities.
*
* @return the number of queued tasks
*/
public long getQueuedTaskCount() {
long count = 0;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 1; i < ws.length; i += 2) {
if ((w = ws[i]) != null)
count += w.queueSize();
}
}
return count;
}
/**
* Returns an estimate of the number of tasks submitted to this
* pool that have not yet begun executing. This method may take
* time proportional to the number of submissions.
*
* @return the number of queued submissions
*/
public int getQueuedSubmissionCount() {
int count = 0;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; i += 2) {
if ((w = ws[i]) != null)
count += w.queueSize();
}
}
return count;
}
/**
* Returns {@code true} if there are any tasks submitted to this
* pool that have not yet begun executing.
*
* @return {@code true} if there are any queued submissions
*/
public boolean hasQueuedSubmissions() {
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; i += 2) {
if ((w = ws[i]) != null && !w.isEmpty())
return true;
}
}
return false;
}
/**
* Removes and returns the next unexecuted submission if one is
* available. This method may be useful in extensions to this
* class that re-assign work in systems with multiple pools.
*
* @return the next submission, or {@code null} if none
*/
protected ForkJoinTask<?> pollSubmission() {
WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; i += 2) {
if ((w = ws[i]) != null && (t = w.poll()) != null)
return t;
}
}
return null;
}
/**
* Removes all available unexecuted submitted and forked tasks
* from scheduling queues and adds them to the given collection,
* without altering their execution status. These may include
* artificially generated or wrapped tasks. This method is
* designed to be invoked only when the pool is known to be
* quiescent. Invocations at other times may not remove all
* tasks. A failure encountered while attempting to add elements
* to collection {@code c} may result in elements being in
* neither, either or both collections when the associated
* exception is thrown. The behavior of this operation is
* undefined if the specified collection is modified while the
* operation is in progress.
*
* @param c the collection to transfer elements into
* @return the number of elements transferred
*/
protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
int count = 0;
WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; ++i) {
if ((w = ws[i]) != null) {
while ((t = w.poll()) != null) {
c.add(t);
++count;
}
}
}
}
return count;
}
/**
* Returns a string identifying this pool, as well as its state,
* including indications of run state, parallelism level, and
* worker and task counts.
*
* @return a string identifying this pool, as well as its state
*/
public String toString() {
// Use a single pass through workQueues to collect counts
long qt = 0L, qs = 0L; int rc = 0;
long st = stealCount.get();
long c = ctl;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; ++i) {
if ((w = ws[i]) != null) {
int size = w.queueSize();
if ((i & 1) == 0)
qs += size;
else {
qt += size;
st += w.totalSteals;
if (w.isApparentlyUnblocked())
++rc;
}
}
}
}
int pc = parallelism;
int tc = pc + (short)(c >>> TC_SHIFT);
int ac = pc + (int)(c >> AC_SHIFT);
if (ac < 0) // ignore transient negative
ac = 0;
String level;
if ((c & STOP_BIT) != 0)
level = (tc == 0) ? "Terminated" : "Terminating";
else
level = runState < 0 ? "Shutting down" : "Running";
return super.toString() +
"[" + level +
", parallelism = " + pc +
", size = " + tc +
", active = " + ac +
", running = " + rc +
", steals = " + st +
", tasks = " + qt +
", submissions = " + qs +
"]";
}
/**
* Initiates an orderly shutdown in which previously submitted
* tasks are executed, but no new tasks will be accepted.
* Invocation has no additional effect if already shut down.
* Tasks that are in the process of being submitted concurrently
* during the course of this method may or may not be rejected.
*
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public void shutdown() {
checkPermission();
tryTerminate(false, true);
}
/**
* Attempts to cancel and/or stop all tasks, and reject all
* subsequently submitted tasks. Tasks that are in the process of
* being submitted or executed concurrently during the course of
* this method may or may not be rejected. This method cancels
* both existing and unexecuted tasks, in order to permit
* termination in the presence of task dependencies. So the method
* always returns an empty list (unlike the case for some other
* Executors).
*
* @return an empty list
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public List<Runnable> shutdownNow() {
checkPermission();
tryTerminate(true, true);
return Collections.emptyList();
}
/**
* Returns {@code true} if all tasks have completed following shut down.
*
* @return {@code true} if all tasks have completed following shut down
*/
public boolean isTerminated() {
long c = ctl;
return ((c & STOP_BIT) != 0L &&
(short)(c >>> TC_SHIFT) == -parallelism);
}
/**
* Returns {@code true} if the process of termination has
* commenced but not yet completed. This method may be useful for
* debugging. A return of {@code true} reported a sufficient
* period after shutdown may indicate that submitted tasks have
* ignored or suppressed interruption, or are waiting for IO,
* causing this executor not to properly terminate. (See the
* advisory notes for class {@link ForkJoinTask} stating that
* tasks should not normally entail blocking operations. But if
* they do, they must abort them on interrupt.)
*
* @return {@code true} if terminating but not yet terminated
*/
public boolean isTerminating() {
long c = ctl;
return ((c & STOP_BIT) != 0L &&
(short)(c >>> TC_SHIFT) != -parallelism);
}
/**
* Returns {@code true} if this pool has been shut down.
*
* @return {@code true} if this pool has been shut down
*/
public boolean isShutdown() {
return runState < 0;
}
/**
* Blocks until all tasks have completed execution after a shutdown
* request, or the timeout occurs, or the current thread is
* interrupted, whichever happens first.
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @return {@code true} if this executor terminated and
* {@code false} if the timeout elapsed before termination
* @throws InterruptedException if interrupted while waiting
*/
public boolean awaitTermination(long timeout, TimeUnit unit)
throws InterruptedException {
long nanos = unit.toNanos(timeout);
final Mutex lock = this.lock;
lock.lock();
try {
for (;;) {
if (isTerminated())
return true;
if (nanos <= 0)
return false;
nanos = termination.awaitNanos(nanos);
}
} finally {
lock.unlock();
}
}
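    /*
     * A minimal shutdown sketch, mirroring the usual ExecutorService
     * pattern (the timeout value is arbitrary):
     *
     *   pool.shutdown();                                 // stop accepting new tasks
     *   if (!pool.awaitTermination(60, TimeUnit.SECONDS))
     *       pool.shutdownNow();                          // cancel remaining tasks
     *
     * awaitTermination blocks on the termination condition that
     * tryTerminate signals once the worker count reaches zero.
     */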
/**
* Interface for extending managed parallelism for tasks running
* in {@link ForkJoinPool}s.
*
* <p>A {@code ManagedBlocker} provides two methods. Method
* {@code isReleasable} must return {@code true} if blocking is
* not necessary. Method {@code block} blocks the current thread
* if necessary (perhaps internally invoking {@code isReleasable}
* before actually blocking). These actions are performed by any
* thread invoking {@link ForkJoinPool#managedBlock}. The
* unusual methods in this API accommodate synchronizers that may,
* but don't usually, block for long periods. Similarly, they
* allow more efficient internal handling of cases in which
* additional workers may be, but usually are not, needed to
* ensure sufficient parallelism. Toward this end,
* implementations of method {@code isReleasable} must be amenable
* to repeated invocation.
*
* <p>For example, here is a ManagedBlocker based on a
* ReentrantLock:
* <pre> {@code
* class ManagedLocker implements ManagedBlocker {
* final ReentrantLock lock;
* boolean hasLock = false;
* ManagedLocker(ReentrantLock lock) { this.lock = lock; }
* public boolean block() {
* if (!hasLock)
* lock.lock();
* return true;
* }
* public boolean isReleasable() {
* return hasLock || (hasLock = lock.tryLock());
* }
* }}</pre>
*
* <p>Here is a class that possibly blocks waiting for an
* item on a given queue:
* <pre> {@code
* class QueueTaker<E> implements ManagedBlocker {
* final BlockingQueue<E> queue;
* volatile E item = null;
* QueueTaker(BlockingQueue<E> q) { this.queue = q; }
* public boolean block() throws InterruptedException {
* if (item == null)
* item = queue.take();
* return true;
* }
* public boolean isReleasable() {
* return item != null || (item = queue.poll()) != null;
* }
* public E getItem() { // call after pool.managedBlock completes
* return item;
* }
* }}</pre>
*/
public static interface ManagedBlocker {
/**
* Possibly blocks the current thread, for example waiting for
* a lock or condition.
*
* @return {@code true} if no additional blocking is necessary
* (i.e., if isReleasable would return true)
* @throws InterruptedException if interrupted while waiting
* (the method is not required to do so, but is allowed to)
*/
boolean block() throws InterruptedException;
/**
* Returns {@code true} if blocking is unnecessary.
*/
boolean isReleasable();
}
/**
* Blocks in accord with the given blocker. If the current thread
* is a {@link ForkJoinWorkerThread}, this method possibly
* arranges for a spare thread to be activated if necessary to
* ensure sufficient parallelism while the current thread is blocked.
*
* <p>If the caller is not a {@link ForkJoinTask}, this method is
* behaviorally equivalent to
* <pre> {@code
* while (!blocker.isReleasable())
* if (blocker.block())
* return;
* }</pre>
*
* If the caller is a {@code ForkJoinTask}, then the pool may
* first be expanded to ensure parallelism, and later adjusted.
*
* @param blocker the blocker
* @throws InterruptedException if blocker.block did so
*/
public static void managedBlock(ManagedBlocker blocker)
throws InterruptedException {
Thread t = Thread.currentThread();
ForkJoinPool p = ((t instanceof ForkJoinWorkerThread) ?
((ForkJoinWorkerThread)t).pool : null);
while (!blocker.isReleasable()) {
if (p == null || p.tryCompensate(null, blocker)) {
try {
do {} while (!blocker.isReleasable() && !blocker.block());
} finally {
if (p != null)
p.incrementActiveCount();
}
break;
}
}
}
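    /*
     * Usage sketch for managedBlock, reusing the QueueTaker example from the
     * ManagedBlocker javadoc above (queue is an assumed BlockingQueue<String>):
     *
     *   QueueTaker<String> taker = new QueueTaker<String>(queue);
     *   ForkJoinPool.managedBlock(taker);  // may throw InterruptedException
     *   String item = taker.getItem();
     *
     * When called from a worker thread, tryCompensate may activate a spare
     * thread before blocking; otherwise this simply loops on
     * isReleasable()/block() as shown in the javadoc.
     */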
// AbstractExecutorService overrides. These rely on undocumented
// fact that ForkJoinTask.adapt returns ForkJoinTasks that also
// implement RunnableFuture.
protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
return new ForkJoinTask.AdaptedRunnable<T>(runnable, value);
}
protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
return new ForkJoinTask.AdaptedCallable<T>(callable);
}
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long CTL;
private static final long PARKBLOCKER;
private static final int ABASE;
private static final int ASHIFT;
static {
poolNumberGenerator = new AtomicInteger();
nextSubmitterSeed = new AtomicInteger(0x55555555);
modifyThreadPermission = new RuntimePermission("modifyThread");
defaultForkJoinWorkerThreadFactory =
new DefaultForkJoinWorkerThreadFactory();
submitters = new ThreadSubmitter();
int s;
try {
U = getUnsafe();
Class<?> k = ForkJoinPool.class;
Class<?> ak = ForkJoinTask[].class;
CTL = U.objectFieldOffset
(k.getDeclaredField("ctl"));
Class<?> tk = Thread.class;
PARKBLOCKER = U.objectFieldOffset
(tk.getDeclaredField("parkBlocker"));
ABASE = U.arrayBaseOffset(ak);
s = U.arrayIndexScale(ak);
} catch (Exception e) {
throw new Error(e);
}
if ((s & (s-1)) != 0)
throw new Error("data type scale not a power of two");
ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException se) {
try {
return java.security.AccessController.doPrivileged
(new java.security
.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
java.lang.reflect.Field f = sun.misc
.Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
return (sun.misc.Unsafe) f.get(null);
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/jsr166y/ForkJoinTask.java
|
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166y;
import java.io.Serializable;
import java.util.Collection;
import java.util.List;
import java.util.RandomAccess;
import java.lang.ref.WeakReference;
import java.lang.ref.ReferenceQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RunnableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.ReentrantLock;
import java.lang.reflect.Constructor;
/**
* Abstract base class for tasks that run within a {@link ForkJoinPool}.
* A {@code ForkJoinTask} is a thread-like entity that is much
* lighter weight than a normal thread. Huge numbers of tasks and
* subtasks may be hosted by a small number of actual threads in a
* ForkJoinPool, at the price of some usage limitations.
*
* <p>A "main" {@code ForkJoinTask} begins execution when submitted
* to a {@link ForkJoinPool}. Once started, it will usually in turn
* start other subtasks. As indicated by the name of this class,
* many programs using {@code ForkJoinTask} employ only methods
* {@link #fork} and {@link #join}, or derivatives such as {@link
* #invokeAll(ForkJoinTask...) invokeAll}. However, this class also
* provides a number of other methods that can come into play in
* advanced usages, as well as extension mechanics that allow
* support of new forms of fork/join processing.
*
* <p>A {@code ForkJoinTask} is a lightweight form of {@link Future}.
* The efficiency of {@code ForkJoinTask}s stems from a set of
* restrictions (that are only partially statically enforceable)
* reflecting their main use as computational tasks calculating pure
* functions or operating on purely isolated objects. The primary
* coordination mechanisms are {@link #fork}, that arranges
* asynchronous execution, and {@link #join}, that doesn't proceed
* until the task's result has been computed. Computations should
* ideally avoid {@code synchronized} methods or blocks, and should
* minimize other blocking synchronization apart from joining other
* tasks or using synchronizers such as Phasers that are advertised to
* cooperate with fork/join scheduling. Subdividable tasks should also
* not perform blocking IO, and should ideally access variables that
* are completely independent of those accessed by other running
* tasks. These guidelines are loosely enforced by not permitting
* checked exceptions such as {@code IOExceptions} to be
* thrown. However, computations may still encounter unchecked
* exceptions, that are rethrown to callers attempting to join
* them. These exceptions may additionally include {@link
* RejectedExecutionException} stemming from internal resource
* exhaustion, such as failure to allocate internal task
* queues. Rethrown exceptions behave in the same way as regular
* exceptions, but, when possible, contain stack traces (as displayed
* for example using {@code ex.printStackTrace()}) of both the thread
* that initiated the computation as well as the thread actually
* encountering the exception; minimally only the latter.
*
* <p>It is possible to define and use ForkJoinTasks that may block,
 * but doing so requires three further considerations: (1) Completion
* of few if any <em>other</em> tasks should be dependent on a task
* that blocks on external synchronization or IO. Event-style async
* tasks that are never joined (for example, those subclassing {@link
* CountedCompleter}) often fall into this category. (2) To minimize
* resource impact, tasks should be small; ideally performing only the
* (possibly) blocking action. (3) Unless the {@link
* ForkJoinPool.ManagedBlocker} API is used, or the number of possibly
* blocked tasks is known to be less than the pool's {@link
* ForkJoinPool#getParallelism} level, the pool cannot guarantee that
* enough threads will be available to ensure progress or good
* performance.
*
* <p>The primary method for awaiting completion and extracting
* results of a task is {@link #join}, but there are several variants:
* The {@link Future#get} methods support interruptible and/or timed
* waits for completion and report results using {@code Future}
* conventions. Method {@link #invoke} is semantically
* equivalent to {@code fork(); join()} but always attempts to begin
* execution in the current thread. The "<em>quiet</em>" forms of
* these methods do not extract results or report exceptions. These
* may be useful when a set of tasks are being executed, and you need
* to delay processing of results or exceptions until all complete.
* Method {@code invokeAll} (available in multiple versions)
* performs the most common form of parallel invocation: forking a set
* of tasks and joining them all.
*
* <p>In the most typical usages, a fork-join pair act like a call
* (fork) and return (join) from a parallel recursive function. As is
* the case with other forms of recursive calls, returns (joins)
* should be performed innermost-first. For example, {@code a.fork();
* b.fork(); b.join(); a.join();} is likely to be substantially more
* efficient than joining {@code a} before {@code b}.
*
* <p>The execution status of tasks may be queried at several levels
* of detail: {@link #isDone} is true if a task completed in any way
* (including the case where a task was cancelled without executing);
* {@link #isCompletedNormally} is true if a task completed without
* cancellation or encountering an exception; {@link #isCancelled} is
* true if the task was cancelled (in which case {@link #getException}
* returns a {@link java.util.concurrent.CancellationException}); and
* {@link #isCompletedAbnormally} is true if a task was either
* cancelled or encountered an exception, in which case {@link
* #getException} will return either the encountered exception or
* {@link java.util.concurrent.CancellationException}.
*
* <p>The ForkJoinTask class is not usually directly subclassed.
* Instead, you subclass one of the abstract classes that support a
* particular style of fork/join processing, typically {@link
* RecursiveAction} for most computations that do not return results,
* {@link RecursiveTask} for those that do, and {@link
* CountedCompleter} for those in which completed actions trigger
* other actions. Normally, a concrete ForkJoinTask subclass declares
* fields comprising its parameters, established in a constructor, and
* then defines a {@code compute} method that somehow uses the control
* methods supplied by this base class. While these methods have
* {@code public} access (to allow instances of different task
* subclasses to call each other's methods), some of them may only be
* called from within other ForkJoinTasks (as may be determined using
* method {@link #inForkJoinPool}). Attempts to invoke them in other
* contexts result in exceptions or errors, possibly including {@code
* ClassCastException}.
*
* <p>Method {@link #join} and its variants are appropriate for use
* only when completion dependencies are acyclic; that is, the
* parallel computation can be described as a directed acyclic graph
* (DAG). Otherwise, executions may encounter a form of deadlock as
* tasks cyclically wait for each other. However, this framework
* supports other methods and techniques (for example the use of
* {@link Phaser}, {@link #helpQuiesce}, and {@link #complete}) that
* may be of use in constructing custom subclasses for problems that
* are not statically structured as DAGs. To support such usages a
* ForkJoinTask may be atomically <em>tagged</em> with a {@code short}
* value using {@link #setForkJoinTaskTag} or {@link
* #compareAndSetForkJoinTaskTag} and checked using {@link
* #getForkJoinTaskTag}. The ForkJoinTask implementation does not use
* these {@code protected} methods or tags for any purpose, but they
* may be of use in the construction of specialized subclasses. For
* example, parallel graph traversals can use the supplied methods to
* avoid revisiting nodes/tasks that have already been processed.
* (Method names for tagging are bulky in part to encourage definition
* of methods that reflect their usage patterns.)
*
* <p>Most base support methods are {@code final}, to prevent
* overriding of implementations that are intrinsically tied to the
* underlying lightweight task scheduling framework. Developers
* creating new basic styles of fork/join processing should minimally
* implement {@code protected} methods {@link #exec}, {@link
* #setRawResult}, and {@link #getRawResult}, while also introducing
* an abstract computational method that can be implemented in its
* subclasses, possibly relying on other {@code protected} methods
* provided by this class.
*
* <p>ForkJoinTasks should perform relatively small amounts of
* computation. Large tasks should be split into smaller subtasks,
* usually via recursive decomposition. As a very rough rule of thumb,
* a task should perform more than 100 and less than 10000 basic
* computational steps, and should avoid indefinite looping. If tasks
* are too big, then parallelism cannot improve throughput. If too
* small, then memory and internal task maintenance overhead may
* overwhelm processing.
*
* <p>This class provides {@code adapt} methods for {@link Runnable}
* and {@link Callable}, that may be of use when mixing execution of
* {@code ForkJoinTasks} with other kinds of tasks. When all tasks are
* of this form, consider using a pool constructed in <em>asyncMode</em>.
*
* <p>ForkJoinTasks are {@code Serializable}, which enables them to be
* used in extensions such as remote execution frameworks. It is
* sensible to serialize tasks only before or after, but not during,
* execution. Serialization is not relied on during execution itself.
*
* @since 1.7
* @author Doug Lea
*/
public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
/*
* See the internal documentation of class ForkJoinPool for a
* general implementation overview. ForkJoinTasks are mainly
* responsible for maintaining their "status" field amidst relays
* to methods in ForkJoinWorkerThread and ForkJoinPool.
*
* The methods of this class are more-or-less layered into
* (1) basic status maintenance
* (2) execution and awaiting completion
* (3) user-level methods that additionally report results.
* This is sometimes hard to see because this file orders exported
* methods in a way that flows well in javadocs.
*/
/*
* The status field holds run control status bits packed into a
* single int to minimize footprint and to ensure atomicity (via
* CAS). Status is initially zero, and takes on nonnegative
* values until completed, upon which status (anded with
* DONE_MASK) holds value NORMAL, CANCELLED, or EXCEPTIONAL. Tasks
* undergoing blocking waits by other threads have the SIGNAL bit
* set. Completion of a stolen task with SIGNAL set awakens any
* waiters via notifyAll. Even though suboptimal for some
* purposes, we use basic builtin wait/notify to take advantage of
* "monitor inflation" in JVMs that we would otherwise need to
* emulate to avoid adding further per-task bookkeeping overhead.
* We want these monitors to be "fat", i.e., not use biasing or
* thin-lock techniques, so use some odd coding idioms that tend
* to avoid them, mainly by arranging that every synchronized
* block performs a wait, notifyAll or both.
*
* These control bits occupy only (some of) the upper half (16
* bits) of status field. The lower bits are used for user-defined
* tags.
*/
/** The run status of this task */
volatile int status; // accessed directly by pool and workers
static final int DONE_MASK = 0xf0000000; // mask out non-completion bits
static final int NORMAL = 0xf0000000; // must be negative
static final int CANCELLED = 0xc0000000; // must be < NORMAL
static final int EXCEPTIONAL = 0x80000000; // must be < CANCELLED
static final int SIGNAL = 0x00010000; // must be >= 1 << 16
static final int SMASK = 0x0000ffff; // short bits for tags
/**
* Marks completion and wakes up threads waiting to join this
* task.
*
* @param completion one of NORMAL, CANCELLED, EXCEPTIONAL
* @return completion status on exit
*/
private int setCompletion(int completion) {
for (int s;;) {
if ((s = status) < 0)
return s;
if (U.compareAndSwapInt(this, STATUS, s, s | completion)) {
if ((s >>> 16) != 0)
synchronized (this) { notifyAll(); }
return completion;
}
}
}
/**
* Primary execution method for stolen tasks. Unless done, calls
* exec and records status if completed, but doesn't wait for
* completion otherwise.
*
* @return status on exit from this method
*/
final int doExec() {
int s; boolean completed;
if ((s = status) >= 0) {
try {
completed = exec();
} catch (Throwable rex) {
return setExceptionalCompletion(rex);
}
if (completed)
s = setCompletion(NORMAL);
}
return s;
}
/**
* Tries to set SIGNAL status unless already completed. Used by
* ForkJoinPool. Other variants are directly incorporated into
* externalAwaitDone etc.
*
* @return true if successful
*/
final boolean trySetSignal() {
int s = status;
return s >= 0 && U.compareAndSwapInt(this, STATUS, s, s | SIGNAL);
}
/**
* Blocks a non-worker-thread until completion.
* @return status upon completion
*/
private int externalAwaitDone() {
boolean interrupted = false;
int s;
while ((s = status) >= 0) {
if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
synchronized (this) {
if (status >= 0) {
try {
wait();
} catch (InterruptedException ie) {
interrupted = true;
}
}
else
notifyAll();
}
}
}
if (interrupted)
Thread.currentThread().interrupt();
return s;
}
/**
* Blocks a non-worker-thread until completion or interruption.
*/
private int externalInterruptibleAwaitDone() throws InterruptedException {
int s;
if (Thread.interrupted())
throw new InterruptedException();
while ((s = status) >= 0) {
if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
synchronized (this) {
if (status >= 0)
wait();
else
notifyAll();
}
}
}
return s;
}
/**
* Implementation for join, get, quietlyJoin. Directly handles
* only cases of already-completed, external wait, and
* unfork+exec. Others are relayed to ForkJoinPool.awaitJoin.
*
* @return status upon completion
*/
private int doJoin() {
int s; Thread t; ForkJoinWorkerThread wt; ForkJoinPool.WorkQueue w;
if ((s = status) >= 0) {
if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) {
if (!(w = (wt = (ForkJoinWorkerThread)t).workQueue).
tryUnpush(this) || (s = doExec()) >= 0)
s = wt.pool.awaitJoin(w, this);
}
else
s = externalAwaitDone();
}
return s;
}
/**
* Implementation for invoke, quietlyInvoke.
*
* @return status upon completion
*/
private int doInvoke() {
int s; Thread t; ForkJoinWorkerThread wt;
if ((s = doExec()) >= 0) {
if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
s = (wt = (ForkJoinWorkerThread)t).pool.awaitJoin(wt.workQueue,
this);
else
s = externalAwaitDone();
}
return s;
}
// Exception table support
/**
* Table of exceptions thrown by tasks, to enable reporting by
* callers. Because exceptions are rare, we don't directly keep
* them with task objects, but instead use a weak ref table. Note
* that cancellation exceptions don't appear in the table, but are
* instead recorded as status values.
*
* Note: These statics are initialized below in static block.
*/
private static final ExceptionNode[] exceptionTable;
private static final ReentrantLock exceptionTableLock;
private static final ReferenceQueue<Object> exceptionTableRefQueue;
/**
* Fixed capacity for exceptionTable.
*/
private static final int EXCEPTION_MAP_CAPACITY = 32;
/**
* Key-value nodes for exception table. The chained hash table
* uses identity comparisons, full locking, and weak references
* for keys. The table has a fixed capacity because it only
* maintains task exceptions long enough for joiners to access
* them, so should never become very large for sustained
* periods. However, since we do not know when the last joiner
* completes, we must use weak references and expunge them. We do
* so on each operation (hence full locking). Also, some thread in
* any ForkJoinPool will call helpExpungeStaleExceptions when its
* pool becomes isQuiescent.
*/
static final class ExceptionNode extends WeakReference<ForkJoinTask<?>> {
final Throwable ex;
ExceptionNode next;
final long thrower; // use id not ref to avoid weak cycles
ExceptionNode(ForkJoinTask<?> task, Throwable ex, ExceptionNode next) {
super(task, exceptionTableRefQueue);
this.ex = ex;
this.next = next;
this.thrower = Thread.currentThread().getId();
}
}
/**
* Records exception and sets status.
*
* @return status on exit
*/
final int recordExceptionalCompletion(Throwable ex) {
int s;
if ((s = status) >= 0) {
int h = System.identityHashCode(this);
final ReentrantLock lock = exceptionTableLock;
lock.lock();
try {
expungeStaleExceptions();
ExceptionNode[] t = exceptionTable;
int i = h & (t.length - 1);
for (ExceptionNode e = t[i]; ; e = e.next) {
if (e == null) {
t[i] = new ExceptionNode(this, ex, t[i]);
break;
}
if (e.get() == this) // already present
break;
}
} finally {
lock.unlock();
}
s = setCompletion(EXCEPTIONAL);
}
return s;
}
/**
* Records exception and possibly propagates
*
* @return status on exit
*/
private int setExceptionalCompletion(Throwable ex) {
int s = recordExceptionalCompletion(ex);
if ((s & DONE_MASK) == EXCEPTIONAL)
internalPropagateException(ex);
return s;
}
/**
* Hook for exception propagation support for tasks with completers.
*/
void internalPropagateException(Throwable ex) {
}
/**
* Cancels, ignoring any exceptions thrown by cancel. Used during
* worker and pool shutdown. Cancel is spec'ed not to throw any
* exceptions, but if it does anyway, we have no recourse during
* shutdown, so guard against this case.
*/
static final void cancelIgnoringExceptions(ForkJoinTask<?> t) {
if (t != null && t.status >= 0) {
try {
t.cancel(false);
} catch (Throwable ignore) {
}
}
}
/**
* Removes exception node and clears status
*/
private void clearExceptionalCompletion() {
int h = System.identityHashCode(this);
final ReentrantLock lock = exceptionTableLock;
lock.lock();
try {
ExceptionNode[] t = exceptionTable;
int i = h & (t.length - 1);
ExceptionNode e = t[i];
ExceptionNode pred = null;
while (e != null) {
ExceptionNode next = e.next;
if (e.get() == this) {
if (pred == null)
t[i] = next;
else
pred.next = next;
break;
}
pred = e;
e = next;
}
expungeStaleExceptions();
status = 0;
} finally {
lock.unlock();
}
}
/**
* Returns a rethrowable exception for the given task, if
* available. To provide accurate stack traces, if the exception
* was not thrown by the current thread, we try to create a new
* exception of the same type as the one thrown, but with the
* recorded exception as its cause. If there is no such
* constructor, we instead try to use a no-arg constructor,
* followed by initCause, to the same effect. If none of these
* apply, or any fail due to other exceptions, we return the
* recorded exception, which is still correct, although it may
* contain a misleading stack trace.
*
* @return the exception, or null if none
*/
private Throwable getThrowableException() {
if ((status & DONE_MASK) != EXCEPTIONAL)
return null;
int h = System.identityHashCode(this);
ExceptionNode e;
final ReentrantLock lock = exceptionTableLock;
lock.lock();
try {
expungeStaleExceptions();
ExceptionNode[] t = exceptionTable;
e = t[h & (t.length - 1)];
while (e != null && e.get() != this)
e = e.next;
} finally {
lock.unlock();
}
Throwable ex;
if (e == null || (ex = e.ex) == null)
return null;
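        // Note: the reconstruction branch below is disabled by the leading
        // "false &&", so the recorded exception is always returned as-is
        // (its stack trace reflects the throwing thread, not the joiner).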
if (false && e.thrower != Thread.currentThread().getId()) {
Class<? extends Throwable> ec = ex.getClass();
try {
Constructor<?> noArgCtor = null;
Constructor<?>[] cs = ec.getConstructors();// public ctors only
for (int i = 0; i < cs.length; ++i) {
Constructor<?> c = cs[i];
Class<?>[] ps = c.getParameterTypes();
if (ps.length == 0)
noArgCtor = c;
else if (ps.length == 1 && ps[0] == Throwable.class)
return (Throwable)(c.newInstance(ex));
}
if (noArgCtor != null) {
Throwable wx = (Throwable)(noArgCtor.newInstance());
wx.initCause(ex);
return wx;
}
} catch (Exception ignore) {
}
}
return ex;
}
/**
* Poll stale refs and remove them. Call only while holding lock.
*/
private static void expungeStaleExceptions() {
for (Object x; (x = exceptionTableRefQueue.poll()) != null;) {
if (x instanceof ExceptionNode) {
ForkJoinTask<?> key = ((ExceptionNode)x).get();
ExceptionNode[] t = exceptionTable;
int i = System.identityHashCode(key) & (t.length - 1);
ExceptionNode e = t[i];
ExceptionNode pred = null;
while (e != null) {
ExceptionNode next = e.next;
if (e == x) {
if (pred == null)
t[i] = next;
else
pred.next = next;
break;
}
pred = e;
e = next;
}
}
}
}
/**
* If lock is available, poll stale refs and remove them.
* Called from ForkJoinPool when pools become quiescent.
*/
static final void helpExpungeStaleExceptions() {
final ReentrantLock lock = exceptionTableLock;
if (lock.tryLock()) {
try {
expungeStaleExceptions();
} finally {
lock.unlock();
}
}
}
/**
* Throws exception, if any, associated with the given status.
*/
private void reportException(int s) {
Throwable ex = ((s == CANCELLED) ? new CancellationException() :
(s == EXCEPTIONAL) ? getThrowableException() :
null);
if (ex != null)
U.throwException(ex);
}
// public methods
/**
* Arranges to asynchronously execute this task. While it is not
* necessarily enforced, it is a usage error to fork a task more
* than once unless it has completed and been reinitialized.
* Subsequent modifications to the state of this task or any data
* it operates on are not necessarily consistently observable by
* any thread other than the one executing it unless preceded by a
* call to {@link #join} or related methods, or a call to {@link
* #isDone} returning {@code true}.
*
* <p>This method may be invoked only from within {@code
* ForkJoinPool} computations (as may be determined using method
* {@link #inForkJoinPool}). Attempts to invoke in other contexts
* result in exceptions or errors, possibly including {@code
* ClassCastException}.
*
* @return {@code this}, to simplify usage
*/
public final ForkJoinTask<V> fork() {
((ForkJoinWorkerThread)Thread.currentThread()).workQueue.push(this);
return this;
}
/**
* Returns the result of the computation when it {@link #isDone is
* done}. This method differs from {@link #get()} in that
* abnormal completion results in {@code RuntimeException} or
* {@code Error}, not {@code ExecutionException}, and that
* interrupts of the calling thread do <em>not</em> cause the
* method to abruptly return by throwing {@code
* InterruptedException}.
*
* @return the computed result
*/
public final V join() {
int s;
if ((s = doJoin() & DONE_MASK) != NORMAL)
reportException(s);
return getRawResult();
}
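    /*
     * A divide-and-conquer sketch, assuming a hypothetical SumTask extends
     * RecursiveTask<Long> with fields a, lo, hi and a constant THRESHOLD:
     *
     *   protected Long compute() {
     *       if (hi - lo <= THRESHOLD) {
     *           long s = 0;
     *           for (int i = lo; i < hi; ++i) s += a[i];
     *           return s;
     *       }
     *       int mid = (lo + hi) >>> 1;
     *       SumTask left  = new SumTask(a, lo, mid);
     *       SumTask right = new SumTask(a, mid, hi);
     *       left.fork();               // run left asynchronously
     *       long r = right.compute();  // run right in this thread
     *       return r + left.join();    // join the forked task last
     *   }
     *
     * Joining the forked subtask last follows the innermost-first advice in
     * the class javadoc above.
     */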
/**
* Commences performing this task, awaits its completion if
* necessary, and returns its result, or throws an (unchecked)
* {@code RuntimeException} or {@code Error} if the underlying
* computation did so.
*
* @return the computed result
*/
public final V invoke() {
int s;
if ((s = doInvoke() & DONE_MASK) != NORMAL)
reportException(s);
return getRawResult();
}
/**
* Forks the given tasks, returning when {@code isDone} holds for
* each task or an (unchecked) exception is encountered, in which
* case the exception is rethrown. If more than one task
* encounters an exception, then this method throws any one of
* these exceptions. If any task encounters an exception, the
* other may be cancelled. However, the execution status of
* individual tasks is not guaranteed upon exceptional return. The
* status of each task may be obtained using {@link
* #getException()} and related methods to check if they have been
* cancelled, completed normally or exceptionally, or left
* unprocessed.
*
* <p>This method may be invoked only from within {@code
* ForkJoinPool} computations (as may be determined using method
* {@link #inForkJoinPool}). Attempts to invoke in other contexts
* result in exceptions or errors, possibly including {@code
* ClassCastException}.
*
* @param t1 the first task
* @param t2 the second task
* @throws NullPointerException if any task is null
*/
public static void invokeAll(ForkJoinTask<?> t1, ForkJoinTask<?> t2) {
int s1, s2;
t2.fork();
if ((s1 = t1.doInvoke() & DONE_MASK) != NORMAL)
t1.reportException(s1);
if ((s2 = t2.doJoin() & DONE_MASK) != NORMAL)
t2.reportException(s2);
}
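    /*
     * Sketch: running two halves of a computation in parallel from within a
     * worker; left and right are assumed subtasks of the caller:
     *
     *   invokeAll(left, right);
     *   return left.join() + right.join();  // both done; joins just fetch results
     *
     * Because this method only returns normally once isDone holds for both
     * tasks, the subsequent joins do not block.
     */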
/**
* Forks the given tasks, returning when {@code isDone} holds for
* each task or an (unchecked) exception is encountered, in which
* case the exception is rethrown. If more than one task
* encounters an exception, then this method throws any one of
* these exceptions. If any task encounters an exception, others
* may be cancelled. However, the execution status of individual
* tasks is not guaranteed upon exceptional return. The status of
* each task may be obtained using {@link #getException()} and
* related methods to check if they have been cancelled, completed
* normally or exceptionally, or left unprocessed.
*
* <p>This method may be invoked only from within {@code
* ForkJoinPool} computations (as may be determined using method
* {@link #inForkJoinPool}). Attempts to invoke in other contexts
* result in exceptions or errors, possibly including {@code
* ClassCastException}.
*
* @param tasks the tasks
* @throws NullPointerException if any task is null
*/
public static void invokeAll(ForkJoinTask<?>... tasks) {
Throwable ex = null;
int last = tasks.length - 1;
for (int i = last; i >= 0; --i) {
ForkJoinTask<?> t = tasks[i];
if (t == null) {
if (ex == null)
ex = new NullPointerException();
}
else if (i != 0)
t.fork();
else if (t.doInvoke() < NORMAL && ex == null)
ex = t.getException();
}
for (int i = 1; i <= last; ++i) {
ForkJoinTask<?> t = tasks[i];
if (t != null) {
if (ex != null)
t.cancel(false);
else if (t.doJoin() < NORMAL)
ex = t.getException();
}
}
if (ex != null)
U.throwException(ex);
}
/**
* Forks all tasks in the specified collection, returning when
* {@code isDone} holds for each task or an (unchecked) exception
* is encountered, in which case the exception is rethrown. If
* more than one task encounters an exception, then this method
* throws any one of these exceptions. If any task encounters an
* exception, others may be cancelled. However, the execution
* status of individual tasks is not guaranteed upon exceptional
* return. The status of each task may be obtained using {@link
* #getException()} and related methods to check if they have been
* cancelled, completed normally or exceptionally, or left
* unprocessed.
*
* <p>This method may be invoked only from within {@code
* ForkJoinPool} computations (as may be determined using method
* {@link #inForkJoinPool}). Attempts to invoke in other contexts
* result in exceptions or errors, possibly including {@code
* ClassCastException}.
*
* @param tasks the collection of tasks
* @return the tasks argument, to simplify usage
* @throws NullPointerException if tasks or any element are null
*/
public static <T extends ForkJoinTask<?>> Collection<T> invokeAll(Collection<T> tasks) {
if (!(tasks instanceof RandomAccess) || !(tasks instanceof List<?>)) {
invokeAll(tasks.toArray(new ForkJoinTask<?>[tasks.size()]));
return tasks;
}
@SuppressWarnings("unchecked")
List<? extends ForkJoinTask<?>> ts =
(List<? extends ForkJoinTask<?>>) tasks;
Throwable ex = null;
int last = ts.size() - 1;
for (int i = last; i >= 0; --i) {
ForkJoinTask<?> t = ts.get(i);
if (t == null) {
if (ex == null)
ex = new NullPointerException();
}
else if (i != 0)
t.fork();
else if (t.doInvoke() < NORMAL && ex == null)
ex = t.getException();
}
for (int i = 1; i <= last; ++i) {
ForkJoinTask<?> t = ts.get(i);
if (t != null) {
if (ex != null)
t.cancel(false);
else if (t.doJoin() < NORMAL)
ex = t.getException();
}
}
if (ex != null)
U.throwException(ex);
return tasks;
}
/**
* Attempts to cancel execution of this task. This attempt will
* fail if the task has already completed or could not be
* cancelled for some other reason. If successful, and this task
* has not started when {@code cancel} is called, execution of
* this task is suppressed. After this method returns
* successfully, unless there is an intervening call to {@link
* #reinitialize}, subsequent calls to {@link #isCancelled},
* {@link #isDone}, and {@code cancel} will return {@code true}
* and calls to {@link #join} and related methods will result in
* {@code CancellationException}.
*
* <p>This method may be overridden in subclasses, but if so, must
* still ensure that these properties hold. In particular, the
* {@code cancel} method itself must not throw exceptions.
*
* <p>This method is designed to be invoked by <em>other</em>
* tasks. To terminate the current task, you can just return or
* throw an unchecked exception from its computation method, or
* invoke {@link #completeExceptionally}.
*
* @param mayInterruptIfRunning this value has no effect in the
* default implementation because interrupts are not used to
* control cancellation.
*
* @return {@code true} if this task is now cancelled
*/
public boolean cancel(boolean mayInterruptIfRunning) {
return (setCompletion(CANCELLED) & DONE_MASK) == CANCELLED;
}
public final boolean isDone() {
return status < 0;
}
public final boolean isCancelled() {
return (status & DONE_MASK) == CANCELLED;
}
/**
* Returns {@code true} if this task threw an exception or was cancelled.
*
* @return {@code true} if this task threw an exception or was cancelled
*/
public final boolean isCompletedAbnormally() {
return status < NORMAL;
}
/**
* Returns {@code true} if this task completed without throwing an
* exception and was not cancelled.
*
* @return {@code true} if this task completed without throwing an
* exception and was not cancelled
*/
public final boolean isCompletedNormally() {
return (status & DONE_MASK) == NORMAL;
}
/**
* Returns the exception thrown by the base computation, or a
* {@code CancellationException} if cancelled, or {@code null} if
* none or if the method has not yet completed.
*
* @return the exception, or {@code null} if none
*/
public final Throwable getException() {
int s = status & DONE_MASK;
return ((s >= NORMAL) ? null :
(s == CANCELLED) ? new CancellationException() :
getThrowableException());
}
/**
* Completes this task abnormally, and if not already aborted or
* cancelled, causes it to throw the given exception upon
* {@code join} and related operations. This method may be used
* to induce exceptions in asynchronous tasks, or to force
* completion of tasks that would not otherwise complete. Its use
* in other situations is discouraged. This method is
* overridable, but overridden versions must invoke {@code super}
* implementation to maintain guarantees.
*
* @param ex the exception to throw. If this exception is not a
* {@code RuntimeException} or {@code Error}, the actual exception
* thrown will be a {@code RuntimeException} with cause {@code ex}.
*/
public void completeExceptionally(Throwable ex) {
setExceptionalCompletion((ex instanceof RuntimeException) ||
(ex instanceof Error) ? ex :
new RuntimeException(ex));
}
/**
     * Completes this task and, if not already aborted or cancelled,
     * returns the given value as the result of subsequent
     * invocations of {@code join} and related operations. This method
* may be used to provide results for asynchronous tasks, or to
* provide alternative handling for tasks that would not otherwise
* complete normally. Its use in other situations is
* discouraged. This method is overridable, but overridden
* versions must invoke {@code super} implementation to maintain
* guarantees.
*
* @param value the result value for this task
*/
public void complete(V value) {
try {
setRawResult(value);
} catch (Throwable rex) {
setExceptionalCompletion(rex);
return;
}
setCompletion(NORMAL);
}
/**
* Completes this task normally without setting a value. The most
* recent value established by {@link #setRawResult} (or {@code
* null} by default) will be returned as the result of subsequent
* invocations of {@code join} and related operations.
*
* @since 1.8
*/
public final void quietlyComplete() {
setCompletion(NORMAL);
}
/**
* Waits if necessary for the computation to complete, and then
* retrieves its result.
*
* @return the computed result
* @throws CancellationException if the computation was cancelled
* @throws ExecutionException if the computation threw an
* exception
* @throws InterruptedException if the current thread is not a
* member of a ForkJoinPool and was interrupted while waiting
*/
public final V get() throws InterruptedException, ExecutionException {
int s = (Thread.currentThread() instanceof ForkJoinWorkerThread) ?
doJoin() : externalInterruptibleAwaitDone();
Throwable ex;
if ((s &= DONE_MASK) == CANCELLED)
throw new CancellationException();
if (s == EXCEPTIONAL && (ex = getThrowableException()) != null)
throw new ExecutionException(ex);
return getRawResult();
}
/**
* Waits if necessary for at most the given time for the computation
* to complete, and then retrieves its result, if available.
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @return the computed result
* @throws CancellationException if the computation was cancelled
* @throws ExecutionException if the computation threw an
* exception
* @throws InterruptedException if the current thread is not a
* member of a ForkJoinPool and was interrupted while waiting
* @throws TimeoutException if the wait timed out
*/
public final V get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
if (Thread.interrupted())
throw new InterruptedException();
// Messy in part because we measure in nanosecs, but wait in millisecs
int s; long ns, ms;
if ((s = status) >= 0 && (ns = unit.toNanos(timeout)) > 0L) {
long deadline = System.nanoTime() + ns;
ForkJoinPool p = null;
ForkJoinPool.WorkQueue w = null;
Thread t = Thread.currentThread();
if (t instanceof ForkJoinWorkerThread) {
ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t;
p = wt.pool;
w = wt.workQueue;
s = p.helpJoinOnce(w, this); // no retries on failure
}
boolean canBlock = false;
boolean interrupted = false;
try {
while ((s = status) >= 0) {
if (w != null && w.runState < 0)
cancelIgnoringExceptions(this);
else if (!canBlock) {
if (p == null || p.tryCompensate(this, null))
canBlock = true;
}
else {
if ((ms = TimeUnit.NANOSECONDS.toMillis(ns)) > 0L &&
U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
synchronized (this) {
if (status >= 0) {
try {
wait(ms);
} catch (InterruptedException ie) {
if (p == null)
interrupted = true;
}
}
else
notifyAll();
}
}
if ((s = status) < 0 || interrupted ||
(ns = deadline - System.nanoTime()) <= 0L)
break;
}
}
} finally {
if (p != null && canBlock)
p.incrementActiveCount();
}
if (interrupted)
throw new InterruptedException();
}
if ((s &= DONE_MASK) != NORMAL) {
Throwable ex;
if (s == CANCELLED)
throw new CancellationException();
if (s != EXCEPTIONAL)
throw new TimeoutException();
if ((ex = getThrowableException()) != null)
throw new ExecutionException(ex);
}
return getRawResult();
}
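    /*
     * Illustrative caller-side sketch (hypothetical, not part of the
     * original sources): a non-pool thread waiting a bounded time for a
     * submitted task, distinguishing timeout from task failure.
     *
     *   ForkJoinPool pool = new ForkJoinPool();
     *   ForkJoinTask<Integer> task = pool.submit(new Callable<Integer>() {
     *     public Integer call() { return 6 * 7; }
     *   });
     *   try {
     *     Integer v = task.get(100, TimeUnit.MILLISECONDS);
     *   } catch (TimeoutException te) {
     *     task.cancel(false);                  // not done in time: give up on it
     *   } catch (ExecutionException ee) {
     *     Throwable cause = ee.getCause();     // the task's own exception
     *   } catch (InterruptedException ie) {
     *     Thread.currentThread().interrupt();  // restore interrupt status
     *   }
     */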
/**
* Joins this task, without returning its result or throwing its
* exception. This method may be useful when processing
* collections of tasks when some have been cancelled or otherwise
* known to have aborted.
*/
public final void quietlyJoin() {
doJoin();
}
/**
* Commences performing this task and awaits its completion if
* necessary, without returning its result or throwing its
* exception.
*/
public final void quietlyInvoke() {
doInvoke();
}
/**
* Possibly executes tasks until the pool hosting the current task
* {@link ForkJoinPool#isQuiescent is quiescent}. This method may
* be of use in designs in which many tasks are forked, but none
* are explicitly joined, instead executing them until all are
* processed.
*
* <p>This method may be invoked only from within {@code
* ForkJoinPool} computations (as may be determined using method
* {@link #inForkJoinPool}). Attempts to invoke in other contexts
* result in exceptions or errors, possibly including {@code
* ClassCastException}.
*/
public static void helpQuiesce() {
ForkJoinWorkerThread wt =
(ForkJoinWorkerThread)Thread.currentThread();
wt.pool.helpQuiescePool(wt.workQueue);
}
/**
* Resets the internal bookkeeping state of this task, allowing a
* subsequent {@code fork}. This method allows repeated reuse of
* this task, but only if reuse occurs when this task has either
* never been forked, or has been forked, then completed and all
* outstanding joins of this task have also completed. Effects
* under any other usage conditions are not guaranteed.
* This method may be useful when executing
* pre-constructed trees of subtasks in loops.
*
* <p>Upon completion of this method, {@code isDone()} reports
* {@code false}, and {@code getException()} reports {@code
* null}. However, the value returned by {@code getRawResult} is
* unaffected. To clear this value, you can invoke {@code
* setRawResult(null)}.
*/
public void reinitialize() {
if ((status & DONE_MASK) == EXCEPTIONAL)
clearExceptionalCompletion();
else
status = 0;
}
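    /*
     * Illustrative sketch (hypothetical, not from the original sources):
     * reusing one pre-constructed root task across loop iterations by
     * calling reinitialize() between invocations, as suggested above.
     *
     *   SumAction task = new SumAction(array, 0, array.length);  // see the invokeAll sketch above
     *   ForkJoinPool pool = new ForkJoinPool();
     *   for (int iter = 0; iter < rounds; ++iter) {
     *     pool.invoke(task);       // run to completion
     *     consume(task.result);    // hypothetical use of this round's result
     *     task.result = 0;
     *     task.reinitialize();     // allow the next invoke/fork of the same task
     *   }
     */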
/**
* Returns the pool hosting the current task execution, or null
* if this task is executing outside of any ForkJoinPool.
*
* @see #inForkJoinPool
* @return the pool, or {@code null} if none
*/
public static ForkJoinPool getPool() {
Thread t = Thread.currentThread();
return (t instanceof ForkJoinWorkerThread) ?
((ForkJoinWorkerThread) t).pool : null;
}
/**
* Returns {@code true} if the current thread is a {@link
* ForkJoinWorkerThread} executing as a ForkJoinPool computation.
*
* @return {@code true} if the current thread is a {@link
* ForkJoinWorkerThread} executing as a ForkJoinPool computation,
* or {@code false} otherwise
*/
public static boolean inForkJoinPool() {
return Thread.currentThread() instanceof ForkJoinWorkerThread;
}
/**
* Tries to unschedule this task for execution. This method will
* typically succeed if this task is the most recently forked task
* by the current thread, and has not commenced executing in
* another thread. This method may be useful when arranging
* alternative local processing of tasks that could have been, but
* were not, stolen.
*
* <p>This method may be invoked only from within {@code
* ForkJoinPool} computations (as may be determined using method
* {@link #inForkJoinPool}). Attempts to invoke in other contexts
* result in exceptions or errors, possibly including {@code
* ClassCastException}.
*
* @return {@code true} if unforked
*/
public boolean tryUnfork() {
return ((ForkJoinWorkerThread)Thread.currentThread())
.workQueue.tryUnpush(this);
}
/**
* Returns an estimate of the number of tasks that have been
* forked by the current worker thread but not yet executed. This
* value may be useful for heuristic decisions about whether to
* fork other tasks.
*
* <p>This method may be invoked only from within {@code
* ForkJoinPool} computations (as may be determined using method
* {@link #inForkJoinPool}). Attempts to invoke in other contexts
* result in exceptions or errors, possibly including {@code
* ClassCastException}.
*
* @return the number of tasks
*/
public static int getQueuedTaskCount() {
return ((ForkJoinWorkerThread) Thread.currentThread())
.workQueue.queueSize();
}
/**
* Returns an estimate of how many more locally queued tasks are
* held by the current worker thread than there are other worker
* threads that might steal them. This value may be useful for
* heuristic decisions about whether to fork other tasks. In many
* usages of ForkJoinTasks, at steady state, each worker should
* aim to maintain a small constant surplus (for example, 3) of
* tasks, and to process computations locally if this threshold is
* exceeded.
*
* <p>This method may be invoked only from within {@code
* ForkJoinPool} computations (as may be determined using method
* {@link #inForkJoinPool}). Attempts to invoke in other contexts
* result in exceptions or errors, possibly including {@code
* ClassCastException}.
*
* @return the surplus number of tasks, which may be negative
*/
public static int getSurplusQueuedTaskCount() {
/*
* The aim of this method is to return a cheap heuristic guide
* for task partitioning when programmers, frameworks, tools,
* or languages have little or no idea about task granularity.
* In essence by offering this method, we ask users only about
* tradeoffs in overhead vs expected throughput and its
* variance, rather than how finely to partition tasks.
*
* In a steady state strict (tree-structured) computation,
* each thread makes available for stealing enough tasks for
* other threads to remain active. Inductively, if all threads
* play by the same rules, each thread should make available
* only a constant number of tasks.
*
* The minimum useful constant is just 1. But using a value of
* 1 would require immediate replenishment upon each steal to
* maintain enough tasks, which is infeasible. Further,
* partitionings/granularities of offered tasks should
* minimize steal rates, which in general means that threads
* nearer the top of computation tree should generate more
* than those nearer the bottom. In perfect steady state, each
* thread is at approximately the same level of computation
* tree. However, producing extra tasks amortizes the
* uncertainty of progress and diffusion assumptions.
*
* So, users will want to use values larger, but not much
* larger than 1 to both smooth over transient shortages and
* hedge against uneven progress; as traded off against the
* cost of extra task overhead. We leave the user to pick a
* threshold value to compare with the results of this call to
* guide decisions, but recommend values such as 3.
*
* When all threads are active, it is on average OK to
* estimate surplus strictly locally. In steady-state, if one
* thread is maintaining say 2 surplus tasks, then so are
* others. So we can just use estimated queue length.
* However, this strategy alone leads to serious mis-estimates
* in some non-steady-state conditions (ramp-up, ramp-down,
* other stalls). We can detect many of these by further
* considering the number of "idle" threads, that are known to
* have zero queued tasks, so compensate by a factor of
* (#idle/#active) threads.
*/
ForkJoinWorkerThread wt =
(ForkJoinWorkerThread)Thread.currentThread();
return wt.workQueue.queueSize() - wt.pool.idlePerActive();
}
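    /*
     * Illustrative sketch (hypothetical, not from the original sources) of
     * the surplus-based partitioning heuristic described above: keep
     * splitting while the local surplus is below a small threshold such as
     * 3, then process the remaining range sequentially.
     *
     *   protected void compute() {             // inside some RecursiveAction subclass
     *     int lo = this.lo, hi = this.hi;
     *     while (hi - lo > 1 &&
     *            getSurplusQueuedTaskCount() <= 3) {
     *       int mid = (lo + hi) >>> 1;
     *       new SubTask(mid, hi).fork();       // publish the right half for stealing
     *       hi = mid;                          // keep working on the left half locally
     *     }
     *     processSequentially(lo, hi);         // hypothetical leaf computation
     *   }
     */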
// Extension methods
/**
* Returns the result that would be returned by {@link #join}, even
* if this task completed abnormally, or {@code null} if this task
* is not known to have been completed. This method is designed
* to aid debugging, as well as to support extensions. Its use in
* any other context is discouraged.
*
* @return the result, or {@code null} if not completed
*/
public abstract V getRawResult();
/**
* Forces the given value to be returned as a result. This method
* is designed to support extensions, and should not in general be
* called otherwise.
*
* @param value the value
*/
protected abstract void setRawResult(V value);
/**
* Immediately performs the base action of this task and returns
* true if, upon return from this method, this task is guaranteed
* to have completed normally. This method may return false
* otherwise, to indicate that this task is not necessarily
* complete (or is not known to be complete), for example in
* asynchronous actions that require explicit invocations of
* completion methods. This method may also throw an (unchecked)
* exception to indicate abnormal exit. This method is designed to
* support extensions, and should not in general be called
* otherwise.
*
* @return {@code true} if this task is known to have completed normally
*/
protected abstract boolean exec();
/**
* Returns, but does not unschedule or execute, a task queued by
* the current thread but not yet executed, if one is immediately
* available. There is no guarantee that this task will actually
* be polled or executed next. Conversely, this method may return
* null even if a task exists but cannot be accessed without
* contention with other threads. This method is designed
* primarily to support extensions, and is unlikely to be useful
* otherwise.
*
* <p>This method may be invoked only from within {@code
* ForkJoinPool} computations (as may be determined using method
* {@link #inForkJoinPool}). Attempts to invoke in other contexts
* result in exceptions or errors, possibly including {@code
* ClassCastException}.
*
* @return the next task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> peekNextLocalTask() {
return ((ForkJoinWorkerThread) Thread.currentThread()).workQueue.peek();
}
/**
* Unschedules and returns, without executing, the next task
* queued by the current thread but not yet executed. This method
* is designed primarily to support extensions, and is unlikely to
* be useful otherwise.
*
* <p>This method may be invoked only from within {@code
* ForkJoinPool} computations (as may be determined using method
* {@link #inForkJoinPool}). Attempts to invoke in other contexts
* result in exceptions or errors, possibly including {@code
* ClassCastException}.
*
* @return the next task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> pollNextLocalTask() {
return ((ForkJoinWorkerThread) Thread.currentThread())
.workQueue.nextLocalTask();
}
/**
* Unschedules and returns, without executing, the next task
* queued by the current thread but not yet executed, if one is
* available, or if not available, a task that was forked by some
* other thread, if available. Availability may be transient, so a
* {@code null} result does not necessarily imply quiescence
* of the pool this task is operating in. This method is designed
* primarily to support extensions, and is unlikely to be useful
* otherwise.
*
* <p>This method may be invoked only from within {@code
* ForkJoinPool} computations (as may be determined using method
* {@link #inForkJoinPool}). Attempts to invoke in other contexts
* result in exceptions or errors, possibly including {@code
* ClassCastException}.
*
* @return a task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> pollTask() {
ForkJoinWorkerThread wt =
(ForkJoinWorkerThread)Thread.currentThread();
return wt.pool.nextTaskFor(wt.workQueue);
}
// tag operations
/**
* Returns the tag for this task.
*
* @return the tag for this task
* @since 1.8
*/
public final short getForkJoinTaskTag() {
return (short)status;
}
/**
* Atomically sets the tag value for this task.
*
* @param tag the tag value
* @return the previous value of the tag
* @since 1.8
*/
public final short setForkJoinTaskTag(short tag) {
for (int s;;) {
if (U.compareAndSwapInt(this, STATUS, s = status,
(s & ~SMASK) | (tag & SMASK)))
return (short)s;
}
}
/**
* Atomically conditionally sets the tag value for this task.
* Among other applications, tags can be used as visit markers
* in tasks operating on graphs, as in methods that check: {@code
* if (task.compareAndSetForkJoinTaskTag((short)0, (short)1))}
* before processing, otherwise exiting because the node has
* already been visited.
*
* @param e the expected tag value
* @param tag the new tag value
* @return true if successful; i.e., the current value was
* equal to e and is now tag.
* @since 1.8
*/
public final boolean compareAndSetForkJoinTaskTag(short e, short tag) {
for (int s;;) {
if ((short)(s = status) != e)
return false;
if (U.compareAndSwapInt(this, STATUS, s,
(s & ~SMASK) | (tag & SMASK)))
return true;
}
}
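    /*
     * Illustrative sketch (hypothetical, not from the original sources) of
     * the visit-marker idiom mentioned above: one task per graph node, with
     * the tag recording whether the node has been claimed. Because the CAS
     * succeeds at most once per node, each node is forked at most once.
     *
     *   class NodeTask extends RecursiveAction {
     *     final List<NodeTask> neighbors;       // hypothetical adjacency list
     *     NodeTask(List<NodeTask> neighbors) { this.neighbors = neighbors; }
     *     protected void compute() {
     *       visit();                            // hypothetical per-node work
     *       for (NodeTask n : neighbors)
     *         if (n.compareAndSetForkJoinTaskTag((short) 0, (short) 1))
     *           n.fork();                       // this thread claimed n: schedule it once
     *     }
     *     void visit() { }
     *   }
     *
     *   // Start by claiming and invoking the root; a task running inside the
     *   // pool can then await overall completion with helpQuiesce().
     */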
/**
* Adaptor for Runnables. This implements RunnableFuture
* to be compliant with AbstractExecutorService constraints
* when used in ForkJoinPool.
*/
static final class AdaptedRunnable<T> extends ForkJoinTask<T>
implements RunnableFuture<T> {
final Runnable runnable;
T result;
AdaptedRunnable(Runnable runnable, T result) {
if (runnable == null) throw new NullPointerException();
this.runnable = runnable;
this.result = result; // OK to set this even before completion
}
public final T getRawResult() { return result; }
public final void setRawResult(T v) { result = v; }
public final boolean exec() { runnable.run(); return true; }
public final void run() { invoke(); }
private static final long serialVersionUID = 5232453952276885070L;
}
/**
* Adaptor for Runnables without results
*/
static final class AdaptedRunnableAction extends ForkJoinTask<Void>
implements RunnableFuture<Void> {
final Runnable runnable;
AdaptedRunnableAction(Runnable runnable) {
if (runnable == null) throw new NullPointerException();
this.runnable = runnable;
}
public final Void getRawResult() { return null; }
public final void setRawResult(Void v) { }
public final boolean exec() { runnable.run(); return true; }
public final void run() { invoke(); }
private static final long serialVersionUID = 5232453952276885070L;
}
/**
* Adaptor for Callables
*/
static final class AdaptedCallable<T> extends ForkJoinTask<T>
implements RunnableFuture<T> {
final Callable<? extends T> callable;
T result;
AdaptedCallable(Callable<? extends T> callable) {
if (callable == null) throw new NullPointerException();
this.callable = callable;
}
public final T getRawResult() { return result; }
public final void setRawResult(T v) { result = v; }
public final boolean exec() {
try {
result = callable.call();
return true;
} catch (Error err) {
throw err;
} catch (RuntimeException rex) {
throw rex;
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
public final void run() { invoke(); }
private static final long serialVersionUID = 2838392045355241008L;
}
/**
* Returns a new {@code ForkJoinTask} that performs the {@code run}
* method of the given {@code Runnable} as its action, and returns
* a null result upon {@link #join}.
*
* @param runnable the runnable action
* @return the task
*/
public static ForkJoinTask<?> adapt(Runnable runnable) {
return new AdaptedRunnableAction(runnable);
}
/**
* Returns a new {@code ForkJoinTask} that performs the {@code run}
* method of the given {@code Runnable} as its action, and returns
* the given result upon {@link #join}.
*
* @param runnable the runnable action
* @param result the result upon completion
* @return the task
*/
public static <T> ForkJoinTask<T> adapt(Runnable runnable, T result) {
return new AdaptedRunnable<T>(runnable, result);
}
/**
* Returns a new {@code ForkJoinTask} that performs the {@code call}
* method of the given {@code Callable} as its action, and returns
* its result upon {@link #join}, translating any checked exceptions
* encountered into {@code RuntimeException}.
*
* @param callable the callable action
* @return the task
*/
public static <T> ForkJoinTask<T> adapt(Callable<? extends T> callable) {
return new AdaptedCallable<T>(callable);
}
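    /*
     * Illustrative usage sketch (hypothetical, not from the original
     * sources): wrapping plain Runnable/Callable work so it can be
     * submitted, forked, and joined like any other ForkJoinTask.
     *
     *   ForkJoinPool pool = new ForkJoinPool();
     *   ForkJoinTask<?> fire = ForkJoinTask.adapt(new Runnable() {
     *     public void run() { System.out.println("side effect"); }
     *   });
     *   ForkJoinTask<Integer> answer = ForkJoinTask.adapt(new Callable<Integer>() {
     *     public Integer call() { return 6 * 7; }  // checked exceptions become RuntimeException
     *   });
     *   pool.submit(fire);
     *   pool.submit(answer);
     *   fire.join();                               // null result
     *   Integer v = answer.join();                 // 42
     */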
// Serialization support
private static final long serialVersionUID = -7721805057305804111L;
/**
* Saves this task to a stream (that is, serializes it).
*
* @serialData the current run status and the exception thrown
* during execution, or {@code null} if none
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
s.writeObject(getException());
}
/**
* Reconstitutes this task from a stream (that is, deserializes it).
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
Object ex = s.readObject();
if (ex != null)
setExceptionalCompletion((Throwable)ex);
}
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long STATUS;
static {
exceptionTableLock = new ReentrantLock();
exceptionTableRefQueue = new ReferenceQueue<Object>();
exceptionTable = new ExceptionNode[EXCEPTION_MAP_CAPACITY];
try {
U = getUnsafe();
STATUS = U.objectFieldOffset
(ForkJoinTask.class.getDeclaredField("status"));
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException se) {
try {
return java.security.AccessController.doPrivileged
(new java.security
.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
java.lang.reflect.Field f = sun.misc
.Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
return (sun.misc.Unsafe) f.get(null);
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/jsr166y/ForkJoinWorkerThread.java
|
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166y;
/**
* A thread managed by a {@link ForkJoinPool}, which executes
* {@link ForkJoinTask}s.
* This class is subclassable solely for the sake of adding
* functionality -- there are no overridable methods dealing with
* scheduling or execution. However, you can override initialization
* and termination methods surrounding the main task processing loop.
* If you do create such a subclass, you will also need to supply a
* custom {@link ForkJoinPool.ForkJoinWorkerThreadFactory} to use it
* in a {@code ForkJoinPool}.
*
* @since 1.7
* @author Doug Lea
*/
public class ForkJoinWorkerThread extends Thread {
/*
* ForkJoinWorkerThreads are managed by ForkJoinPools and perform
* ForkJoinTasks. For explanation, see the internal documentation
* of class ForkJoinPool.
*/
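    /*
     * Illustrative sketch (hypothetical, not from the original sources) of
     * the subclassing pattern described in the class javadoc: a worker
     * subclass that only adds initialization/termination hooks, plus the
     * custom factory needed to install it in a pool.
     *
     *   class LoggingWorkerThread extends ForkJoinWorkerThread {
     *     protected LoggingWorkerThread(ForkJoinPool pool) { super(pool); }
     *     protected void onStart() {
     *       super.onStart();                                // must run first
     *       System.out.println(getName() + " starting");
     *     }
     *     protected void onTermination(Throwable exception) {
     *       System.out.println(getName() + " terminating: " + exception);
     *       super.onTermination(exception);                 // must run last
     *     }
     *   }
     *
     *   ForkJoinPool.ForkJoinWorkerThreadFactory factory =
     *     new ForkJoinPool.ForkJoinWorkerThreadFactory() {
     *       public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
     *         return new LoggingWorkerThread(pool);
     *       }
     *     };
     *   ForkJoinPool pool = new ForkJoinPool(
     *     Runtime.getRuntime().availableProcessors(), factory, null, false);
     */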
final ForkJoinPool.WorkQueue workQueue; // Work-stealing mechanics
final ForkJoinPool pool; // the pool this thread works in
/**
* Creates a ForkJoinWorkerThread operating in the given pool.
*
* @param pool the pool this thread works in
* @throws NullPointerException if pool is null
*/
protected ForkJoinWorkerThread(ForkJoinPool pool) {
super(pool.nextWorkerName());
setDaemon(true);
Thread.UncaughtExceptionHandler ueh = pool.ueh;
if (ueh != null)
setUncaughtExceptionHandler(ueh);
this.pool = pool;
pool.registerWorker(this.workQueue = new ForkJoinPool.WorkQueue
(pool, this, pool.localMode));
}
/**
* Returns the pool hosting this thread.
*
* @return the pool
*/
public ForkJoinPool getPool() {
return pool;
}
/**
* Returns the index number of this thread in its pool. The
* returned value ranges from zero to the maximum number of
* threads (minus one) that have ever been created in the pool.
* This method may be useful for applications that track status or
* collect results per-worker rather than per-task.
*
* @return the index number
*/
public int getPoolIndex() {
return workQueue.poolIndex;
}
/**
* Initializes internal state after construction but before
* processing any tasks. If you override this method, you must
* invoke {@code super.onStart()} at the beginning of the method.
* Initialization requires care: Most fields must have legal
* default values, to ensure that attempted accesses from other
* threads work correctly even before this thread starts
* processing tasks.
*/
protected void onStart() {
}
/**
* Performs cleanup associated with termination of this worker
* thread. If you override this method, you must invoke
* {@code super.onTermination} at the end of the overridden method.
*
* @param exception the exception causing this thread to abort due
* to an unrecoverable error, or {@code null} if completed normally
*/
protected void onTermination(Throwable exception) {
}
/**
* This method is required to be public, but should never be
* called explicitly. It performs the main run loop to execute
* {@link ForkJoinTask}s.
*/
public void run() {
Throwable exception = null;
try {
onStart();
pool.runWorker(workQueue);
} catch (Throwable ex) {
exception = ex;
} finally {
try {
onTermination(exception);
} catch (Throwable ex) {
if (exception == null)
exception = ex;
} finally {
pool.deregisterWorker(this, exception);
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/jsr166y/LinkedTransferQueue.java
|
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166y;
import java.util.AbstractQueue;
import java.util.Collection;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;
/**
* An unbounded {@link TransferQueue} based on linked nodes.
* This queue orders elements FIFO (first-in-first-out) with respect
* to any given producer. The <em>head</em> of the queue is that
* element that has been on the queue the longest time for some
* producer. The <em>tail</em> of the queue is that element that has
* been on the queue the shortest time for some producer.
*
* <p>Beware that, unlike in most collections, the {@code size} method
* is <em>NOT</em> a constant-time operation. Because of the
* asynchronous nature of these queues, determining the current number
* of elements requires a traversal of the elements, and so may report
* inaccurate results if this collection is modified during traversal.
* Additionally, the bulk operations {@code addAll},
* {@code removeAll}, {@code retainAll}, {@code containsAll},
* {@code equals}, and {@code toArray} are <em>not</em> guaranteed
* to be performed atomically. For example, an iterator operating
* concurrently with an {@code addAll} operation might view only some
* of the added elements.
*
* <p>This class and its iterator implement all of the
* <em>optional</em> methods of the {@link Collection} and {@link
* Iterator} interfaces.
*
* <p>Memory consistency effects: As with other concurrent
* collections, actions in a thread prior to placing an object into a
* {@code LinkedTransferQueue}
* <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a>
* actions subsequent to the access or removal of that element from
* the {@code LinkedTransferQueue} in another thread.
*
* <p>This class is a member of the
* <a href="{@docRoot}/../technotes/guides/collections/index.html">
* Java Collections Framework</a>.
*
* @since 1.7
* @author Doug Lea
* @param <E> the type of elements held in this collection
*/
public class LinkedTransferQueue<E> extends AbstractQueue<E>
implements TransferQueue<E>, java.io.Serializable {
private static final long serialVersionUID = -3223113410248163686L;
/*
* *** Overview of Dual Queues with Slack ***
*
* Dual Queues, introduced by Scherer and Scott
* (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are
* (linked) queues in which nodes may represent either data or
* requests. When a thread tries to enqueue a data node, but
* encounters a request node, it instead "matches" and removes it;
* and vice versa for enqueuing requests. Blocking Dual Queues
* arrange that threads enqueuing unmatched requests block until
* other threads provide the match. Dual Synchronous Queues (see
* Scherer, Lea, & Scott
* http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf)
* additionally arrange that threads enqueuing unmatched data also
* block. Dual Transfer Queues support all of these modes, as
* dictated by callers.
*
* A FIFO dual queue may be implemented using a variation of the
* Michael & Scott (M&S) lock-free queue algorithm
* (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf).
* It maintains two pointer fields, "head", pointing to a
* (matched) node that in turn points to the first actual
* (unmatched) queue node (or null if empty); and "tail" that
* points to the last node on the queue (or again null if
* empty). For example, here is a possible queue with four data
* elements:
*
* head tail
* | |
* v v
* M -> U -> U -> U -> U
*
* The M&S queue algorithm is known to be prone to scalability and
* overhead limitations when maintaining (via CAS) these head and
* tail pointers. This has led to the development of
* contention-reducing variants such as elimination arrays (see
* Moir et al http://portal.acm.org/citation.cfm?id=1074013) and
* optimistic back pointers (see Ladan-Mozes & Shavit
* http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf).
* However, the nature of dual queues enables a simpler tactic for
* improving M&S-style implementations when dual-ness is needed.
*
* In a dual queue, each node must atomically maintain its match
* status. While there are other possible variants, we implement
* this here as: for a data-mode node, matching entails CASing an
* "item" field from a non-null data value to null upon match, and
* vice-versa for request nodes, CASing from null to a data
* value. (Note that the linearization properties of this style of
* queue are easy to verify -- elements are made available by
* linking, and unavailable by matching.) Compared to plain M&S
* queues, this property of dual queues requires one additional
* successful atomic operation per enq/deq pair. But it also
* enables lower cost variants of queue maintenance mechanics. (A
* variation of this idea applies even for non-dual queues that
* support deletion of interior elements, such as
* j.u.c.ConcurrentLinkedQueue.)
*
* Once a node is matched, its match status can never again
* change. We may thus arrange that the linked list of them
* contain a prefix of zero or more matched nodes, followed by a
* suffix of zero or more unmatched nodes. (Note that we allow
* both the prefix and suffix to be zero length, which in turn
* means that we do not use a dummy header.) If we were not
* concerned with either time or space efficiency, we could
* correctly perform enqueue and dequeue operations by traversing
* from a pointer to the initial node; CASing the item of the
* first unmatched node on match and CASing the next field of the
* trailing node on appends. (Plus some special-casing when
* initially empty). While this would be a terrible idea in
* itself, it does have the benefit of not requiring ANY atomic
* updates on head/tail fields.
*
* We introduce here an approach that lies between the extremes of
* never versus always updating queue (head and tail) pointers.
* This offers a tradeoff between sometimes requiring extra
* traversal steps to locate the first and/or last unmatched
* nodes, versus the reduced overhead and contention of fewer
* updates to queue pointers. For example, a possible snapshot of
* a queue is:
*
* head tail
* | |
* v v
* M -> M -> U -> U -> U -> U
*
* The best value for this "slack" (the targeted maximum distance
* between the value of "head" and the first unmatched node, and
* similarly for "tail") is an empirical matter. We have found
* that using very small constants in the range of 1-3 work best
* over a range of platforms. Larger values introduce increasing
* costs of cache misses and risks of long traversal chains, while
* smaller values increase CAS contention and overhead.
*
* Dual queues with slack differ from plain M&S dual queues by
* virtue of only sometimes updating head or tail pointers when
* matching, appending, or even traversing nodes; in order to
* maintain a targeted slack. The idea of "sometimes" may be
* operationalized in several ways. The simplest is to use a
* per-operation counter incremented on each traversal step, and
* to try (via CAS) to update the associated queue pointer
* whenever the count exceeds a threshold. Another, that requires
* more overhead, is to use random number generators to update
* with a given probability per traversal step.
*
* In any strategy along these lines, because CASes updating
* fields may fail, the actual slack may exceed targeted
* slack. However, they may be retried at any time to maintain
* targets. Even when using very small slack values, this
* approach works well for dual queues because it allows all
* operations up to the point of matching or appending an item
* (hence potentially allowing progress by another thread) to be
* read-only, thus not introducing any further contention. As
* described below, we implement this by performing slack
* maintenance retries only after these points.
*
* As an accompaniment to such techniques, traversal overhead can
* be further reduced without increasing contention of head
* pointer updates: Threads may sometimes shortcut the "next" link
* path from the current "head" node to be closer to the currently
     * known first unmatched node, and similarly for tail. Again, this
     * may be triggered using thresholds or randomization.
*
* These ideas must be further extended to avoid unbounded amounts
* of costly-to-reclaim garbage caused by the sequential "next"
* links of nodes starting at old forgotten head nodes: As first
* described in detail by Boehm
* (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC
* delays noticing that any arbitrarily old node has become
* garbage, all newer dead nodes will also be unreclaimed.
* (Similar issues arise in non-GC environments.) To cope with
* this in our implementation, upon CASing to advance the head
* pointer, we set the "next" link of the previous head to point
* only to itself; thus limiting the length of connected dead lists.
* (We also take similar care to wipe out possibly garbage
* retaining values held in other Node fields.) However, doing so
* adds some further complexity to traversal: If any "next"
* pointer links to itself, it indicates that the current thread
* has lagged behind a head-update, and so the traversal must
* continue from the "head". Traversals trying to find the
* current tail starting from "tail" may also encounter
* self-links, in which case they also continue at "head".
*
     * It is tempting in a slack-based scheme to not even use CAS for
     * updates (similarly to Ladan-Mozes & Shavit). However, this
* cannot be done for head updates under the above link-forgetting
* mechanics because an update may leave head at a detached node.
* And while direct writes are possible for tail updates, they
* increase the risk of long retraversals, and hence long garbage
* chains, which can be much more costly than is worthwhile
* considering that the cost difference of performing a CAS vs
* write is smaller when they are not triggered on each operation
* (especially considering that writes and CASes equally require
* additional GC bookkeeping ("write barriers") that are sometimes
* more costly than the writes themselves because of contention).
*
* *** Overview of implementation ***
*
* We use a threshold-based approach to updates, with a slack
* threshold of two -- that is, we update head/tail when the
* current pointer appears to be two or more steps away from the
* first/last node. The slack value is hard-wired: a path greater
* than one is naturally implemented by checking equality of
* traversal pointers except when the list has only one element,
* in which case we keep slack threshold at one. Avoiding tracking
* explicit counts across method calls slightly simplifies an
* already-messy implementation. Using randomization would
* probably work better if there were a low-quality dirt-cheap
* per-thread one available, but even ThreadLocalRandom is too
* heavy for these purposes.
*
* With such a small slack threshold value, it is not worthwhile
* to augment this with path short-circuiting (i.e., unsplicing
* interior nodes) except in the case of cancellation/removal (see
* below).
*
* We allow both the head and tail fields to be null before any
* nodes are enqueued; initializing upon first append. This
* simplifies some other logic, as well as providing more
* efficient explicit control paths instead of letting JVMs insert
* implicit NullPointerExceptions when they are null. While not
* currently fully implemented, we also leave open the possibility
* of re-nulling these fields when empty (which is complicated to
* arrange, for little benefit.)
*
* All enqueue/dequeue operations are handled by the single method
* "xfer" with parameters indicating whether to act as some form
* of offer, put, poll, take, or transfer (each possibly with
     * timeout). The relative complexity of using one monolithic
     * method is outweighed by the code bulk and maintenance problems of
     * using separate methods for each case.
*
* Operation consists of up to three phases. The first is
* implemented within method xfer, the second in tryAppend, and
* the third in method awaitMatch.
*
* 1. Try to match an existing node
*
* Starting at head, skip already-matched nodes until finding
* an unmatched node of opposite mode, if one exists, in which
* case matching it and returning, also if necessary updating
* head to one past the matched node (or the node itself if the
* list has no other unmatched nodes). If the CAS misses, then
* a loop retries advancing head by two steps until either
* success or the slack is at most two. By requiring that each
* attempt advances head by two (if applicable), we ensure that
* the slack does not grow without bound. Traversals also check
* if the initial head is now off-list, in which case they
* start at the new head.
*
     * If no candidates are found and the call was untimed
     * poll/offer (argument "how" is NOW), return.
*
* 2. Try to append a new node (method tryAppend)
*
* Starting at current tail pointer, find the actual last node
* and try to append a new node (or if head was null, establish
* the first node). Nodes can be appended only if their
* predecessors are either already matched or are of the same
* mode. If we detect otherwise, then a new node with opposite
* mode must have been appended during traversal, so we must
* restart at phase 1. The traversal and update steps are
* otherwise similar to phase 1: Retrying upon CAS misses and
* checking for staleness. In particular, if a self-link is
* encountered, then we can safely jump to a node on the list
* by continuing the traversal at current head.
*
* On successful append, if the call was ASYNC, return.
*
* 3. Await match or cancellation (method awaitMatch)
*
* Wait for another thread to match node; instead cancelling if
* the current thread was interrupted or the wait timed out. On
* multiprocessors, we use front-of-queue spinning: If a node
* appears to be the first unmatched node in the queue, it
* spins a bit before blocking. In either case, before blocking
* it tries to unsplice any nodes between the current "head"
* and the first unmatched node.
*
* Front-of-queue spinning vastly improves performance of
* heavily contended queues. And so long as it is relatively
* brief and "quiet", spinning does not much impact performance
* of less-contended queues. During spins threads check their
* interrupt status and generate a thread-local random number
* to decide to occasionally perform a Thread.yield. While
* yield has underdefined specs, we assume that it might help,
* and will not hurt, in limiting impact of spinning on busy
* systems. We also use smaller (1/2) spins for nodes that are
* not known to be front but whose predecessors have not
* blocked -- these "chained" spins avoid artifacts of
* front-of-queue rules which otherwise lead to alternating
* nodes spinning vs blocking. Further, front threads that
* represent phase changes (from data to request node or vice
* versa) compared to their predecessors receive additional
* chained spins, reflecting longer paths typically required to
* unblock threads during phase changes.
*
*
* ** Unlinking removed interior nodes **
*
* In addition to minimizing garbage retention via self-linking
* described above, we also unlink removed interior nodes. These
* may arise due to timed out or interrupted waits, or calls to
* remove(x) or Iterator.remove. Normally, given a node that was
* at one time known to be the predecessor of some node s that is
* to be removed, we can unsplice s by CASing the next field of
* its predecessor if it still points to s (otherwise s must
* already have been removed or is now offlist). But there are two
* situations in which we cannot guarantee to make node s
* unreachable in this way: (1) If s is the trailing node of list
* (i.e., with null next), then it is pinned as the target node
* for appends, so can only be removed later after other nodes are
* appended. (2) We cannot necessarily unlink s given a
* predecessor node that is matched (including the case of being
* cancelled): the predecessor may already be unspliced, in which
* case some previous reachable node may still point to s.
* (For further explanation see Herlihy & Shavit "The Art of
* Multiprocessor Programming" chapter 9). Although, in both
* cases, we can rule out the need for further action if either s
* or its predecessor are (or can be made to be) at, or fall off
* from, the head of list.
*
* Without taking these into account, it would be possible for an
* unbounded number of supposedly removed nodes to remain
* reachable. Situations leading to such buildup are uncommon but
* can occur in practice; for example when a series of short timed
* calls to poll repeatedly time out but never otherwise fall off
* the list because of an untimed call to take at the front of the
* queue.
*
* When these cases arise, rather than always retraversing the
* entire list to find an actual predecessor to unlink (which
* won't help for case (1) anyway), we record a conservative
* estimate of possible unsplice failures (in "sweepVotes").
* We trigger a full sweep when the estimate exceeds a threshold
* ("SWEEP_THRESHOLD") indicating the maximum number of estimated
* removal failures to tolerate before sweeping through, unlinking
* cancelled nodes that were not unlinked upon initial removal.
* We perform sweeps by the thread hitting threshold (rather than
* background threads or by spreading work to other threads)
* because in the main contexts in which removal occurs, the
* caller is already timed-out, cancelled, or performing a
* potentially O(n) operation (e.g. remove(x)), none of which are
* time-critical enough to warrant the overhead that alternatives
* would impose on other threads.
*
* Because the sweepVotes estimate is conservative, and because
* nodes become unlinked "naturally" as they fall off the head of
* the queue, and because we allow votes to accumulate even while
* sweeps are in progress, there are typically significantly fewer
* such nodes than estimated. Choice of a threshold value
* balances the likelihood of wasted effort and contention, versus
* providing a worst-case bound on retention of interior nodes in
* quiescent queues. The value defined below was chosen
* empirically to balance these under various timeout scenarios.
*
* Note that we cannot self-link unlinked interior nodes during
* sweeps. However, the associated garbage chains terminate when
* some successor ultimately falls off the head of the list and is
* self-linked.
*/
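    /*
     * Illustrative usage sketch (hypothetical, not from the original
     * sources): the dual-queue modes above as seen from the public API.
     * put/offer append data nodes asynchronously and never block; transfer
     * blocks the producer until a consumer receives the element; take
     * blocks the consumer until data is available.
     *
     *   final LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
     *
     *   new Thread(new Runnable() {             // consumer
     *     public void run() {
     *       try {
     *         String a = q.take();              // receives "async"
     *         String b = q.take();              // matches the pending transfer below
     *       } catch (InterruptedException ie) { }
     *     }
     *   }).start();
     *
     *   q.put("async");                         // data node appended, returns immediately
     *   try {
     *     q.transfer("handed off");             // returns only once a consumer has taken it
     *   } catch (InterruptedException ie) { }
     */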
/** True if on multiprocessor */
private static final boolean MP =
Runtime.getRuntime().availableProcessors() > 1;
/**
* The number of times to spin (with randomly interspersed calls
* to Thread.yield) on multiprocessor before blocking when a node
* is apparently the first waiter in the queue. See above for
* explanation. Must be a power of two. The value is empirically
* derived -- it works pretty well across a variety of processors,
* numbers of CPUs, and OSes.
*/
private static final int FRONT_SPINS = 1 << 7;
/**
* The number of times to spin before blocking when a node is
* preceded by another node that is apparently spinning. Also
* serves as an increment to FRONT_SPINS on phase changes, and as
* base average frequency for yielding during spins. Must be a
* power of two.
*/
private static final int CHAINED_SPINS = FRONT_SPINS >>> 1;
/**
* The maximum number of estimated removal failures (sweepVotes)
* to tolerate before sweeping through the queue unlinking
* cancelled nodes that were not unlinked upon initial
* removal. See above for explanation. The value must be at least
* two to avoid useless sweeps when removing trailing nodes.
*/
static final int SWEEP_THRESHOLD = 32;
/**
* Queue nodes. Uses Object, not E, for items to allow forgetting
* them after use. Relies heavily on Unsafe mechanics to minimize
* unnecessary ordering constraints: Writes that are intrinsically
* ordered wrt other accesses or CASes use simple relaxed forms.
*/
static final class Node {
final boolean isData; // false if this is a request node
volatile Object item; // initially non-null if isData; CASed to match
volatile Node next;
volatile Thread waiter; // null until waiting
// CAS methods for fields
final boolean casNext(Node cmp, Node val) {
return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
}
final boolean casItem(Object cmp, Object val) {
// assert cmp == null || cmp.getClass() != Node.class;
return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
}
/**
* Constructs a new node. Uses relaxed write because item can
* only be seen after publication via casNext.
*/
Node(Object item, boolean isData) {
UNSAFE.putObject(this, itemOffset, item); // relaxed write
this.isData = isData;
}
/**
* Links node to itself to avoid garbage retention. Called
* only after CASing head field, so uses relaxed write.
*/
final void forgetNext() {
UNSAFE.putObject(this, nextOffset, this);
}
/**
* Sets item to self and waiter to null, to avoid garbage
* retention after matching or cancelling. Uses relaxed writes
* because order is already constrained in the only calling
* contexts: item is forgotten only after volatile/atomic
* mechanics that extract items. Similarly, clearing waiter
* follows either CAS or return from park (if ever parked;
* else we don't care).
*/
final void forgetContents() {
UNSAFE.putObject(this, itemOffset, this);
UNSAFE.putObject(this, waiterOffset, null);
}
/**
* Returns true if this node has been matched, including the
* case of artificial matches due to cancellation.
*/
final boolean isMatched() {
Object x = item;
return (x == this) || ((x == null) == isData);
}
/**
* Returns true if this is an unmatched request node.
*/
final boolean isUnmatchedRequest() {
return !isData && item == null;
}
/**
* Returns true if a node with the given mode cannot be
* appended to this node because this node is unmatched and
* has opposite data mode.
*/
final boolean cannotPrecede(boolean haveData) {
boolean d = isData;
Object x;
return d != haveData && (x = item) != this && (x != null) == d;
}
/**
* Tries to artificially match a data node -- used by remove.
*/
final boolean tryMatchData() {
// assert isData;
Object x = item;
if (x != null && x != this && casItem(x, null)) {
LockSupport.unpark(waiter);
return true;
}
return false;
}
private static final long serialVersionUID = -3375979862319811754L;
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long itemOffset;
private static final long nextOffset;
private static final long waiterOffset;
static {
try {
UNSAFE = getUnsafe();
Class<?> k = Node.class;
itemOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("item"));
nextOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("next"));
waiterOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("waiter"));
} catch (Exception e) {
throw new Error(e);
}
}
}
/** head of the queue; null until first enqueue */
transient volatile Node head;
/** tail of the queue; null until first append */
private transient volatile Node tail;
/** The number of apparent failures to unsplice removed nodes */
private transient volatile int sweepVotes;
// CAS methods for fields
private boolean casTail(Node cmp, Node val) {
return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
}
private boolean casHead(Node cmp, Node val) {
return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
}
private boolean casSweepVotes(int cmp, int val) {
return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val);
}
/*
* Possible values for "how" argument in xfer method.
*/
private static final int NOW = 0; // for untimed poll, tryTransfer
private static final int ASYNC = 1; // for offer, put, add
private static final int SYNC = 2; // for transfer, take
private static final int TIMED = 3; // for timed poll, tryTransfer
@SuppressWarnings("unchecked")
static <E> E cast(Object item) {
// assert item == null || item.getClass() != Node.class;
return (E) item;
}
/**
* Implements all queuing methods. See above for explanation.
*
* @param e the item or null for take
* @param haveData true if this is a put, else a take
* @param how NOW, ASYNC, SYNC, or TIMED
* @param nanos timeout in nanosecs, used only if mode is TIMED
* @return an item if matched, else e
* @throws NullPointerException if haveData mode but e is null
*/
private E xfer(E e, boolean haveData, int how, long nanos) {
if (haveData && (e == null))
throw new NullPointerException();
Node s = null; // the node to append, if needed
retry:
for (;;) { // restart on append race
for (Node h = head, p = h; p != null;) { // find & match first node
boolean isData = p.isData;
Object item = p.item;
if (item != p && (item != null) == isData) { // unmatched
if (isData == haveData) // can't match
break;
if (p.casItem(item, e)) { // match
for (Node q = p; q != h;) {
Node n = q.next; // update by 2 unless singleton
if (head == h && casHead(h, n == null ? q : n)) {
h.forgetNext();
break;
} // advance and retry
if ((h = head) == null ||
(q = h.next) == null || !q.isMatched())
break; // unless slack < 2
}
LockSupport.unpark(p.waiter);
return LinkedTransferQueue.<E>cast(item);
}
}
Node n = p.next;
p = (p != n) ? n : (h = head); // Use head if p offlist
}
if (how != NOW) { // No matches available
if (s == null)
s = new Node(e, haveData);
Node pred = tryAppend(s, haveData);
if (pred == null)
continue retry; // lost race vs opposite mode
if (how != ASYNC)
return awaitMatch(s, pred, e, (how == TIMED), nanos);
}
return e; // not waiting
}
}
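    /*
     * Illustrative mapping (sketch, not from the original sources) of the
     * "how" argument above to the public entry points that delegate to xfer:
     *
     *   offer(e), put(e), add(e)         -> ASYNC  (enqueue a data node, never wait)
     *   poll(), tryTransfer(e)           -> NOW    (match immediately or fail)
     *   take(), transfer(e)              -> SYNC   (wait until matched)
     *   poll(t, u), tryTransfer(e, t, u) -> TIMED  (wait up to the timeout)
     */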
/**
* Tries to append node s as tail.
*
* @param s the node to append
* @param haveData true if appending in data mode
* @return null on failure due to losing race with append in
* different mode, else s's predecessor, or s itself if no
* predecessor
*/
private Node tryAppend(Node s, boolean haveData) {
for (Node t = tail, p = t;;) { // move p to last node and append
Node n, u; // temps for reads of next & tail
if (p == null && (p = head) == null) {
if (casHead(null, s))
return s; // initialize
}
else if (p.cannotPrecede(haveData))
return null; // lost race vs opposite mode
else if ((n = p.next) != null) // not last; keep traversing
p = p != t && t != (u = tail) ? (t = u) : // stale tail
(p != n) ? n : null; // restart if off list
else if (!p.casNext(null, s))
p = p.next; // re-read on CAS failure
else {
if (p != t) { // update if slack now >= 2
while ((tail != t || !casTail(t, s)) &&
(t = tail) != null &&
(s = t.next) != null && // advance and retry
(s = s.next) != null && s != t);
}
return p;
}
}
}
/**
* Spins/yields/blocks until node s is matched or caller gives up.
*
* @param s the waiting node
* @param pred the predecessor of s, or s itself if it has no
* predecessor, or null if unknown (the null case does not occur
* in any current calls but may in possible future extensions)
* @param e the comparison value for checking match
* @param timed if true, wait only until timeout elapses
* @param nanos timeout in nanosecs, used only if timed is true
* @return matched item, or e if unmatched on interrupt or timeout
*/
private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) {
long lastTime = timed ? System.nanoTime() : 0L;
Thread w = Thread.currentThread();
int spins = -1; // initialized after first item and cancel checks
ThreadLocalRandom randomYields = null; // bound if needed
for (;;) {
Object item = s.item;
if (item != e) { // matched
// assert item != s;
s.forgetContents(); // avoid garbage
return LinkedTransferQueue.<E>cast(item);
}
if ((w.isInterrupted() || (timed && nanos <= 0)) &&
s.casItem(e, s)) { // cancel
unsplice(pred, s);
return e;
}
if (spins < 0) { // establish spins at/near front
if ((spins = spinsFor(pred, s.isData)) > 0)
randomYields = ThreadLocalRandom.current();
}
else if (spins > 0) { // spin
--spins;
if (randomYields.nextInt(CHAINED_SPINS) == 0)
Thread.yield(); // occasionally yield
}
else if (s.waiter == null) {
s.waiter = w; // request unpark then recheck
}
else if (timed) {
long now = System.nanoTime();
if ((nanos -= now - lastTime) > 0)
LockSupport.parkNanos(this, nanos);
lastTime = now;
}
else {
LockSupport.park(this);
}
}
}
/**
* Returns spin/yield value for a node with given predecessor and
* data mode. See above for explanation.
*/
private static int spinsFor(Node pred, boolean haveData) {
if (MP && pred != null) {
if (pred.isData != haveData) // phase change
return FRONT_SPINS + CHAINED_SPINS;
if (pred.isMatched()) // probably at front
return FRONT_SPINS;
if (pred.waiter == null) // pred apparently spinning
return CHAINED_SPINS;
}
return 0;
}
/* -------------- Traversal methods -------------- */
/**
* Returns the successor of p, or the head node if p.next has been
* linked to self, which will only be true if traversing with a
* stale pointer that is now off the list.
*/
final Node succ(Node p) {
Node next = p.next;
return (p == next) ? head : next;
}
/**
* Returns the first unmatched node of the given mode, or null if
* none. Used by methods isEmpty, hasWaitingConsumer.
*/
private Node firstOfMode(boolean isData) {
for (Node p = head; p != null; p = succ(p)) {
if (!p.isMatched())
return (p.isData == isData) ? p : null;
}
return null;
}
/**
* Returns the item in the first unmatched node with isData; or
* null if none. Used by peek.
*/
private E firstDataItem() {
for (Node p = head; p != null; p = succ(p)) {
Object item = p.item;
if (p.isData) {
if (item != null && item != p)
return LinkedTransferQueue.<E>cast(item);
}
else if (item == null)
return null;
}
return null;
}
/**
* Traverses and counts unmatched nodes of the given mode.
* Used by methods size and getWaitingConsumerCount.
*/
private int countOfMode(boolean data) {
int count = 0;
for (Node p = head; p != null; ) {
if (!p.isMatched()) {
if (p.isData != data)
return 0;
if (++count == Integer.MAX_VALUE) // saturated
break;
}
Node n = p.next;
if (n != p)
p = n;
else {
count = 0;
p = head;
}
}
return count;
}
final class Itr implements Iterator<E> {
private Node nextNode; // next node to return item for
private E nextItem; // the corresponding item
private Node lastRet; // last returned node, to support remove
private Node lastPred; // predecessor to unlink lastRet
/**
* Moves to next node after prev, or first node if prev null.
*/
private void advance(Node prev) {
/*
* To track and avoid buildup of deleted nodes in the face
* of calls to both Queue.remove and Itr.remove, we must
* include variants of unsplice and sweep upon each
* advance: Upon Itr.remove, we may need to catch up links
* from lastPred, and upon other removes, we might need to
* skip ahead from stale nodes and unsplice deleted ones
* found while advancing.
*/
Node r, b; // reset lastPred upon possible deletion of lastRet
if ((r = lastRet) != null && !r.isMatched())
lastPred = r; // next lastPred is old lastRet
else if ((b = lastPred) == null || b.isMatched())
lastPred = null; // at start of list
else {
Node s, n; // help with removal of lastPred.next
while ((s = b.next) != null &&
s != b && s.isMatched() &&
(n = s.next) != null && n != s)
b.casNext(s, n);
}
this.lastRet = prev;
for (Node p = prev, s, n;;) {
s = (p == null) ? head : p.next;
if (s == null)
break;
else if (s == p) {
p = null;
continue;
}
Object item = s.item;
if (s.isData) {
if (item != null && item != s) {
nextItem = LinkedTransferQueue.<E>cast(item);
nextNode = s;
return;
}
}
else if (item == null)
break;
// assert s.isMatched();
if (p == null)
p = s;
else if ((n = s.next) == null)
break;
else if (s == n)
p = null;
else
p.casNext(s, n);
}
nextNode = null;
nextItem = null;
}
Itr() {
advance(null);
}
public final boolean hasNext() {
return nextNode != null;
}
public final E next() {
Node p = nextNode;
if (p == null) throw new NoSuchElementException();
E e = nextItem;
advance(p);
return e;
}
public final void remove() {
final Node lastRet = this.lastRet;
if (lastRet == null)
throw new IllegalStateException();
this.lastRet = null;
if (lastRet.tryMatchData())
unsplice(lastPred, lastRet);
}
}
/* -------------- Removal methods -------------- */
/**
* Unsplices (now or later) the given deleted/cancelled node with
* the given predecessor.
*
* @param pred a node that was at one time known to be the
* predecessor of s, or null or s itself if s is/was at head
* @param s the node to be unspliced
*/
final void unsplice(Node pred, Node s) {
s.forgetContents(); // forget unneeded fields
/*
* See above for rationale. Briefly: if pred still points to
         * s, try to unlink s. If s cannot be unlinked, because it is
         * the trailing node or pred might be unlinked, and neither pred
         * nor s is head or offlist, add to sweepVotes, and if enough
* votes have accumulated, sweep.
*/
if (pred != null && pred != s && pred.next == s) {
Node n = s.next;
if (n == null ||
(n != s && pred.casNext(s, n) && pred.isMatched())) {
for (;;) { // check if at, or could be, head
Node h = head;
if (h == pred || h == s || h == null)
return; // at head or list empty
if (!h.isMatched())
break;
Node hn = h.next;
if (hn == null)
return; // now empty
if (hn != h && casHead(h, hn))
h.forgetNext(); // advance head
}
if (pred.next != pred && s.next != s) { // recheck if offlist
for (;;) { // sweep now if enough votes
int v = sweepVotes;
if (v < SWEEP_THRESHOLD) {
if (casSweepVotes(v, v + 1))
break;
}
else if (casSweepVotes(v, 0)) {
sweep();
break;
}
}
}
}
}
}
/**
* Unlinks matched (typically cancelled) nodes encountered in a
* traversal from head.
*/
private void sweep() {
for (Node p = head, s, n; p != null && (s = p.next) != null; ) {
if (!s.isMatched())
// Unmatched nodes are never self-linked
p = s;
else if ((n = s.next) == null) // trailing node is pinned
break;
else if (s == n) // stale
// No need to also check for p == s, since that implies s == n
p = head;
else
p.casNext(s, n);
}
}
/**
* Main implementation of remove(Object)
*/
private boolean findAndRemove(Object e) {
if (e != null) {
for (Node pred = null, p = head; p != null; ) {
Object item = p.item;
if (p.isData) {
if (item != null && item != p && e.equals(item) &&
p.tryMatchData()) {
unsplice(pred, p);
return true;
}
}
else if (item == null)
break;
pred = p;
if ((p = p.next) == pred) { // stale
pred = null;
p = head;
}
}
}
return false;
}
/**
* Creates an initially empty {@code LinkedTransferQueue}.
*/
public LinkedTransferQueue() {
}
/**
* Creates a {@code LinkedTransferQueue}
* initially containing the elements of the given collection,
* added in traversal order of the collection's iterator.
*
* @param c the collection of elements to initially contain
* @throws NullPointerException if the specified collection or any
* of its elements are null
*/
public LinkedTransferQueue(Collection<? extends E> c) {
this();
addAll(c);
}
/**
* Inserts the specified element at the tail of this queue.
* As the queue is unbounded, this method will never block.
*
* @throws NullPointerException if the specified element is null
*/
public void put(E e) {
xfer(e, true, ASYNC, 0);
}
/**
* Inserts the specified element at the tail of this queue.
* As the queue is unbounded, this method will never block or
* return {@code false}.
*
* @return {@code true} (as specified by
* {@link java.util.concurrent.BlockingQueue#offer(Object,long,TimeUnit)
* BlockingQueue.offer})
* @throws NullPointerException if the specified element is null
*/
public boolean offer(E e, long timeout, TimeUnit unit) {
xfer(e, true, ASYNC, 0);
return true;
}
/**
* Inserts the specified element at the tail of this queue.
* As the queue is unbounded, this method will never return {@code false}.
*
* @return {@code true} (as specified by {@link Queue#offer})
* @throws NullPointerException if the specified element is null
*/
public boolean offer(E e) {
xfer(e, true, ASYNC, 0);
return true;
}
/**
* Inserts the specified element at the tail of this queue.
* As the queue is unbounded, this method will never throw
* {@link IllegalStateException} or return {@code false}.
*
* @return {@code true} (as specified by {@link Collection#add})
* @throws NullPointerException if the specified element is null
*/
public boolean add(E e) {
xfer(e, true, ASYNC, 0);
return true;
}
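    /*
     * Illustrative usage sketch (not part of the original source): because
     * the queue is unbounded, put/offer/add never block and are effectively
     * equivalent; elements are later retrieved with peek/poll/take.
     *
     *   LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
     *   q.put("a");                 // never blocks
     *   q.offer("b");               // always returns true
     *   String head = q.peek();     // "a", without removing it
     *   String first = q.poll();    // "a", removed; returns null if empty
     */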
/**
* Transfers the element to a waiting consumer immediately, if possible.
*
* <p>More precisely, transfers the specified element immediately
* if there exists a consumer already waiting to receive it (in
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
* otherwise returning {@code false} without enqueuing the element.
*
* @throws NullPointerException if the specified element is null
*/
public boolean tryTransfer(E e) {
return xfer(e, true, NOW, 0) == null;
}
/**
* Transfers the element to a consumer, waiting if necessary to do so.
*
* <p>More precisely, transfers the specified element immediately
* if there exists a consumer already waiting to receive it (in
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
* else inserts the specified element at the tail of this queue
* and waits until the element is received by a consumer.
*
* @throws NullPointerException if the specified element is null
*/
public void transfer(E e) throws InterruptedException {
if (xfer(e, true, SYNC, 0) != null) {
Thread.interrupted(); // failure possible only due to interrupt
throw new InterruptedException();
}
}
/**
* Transfers the element to a consumer if it is possible to do so
* before the timeout elapses.
*
* <p>More precisely, transfers the specified element immediately
* if there exists a consumer already waiting to receive it (in
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
* else inserts the specified element at the tail of this queue
* and waits until the element is received by a consumer,
* returning {@code false} if the specified wait time elapses
* before the element can be transferred.
*
* @throws NullPointerException if the specified element is null
*/
public boolean tryTransfer(E e, long timeout, TimeUnit unit)
throws InterruptedException {
if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null)
return true;
if (!Thread.interrupted())
return false;
throw new InterruptedException();
}
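    /*
     * Illustrative usage sketch (not part of the original source): transfer()
     * hands an element directly to a consumer and blocks until it is taken,
     * whereas tryTransfer() succeeds only if a consumer is already waiting.
     *
     *   final LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
     *   new Thread() {
     *       public void run() {
     *           try {
     *               String s = q.take();         // consumer blocks for an element
     *           } catch (InterruptedException ie) {
     *               Thread.currentThread().interrupt();
     *           }
     *       }
     *   }.start();
     *   q.transfer("job");                       // returns only after the consumer receives it
     *   boolean handedOff = q.tryTransfer("x");  // false unless another consumer is waiting
     */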
public E take() throws InterruptedException {
E e = xfer(null, false, SYNC, 0);
if (e != null)
return e;
Thread.interrupted();
throw new InterruptedException();
}
public E poll(long timeout, TimeUnit unit) throws InterruptedException {
E e = xfer(null, false, TIMED, unit.toNanos(timeout));
if (e != null || !Thread.interrupted())
return e;
throw new InterruptedException();
}
public E poll() {
return xfer(null, false, NOW, 0);
}
/**
* @throws NullPointerException {@inheritDoc}
* @throws IllegalArgumentException {@inheritDoc}
*/
public int drainTo(Collection<? super E> c) {
if (c == null)
throw new NullPointerException();
if (c == this)
throw new IllegalArgumentException();
int n = 0;
for (E e; (e = poll()) != null;) {
c.add(e);
++n;
}
return n;
}
/**
* @throws NullPointerException {@inheritDoc}
* @throws IllegalArgumentException {@inheritDoc}
*/
public int drainTo(Collection<? super E> c, int maxElements) {
if (c == null)
throw new NullPointerException();
if (c == this)
throw new IllegalArgumentException();
int n = 0;
for (E e; n < maxElements && (e = poll()) != null;) {
c.add(e);
++n;
}
return n;
}
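    /*
     * Illustrative usage sketch (not part of the original source), assuming
     * java.util.List/ArrayList: drainTo moves currently available elements
     * into another collection in one pass, which is often cheaper than
     * repeated poll() calls by the caller.
     *
     *   List<String> batch = new ArrayList<String>();
     *   int moved = q.drainTo(batch);            // q is a LinkedTransferQueue<String>
     *   int movedSome = q.drainTo(batch, 100);   // or bound the batch size
     */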
/**
* Returns an iterator over the elements in this queue in proper sequence.
* The elements will be returned in order from first (head) to last (tail).
*
* <p>The returned iterator is a "weakly consistent" iterator that
* will never throw {@link java.util.ConcurrentModificationException
* ConcurrentModificationException}, and guarantees to traverse
* elements as they existed upon construction of the iterator, and
* may (but is not guaranteed to) reflect any modifications
* subsequent to construction.
*
* @return an iterator over the elements in this queue in proper sequence
*/
public Iterator<E> iterator() {
return new Itr();
}
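    /*
     * Illustrative usage sketch (not part of the original source): the
     * iterator is weakly consistent, so it may be used while other threads
     * add or remove elements without risking
     * ConcurrentModificationException.
     *
     *   for (String s : q) {          // q is a LinkedTransferQueue<String>
     *       process(s);               // process(...) is a hypothetical consumer method
     *   }
     */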
public E peek() {
return firstDataItem();
}
/**
* Returns {@code true} if this queue contains no elements.
*
* @return {@code true} if this queue contains no elements
*/
public boolean isEmpty() {
for (Node p = head; p != null; p = succ(p)) {
if (!p.isMatched())
return !p.isData;
}
return true;
}
public boolean hasWaitingConsumer() {
return firstOfMode(false) != null;
}
/**
* Returns the number of elements in this queue. If this queue
* contains more than {@code Integer.MAX_VALUE} elements, returns
* {@code Integer.MAX_VALUE}.
*
* <p>Beware that, unlike in most collections, this method is
* <em>NOT</em> a constant-time operation. Because of the
* asynchronous nature of these queues, determining the current
* number of elements requires an O(n) traversal.
*
* @return the number of elements in this queue
*/
public int size() {
return countOfMode(true);
}
public int getWaitingConsumerCount() {
return countOfMode(false);
}
/**
* Removes a single instance of the specified element from this queue,
* if it is present. More formally, removes an element {@code e} such
* that {@code o.equals(e)}, if this queue contains one or more such
* elements.
* Returns {@code true} if this queue contained the specified element
* (or equivalently, if this queue changed as a result of the call).
*
* @param o element to be removed from this queue, if present
* @return {@code true} if this queue changed as a result of the call
*/
public boolean remove(Object o) {
return findAndRemove(o);
}
/**
* Returns {@code true} if this queue contains the specified element.
* More formally, returns {@code true} if and only if this queue contains
* at least one element {@code e} such that {@code o.equals(e)}.
*
* @param o object to be checked for containment in this queue
* @return {@code true} if this queue contains the specified element
*/
public boolean contains(Object o) {
if (o == null) return false;
for (Node p = head; p != null; p = succ(p)) {
Object item = p.item;
if (p.isData) {
if (item != null && item != p && o.equals(item))
return true;
}
else if (item == null)
break;
}
return false;
}
/**
* Always returns {@code Integer.MAX_VALUE} because a
* {@code LinkedTransferQueue} is not capacity constrained.
*
* @return {@code Integer.MAX_VALUE} (as specified by
* {@link java.util.concurrent.BlockingQueue#remainingCapacity()
* BlockingQueue.remainingCapacity})
*/
public int remainingCapacity() {
return Integer.MAX_VALUE;
}
/**
* Saves the state to a stream (that is, serializes it).
*
* @serialData All of the elements (each an {@code E}) in
* the proper order, followed by a null
* @param s the stream
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
for (E e : this)
s.writeObject(e);
// Use trailing null as sentinel
s.writeObject(null);
}
/**
* Reconstitutes the Queue instance from a stream (that is,
* deserializes it).
*
* @param s the stream
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
for (;;) {
@SuppressWarnings("unchecked")
E item = (E) s.readObject();
if (item == null)
break;
else
offer(item);
}
}
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long headOffset;
private static final long tailOffset;
private static final long sweepVotesOffset;
static {
try {
UNSAFE = getUnsafe();
Class<?> k = LinkedTransferQueue.class;
headOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("head"));
tailOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("tail"));
sweepVotesOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("sweepVotes"));
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException se) {
try {
return java.security.AccessController.doPrivileged
(new java.security
.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
java.lang.reflect.Field f = sun.misc
.Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
return (sun.misc.Unsafe) f.get(null);
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/jsr166y/Phaser.java
|
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166y;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.LockSupport;
/**
* A reusable synchronization barrier, similar in functionality to
* {@link java.util.concurrent.CyclicBarrier CyclicBarrier} and
* {@link java.util.concurrent.CountDownLatch CountDownLatch}
* but supporting more flexible usage.
*
* <p> <b>Registration.</b> Unlike the case for other barriers, the
* number of parties <em>registered</em> to synchronize on a phaser
* may vary over time. Tasks may be registered at any time (using
* methods {@link #register}, {@link #bulkRegister}, or forms of
* constructors establishing initial numbers of parties), and
* optionally deregistered upon any arrival (using {@link
* #arriveAndDeregister}). As is the case with most basic
* synchronization constructs, registration and deregistration affect
* only internal counts; they do not establish any further internal
* bookkeeping, so tasks cannot query whether they are registered.
* (However, you can introduce such bookkeeping by subclassing this
* class.)
*
* <p> <b>Synchronization.</b> Like a {@code CyclicBarrier}, a {@code
* Phaser} may be repeatedly awaited. Method {@link
* #arriveAndAwaitAdvance} has effect analogous to {@link
* java.util.concurrent.CyclicBarrier#await CyclicBarrier.await}. Each
* generation of a phaser has an associated phase number. The phase
* number starts at zero, and advances when all parties arrive at the
* phaser, wrapping around to zero after reaching {@code
* Integer.MAX_VALUE}. The use of phase numbers enables independent
* control of actions upon arrival at a phaser and upon awaiting
* others, via two kinds of methods that may be invoked by any
* registered party:
*
* <ul>
*
* <li> <b>Arrival.</b> Methods {@link #arrive} and
* {@link #arriveAndDeregister} record arrival. These methods
* do not block, but return an associated <em>arrival phase
* number</em>; that is, the phase number of the phaser to which
* the arrival applied. When the final party for a given phase
* arrives, an optional action is performed and the phase
* advances. These actions are performed by the party
* triggering a phase advance, and are arranged by overriding
* method {@link #onAdvance(int, int)}, which also controls
* termination. Overriding this method is similar to, but more
* flexible than, providing a barrier action to a {@code
* CyclicBarrier}.
*
* <li> <b>Waiting.</b> Method {@link #awaitAdvance} requires an
* argument indicating an arrival phase number, and returns when
* the phaser advances to (or is already at) a different phase.
* Unlike similar constructions using {@code CyclicBarrier},
* method {@code awaitAdvance} continues to wait even if the
* waiting thread is interrupted. Interruptible and timeout
* versions are also available, but exceptions encountered while
* tasks wait interruptibly or with timeout do not change the
* state of the phaser. If necessary, you can perform any
* associated recovery within handlers of those exceptions,
* often after invoking {@code forceTermination}. Phasers may
* also be used by tasks executing in a {@link ForkJoinPool},
* which will ensure sufficient parallelism to execute tasks
* when others are blocked waiting for a phase to advance.
*
* </ul>
*
* <p> <b>Termination.</b> A phaser may enter a <em>termination</em>
* state, that may be checked using method {@link #isTerminated}. Upon
* termination, all synchronization methods immediately return without
* waiting for advance, as indicated by a negative return value.
* Similarly, attempts to register upon termination have no effect.
* Termination is triggered when an invocation of {@code onAdvance}
* returns {@code true}. The default implementation returns {@code
* true} if a deregistration has caused the number of registered
* parties to become zero. As illustrated below, when phasers control
* actions with a fixed number of iterations, it is often convenient
* to override this method to cause termination when the current phase
* number reaches a threshold. Method {@link #forceTermination} is
* also available to abruptly release waiting threads and allow them
* to terminate.
*
* <p> <b>Tiering.</b> Phasers may be <em>tiered</em> (i.e.,
* constructed in tree structures) to reduce contention. Phasers with
* large numbers of parties that would otherwise experience heavy
* synchronization contention costs may instead be set up so that
* groups of sub-phasers share a common parent. This may greatly
* increase throughput even though it incurs greater per-operation
* overhead.
*
* <p>In a tree of tiered phasers, registration and deregistration of
* child phasers with their parent are managed automatically.
* Whenever the number of registered parties of a child phaser becomes
* non-zero (as established in the {@link #Phaser(Phaser,int)}
* constructor, {@link #register}, or {@link #bulkRegister}), the
* child phaser is registered with its parent. Whenever the number of
* registered parties becomes zero as the result of an invocation of
* {@link #arriveAndDeregister}, the child phaser is deregistered
* from its parent.
*
* <p><b>Monitoring.</b> While synchronization methods may be invoked
* only by registered parties, the current state of a phaser may be
* monitored by any caller. At any given moment there are {@link
* #getRegisteredParties} parties in total, of which {@link
* #getArrivedParties} have arrived at the current phase ({@link
* #getPhase}). When the remaining ({@link #getUnarrivedParties})
* parties arrive, the phase advances. The values returned by these
* methods may reflect transient states and so are not in general
* useful for synchronization control. Method {@link #toString}
* returns snapshots of these state queries in a form convenient for
* informal monitoring.
*
* <p><b>Sample usages:</b>
*
* <p>A {@code Phaser} may be used instead of a {@code CountDownLatch}
* to control a one-shot action serving a variable number of parties.
* The typical idiom is for the method setting this up to first
* register, then start the actions, then deregister, as in:
*
* <pre> {@code
* void runTasks(List<Runnable> tasks) {
* final Phaser phaser = new Phaser(1); // "1" to register self
* // create and start threads
* for (final Runnable task : tasks) {
* phaser.register();
* new Thread() {
* public void run() {
* phaser.arriveAndAwaitAdvance(); // await all creation
* task.run();
* }
* }.start();
* }
*
* // allow threads to start and deregister self
* phaser.arriveAndDeregister();
* }}</pre>
*
* <p>One way to cause a set of threads to repeatedly perform actions
* for a given number of iterations is to override {@code onAdvance}:
*
* <pre> {@code
* void startTasks(List<Runnable> tasks, final int iterations) {
* final Phaser phaser = new Phaser() {
* protected boolean onAdvance(int phase, int registeredParties) {
* return phase >= iterations || registeredParties == 0;
* }
* };
* phaser.register();
* for (final Runnable task : tasks) {
* phaser.register();
* new Thread() {
* public void run() {
* do {
* task.run();
* phaser.arriveAndAwaitAdvance();
* } while (!phaser.isTerminated());
* }
* }.start();
* }
* phaser.arriveAndDeregister(); // deregister self, don't wait
* }}</pre>
*
* If the main task must later await termination, it
* may re-register and then execute a similar loop:
* <pre> {@code
* // ...
* phaser.register();
* while (!phaser.isTerminated())
* phaser.arriveAndAwaitAdvance();}</pre>
*
* <p>Related constructions may be used to await particular phase numbers
* in contexts where you are sure that the phase will never wrap around
* {@code Integer.MAX_VALUE}. For example:
*
* <pre> {@code
* void awaitPhase(Phaser phaser, int phase) {
* int p = phaser.register(); // assumes caller not already registered
* while (p < phase) {
* if (phaser.isTerminated())
* // ... deal with unexpected termination
* else
* p = phaser.arriveAndAwaitAdvance();
* }
* phaser.arriveAndDeregister();
* }}</pre>
*
*
* <p>To create a set of {@code n} tasks using a tree of phasers, you
* could use code of the following form, assuming a Task class with a
* constructor accepting a {@code Phaser} that it registers with upon
* construction. After invocation of {@code build(new Task[n], 0, n,
* new Phaser())}, these tasks could then be started, for example by
* submitting to a pool:
*
* <pre> {@code
* void build(Task[] tasks, int lo, int hi, Phaser ph) {
* if (hi - lo > TASKS_PER_PHASER) {
* for (int i = lo; i < hi; i += TASKS_PER_PHASER) {
* int j = Math.min(i + TASKS_PER_PHASER, hi);
* build(tasks, i, j, new Phaser(ph));
* }
* } else {
* for (int i = lo; i < hi; ++i)
* tasks[i] = new Task(ph);
* // assumes new Task(ph) performs ph.register()
* }
* }}</pre>
*
* The best value of {@code TASKS_PER_PHASER} depends mainly on
* expected synchronization rates. A value as low as four may
* be appropriate for extremely small per-phase task bodies (thus
* high rates), or up to hundreds for extremely large ones.
*
* <p><b>Implementation notes</b>: This implementation restricts the
* maximum number of parties to 65535. Attempts to register additional
* parties result in {@code IllegalStateException}. However, you can and
* should create tiered phasers to accommodate arbitrarily large sets
* of participants.
*
* @since 1.7
* @author Doug Lea
*/
public class Phaser {
/*
* This class implements an extension of X10 "clocks". Thanks to
* Vijay Saraswat for the idea, and to Vivek Sarkar for
* enhancements to extend functionality.
*/
/**
* Primary state representation, holding four bit-fields:
*
* unarrived -- the number of parties yet to hit barrier (bits 0-15)
* parties -- the number of parties to wait (bits 16-31)
* phase -- the generation of the barrier (bits 32-62)
* terminated -- set if barrier is terminated (bit 63 / sign)
*
* Except that a phaser with no registered parties is
* distinguished by the otherwise illegal state of having zero
     * parties and one unarrived party (encoded as EMPTY below).
*
* To efficiently maintain atomicity, these values are packed into
* a single (atomic) long. Good performance relies on keeping
* state decoding and encoding simple, and keeping race windows
* short.
*
* All state updates are performed via CAS except initial
* registration of a sub-phaser (i.e., one with a non-null
* parent). In this (relatively rare) case, we use built-in
* synchronization to lock while first registering with its
* parent.
*
* The phase of a subphaser is allowed to lag that of its
* ancestors until it is actually accessed -- see method
* reconcileState.
*/
private volatile long state;
private static final int MAX_PARTIES = 0xffff;
private static final int MAX_PHASE = Integer.MAX_VALUE;
private static final int PARTIES_SHIFT = 16;
private static final int PHASE_SHIFT = 32;
private static final int UNARRIVED_MASK = 0xffff; // to mask ints
private static final long PARTIES_MASK = 0xffff0000L; // to mask longs
private static final long COUNTS_MASK = 0xffffffffL;
private static final long TERMINATION_BIT = 1L << 63;
// some special values
private static final int ONE_ARRIVAL = 1;
private static final int ONE_PARTY = 1 << PARTIES_SHIFT;
private static final int ONE_DEREGISTER = ONE_ARRIVAL|ONE_PARTY;
private static final int EMPTY = 1;
// The following unpacking methods are usually manually inlined
private static int unarrivedOf(long s) {
int counts = (int)s;
return (counts == EMPTY) ? 0 : (counts & UNARRIVED_MASK);
}
private static int partiesOf(long s) {
return (int)s >>> PARTIES_SHIFT;
}
private static int phaseOf(long s) {
return (int)(s >>> PHASE_SHIFT);
}
private static int arrivedOf(long s) {
int counts = (int)s;
return (counts == EMPTY) ? 0 :
(counts >>> PARTIES_SHIFT) - (counts & UNARRIVED_MASK);
}
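    /*
     * Editorial note (not part of the original source): a worked example of
     * the packing described above. For a root phaser at phase 3 with 5
     * registered parties of which 2 are still unarrived, the state long is
     *
     *   (3L << PHASE_SHIFT) | (5L << PARTIES_SHIFT) | 2
     *     == 0x0000000300050002L
     *
     * so phaseOf(s) == 3, partiesOf(s) == 5, unarrivedOf(s) == 2 and
     * arrivedOf(s) == 5 - 2 == 3.
     */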
/**
* The parent of this phaser, or null if none
*/
private final Phaser parent;
/**
* The root of phaser tree. Equals this if not in a tree.
*/
private final Phaser root;
/**
* Heads of Treiber stacks for waiting threads. To eliminate
* contention when releasing some threads while adding others, we
* use two of them, alternating across even and odd phases.
* Subphasers share queues with root to speed up releases.
*/
private final AtomicReference<QNode> evenQ;
private final AtomicReference<QNode> oddQ;
private AtomicReference<QNode> queueFor(int phase) {
return ((phase & 1) == 0) ? evenQ : oddQ;
}
/**
* Returns message string for bounds exceptions on arrival.
*/
private String badArrive(long s) {
return "Attempted arrival of unregistered party for " +
stateToString(s);
}
/**
* Returns message string for bounds exceptions on registration.
*/
private String badRegister(long s) {
return "Attempt to register more than " +
MAX_PARTIES + " parties for " + stateToString(s);
}
/**
* Main implementation for methods arrive and arriveAndDeregister.
* Manually tuned to speed up and minimize race windows for the
* common case of just decrementing unarrived field.
*
* @param adjust value to subtract from state;
* ONE_ARRIVAL for arrive,
* ONE_DEREGISTER for arriveAndDeregister
*/
private int doArrive(int adjust) {
final Phaser root = this.root;
for (;;) {
long s = (root == this) ? state : reconcileState();
int phase = (int)(s >>> PHASE_SHIFT);
if (phase < 0)
return phase;
int counts = (int)s;
int unarrived = (counts == EMPTY) ? 0 : (counts & UNARRIVED_MASK);
if (unarrived <= 0)
throw new IllegalStateException(badArrive(s));
if (UNSAFE.compareAndSwapLong(this, stateOffset, s, s-=adjust)) {
if (unarrived == 1) {
long n = s & PARTIES_MASK; // base of next state
int nextUnarrived = (int)n >>> PARTIES_SHIFT;
if (root == this) {
if (onAdvance(phase, nextUnarrived))
n |= TERMINATION_BIT;
else if (nextUnarrived == 0)
n |= EMPTY;
else
n |= nextUnarrived;
int nextPhase = (phase + 1) & MAX_PHASE;
n |= (long)nextPhase << PHASE_SHIFT;
UNSAFE.compareAndSwapLong(this, stateOffset, s, n);
releaseWaiters(phase);
}
else if (nextUnarrived == 0) { // propagate deregistration
phase = parent.doArrive(ONE_DEREGISTER);
UNSAFE.compareAndSwapLong(this, stateOffset,
s, s | EMPTY);
}
else
phase = parent.doArrive(ONE_ARRIVAL);
}
return phase;
}
}
}
/**
* Implementation of register, bulkRegister
*
* @param registrations number to add to both parties and
* unarrived fields. Must be greater than zero.
*/
private int doRegister(int registrations) {
// adjustment to state
long adjust = ((long)registrations << PARTIES_SHIFT) | registrations;
final Phaser parent = this.parent;
int phase;
for (;;) {
long s = (parent == null) ? state : reconcileState();
int counts = (int)s;
int parties = counts >>> PARTIES_SHIFT;
int unarrived = counts & UNARRIVED_MASK;
if (registrations > MAX_PARTIES - parties)
throw new IllegalStateException(badRegister(s));
phase = (int)(s >>> PHASE_SHIFT);
if (phase < 0)
break;
if (counts != EMPTY) { // not 1st registration
if (parent == null || reconcileState() == s) {
if (unarrived == 0) // wait out advance
root.internalAwaitAdvance(phase, null);
else if (UNSAFE.compareAndSwapLong(this, stateOffset,
s, s + adjust))
break;
}
}
else if (parent == null) { // 1st root registration
long next = ((long)phase << PHASE_SHIFT) | adjust;
if (UNSAFE.compareAndSwapLong(this, stateOffset, s, next))
break;
}
else {
synchronized (this) { // 1st sub registration
if (state == s) { // recheck under lock
phase = parent.doRegister(1);
if (phase < 0)
break;
// finish registration whenever parent registration
// succeeded, even when racing with termination,
// since these are part of the same "transaction".
while (!UNSAFE.compareAndSwapLong
(this, stateOffset, s,
((long)phase << PHASE_SHIFT) | adjust)) {
s = state;
phase = (int)(root.state >>> PHASE_SHIFT);
// assert (int)s == EMPTY;
}
break;
}
}
}
}
return phase;
}
/**
* Resolves lagged phase propagation from root if necessary.
* Reconciliation normally occurs when root has advanced but
* subphasers have not yet done so, in which case they must finish
* their own advance by setting unarrived to parties (or if
* parties is zero, resetting to unregistered EMPTY state).
*
* @return reconciled state
*/
private long reconcileState() {
final Phaser root = this.root;
long s = state;
if (root != this) {
int phase, p;
// CAS to root phase with current parties, tripping unarrived
while ((phase = (int)(root.state >>> PHASE_SHIFT)) !=
(int)(s >>> PHASE_SHIFT) &&
!UNSAFE.compareAndSwapLong
(this, stateOffset, s,
s = (((long)phase << PHASE_SHIFT) |
((phase < 0) ? (s & COUNTS_MASK) :
(((p = (int)s >>> PARTIES_SHIFT) == 0) ? EMPTY :
((s & PARTIES_MASK) | p))))))
s = state;
}
return s;
}
/**
* Creates a new phaser with no initially registered parties, no
* parent, and initial phase number 0. Any thread using this
* phaser will need to first register for it.
*/
public Phaser() {
this(null, 0);
}
/**
* Creates a new phaser with the given number of registered
* unarrived parties, no parent, and initial phase number 0.
*
* @param parties the number of parties required to advance to the
* next phase
* @throws IllegalArgumentException if parties less than zero
* or greater than the maximum number of parties supported
*/
public Phaser(int parties) {
this(null, parties);
}
/**
* Equivalent to {@link #Phaser(Phaser, int) Phaser(parent, 0)}.
*
* @param parent the parent phaser
*/
public Phaser(Phaser parent) {
this(parent, 0);
}
/**
* Creates a new phaser with the given parent and number of
* registered unarrived parties. When the given parent is non-null
* and the given number of parties is greater than zero, this
* child phaser is registered with its parent.
*
* @param parent the parent phaser
* @param parties the number of parties required to advance to the
* next phase
* @throws IllegalArgumentException if parties less than zero
* or greater than the maximum number of parties supported
*/
public Phaser(Phaser parent, int parties) {
if (parties >>> PARTIES_SHIFT != 0)
throw new IllegalArgumentException("Illegal number of parties");
int phase = 0;
this.parent = parent;
if (parent != null) {
final Phaser root = parent.root;
this.root = root;
this.evenQ = root.evenQ;
this.oddQ = root.oddQ;
if (parties != 0)
phase = parent.doRegister(1);
}
else {
this.root = this;
this.evenQ = new AtomicReference<QNode>();
this.oddQ = new AtomicReference<QNode>();
}
this.state = (parties == 0) ? (long)EMPTY :
((long)phase << PHASE_SHIFT) |
((long)parties << PARTIES_SHIFT) |
((long)parties);
}
/**
* Adds a new unarrived party to this phaser. If an ongoing
* invocation of {@link #onAdvance} is in progress, this method
* may await its completion before returning. If this phaser has
* a parent, and this phaser previously had no registered parties,
* this child phaser is also registered with its parent. If
* this phaser is terminated, the attempt to register has
* no effect, and a negative value is returned.
*
* @return the arrival phase number to which this registration
* applied. If this value is negative, then this phaser has
* terminated, in which case registration has no effect.
* @throws IllegalStateException if attempting to register more
* than the maximum supported number of parties
*/
public int register() {
return doRegister(1);
}
/**
* Adds the given number of new unarrived parties to this phaser.
* If an ongoing invocation of {@link #onAdvance} is in progress,
* this method may await its completion before returning. If this
* phaser has a parent, and the given number of parties is greater
* than zero, and this phaser previously had no registered
* parties, this child phaser is also registered with its parent.
* If this phaser is terminated, the attempt to register has no
* effect, and a negative value is returned.
*
* @param parties the number of additional parties required to
* advance to the next phase
* @return the arrival phase number to which this registration
* applied. If this value is negative, then this phaser has
* terminated, in which case registration has no effect.
* @throws IllegalStateException if attempting to register more
* than the maximum supported number of parties
* @throws IllegalArgumentException if {@code parties < 0}
*/
public int bulkRegister(int parties) {
if (parties < 0)
throw new IllegalArgumentException();
if (parties == 0)
return getPhase();
return doRegister(parties);
}
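    /*
     * Illustrative usage sketch (not part of the original source):
     * registering all worker parties in one call instead of one register()
     * per worker. nWorkers and doWork() are hypothetical.
     *
     *   final Phaser phaser = new Phaser();
     *   phaser.bulkRegister(nWorkers);
     *   for (int i = 0; i < nWorkers; ++i)
     *       new Thread() {
     *           public void run() {
     *               doWork();                        // per-phase work
     *               phaser.arriveAndAwaitAdvance();  // wait for the other workers
     *           }
     *       }.start();
     */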
/**
* Arrives at this phaser, without waiting for others to arrive.
*
* <p>It is a usage error for an unregistered party to invoke this
* method. However, this error may result in an {@code
* IllegalStateException} only upon some subsequent operation on
* this phaser, if ever.
*
* @return the arrival phase number, or a negative value if terminated
* @throws IllegalStateException if not terminated and the number
* of unarrived parties would become negative
*/
public int arrive() {
return doArrive(ONE_ARRIVAL);
}
/**
* Arrives at this phaser and deregisters from it without waiting
* for others to arrive. Deregistration reduces the number of
* parties required to advance in future phases. If this phaser
* has a parent, and deregistration causes this phaser to have
* zero parties, this phaser is also deregistered from its parent.
*
* <p>It is a usage error for an unregistered party to invoke this
* method. However, this error may result in an {@code
* IllegalStateException} only upon some subsequent operation on
* this phaser, if ever.
*
* @return the arrival phase number, or a negative value if terminated
* @throws IllegalStateException if not terminated and the number
* of registered or unarrived parties would become negative
*/
public int arriveAndDeregister() {
return doArrive(ONE_DEREGISTER);
}
/**
* Arrives at this phaser and awaits others. Equivalent in effect
* to {@code awaitAdvance(arrive())}. If you need to await with
* interruption or timeout, you can arrange this with an analogous
* construction using one of the other forms of the {@code
* awaitAdvance} method. If instead you need to deregister upon
* arrival, use {@code awaitAdvance(arriveAndDeregister())}.
*
* <p>It is a usage error for an unregistered party to invoke this
* method. However, this error may result in an {@code
* IllegalStateException} only upon some subsequent operation on
* this phaser, if ever.
*
* @return the arrival phase number, or the (negative)
* {@linkplain #getPhase() current phase} if terminated
* @throws IllegalStateException if not terminated and the number
* of unarrived parties would become negative
*/
public int arriveAndAwaitAdvance() {
// Specialization of doArrive+awaitAdvance eliminating some reads/paths
final Phaser root = this.root;
for (;;) {
long s = (root == this) ? state : reconcileState();
int phase = (int)(s >>> PHASE_SHIFT);
if (phase < 0)
return phase;
int counts = (int)s;
int unarrived = (counts == EMPTY) ? 0 : (counts & UNARRIVED_MASK);
if (unarrived <= 0)
throw new IllegalStateException(badArrive(s));
if (UNSAFE.compareAndSwapLong(this, stateOffset, s,
s -= ONE_ARRIVAL)) {
if (unarrived > 1)
return root.internalAwaitAdvance(phase, null);
if (root != this)
return parent.arriveAndAwaitAdvance();
long n = s & PARTIES_MASK; // base of next state
int nextUnarrived = (int)n >>> PARTIES_SHIFT;
if (onAdvance(phase, nextUnarrived))
n |= TERMINATION_BIT;
else if (nextUnarrived == 0)
n |= EMPTY;
else
n |= nextUnarrived;
int nextPhase = (phase + 1) & MAX_PHASE;
n |= (long)nextPhase << PHASE_SHIFT;
if (!UNSAFE.compareAndSwapLong(this, stateOffset, s, n))
return (int)(state >>> PHASE_SHIFT); // terminated
releaseWaiters(phase);
return nextPhase;
}
}
}
/**
* Awaits the phase of this phaser to advance from the given phase
* value, returning immediately if the current phase is not equal
* to the given phase value or this phaser is terminated.
*
* @param phase an arrival phase number, or negative value if
* terminated; this argument is normally the value returned by a
* previous call to {@code arrive} or {@code arriveAndDeregister}.
* @return the next arrival phase number, or the argument if it is
* negative, or the (negative) {@linkplain #getPhase() current phase}
* if terminated
*/
public int awaitAdvance(int phase) {
final Phaser root = this.root;
long s = (root == this) ? state : reconcileState();
int p = (int)(s >>> PHASE_SHIFT);
if (phase < 0)
return phase;
if (p == phase)
return root.internalAwaitAdvance(phase, null);
return p;
}
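    /*
     * Illustrative usage sketch (not part of the original source): arrive()
     * and awaitAdvance() can be split so a registered party records its
     * arrival, performs work that does not depend on the other parties, and
     * only then waits. phaser and doIndependentWork() are hypothetical.
     *
     *   int phase = phaser.arrive();     // non-blocking arrival
     *   doIndependentWork();             // overlaps with the other parties' arrivals
     *   phaser.awaitAdvance(phase);      // block until every party has arrived
     */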
/**
* Awaits the phase of this phaser to advance from the given phase
* value, throwing {@code InterruptedException} if interrupted
* while waiting, or returning immediately if the current phase is
* not equal to the given phase value or this phaser is
* terminated.
*
* @param phase an arrival phase number, or negative value if
* terminated; this argument is normally the value returned by a
* previous call to {@code arrive} or {@code arriveAndDeregister}.
* @return the next arrival phase number, or the argument if it is
* negative, or the (negative) {@linkplain #getPhase() current phase}
* if terminated
* @throws InterruptedException if thread interrupted while waiting
*/
public int awaitAdvanceInterruptibly(int phase)
throws InterruptedException {
final Phaser root = this.root;
long s = (root == this) ? state : reconcileState();
int p = (int)(s >>> PHASE_SHIFT);
if (phase < 0)
return phase;
if (p == phase) {
QNode node = new QNode(this, phase, true, false, 0L);
p = root.internalAwaitAdvance(phase, node);
if (node.wasInterrupted)
throw new InterruptedException();
}
return p;
}
/**
* Awaits the phase of this phaser to advance from the given phase
* value or the given timeout to elapse, throwing {@code
* InterruptedException} if interrupted while waiting, or
* returning immediately if the current phase is not equal to the
* given phase value or this phaser is terminated.
*
* @param phase an arrival phase number, or negative value if
* terminated; this argument is normally the value returned by a
* previous call to {@code arrive} or {@code arriveAndDeregister}.
* @param timeout how long to wait before giving up, in units of
* {@code unit}
* @param unit a {@code TimeUnit} determining how to interpret the
* {@code timeout} parameter
* @return the next arrival phase number, or the argument if it is
* negative, or the (negative) {@linkplain #getPhase() current phase}
* if terminated
* @throws InterruptedException if thread interrupted while waiting
* @throws TimeoutException if timed out while waiting
*/
public int awaitAdvanceInterruptibly(int phase,
long timeout, TimeUnit unit)
throws InterruptedException, TimeoutException {
long nanos = unit.toNanos(timeout);
final Phaser root = this.root;
long s = (root == this) ? state : reconcileState();
int p = (int)(s >>> PHASE_SHIFT);
if (phase < 0)
return phase;
if (p == phase) {
QNode node = new QNode(this, phase, true, true, nanos);
p = root.internalAwaitAdvance(phase, node);
if (node.wasInterrupted)
throw new InterruptedException();
else if (p == phase)
throw new TimeoutException();
}
return p;
}
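    /*
     * Illustrative usage sketch (not part of the original source): bounding
     * the wait for a phase to advance and recovering on timeout, here by
     * forcing termination so that other waiters are released. phaser is an
     * already-registered Phaser; the recovery policy is just one option.
     *
     *   int phase = phaser.arrive();
     *   try {
     *       phaser.awaitAdvanceInterruptibly(phase, 10, TimeUnit.SECONDS);
     *   } catch (TimeoutException te) {
     *       phaser.forceTermination();
     *   } catch (InterruptedException ie) {
     *       Thread.currentThread().interrupt();
     *   }
     */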
/**
* Forces this phaser to enter termination state. Counts of
* registered parties are unaffected. If this phaser is a member
* of a tiered set of phasers, then all of the phasers in the set
* are terminated. If this phaser is already terminated, this
* method has no effect. This method may be useful for
* coordinating recovery after one or more tasks encounter
* unexpected exceptions.
*/
public void forceTermination() {
// Only need to change root state
final Phaser root = this.root;
long s;
while ((s = root.state) >= 0) {
if (UNSAFE.compareAndSwapLong(root, stateOffset,
s, s | TERMINATION_BIT)) {
// signal all threads
releaseWaiters(0); // Waiters on evenQ
releaseWaiters(1); // Waiters on oddQ
return;
}
}
}
/**
* Returns the current phase number. The maximum phase number is
* {@code Integer.MAX_VALUE}, after which it restarts at
* zero. Upon termination, the phase number is negative,
* in which case the prevailing phase prior to termination
* may be obtained via {@code getPhase() + Integer.MIN_VALUE}.
*
* @return the phase number, or a negative value if terminated
*/
public final int getPhase() {
return (int)(root.state >>> PHASE_SHIFT);
}
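    /*
     * Editorial note (not part of the original source): upon termination the
     * sign bit of the reported phase is set, so a stored phase value p is
     * returned by getPhase() as Integer.MIN_VALUE + p; adding
     * Integer.MIN_VALUE once more overflows back to p, e.g. for p == 7,
     * (Integer.MIN_VALUE + 7) + Integer.MIN_VALUE == 7.
     */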
/**
* Returns the number of parties registered at this phaser.
*
* @return the number of parties
*/
public int getRegisteredParties() {
return partiesOf(state);
}
/**
* Returns the number of registered parties that have arrived at
* the current phase of this phaser. If this phaser has terminated,
* the returned value is meaningless and arbitrary.
*
* @return the number of arrived parties
*/
public int getArrivedParties() {
return arrivedOf(reconcileState());
}
/**
* Returns the number of registered parties that have not yet
* arrived at the current phase of this phaser. If this phaser has
* terminated, the returned value is meaningless and arbitrary.
*
* @return the number of unarrived parties
*/
public int getUnarrivedParties() {
return unarrivedOf(reconcileState());
}
/**
* Returns the parent of this phaser, or {@code null} if none.
*
* @return the parent of this phaser, or {@code null} if none
*/
public Phaser getParent() {
return parent;
}
/**
* Returns the root ancestor of this phaser, which is the same as
* this phaser if it has no parent.
*
* @return the root ancestor of this phaser
*/
public Phaser getRoot() {
return root;
}
/**
* Returns {@code true} if this phaser has been terminated.
*
* @return {@code true} if this phaser has been terminated
*/
public boolean isTerminated() {
return root.state < 0L;
}
/**
* Overridable method to perform an action upon impending phase
* advance, and to control termination. This method is invoked
* upon arrival of the party advancing this phaser (when all other
* waiting parties are dormant). If this method returns {@code
* true}, this phaser will be set to a final termination state
* upon advance, and subsequent calls to {@link #isTerminated}
* will return true. Any (unchecked) Exception or Error thrown by
* an invocation of this method is propagated to the party
* attempting to advance this phaser, in which case no advance
* occurs.
*
* <p>The arguments to this method provide the state of the phaser
* prevailing for the current transition. The effects of invoking
* arrival, registration, and waiting methods on this phaser from
* within {@code onAdvance} are unspecified and should not be
* relied on.
*
* <p>If this phaser is a member of a tiered set of phasers, then
* {@code onAdvance} is invoked only for its root phaser on each
* advance.
*
* <p>To support the most common use cases, the default
* implementation of this method returns {@code true} when the
* number of registered parties has become zero as the result of a
* party invoking {@code arriveAndDeregister}. You can disable
* this behavior, thus enabling continuation upon future
* registrations, by overriding this method to always return
* {@code false}:
*
* <pre> {@code
* Phaser phaser = new Phaser() {
* protected boolean onAdvance(int phase, int parties) { return false; }
* }}</pre>
*
* @param phase the current phase number on entry to this method,
* before this phaser is advanced
* @param registeredParties the current number of registered parties
* @return {@code true} if this phaser should terminate
*/
protected boolean onAdvance(int phase, int registeredParties) {
return registeredParties == 0;
}
/**
* Returns a string identifying this phaser, as well as its
* state. The state, in brackets, includes the String {@code
* "phase = "} followed by the phase number, {@code "parties = "}
* followed by the number of registered parties, and {@code
* "arrived = "} followed by the number of arrived parties.
*
* @return a string identifying this phaser, as well as its state
*/
public String toString() {
return stateToString(reconcileState());
}
/**
* Implementation of toString and string-based error messages
*/
private String stateToString(long s) {
return super.toString() +
"[phase = " + phaseOf(s) +
" parties = " + partiesOf(s) +
" arrived = " + arrivedOf(s) + "]";
}
// Waiting mechanics
/**
* Removes and signals threads from queue for phase.
*/
private void releaseWaiters(int phase) {
QNode q; // first element of queue
Thread t; // its thread
AtomicReference<QNode> head = (phase & 1) == 0 ? evenQ : oddQ;
while ((q = head.get()) != null &&
q.phase != (int)(root.state >>> PHASE_SHIFT)) {
if (head.compareAndSet(q, q.next) &&
(t = q.thread) != null) {
q.thread = null;
LockSupport.unpark(t);
}
}
}
/**
* Variant of releaseWaiters that additionally tries to remove any
* nodes no longer waiting for advance due to timeout or
* interrupt. Currently, nodes are removed only if they are at
* head of queue, which suffices to reduce memory footprint in
* most usages.
*
* @return current phase on exit
*/
private int abortWait(int phase) {
AtomicReference<QNode> head = (phase & 1) == 0 ? evenQ : oddQ;
for (;;) {
Thread t;
QNode q = head.get();
int p = (int)(root.state >>> PHASE_SHIFT);
if (q == null || ((t = q.thread) != null && q.phase == p))
return p;
if (head.compareAndSet(q, q.next) && t != null) {
q.thread = null;
LockSupport.unpark(t);
}
}
}
/** The number of CPUs, for spin control */
private static final int NCPU = Runtime.getRuntime().availableProcessors();
/**
* The number of times to spin before blocking while waiting for
* advance, per arrival while waiting. On multiprocessors, fully
* blocking and waking up a large number of threads all at once is
* usually a very slow process, so we use rechargeable spins to
* avoid it when threads regularly arrive: When a thread in
* internalAwaitAdvance notices another arrival before blocking,
* and there appear to be enough CPUs available, it spins
* SPINS_PER_ARRIVAL more times before blocking. The value trades
* off good-citizenship vs big unnecessary slowdowns.
*/
static final int SPINS_PER_ARRIVAL = (NCPU < 2) ? 1 : 1 << 8;
/**
* Possibly blocks and waits for phase to advance unless aborted.
* Call only on root phaser.
*
* @param phase current phase
* @param node if non-null, the wait node to track interrupt and timeout;
* if null, denotes noninterruptible wait
* @return current phase
*/
private int internalAwaitAdvance(int phase, QNode node) {
// assert root == this;
releaseWaiters(phase-1); // ensure old queue clean
boolean queued = false; // true when node is enqueued
int lastUnarrived = 0; // to increase spins upon change
int spins = SPINS_PER_ARRIVAL;
long s;
int p;
while ((p = (int)((s = state) >>> PHASE_SHIFT)) == phase) {
if (node == null) { // spinning in noninterruptible mode
int unarrived = (int)s & UNARRIVED_MASK;
if (unarrived != lastUnarrived &&
(lastUnarrived = unarrived) < NCPU)
spins += SPINS_PER_ARRIVAL;
boolean interrupted = Thread.interrupted();
if (interrupted || --spins < 0) { // need node to record intr
node = new QNode(this, phase, false, false, 0L);
node.wasInterrupted = interrupted;
}
}
else if (node.isReleasable()) // done or aborted
break;
else if (!queued) { // push onto queue
AtomicReference<QNode> head = (phase & 1) == 0 ? evenQ : oddQ;
QNode q = node.next = head.get();
if ((q == null || q.phase == phase) &&
(int)(state >>> PHASE_SHIFT) == phase) // avoid stale enq
queued = head.compareAndSet(q, node);
}
else {
try {
ForkJoinPool.managedBlock(node);
} catch (InterruptedException ie) {
node.wasInterrupted = true;
}
}
}
if (node != null) {
if (node.thread != null)
node.thread = null; // avoid need for unpark()
if (node.wasInterrupted && !node.interruptible)
Thread.currentThread().interrupt();
if (p == phase && (p = (int)(state >>> PHASE_SHIFT)) == phase)
return abortWait(phase); // possibly clean up on abort
}
releaseWaiters(phase);
return p;
}
/**
* Wait nodes for Treiber stack representing wait queue
*/
static final class QNode implements ForkJoinPool.ManagedBlocker {
final Phaser phaser;
final int phase;
final boolean interruptible;
final boolean timed;
boolean wasInterrupted;
long nanos;
long lastTime;
volatile Thread thread; // nulled to cancel wait
QNode next;
QNode(Phaser phaser, int phase, boolean interruptible,
boolean timed, long nanos) {
this.phaser = phaser;
this.phase = phase;
this.interruptible = interruptible;
this.nanos = nanos;
this.timed = timed;
this.lastTime = timed ? System.nanoTime() : 0L;
thread = Thread.currentThread();
}
public boolean isReleasable() {
if (thread == null)
return true;
if (phaser.getPhase() != phase) {
thread = null;
return true;
}
if (Thread.interrupted())
wasInterrupted = true;
if (wasInterrupted && interruptible) {
thread = null;
return true;
}
if (timed) {
if (nanos > 0L) {
long now = System.nanoTime();
nanos -= now - lastTime;
lastTime = now;
}
if (nanos <= 0L) {
thread = null;
return true;
}
}
return false;
}
public boolean block() {
if (isReleasable())
return true;
else if (!timed)
LockSupport.park(this);
else if (nanos > 0)
LockSupport.parkNanos(this, nanos);
return isReleasable();
}
}
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long stateOffset;
static {
try {
UNSAFE = getUnsafe();
Class<?> k = Phaser.class;
stateOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("state"));
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException se) {
try {
return java.security.AccessController.doPrivileged
(new java.security
.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
java.lang.reflect.Field f = sun.misc
.Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
return (sun.misc.Unsafe) f.get(null);
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/jsr166y/RecursiveAction.java
|
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166y;
/**
* A recursive resultless {@link ForkJoinTask}. This class
* establishes conventions to parameterize resultless actions as
* {@code Void} {@code ForkJoinTask}s. Because {@code null} is the
* only valid value of type {@code Void}, methods such as {@code join}
* always return {@code null} upon completion.
*
* <p><b>Sample Usages.</b> Here is a simple but complete ForkJoin
* sort that sorts a given {@code long[]} array:
*
* <pre> {@code
* static class SortTask extends RecursiveAction {
* final long[] array; final int lo, hi;
* SortTask(long[] array, int lo, int hi) {
* this.array = array; this.lo = lo; this.hi = hi;
* }
* SortTask(long[] array) { this(array, 0, array.length); }
* protected void compute() {
* if (hi - lo < THRESHOLD)
* sortSequentially(lo, hi);
* else {
* int mid = (lo + hi) >>> 1;
* invokeAll(new SortTask(array, lo, mid),
* new SortTask(array, mid, hi));
* merge(lo, mid, hi);
* }
* }
* // implementation details follow:
* final static int THRESHOLD = 1000;
* void sortSequentially(int lo, int hi) {
* Arrays.sort(array, lo, hi);
* }
* void merge(int lo, int mid, int hi) {
* long[] buf = Arrays.copyOfRange(array, lo, mid);
* for (int i = 0, j = lo, k = mid; i < buf.length; j++)
* array[j] = (k == hi || buf[i] < array[k]) ?
* buf[i++] : array[k++];
* }
* }}</pre>
*
* You could then sort {@code anArray} by creating {@code new
* SortTask(anArray)} and invoking it in a ForkJoinPool. As a more
* concrete simple example, the following task increments each element
* of an array:
* <pre> {@code
* class IncrementTask extends RecursiveAction {
* final long[] array; final int lo, hi;
* IncrementTask(long[] array, int lo, int hi) {
* this.array = array; this.lo = lo; this.hi = hi;
* }
* protected void compute() {
* if (hi - lo < THRESHOLD) {
* for (int i = lo; i < hi; ++i)
* array[i]++;
* }
* else {
* int mid = (lo + hi) >>> 1;
* invokeAll(new IncrementTask(array, lo, mid),
* new IncrementTask(array, mid, hi));
* }
* }
* }}</pre>
*
* <p>The following example illustrates some refinements and idioms
* that may lead to better performance: RecursiveActions need not be
* fully recursive, so long as they maintain the basic
* divide-and-conquer approach. Here is a class that sums the squares
* of each element of a double array, by subdividing out only the
* right-hand-sides of repeated divisions by two, and keeping track of
* them with a chain of {@code next} references. It uses a dynamic
* threshold based on method {@code getSurplusQueuedTaskCount}, but
* counterbalances potential excess partitioning by directly
* performing leaf actions on unstolen tasks rather than further
* subdividing.
*
* <pre> {@code
* double sumOfSquares(ForkJoinPool pool, double[] array) {
* int n = array.length;
* Applyer a = new Applyer(array, 0, n, null);
* pool.invoke(a);
* return a.result;
* }
*
* class Applyer extends RecursiveAction {
* final double[] array;
* final int lo, hi;
* double result;
* Applyer next; // keeps track of right-hand-side tasks
* Applyer(double[] array, int lo, int hi, Applyer next) {
* this.array = array; this.lo = lo; this.hi = hi;
* this.next = next;
* }
*
* double atLeaf(int l, int h) {
* double sum = 0;
* for (int i = l; i < h; ++i) // perform leftmost base step
* sum += array[i] * array[i];
* return sum;
* }
*
* protected void compute() {
* int l = lo;
* int h = hi;
* Applyer right = null;
* while (h - l > 1 && getSurplusQueuedTaskCount() <= 3) {
* int mid = (l + h) >>> 1;
* right = new Applyer(array, mid, h, right);
* right.fork();
* h = mid;
* }
* double sum = atLeaf(l, h);
* while (right != null) {
* if (right.tryUnfork()) // directly calculate if not stolen
* sum += right.atLeaf(right.lo, right.hi);
* else {
* right.join();
* sum += right.result;
* }
* right = right.next;
* }
* result = sum;
* }
* }}</pre>
*
* @since 1.7
* @author Doug Lea
*/
public abstract class RecursiveAction extends ForkJoinTask<Void> {
private static final long serialVersionUID = 5232453952276485070L;
/**
* The main computation performed by this task.
*/
protected abstract void compute();
/**
* Always returns {@code null}.
*
* @return {@code null} always
*/
public final Void getRawResult() { return null; }
/**
* Requires null completion value.
*/
protected final void setRawResult(Void mustBeNull) { }
/**
* Implements execution conventions for RecursiveActions.
*/
protected final boolean exec() {
compute();
return true;
}
}
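// Illustrative sketch (not part of the original jsr166y source): a minimal
// RecursiveAction that fills an array in parallel, invoked through a ForkJoinPool
// as the class javadoc above describes. The class name and THRESHOLD value are
// hypothetical choices for the example.
class RecursiveActionUsageSketch extends RecursiveAction {
  static final int THRESHOLD = 1 << 12;
  final long[] a; final int lo, hi;
  RecursiveActionUsageSketch(long[] a, int lo, int hi) { this.a = a; this.lo = lo; this.hi = hi; }
  @Override protected void compute() {
    if (hi - lo <= THRESHOLD) {
      for (int i = lo; i < hi; i++) a[i] = i;       // leaf work: fill sequentially
    } else {
      int mid = (lo + hi) >>> 1;                    // split and run both halves in parallel
      invokeAll(new RecursiveActionUsageSketch(a, lo, mid),
                new RecursiveActionUsageSketch(a, mid, hi));
    }
  }
  public static void main(String[] args) {
    long[] a = new long[1 << 20];
    new ForkJoinPool().invoke(new RecursiveActionUsageSketch(a, 0, a.length));
  }
}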
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/jsr166y/RecursiveTask.java
|
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166y;
/**
* A recursive result-bearing {@link ForkJoinTask}.
*
* <p>For a classic example, here is a task computing Fibonacci numbers:
*
* <pre> {@code
* class Fibonacci extends RecursiveTask<Integer> {
* final int n;
* Fibonacci(int n) { this.n = n; }
* Integer compute() {
* if (n <= 1)
* return n;
* Fibonacci f1 = new Fibonacci(n - 1);
* f1.fork();
* Fibonacci f2 = new Fibonacci(n - 2);
* return f2.compute() + f1.join();
* }
* }}</pre>
*
* However, besides being a dumb way to compute Fibonacci functions
* (there is a simple fast linear algorithm that you'd use in
* practice), this is likely to perform poorly because the smallest
* subtasks are too small to be worthwhile splitting up. Instead, as
* is the case for nearly all fork/join applications, you'd pick some
* minimum granularity size (for example 10 here) for which you always
* sequentially solve rather than subdividing.
*
* @since 1.7
* @author Doug Lea
*/
public abstract class RecursiveTask<V> extends ForkJoinTask<V> {
private static final long serialVersionUID = 5232453952276485270L;
/**
* The result of the computation.
*/
V result;
/**
* The main computation performed by this task.
*/
protected abstract V compute();
public final V getRawResult() {
return result;
}
protected final void setRawResult(V value) {
result = value;
}
/**
* Implements execution conventions for RecursiveTask.
*/
protected final boolean exec() {
result = compute();
return true;
}
}
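// Illustrative sketch (not part of the original jsr166y source): the Fibonacci
// example from the javadoc above, rewritten with the sequential cutoff the text
// recommends (here the suggested minimum granularity of 10).
class FibonacciTaskSketch extends RecursiveTask<Integer> {
  final int n;
  FibonacciTaskSketch(int n) { this.n = n; }
  @Override protected Integer compute() {
    if (n <= 10) return seqFib(n);                  // solve small subproblems sequentially
    FibonacciTaskSketch f1 = new FibonacciTaskSketch(n - 1);
    f1.fork();                                      // run the larger subproblem asynchronously
    FibonacciTaskSketch f2 = new FibonacciTaskSketch(n - 2);
    return f2.compute() + f1.join();                // compute the smaller one here, then join
  }
  static int seqFib(int k) { return k <= 1 ? k : seqFib(k - 1) + seqFib(k - 2); }
}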
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/jsr166y/ThreadLocalRandom.java
|
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166y;
import java.util.Random;
/**
* A random number generator isolated to the current thread. Like the
* global {@link java.util.Random} generator used by the {@link
* java.lang.Math} class, a {@code ThreadLocalRandom} is initialized
* with an internally generated seed that may not otherwise be
* modified. When applicable, use of {@code ThreadLocalRandom} rather
* than shared {@code Random} objects in concurrent programs will
* typically encounter much less overhead and contention. Use of
* {@code ThreadLocalRandom} is particularly appropriate when multiple
* tasks (for example, each a {@link ForkJoinTask}) use random numbers
* in parallel in thread pools.
*
* <p>Usages of this class should typically be of the form:
* {@code ThreadLocalRandom.current().nextX(...)} (where
* {@code X} is {@code Int}, {@code Long}, etc).
* When all usages are of this form, it is never possible to
 * accidentally share a {@code ThreadLocalRandom} across multiple threads.
*
* <p>This class also provides additional commonly used bounded random
* generation methods.
*
* @since 1.7
* @author Doug Lea
*/
public class ThreadLocalRandom extends Random {
// same constants as Random, but must be redeclared because private
private static final long multiplier = 0x5DEECE66DL;
private static final long addend = 0xBL;
private static final long mask = (1L << 48) - 1;
/**
* The random seed. We can't use super.seed.
*/
private long rnd;
/**
* Initialization flag to permit calls to setSeed to succeed only
* while executing the Random constructor. We can't allow others
* since it would cause setting seed in one part of a program to
* unintentionally impact other usages by the thread.
*/
boolean initialized;
// Padding to help avoid memory contention among seed updates in
// different TLRs in the common case that they are located near
// each other.
private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
/**
* The actual ThreadLocal
*/
private static final ThreadLocal<ThreadLocalRandom> localRandom =
new ThreadLocal<ThreadLocalRandom>() {
protected ThreadLocalRandom initialValue() {
return new ThreadLocalRandom();
}
};
/**
* Constructor called only by localRandom.initialValue.
*/
ThreadLocalRandom() {
super();
initialized = true;
}
/**
* Returns the current thread's {@code ThreadLocalRandom}.
*
* @return the current thread's {@code ThreadLocalRandom}
*/
public static ThreadLocalRandom current() {
return localRandom.get();
}
/**
* Throws {@code UnsupportedOperationException}. Setting seeds in
* this generator is not supported.
*
* @throws UnsupportedOperationException always
*/
public void setSeed(long seed) {
if (initialized)
throw new UnsupportedOperationException();
rnd = (seed ^ multiplier) & mask;
}
protected int next(int bits) {
rnd = (rnd * multiplier + addend) & mask;
return (int) (rnd >>> (48-bits));
}
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
*
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @throws IllegalArgumentException if least greater than or equal
* to bound
* @return the next value
*/
public int nextInt(int least, int bound) {
if (least >= bound)
throw new IllegalArgumentException();
return nextInt(bound - least) + least;
}
/**
* Returns a pseudorandom, uniformly distributed value
* between 0 (inclusive) and the specified value (exclusive).
*
* @param n the bound on the random number to be returned. Must be
* positive.
* @return the next value
* @throws IllegalArgumentException if n is not positive
*/
public long nextLong(long n) {
if (n <= 0)
throw new IllegalArgumentException("n must be positive");
// Divide n by two until small enough for nextInt. On each
// iteration (at most 31 of them but usually much less),
// randomly choose both whether to include high bit in result
// (offset) and whether to continue with the lower vs upper
// half (which makes a difference only if odd).
long offset = 0;
while (n >= Integer.MAX_VALUE) {
int bits = next(2);
long half = n >>> 1;
long nextn = ((bits & 2) == 0) ? half : n - half;
if ((bits & 1) == 0)
offset += n - nextn;
n = nextn;
}
return offset + nextInt((int) n);
}
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
*
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @return the next value
* @throws IllegalArgumentException if least greater than or equal
* to bound
*/
public long nextLong(long least, long bound) {
if (least >= bound)
throw new IllegalArgumentException();
return nextLong(bound - least) + least;
}
/**
* Returns a pseudorandom, uniformly distributed {@code double} value
* between 0 (inclusive) and the specified value (exclusive).
*
* @param n the bound on the random number to be returned. Must be
* positive.
* @return the next value
* @throws IllegalArgumentException if n is not positive
*/
public double nextDouble(double n) {
if (n <= 0)
throw new IllegalArgumentException("n must be positive");
return nextDouble() * n;
}
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
*
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @return the next value
* @throws IllegalArgumentException if least greater than or equal
* to bound
*/
public double nextDouble(double least, double bound) {
if (least >= bound)
throw new IllegalArgumentException();
return nextDouble() * (bound - least) + least;
}
private static final long serialVersionUID = -5851777807851030925L;
}
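// Illustrative sketch (not part of the original jsr166y source): the usage form the
// javadoc above recommends, ThreadLocalRandom.current().nextX(...), drawing bounded
// values without ever sharing a Random instance across threads.
class ThreadLocalRandomUsageSketch {
  static int rollDie() {
    return ThreadLocalRandom.current().nextInt(1, 7);        // uniform in [1,6]
  }
  static double jitterFactor() {
    return ThreadLocalRandom.current().nextDouble(0.5, 1.5); // uniform in [0.5,1.5)
  }
}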
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/jsr166y/TransferQueue.java
|
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package jsr166y;
import java.util.concurrent.*;
/**
* A {@link BlockingQueue} in which producers may wait for consumers
* to receive elements. A {@code TransferQueue} may be useful for
* example in message passing applications in which producers
* sometimes (using method {@link #transfer}) await receipt of
* elements by consumers invoking {@code take} or {@code poll}, while
* at other times enqueue elements (via method {@code put}) without
* waiting for receipt.
* {@linkplain #tryTransfer(Object) Non-blocking} and
* {@linkplain #tryTransfer(Object,long,TimeUnit) time-out} versions of
* {@code tryTransfer} are also available.
* A {@code TransferQueue} may also be queried, via {@link
* #hasWaitingConsumer}, whether there are any threads waiting for
* items, which is a converse analogy to a {@code peek} operation.
*
* <p>Like other blocking queues, a {@code TransferQueue} may be
* capacity bounded. If so, an attempted transfer operation may
* initially block waiting for available space, and/or subsequently
* block waiting for reception by a consumer. Note that in a queue
* with zero capacity, such as {@link SynchronousQueue}, {@code put}
* and {@code transfer} are effectively synonymous.
*
* <p>This interface is a member of the
* <a href="{@docRoot}/../technotes/guides/collections/index.html">
* Java Collections Framework</a>.
*
* @since 1.7
* @author Doug Lea
* @param <E> the type of elements held in this collection
*/
public interface TransferQueue<E> extends BlockingQueue<E> {
/**
* Transfers the element to a waiting consumer immediately, if possible.
*
* <p>More precisely, transfers the specified element immediately
* if there exists a consumer already waiting to receive it (in
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
* otherwise returning {@code false} without enqueuing the element.
*
* @param e the element to transfer
* @return {@code true} if the element was transferred, else
* {@code false}
* @throws ClassCastException if the class of the specified element
* prevents it from being added to this queue
* @throws NullPointerException if the specified element is null
* @throws IllegalArgumentException if some property of the specified
* element prevents it from being added to this queue
*/
boolean tryTransfer(E e);
/**
* Transfers the element to a consumer, waiting if necessary to do so.
*
* <p>More precisely, transfers the specified element immediately
* if there exists a consumer already waiting to receive it (in
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
* else waits until the element is received by a consumer.
*
* @param e the element to transfer
* @throws InterruptedException if interrupted while waiting,
* in which case the element is not left enqueued
* @throws ClassCastException if the class of the specified element
* prevents it from being added to this queue
* @throws NullPointerException if the specified element is null
* @throws IllegalArgumentException if some property of the specified
* element prevents it from being added to this queue
*/
void transfer(E e) throws InterruptedException;
/**
* Transfers the element to a consumer if it is possible to do so
* before the timeout elapses.
*
* <p>More precisely, transfers the specified element immediately
* if there exists a consumer already waiting to receive it (in
* {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
* else waits until the element is received by a consumer,
* returning {@code false} if the specified wait time elapses
* before the element can be transferred.
*
* @param e the element to transfer
* @param timeout how long to wait before giving up, in units of
* {@code unit}
* @param unit a {@code TimeUnit} determining how to interpret the
* {@code timeout} parameter
* @return {@code true} if successful, or {@code false} if
* the specified waiting time elapses before completion,
* in which case the element is not left enqueued
* @throws InterruptedException if interrupted while waiting,
* in which case the element is not left enqueued
* @throws ClassCastException if the class of the specified element
* prevents it from being added to this queue
* @throws NullPointerException if the specified element is null
* @throws IllegalArgumentException if some property of the specified
* element prevents it from being added to this queue
*/
boolean tryTransfer(E e, long timeout, TimeUnit unit)
throws InterruptedException;
/**
* Returns {@code true} if there is at least one consumer waiting
* to receive an element via {@link #take} or
* timed {@link #poll(long,TimeUnit) poll}.
* The return value represents a momentary state of affairs.
*
* @return {@code true} if there is at least one waiting consumer
*/
boolean hasWaitingConsumer();
/**
* Returns an estimate of the number of consumers waiting to
* receive elements via {@link #take} or timed
* {@link #poll(long,TimeUnit) poll}. The return value is an
* approximation of a momentary state of affairs, that may be
* inaccurate if consumers have completed or given up waiting.
* The value may be useful for monitoring and heuristics, but
* not for synchronization control. Implementations of this
* method are likely to be noticeably slower than those for
* {@link #hasWaitingConsumer}.
*
* @return the number of consumers waiting to receive elements
*/
int getWaitingConsumerCount();
}
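// Illustrative sketch (not part of the original jsr166y source): the hand-off pattern
// the javadoc above describes, assuming this package's LinkedTransferQueue as the
// concrete implementation. Unlike put(), transfer() blocks the producer until a
// consumer has actually received the element.
class TransferQueueUsageSketch {
  public static void main(String[] args) throws InterruptedException {
    final TransferQueue<String> q = new LinkedTransferQueue<String>();
    Thread consumer = new Thread(new Runnable() {
      public void run() {
        try { System.out.println("got " + q.take()); }
        catch (InterruptedException ignore) { }
      }
    });
    consumer.start();
    q.transfer("hello");   // returns only after the consumer has taken the element
    consumer.join();
  }
}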
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/jsr166y/package-info.java
|
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
/**
* Preview versions of classes targeted for Java 7. Includes a
* fine-grained parallel computation framework: ForkJoinTasks and
* their related support classes provide a very efficient basis for
* obtaining platform-independent parallel speed-ups of
* computation-intensive operations. They are not a full substitute
* for the kinds of arbitrary processing supported by Executors or
* Threads. However, when applicable, they typically provide
* significantly greater performance on multiprocessor platforms.
*
* <p>Candidates for fork/join processing mainly include those that
* can be expressed using parallel divide-and-conquer techniques: To
* solve a problem, break it in two (or more) parts, and then solve
* those parts in parallel, continuing on in this way until the
* problem is too small to be broken up, so is solved directly. The
* underlying <em>work-stealing</em> framework makes subtasks
* available to other threads (normally one per CPU), that help
* complete the tasks. In general, the most efficient ForkJoinTasks
* are those that directly implement this algorithmic design pattern.
*/
package jsr166y;
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/AbstractBuildVersion.java
|
package water;
abstract public class AbstractBuildVersion {
abstract public String branchName();
abstract public String lastCommitHash();
abstract public String describe();
abstract public String projectVersion();
abstract public String compiledOn();
abstract public String compiledBy();
public String buildNumber() {
String buildNumber = "(unknown)";
try {
String projectVersion = projectVersion();
buildNumber = projectVersion.split("\\.")[3];
}
catch (Exception xe) {}
return buildNumber;
}
@Override public String toString() {
return "H2O v"+projectVersion()+ " ("+branchName()+" - "+lastCommitHash()+")";
}
/** Dummy version of H2O. */
public static AbstractBuildVersion UNKNOWN_VERSION = new AbstractBuildVersion() {
@Override public String projectVersion() { return "(unknown)"; }
@Override public String lastCommitHash() { return "(unknown)"; }
@Override public String describe() { return "(unknown)"; }
@Override public String compiledOn() { return "(unknown)"; }
@Override public String compiledBy() { return "(unknown)"; }
@Override public String branchName() { return "(unknown)"; }
};
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/AbstractEmbeddedH2OConfig.java
|
package water;
import java.net.InetAddress;
/**
* This class is a small shim between a main java program (such as a
* Hadoop mapper) and an embedded full-capability H2O.
*/
public abstract class AbstractEmbeddedH2OConfig {
/**
* Tell the embedding software that H2O has started an embedded
* web server on an IP and port.
* This may be nonblocking.
*
* @param ip IP address this H2O can be reached at.
* @param port Port this H2O can be reached at (for REST API and browser).
*/
public abstract void notifyAboutEmbeddedWebServerIpPort(InetAddress ip, int port);
/**
* Whether H2O gets a flatfile config from this config object.
* @return true if H2O should query the config object for a flatfile. false otherwise.
*/
public abstract boolean providesFlatfile();
/**
* If configProvidesFlatfile, get it. This may incur a blocking network call.
* This must be called after notifyAboutEmbeddedWebServerIpPort() or the behavior
* will be undefined.
*
 * This method includes its own address, because the config may be building up
* and managing a directory of H2O nodes.
*
 * If this method throws any kind of exception, the node failed to get its config,
* and this H2O is hosed and should exit gracefully.
*
* @return A string with the multi-line flatfile text.
*/
public abstract String fetchFlatfile() throws Exception;
/**
* Tell the embedding software that this H2O instance belongs to
* a cloud of a certain size.
* This may be nonblocking.
*
* @param ip IP address this H2O can be reached at.
* @param port Port this H2O can be reached at (for REST API and browser).
* @param size Number of H2O instances in the cloud.
*/
public abstract void notifyAboutCloudSize(InetAddress ip, int port, int size);
/**
* Tell the embedding software that H2O wants the process to exit.
* This should not return. The embedding software should do any
* required cleanup and then call exit with the status.
*
* @param status Process-level exit status
*/
public abstract void exit (int status);
/**
* Print debug information.
*/
public abstract void print();
}
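// Illustrative sketch (not part of the original H2O source): a minimal, hypothetical
// embedding config that provides no flatfile and simply logs the callbacks. A real
// embedding (e.g. a Hadoop driver) would forward these notifications to the
// surrounding framework instead of printing them.
class EmbeddedH2OConfigSketch extends AbstractEmbeddedH2OConfig {
  @Override public void notifyAboutEmbeddedWebServerIpPort(InetAddress ip, int port) {
    System.out.println("H2O web server listening at " + ip + ":" + port);
  }
  @Override public boolean providesFlatfile() { return false; }      // no flatfile from this config
  @Override public String fetchFlatfile() throws Exception {
    throw new UnsupportedOperationException("no flatfile provided"); // never called: providesFlatfile() is false
  }
  @Override public void notifyAboutCloudSize(InetAddress ip, int port, int size) {
    System.out.println("cloud of size " + size + " formed");
  }
  @Override public void exit(int status) { System.exit(status); }    // let the JVM exit directly
  @Override public void print() { System.out.println("EmbeddedH2OConfigSketch"); }
}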
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Arguments.java
|
package water;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.Arrays;
import water.util.Log;
/**
 * Utility for processing command line arguments.
*
* Simple command line processing. This class provides functionality for parsing
* command line arguments that is coded over and over again in main methods. The
* model is that command line arguments have the form:
*
* <pre>
* option_args* free_form*
* </pre>
*
* where each element in option_args is an option starting with a '-' character
* and each element in free_form is a string. Option arguments have the syntax:
*
* <pre>
* '-'NAME[=VALUE]
* </pre>
*
* where NAME is the option identifier and VALUE is the string argument for that
* option.
* <p>
* An example use of the class is as follows:
*
* <pre>
* static void main(String[] args) {
 *   Arguments cl = new Arguments(args);
 *   if (cl.getValue("verbose") != null) ... ;
 *   String file = cl.get(0);
 *   String path = cl.getValue("classpath");
 * }
* </pre>
*
* @author Jan Vitek
*/
public class Arguments {
static public abstract class Arg {
abstract public String usage();
abstract public boolean validate();
@Override public String toString() {
Field[] fields = getFields(this);
String r="";
for( Field field : fields ){
String name = field.getName();
Class cl = field.getType();
try{
if( cl.isPrimitive() ){
if( cl == Boolean.TYPE ){
boolean curval = field.getBoolean(this);
if( curval ) r += " -"+name;
}
else if( cl == Integer.TYPE ) r+=" -"+name+"="+field.getInt(this);
else if( cl == Float.TYPE ) r+=" -"+name+"="+field.getFloat(this);
else if( cl == Double.TYPE ) r+=" -"+name+"="+field.getDouble(this);
else if( cl == Long.TYPE ) r+=" -"+name+"="+field.getLong(this);
else continue;
} else if( cl == String.class )
if (field.get(this)!=null) r+=" -"+name+"="+field.get(this);
} catch( Exception e ) { Log.err("Argument failed with ",e); }
}
return r;
}
}
static public class MissingArgumentError extends Error {
final String m;
MissingArgumentError(String s) { m = s; }
public String toString() { return ( m != null ) ? m : super.toString(); }
}
/**
* Optional arguments. The instance fields of this class are treated as
* optional arguments, if they appear on the command line they will be
* extracted and the corresponding field will be set to the extracted value.
 * If not found, the field is left untouched (the original value is not
* modified).
*/
static public class Opt extends Arg {
public String usage() { return ""; }
public boolean validate() { return true; }
}
/**
* Required arguments. The instance fields of this class are treated as
 * required arguments: arguments with keywords matching each one of the
 * primitive and string fields of the object must appear on the command line.
 * If they all do, they will be extracted and the corresponding fields will be
 * set to the extracted values. If any one of the fields is missing, a
 * {@link MissingArgumentError} is thrown.
*/
static public class Req extends Arg {
public String usage() { return ""; }
public boolean validate() { return true; }
}
/** Current argument list. The list may grow and shrink as arguments are processed.
*/
private Entry[] commandLineArgs;
/** Create a new CommandLine object with an initial argument array.
* @param args
* array of options and argument that will be parsed.
*/
public Arguments(String[] args) { parse(args); }
/** Create a new CommandLine object with no arguments. */
public Arguments() { parse(new String[0]); }
/**
* Returns the number of remaining command line arguments.
*/
public int size() { return commandLineArgs.length; }
public String get(int i) { return commandLineArgs[i].val; }
/**
 * Add a new argument to this command line. The argument will be parsed and
 * added at the end of the list. Bindings have the form
 * "-name=value"; if the value is empty, the binding is treated as an option.
* Options have the form "-name". All other strings are treated as values.
*
 * @param str the argument token to parse
 * @param next the following command line token; it is consumed as the value if
 *        {@code str} is an option without "=" and {@code next} does not start with '-'
 * @return the number of tokens consumed (1 or 2)
*/
public int addArgument(String str, String next) {
int i = commandLineArgs.length;
int consumed = 1;
commandLineArgs = Arrays.copyOf(commandLineArgs, i + 1);
/*
* Flags have a null string as val and flag of true; Binding have non-empty
* name, a non-null val (possibly ""), and a flag of false; Plain strings
* have an empty name, "", a non-null, non-empty val, and a flag of true;
*/
if( str.startsWith("-") ){
int startOffset = (str.startsWith("--"))? 2 : 1;
String arg = "";
String opt;
boolean flag = false;
int eqPos = str.indexOf("=");
if( eqPos > 0 || (next!=null && !next.startsWith("-"))){
if( eqPos > 0 ){
opt = str.substring(startOffset, eqPos);
arg = str.substring(eqPos + 1);
}else{
opt = str.substring(startOffset);
arg = next;
consumed = 2;
}
}else{
flag = true;
opt = str.substring(startOffset);
}
commandLineArgs[i] = new Entry(opt, arg, flag, i);
return consumed;
}else{
commandLineArgs[i] = new Entry("", str, true, i);
return consumed;
}
}
public <TArg extends Arg> TArg extract(TArg arg) throws MissingArgumentError {
Field[] fields = getFields(arg);
int count = extract(arg, fields);
if( arg instanceof Req && count != fields.length )
throw new MissingArgumentError(arg.usage());
return arg;
}
/**
* Extracts bindings and options; and sets appropriate fields in the
* CommandLineArgument object.
*/
private int extract(Arg arg, Field[] fields) {
int count = 0;
for( Field field : fields ){
String name = field.getName();
Class cl = field.getType();
String opt = getValue(name); // optional value
try{
if( cl.isPrimitive() ){
if( cl == Boolean.TYPE ){
boolean curval = field.getBoolean(arg);
boolean xval = curval;
if( opt != null ) xval = !curval;
if( "1".equals(opt) || "true" .equals(opt) ) xval = true;
if( "0".equals(opt) || "false".equals(opt) ) xval = false;
if( opt != null ) field.setBoolean(arg, xval);
}else if( opt == null || opt.length()==0 ) continue;
else if( cl == Integer.TYPE ) field.setInt(arg, Integer.parseInt(opt));
else if( cl == Float.TYPE ) field.setFloat(arg, Float.parseFloat(opt));
else if( cl == Double.TYPE ) field.setDouble(arg, Double.parseDouble(opt));
else if( cl == Long.TYPE ) field.setLong(arg, Long.parseLong(opt));
else continue;
count++;
}else if( cl == String.class ){
if( opt != null ){
field.set(arg, opt);
count++;
}
}
} catch( Exception e ) { Log.err("Argument failed with ",e); }
}
Arrays.sort(commandLineArgs);
for( int i = 0; i < commandLineArgs.length; i++ )
commandLineArgs[i].position = i;
return count;
}
/**
* Return the value of a binding (e.g. "value" for "-name=value") and the
* empty string "" for an option ("-name" or "-name="). A null value is
* returned if no binding or option is found.
*
* @param name string name of the option or binding
*/
public String getValue(String name) {
for( Entry e : commandLineArgs )
if( name.equals(e.name) ) return e.val;
return System.getProperty("h2o.arg."+name);
}
/**
* Parse the command line arguments and extracts options. The current
* implementation allows the same command line instance to parse several
* argument lists, the results will be merged.
*
* @param s the array of arguments to be parsed
*/
private void parse(String[] s) {
commandLineArgs = new Entry[0];
for( int i = 0; i < s.length; ) {
String next = (i+1<s.length)? s[i+1]: null;
i += addArgument(s[i],next);
}
}
public String toString() {
String[] ss = toStringArray();
String result = "";
for( String s : ss ) result += s+" ";
return result;
}
public String[] toStringArray() {
String[] result = new String[commandLineArgs.length];
for( int i = 0; i < commandLineArgs.length; i++ )
result[i] = commandLineArgs[i].toString();
return result;
}
/**
* Keep only the fields which are either primitive or strings.
*/
static private Field[] getFields(Arg arg) {
Class target_ = arg.getClass();
Field[] fields = new Field[0];
while( target_ != null ){
int flen = fields.length;
Field[] f2 = target_.getDeclaredFields();
fields = Arrays.copyOf(fields,flen+f2.length);
System.arraycopy(f2,0,fields,flen,f2.length);
target_ = target_.getSuperclass();
}
Field[] keep = new Field[fields.length];
int num = 0;
for( Field field : fields ){
field.setAccessible(true);
if( Modifier.isStatic(field.getModifiers()) ) continue;
if( field.getType().isPrimitive() || field.getType() == String.class ) keep[num++] = field;
}
Field[] res = new Field[num];
for( int i = 0; i < num; i++ )
res[i] = keep[i];
return res;
}
/**
* Private class for holding arguments. There are three cases: a flag, a
* binding, or a plain string. - Flags have a null string as val and flag of
* true; - Binding have non-empty name, a non-null val (possibly ""), and a
* flag of false; - Plain strings have an empty name, "", a non-null,
* non-empty val, and a flag of true;
*/
private static class Entry implements Comparable {
//true if this is a flag, i.e. ("-name" or "-name=")
boolean flag;
// option name, -name=value
String name;
// position in the argument list
int position;
// option value, -name=value
String val;
Entry(String _name, String _val, boolean _flag, int _position) {
assert !_name.startsWith("-") && !_name.contains("=");
name = _name; val = _val; flag = _flag; position = _position;
}
public int compareTo(Object o) { return position - ((Entry) o).position; }
public String toString() {
String result = " ";
if( !name.equals("") ) result += "-";
result += name;
if( !flag ) result += "=";
if( val != null ) result += val;
return result;
}
}
}
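// Illustrative sketch (not part of the original H2O source): the Opt/extract pattern
// described above, using a hypothetical option holder. Fields are filled from
// "-port=..." and "-name=..." when present and otherwise keep their defaults.
class ArgumentsUsageSketch {
  static class MyOpts extends Arguments.Opt {
    int port = 54321;    // overridden by -port=...
    String name = null;  // overridden by -name=...
  }
  public static void main(String[] args) {
    Arguments arguments = new Arguments(args);
    MyOpts opts = arguments.extract(new MyOpts());
    System.out.println("port=" + opts.port + " name=" + opts.name);
  }
}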
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Atomic.java
|
package water;
import water.DTask;
import water.H2O.H2OCountedCompleter;
/**
* Atomic update of a Key
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public abstract class Atomic<T extends Atomic> extends DTask {
public Key _key; // Transaction key
public Atomic(){}
public Atomic(H2OCountedCompleter completer){super(completer);}
// User's function to be run atomically. The Key's Value is fetched from the
// home STORE and passed in. The returned Value is atomically installed as
// the new Value (and the function is retried until it runs atomically). The
// original Value is supposed to be read-only. If the original Key misses
// (no Value), one is created with 0 length and wrong Value._type to allow
// the Key to be passed in (as part of the Value).
abstract public Value atomic( Value val );
/** Executed on the transaction key's <em>home</em> node after any successful
* atomic update. Override this if you need to perform some action after
* the update succeeds (eg cleanup).
*/
public void onSuccess( Value old ){}
/** Block until it completes, even if run remotely */
public final T invoke( Key key ) {
RPC<Atomic<T>> rpc = fork(key);
return (T)(rpc == null ? this : rpc.get()); // Block for it
}
// Fork off
public final RPC<Atomic<T>> fork(Key key) {
_key = key;
if( key.home() ) { // Key is home?
compute2(); // Also, run it blocking/now
return null;
} else { // Else run it remotely
return RPC.call(key.home_node(),this);
}
}
// The (remote) workhorse:
@Override public final void compute2( ) {
assert _key.home() : "Atomic on wrong node; SELF="+H2O.SELF+
", key_home="+_key.home_node()+", key_is_home="+_key.home()+", class="+getClass();
Futures fs = new Futures(); // Must block on all invalidates eventually
Value val1 = DKV.get(_key);
while( true ) {
// Run users' function. This is supposed to read-only from val1 and
// return new val2 to atomically install.
Value val2 = atomic(val1);
if( val2 == null ) break; // ABORT: they gave up
assert val1 != val2; // No returning the same Value
// Attempt atomic update
Value res = DKV.DputIfMatch(_key,val2,val1,fs);
if( res == val1 ) { // Success?
onSuccess(val1); // Call user's post-XTN function
fs.blockForPending(); // Block for any pending invalidates on the atomic update
break;
}
val1 = res; // Otherwise try again with the current value
} // and retry
_key = null; // No need for key no more, don't send it back
tryComplete(); // Tell F/J this task is done
}
@Override public byte priority() { return H2O.ATOMIC_PRIORITY; }
}
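// Illustrative sketch (not part of the original H2O source): an Atomic subclass that
// appends one byte to whatever is stored under the transaction key. It assumes the
// Value(Key, byte[]) constructor and memOrLoad() accessor used elsewhere in this code
// base; compute2() above retries atomic() until DputIfMatch succeeds.
class AppendByteAtomicSketch extends Atomic<AppendByteAtomicSketch> {
  byte _b;
  AppendByteAtomicSketch() { }                 // no-arg constructor for deserialization
  AppendByteAtomicSketch(byte b) { _b = b; }
  @Override public Value atomic(Value val) {
    byte[] old = (val == null) ? new byte[0] : val.memOrLoad();   // read-only view of the old bits
    byte[] nu = java.util.Arrays.copyOf(old, old.length + 1);
    nu[old.length] = _b;
    return new Value(_key, nu);                // new Value to be installed atomically
  }
  // Usage sketch (someKey is hypothetical): new AppendByteAtomicSketch((byte)1).invoke(someKey);
}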
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/AutoBuffer.java
|
package water;
import java.io.*;
import java.lang.reflect.Array;
import java.net.*;
import java.nio.*;
import java.nio.channels.*;
import java.util.Random;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import water.util.Log;
/**
* A ByteBuffer backed mixed Input/OutputStream class.
*
 * Reads/writes empty/fill the ByteBuffer as needed. When it is empty/full,
 * we go to the ByteChannel for more/less. Because DirectByteBuffers are
* expensive to make, we keep a few pooled.
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
*/
public class AutoBuffer {
// The direct ByteBuffer for schlorping data about.
// Set to null to indicate the AutoBuffer is closed.
ByteBuffer _bb;
public boolean isClosed() { return _bb == null ; }
// The ByteChannel for moving data in or out. Could be a SocketChannel (for
// a TCP connection) or a FileChannel (spill-to-disk) or a DatagramChannel
// (for a UDP connection). Null on closed AutoBuffers. Null on initial
// remote-writing AutoBuffers which are still deciding UDP vs TCP. Not-null
// for open AutoBuffers doing file i/o or reading any TCP/UDP or having
// written at least one buffer to TCP/UDP.
private ByteChannel _chan;
// If we need a SocketChannel, raise the priority so we get the I/O over
// with. Do not want to have some TCP socket open, blocking the TCP channel
// and then have the thread stalled out. If we raise the priority - be sure
// to lower it again. Note this is for TCP channels ONLY, and only because
// we are blocking another Node with I/O.
private int _oldPrior = -1;
// Where to send or receive data via TCP or UDP (choice made as we discover
// how big the message is); used to lazily create a Channel. If NULL, then
// _chan should be a pre-existing Channel, such as a FileChannel.
final H2ONode _h2o;
// TRUE for read-mode. FALSE for write-mode. Can be flipped for rapid turnaround.
private boolean _read;
// TRUE if this AutoBuffer has never advanced past the first "page" of data.
// The UDP-flavor, port# and task fields are only valid until we read over
// them when flipping the ByteBuffer to the next chunk of data. Used in
// asserts all over the place.
private boolean _firstPage;
// Total size written out from 'new' to 'close'. Only updated when actually
// reading or writing data, or after close(). For profiling only.
int _size, _zeros, _arys;
// More profiling: start->close msec, plus nano's spent in blocking I/O
// calls. The difference between (close-start) and i/o msec is the time the
// i/o thread spends doing other stuff (e.g. allocating Java objects or
// (de)serializing).
long _time_start_ms, _time_close_ms, _time_io_ns;
// I/O persistence flavor: Value.ICE, NFS, HDFS, S3, TCP. Used to record I/O time.
final byte _persist;
// The assumed max UDP packetsize
static final int MTU = 1500-8/*UDP packet header size*/;
// Enable this to test random TCP fails on open or write
static final Random RANDOM_TCP_DROP = null; //new Random();
// Incoming UDP request. Make a read-mode AutoBuffer from the open Channel,
// figure the originating H2ONode from the first few bytes read.
AutoBuffer( DatagramChannel sock ) throws IOException {
_chan = null;
_bb = bbMake();
_read = true; // Reading by default
_firstPage = true;
// Read a packet; can get H2ONode from 'sad'?
Inet4Address addr = null;
SocketAddress sad = sock.receive(_bb);
if( sad instanceof InetSocketAddress ) {
InetAddress address = ((InetSocketAddress) sad).getAddress();
if( address instanceof Inet4Address ) {
addr = (Inet4Address) address;
}
}
_size = _bb.position();
_bb.flip(); // Set limit=amount read, and position==0
if( addr == null ) throw new RuntimeException("Unhandled socket type: " + sad);
// Read Inet from socket, port from the stream, figure out H2ONode
_h2o = H2ONode.intern(addr, getPort());
_firstPage = true;
assert _h2o != null;
    _persist = 0; // No persistence
}
// Incoming TCP request. Make a read-mode AutoBuffer from the open Channel,
// figure the originating H2ONode from the first few bytes read.
AutoBuffer( SocketChannel sock ) throws IOException {
_chan = sock;
raisePriority(); // Make TCP priority high
_bb = bbMake();
_bb.flip();
_read = true; // Reading by default
_firstPage = true;
// Read Inet from socket, port from the stream, figure out H2ONode
_h2o = H2ONode.intern(sock.socket().getInetAddress(), getPort());
_firstPage = true; // Yes, must reset this.
assert _h2o != null && _h2o != H2O.SELF;
_time_start_ms = System.currentTimeMillis();
_persist = Value.TCP;
}
// Make an AutoBuffer to write to an H2ONode. Requests for full buffer will
// open a TCP socket and roll through writing to the target. Smaller
// requests will send via UDP.
AutoBuffer( H2ONode h2o ) {
_bb = bbMake();
    _chan = null; // Channel made lazily only if we write a lot
_h2o = h2o;
_read = false; // Writing by default
_firstPage = true; // Filling first page
assert _h2o != null;
_time_start_ms = System.currentTimeMillis();
_persist = Value.TCP;
}
// Spill-to/from-disk request.
public AutoBuffer( FileChannel fc, boolean read, byte persist ) {
_bb = bbMake();
_chan = fc; // Write to read/write
_h2o = null; // File Channels never have an _h2o
_read = read; // Mostly assert reading vs writing
if( read ) _bb.flip();
_time_start_ms = System.currentTimeMillis();
_persist = persist; // One of Value.ICE, NFS, S3, HDFS
}
// Read from UDP multicast. Same as the byte[]-read variant, except there is an H2O.
AutoBuffer( DatagramPacket pack ) {
_size = pack.getLength();
_bb = ByteBuffer.wrap(pack.getData(), 0, pack.getLength()).order(ByteOrder.nativeOrder());
_bb.position(0);
_read = true;
_firstPage = true;
_chan = null;
_h2o = H2ONode.intern(pack.getAddress(), getPort());
    _persist = 0; // No persistence
}
/** Read from a fixed byte[]; should not be closed. */
public AutoBuffer( byte[] buf ) { this(buf,0); }
/** Read from a fixed byte[]; should not be closed. */
AutoBuffer( byte[] buf, int off ) {
assert buf != null : "null fed to ByteBuffer.wrap";
_bb = ByteBuffer.wrap(buf).order(ByteOrder.nativeOrder());
_bb.position(off);
_chan = null;
_h2o = null;
_read = true;
_firstPage = true;
    _persist = 0; // No persistence
}
/** Write to an ever-expanding byte[]. Instead of calling {@link #close()},
* call {@link #buf()} to retrieve the final byte[].
*/
public AutoBuffer( ) {
_bb = ByteBuffer.wrap(new byte[16]).order(ByteOrder.nativeOrder());
_chan = null;
_h2o = null;
_read = false;
_firstPage = true;
    _persist = 0; // No persistence
}
/** Write to a known sized byte[]. Instead of calling close(), call
* {@link #bufClose()} to retrieve the final byte[].
*/
public AutoBuffer( int len ) {
_bb = ByteBuffer.wrap(MemoryManager.malloc1(len)).order(ByteOrder.nativeOrder());
_chan = null;
_h2o = null;
_read = false;
_firstPage = true;
    _persist = 0; // No persistence
}
@Override public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("[AB ").append(_read ? "read " : "write ");
sb.append(_firstPage?"first ":"2nd ").append(_h2o);
sb.append(" ").append(Value.nameOfPersist(_persist));
if( _bb != null ) sb.append(" 0 <= ").append(_bb.position()).append(" <= ").append(_bb.limit());
if( _bb != null ) sb.append(" <= ").append(_bb.capacity());
return sb.append("]").toString();
}
// Fetch a DBB from an object pool... they are fairly expensive to make
// because a native call is required to get the backing memory. I've
// included BB count tracking code to help track leaks. As of 12/17/2012 the
// leaks are under control, but figure this may happen again so keeping these
// counters around.
private static final boolean DEBUG = Boolean.getBoolean("h2o.find-ByteBuffer-leaks");
private static final AtomicInteger BBMAKE = new AtomicInteger(0);
private static final AtomicInteger BBFREE = new AtomicInteger(0);
private static final AtomicInteger BBCACHE= new AtomicInteger(0);
private static final LinkedBlockingDeque<ByteBuffer> BBS = new LinkedBlockingDeque<ByteBuffer>();
static final int BBSIZE = 64*1024; // Bytebuffer "common big size"
private static void bbstats( AtomicInteger ai ) {
if( !DEBUG ) return;
if( (ai.incrementAndGet()&511)==511 ) {
Log.warn("BB make="+BBMAKE.get()+" free="+BBFREE.get()+" cache="+BBCACHE.get()+" size="+BBS.size());
}
}
private static ByteBuffer bbMake() {
while( true ) { // Repeat loop for DBB OutOfMemory errors
ByteBuffer bb;
try { bb = BBS.pollFirst(0,TimeUnit.SECONDS); }
catch( InterruptedException e ) { throw Log.errRTExcept(e); }
if( bb != null ) {
bbstats(BBCACHE);
return bb;
}
try {
bb = ByteBuffer.allocateDirect(BBSIZE).order(ByteOrder.nativeOrder());
bbstats(BBMAKE);
return bb;
} catch( OutOfMemoryError oome ) {
// java.lang.OutOfMemoryError: Direct buffer memory
if( !"Direct buffer memory".equals(oome.getMessage()) ) throw oome;
System.out.println("Sleeping & retrying");
try { Thread.sleep(100); } catch( InterruptedException ignore ) { }
}
}
}
private static void bbFree(ByteBuffer bb) {
bbstats(BBFREE);
bb.clear();
BBS.offerFirst(bb);
}
private int bbFree() {
if( _bb != null && _bb.isDirect() ) bbFree(_bb);
_bb = null;
return 0; // Flow-coding
}
// You thought TCP was a reliable protocol, right? WRONG! Fails 100% of the
// time under heavy network load. Connection-reset-by-peer & connection
// timeouts abound, even after a socket open and after a 1st successful
// ByteBuffer write. It *appears* that the reader is unaware that a writer
// was told "go ahead and write" by the TCP stack, so all these fails are
// only on the writer-side.
static class AutoBufferException extends RuntimeException {
final IOException _ioe;
AutoBufferException( IOException ioe ) { _ioe = ioe; }
}
// For reads, just assert all was read and close and release resources.
// (release ByteBuffer back to the common pool). For writes, force any final
// bytes out. If the write is to an H2ONode and is short, send via UDP.
// AutoBuffer close calls order; i.e. a reader close() will block until the
// writer does a close().
public final int close() {
//if( _size > 2048 ) System.out.println("Z="+_zeros+" / "+_size+", A="+_arys);
if( isClosed() ) return 0; // Already closed
assert _h2o != null || _chan != null; // Byte-array backed should not be closed
try {
if( _chan == null ) { // No channel?
if( _read ) return 0;
// For small-packet write, send via UDP. Since nothing is sent until
// now, this close() call trivially orders - since the reader will not
// even start (much less close()) until this packet is sent.
if( _bb.position() < MTU ) return udpSend();
}
// Force AutoBuffer 'close' calls to order; i.e. block readers until
// writers do a 'close' - by writing 1 more byte in the close-call which
// the reader will have to wait for.
if( hasTCP() ) { // TCP connection?
try {
if( _read ) { // Reader?
int x = get1(); // Read 1 more byte
assert x == 0xab : "AB.close instead of 0xab sentinel got "+x+", "+this;
assert _chan != null; // chan set by incoming reader, since we KNOW it is a TCP
// Write the reader-handshake-byte.
((SocketChannel)_chan).socket().getOutputStream().write(0xcd);
// do not close actually reader socket; recycle it in TCPReader thread
} else { // Writer?
put1(0xab); // Write one-more byte ; might set _chan from null to not-null
sendPartial(); // Finish partial writes; might set _chan from null to not-null
assert _chan != null; // _chan is set not-null now!
// Read the writer-handshake-byte.
int x = ((SocketChannel)_chan).socket().getInputStream().read();
// either TCP con was dropped or other side closed connection without reading/confirming (e.g. task was cancelled).
if( x == -1 ) throw new IOException("Other side closed connection before handshake byte read");
assert x == 0xcd : "Handshake; writer expected a 0xcd from reader but got "+x;
}
} catch( IOException ioe ) {
try { _chan.close(); } catch( IOException ignore ) {} // Silently close
_chan = null; // No channel now, since i/o error
throw ioe; // Rethrow after close
} finally {
if( !_read ) _h2o.freeTCPSocket((SocketChannel)_chan); // Recycle writable TCP channel
restorePriority(); // And if we raised priority, lower it back
}
} else { // FileChannel
if( !_read ) sendPartial(); // Finish partial file-system writes
_chan.close();
_chan = null; // Closed file channel
}
} catch( IOException e ) { // Dunno how to handle so crash-n-burn
throw new AutoBufferException(e);
} finally {
bbFree();
_time_close_ms = System.currentTimeMillis();
TimeLine.record_IOclose(this,_persist); // Profile AutoBuffer connections
assert isClosed();
}
return 0;
}
// Need a sock for a big read or write operation.
// See if we got one already, else open a new socket.
private void tcpOpen() throws IOException {
assert _firstPage && _bb.limit() >= 1+2+4; // At least something written
assert _chan == null;
assert _bb.position()==0;
_chan = _h2o.getTCPSocket();
raisePriority();
}
  // Just close the channel here without reading anything. Without the task
  // object at hand we do not know how many bytes we should read from the
  // channel. And since the other side will try to read confirmation from us
  // before closing the channel, we cannot read till the end. So we just close
  // the channel and let the other side deal with it and figure out that the
  // task has been cancelled (still sending ack ack back).
void drainClose() {
if( isClosed() ) return; // Already closed
assert _h2o != null || _chan != null; // Byte-array backed should not be closed
if( _chan != null ) { // Channel assumed sick from prior IOException
ByteChannel chan = _chan; // Read before closing
try { chan.close(); } catch( IOException ignore ) {} // Silently close
_chan = null; // No channel now!
if( !_read && chan instanceof SocketChannel) _h2o.freeTCPSocket((SocketChannel)chan); // Recycle writable TCP channel
}
restorePriority(); // And if we raised priority, lower it back
bbFree();
_time_close_ms = System.currentTimeMillis();
TimeLine.record_IOclose(this,_persist); // Profile AutoBuffer connections
assert isClosed();
}
// True if we opened a TCP channel, or will open one to close-and-send
boolean hasTCP() { assert !isClosed(); return _chan instanceof SocketChannel || (_h2o!=null && _bb.position() >= MTU); }
// True if we are in read-mode
boolean readMode() { return _read; }
// Size in bytes sent, after a close()
int size() { return _size; }
int zeros() { return _zeros; }
// Available bytes in this buffer to read
public int position () { return _bb.position (); }
public void position(int pos) { _bb.position(pos); }
// Return byte[] from a writable AutoBuffer
public final byte[] buf() {
assert _h2o==null && _chan==null && !_read && !_bb.isDirect();
return MemoryManager.arrayCopyOfRange(_bb.array(), _bb.arrayOffset(), _bb.position());
}
public final byte[] bufClose() {
byte[] res = _bb.array();
bbFree();
return res;
}
final boolean eof() {
assert _h2o==null && _chan==null;
return _bb.position()==_bb.limit();
}
// For TCP sockets ONLY, raise the thread priority. We assume we are
// blocking other Nodes with our network I/O, so try to get the I/O
// over with.
private void raisePriority() {
if(_oldPrior == -1){
assert _chan instanceof SocketChannel;
_oldPrior = Thread.currentThread().getPriority();
Thread.currentThread().setPriority(Thread.MAX_PRIORITY-1);
}
}
private void restorePriority() {
if( _oldPrior == -1 ) return;
Thread.currentThread().setPriority(_oldPrior);
_oldPrior = -1;
}
// Send via UDP socket. Unlike eg TCP sockets, we only need one for sending
// so we keep a global one. Also, we do not close it when done, and we do
// not connect it up-front to a target - but send the entire packet right now.
private int udpSend() throws IOException {
assert _chan == null;
TimeLine.record_send(this,false);
_size += _bb.position();
_bb.flip(); // Flip for sending
if( _h2o==H2O.SELF ) { // SELF-send is the multi-cast signal
H2O.multicast(_bb);
} else { // Else single-cast send
H2O.CLOUD_DGRAM.send(_bb, _h2o._key);
}
return 0; // Flow-coding
}
// Flip to write-mode
AutoBuffer clearForWriting() {
assert _read;
_read = false;
_bb.clear();
_firstPage = true;
return this;
}
// Flip to read-mode
public AutoBuffer flipForReading() {
assert !_read;
_read = true;
_bb.flip();
_firstPage = true;
return this;
}
public void skip( int sz ) { assert sz <= _bb.remaining() : "Requested skip: "+sz+" bytes, BUT AB contains only: "+_bb.remaining() + " bytes"; _bb.position(_bb.position()+sz); }
/** Ensure the buffer has space for sz more bytes */
private ByteBuffer getSp( int sz ) { return sz > _bb.remaining() ? getImpl(sz) : _bb; }
/** Ensure buffer has at least sz bytes in it.
* - Also, set position just past this limit for future reading. */
private ByteBuffer getSz(int sz) {
assert _firstPage : "getSz() is only valid for early UDP bytes";
if( sz > _bb.limit() ) getImpl(sz);
_bb.position(sz);
return _bb;
}
private ByteBuffer getImpl( int sz ) {
assert _read : "Reading from a buffer in write mode";
    assert _chan != null : "Read too much data from a byte[] backed buffer, AB="+this;
_bb.compact(); // Move remaining unread bytes to start of buffer; prep for reading
    // It's got to fit or we asked for too much
assert _bb.position()+sz <= _bb.capacity() : "("+_bb.position()+"+"+sz+" <= "+_bb.capacity()+")";
long ns = System.nanoTime();
while( _bb.position() < sz ) { // Read until we got enuf
try {
int res = _chan.read(_bb); // Read more
// Readers are supposed to be strongly typed and read the exact expected bytes.
// However, if a TCP connection fails mid-read we'll get a short-read.
// This is indistinguishable from a mis-alignment between the writer and reader!
if( res == -1 )
throw new AutoBufferException(new EOFException("Reading "+sz+" bytes, AB="+this));
if( res == 0 ) throw new RuntimeException("Reading zero bytes - so no progress?");
_size += res; // What we read
} catch( IOException e ) { // Dunno how to handle so crash-n-burn
// Linux/Ubuntu message for a reset-channel
if( e.getMessage().equals("An existing connection was forcibly closed by the remote host") )
throw new AutoBufferException(e);
// Windows message for a reset-channel
if( e.getMessage().equals("An established connection was aborted by the software in your host machine") )
throw new AutoBufferException(e);
throw Log.errRTExcept(e);
}
}
_time_io_ns += (System.nanoTime()-ns);
_bb.flip(); // Prep for handing out bytes
//for( int i=0; i < _bb.limit(); i++ ) if( _bb.get(i)==0 ) _zeros++;
_firstPage = false; // First page of data is gone gone gone
return _bb;
}
/** Put as needed to keep from overflowing the ByteBuffer. */
private ByteBuffer putSp( int sz ) {
assert !_read;
if( sz <= _bb.remaining() ) return _bb;
return sendPartial();
}
// Do something with partial results, because the ByteBuffer is full.
// If we are byte[] backed, double the backing array size.
// If we are doing I/O, ship the bytes we have now and flip the ByteBuffer.
private ByteBuffer sendPartial() {
// Writing into an expanding byte[]?
if( _h2o==null && _chan == null ) {
// This is a byte[] backed buffer; expand the backing byte[].
byte[] ary = _bb.array();
int newlen = ary.length<<1; // New size is 2x old size
int oldpos = _bb.position();
_bb = ByteBuffer.wrap(MemoryManager.arrayCopyOfRange(ary,0,newlen),oldpos,newlen-oldpos)
.order(ByteOrder.nativeOrder());
return _bb;
}
// Doing I/O with the full ByteBuffer - ship partial results
_size += _bb.position();
if( _chan == null )
TimeLine.record_send(this,true);
_bb.flip(); // Prep for writing.
try {
if( _chan == null )
tcpOpen(); // This is a big operation. Open a TCP socket as-needed.
//for( int i=0; i < _bb.limit(); i++ ) if( _bb.get(i)==0 ) _zeros++;
long ns = System.nanoTime();
while( _bb.hasRemaining() ) {
_chan.write(_bb);
if( RANDOM_TCP_DROP != null &&_chan instanceof SocketChannel && RANDOM_TCP_DROP.nextInt(100) == 0 )
throw new IOException("Random TCP Write Fail");
}
_time_io_ns += (System.nanoTime()-ns);
} catch( IOException e ) { // Some kind of TCP fail?
// Change to an unchecked exception (so we don't have to annotate every
// frick'n put1/put2/put4/read/write call). Retry & recovery happens at
// a higher level. AutoBuffers are used for many things including e.g.
// disk i/o & UDP writes; this exception only happens on a failed TCP
// write - and we don't want to make the other AutoBuffer users have to
// declare (and then ignore) this exception.
throw new AutoBufferException(e);
}
if( _bb.capacity() < 16*1024 ) _bb = bbMake();
_firstPage = false;
_bb.clear();
return _bb;
}
public String getStr(int off, int len) {
return new String(_bb.array(), _bb.arrayOffset()+off, len);
}
// -----------------------------------------------
// Utility functions to get various Java primitives
public boolean getZ() { return get1()!=0; }
public int get1 () { return getSp(1).get ()&0xFF; }
public char get2 () { return getSp(2).getChar (); }
public int get4 () { return getSp(4).getInt (); }
public float get4f() { return getSp(4).getFloat (); }
public long get8 () { return getSp(8).getLong (); }
public double get8d() { return getSp(8).getDouble(); }
public int get3() {
return (0xff & get1()) << 0 |
(0xff & get1()) << 8 |
(0xff & get1()) << 16;
}
public AutoBuffer put3( int x ) {
assert (-1<<24) <= x && x < (1<<24);
return put1((x >> 0)&0xFF).put1((x >> 8)&0xFF).put1(x >> 16);
}
public int get1 (int off) { return _bb.get (off)&0xFF; }
public char get2 (int off) { return _bb.getChar (off); }
public int get4 (int off) { return _bb.getInt (off); }
public float get4f(int off) { return _bb.getFloat (off); }
public long get8 (int off) { return _bb.getLong (off); }
public double get8d(int off) { return _bb.getDouble(off); }
public AutoBuffer put1 (int off, int v) { _bb.put (off, (byte)(v&0xFF)); return this; }
public AutoBuffer put2 (int off, char v) { _bb.putChar (off, v); return this; }
public AutoBuffer put2 (int off, short v) { _bb.putShort (off, v); return this; }
public AutoBuffer put4 (int off, int v) { _bb.putInt (off, v); return this; }
public AutoBuffer put4f(int off, float v) { _bb.putFloat (off, v); return this; }
public AutoBuffer put8 (int off, long v) { _bb.putLong (off, v); return this; }
public AutoBuffer put8d(int off, double v) { _bb.putDouble(off, v); return this; }
public AutoBuffer putZ (boolean b){ return put1(b?1:0); }
public AutoBuffer put1 ( int b) { assert b >= -128 && b <= 255 : ""+b+" is not a byte";
putSp(1).put((byte)b); return this; }
public AutoBuffer put2 ( char c) { putSp(2).putChar (c); return this; }
public AutoBuffer put2 ( short s) { putSp(2).putShort (s); return this; }
public AutoBuffer put4 ( int i) { putSp(4).putInt (i); return this; }
public AutoBuffer put4f( float f) { putSp(4).putFloat (f); return this; }
public AutoBuffer put8 ( long l) { putSp(8).putLong (l); return this; }
public AutoBuffer put8d(double d) { putSp(8).putDouble(d); return this; }
public AutoBuffer put(Freezable f) {
if( f == null ) return put2(TypeMap.NULL);
assert f.frozenType() > 0 : "No TypeMap for "+f.getClass().getName();
put2((short)f.frozenType());
return f.write(this);
}
public AutoBuffer put(Iced f) {
if( f == null ) return put2(TypeMap.NULL);
assert f.frozenType() > 0;
put2((short)f.frozenType());
return f.write(this);
}
  // Put a (compressed) integer. Specifically, values in the range -1 to 252
  // take 1 byte, values in the short range take 1+2 bytes, and all remaining
  // int values take 1+4 bytes. This compression is optimized for small
  // integers (including -1, which is often used as an "array is null" flag
  // when passing the array length).
AutoBuffer putInt( int x ) {
if( 0 <= (x+1)&& (x+1) <= 253 ) return put1(x+1);
if( Short.MIN_VALUE <= x && x <= Short.MAX_VALUE ) return put1(255).put2((short)x);
return put1(254).put4(x);
}
// Get a (compressed) integer. See above for the compression strategy and reasoning.
int getInt( ) {
int x = get1();
if( x <= 253 ) return x-1;
if( x==255 ) return (short)get2();
assert x==254;
return get4();
}
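  // Illustrative sketch (not part of the original H2O source): the byte costs of the
  // compressed-integer scheme above on a few concrete values. The helper is
  // hypothetical and unused; it only documents what putInt emits.
  private static void putIntLayoutSketch(AutoBuffer ab) {
    ab.putInt(-1);       // 1 byte : 0x00 (the common "array is null" flag)
    ab.putInt(5);        // 1 byte : 0x06 (value biased by +1)
    ab.putInt(1000);     // 3 bytes: marker 0xFF, then the short 1000
    ab.putInt(1 << 20);  // 5 bytes: marker 0xFE, then the int 1048576
  }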
// Put a zero-compressed array. Compression is:
// If null : putInt(-1)
// Else
// putInt(# of leading nulls)
// putInt(# of non-nulls)
// If # of non-nulls is > 0, putInt( # of trailing nulls)
long putZA( Object[] A ) {
if( A==null ) { putInt(-1); return 0; }
int x=0; for( ; x<A.length; x++ ) if( A[x ]!=null ) break;
int y=A.length; for( ; y>x; y-- ) if( A[y-1]!=null ) break;
putInt(x); // Leading zeros to skip
putInt(y-x); // Mixed non-zero guts in middle
if( y > x ) // If any trailing nulls
putInt(A.length-y); // Trailing zeros
return ((long)x<<32)|(y-x); // Return both leading zeros, and middle non-zeros
}
// Get the lengths of a zero-compressed array.
// Returns -1 if null.
// Returns a long of (leading zeros | middle non-zeros).
// If there are non-zeros, caller has to read the trailing zero-length.
long getZA( ) {
int x=getInt(); // Length of leading zeros
if( x == -1 ) return -1; // or a null
int nz=getInt(); // Non-zero in the middle
return ((long)x<<32)|(long)nz; // Return both ints
}
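  // Illustrative sketch (not part of the original H2O source): the zero-compression
  // header above on a concrete array. For {null, null, a, b, null} putZA writes the
  // ints 2 (leading nulls), 2 (middle non-nulls) and 1 (trailing nulls) and returns
  // (2L<<32)|2, so the caller then serializes only elements 2 and 3.
  private long putZALayoutSketch(Object a, Object b) {
    return putZA(new Object[]{ null, null, a, b, null });
  }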
public AutoBuffer putA(Iced[] fs) {
_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) put(fs[i]);
return this;
}
public AutoBuffer putAA(Iced[][] fs) {
_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA(fs[i]);
return this;
}
public AutoBuffer putAAA(Iced[][][] fs) {
_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA(fs[i]);
return this;
}
public AutoBuffer putA(Freezable[] fs) {
_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) put(fs[i]);
return this;
}
public <T extends Freezable> T get(Class<T> t) {
short id = (short)get2();
if( id == TypeMap.NULL ) return null;
assert id > 0 : "Bad type id "+id;
return TypeMap.newFreezable(id).read(this);
}
public <T extends Iced> T get() {
short id = (short)get2();
if( id == TypeMap.NULL ) return null;
assert id > 0 : "Bad type id "+id;
return TypeMap.newInstance(id).read(this);
}
public <T extends Freezable> T[] getA(Class<T> tc) {
_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
T[] ts = (T[]) Array.newInstance(tc, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = get(tc);
return ts;
}
public <T extends Iced> T[][] getAA(Class<T> tc) {
_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
Class<T[]> tcA = (Class<T[]>) Array.newInstance(tc, 0).getClass();
T[][] ts = (T[][]) Array.newInstance(tcA, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getA(tc);
return ts;
}
public <T extends Iced> T[][][] getAAA(Class<T> tc) {
_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
Class<T[] > tcA = (Class<T[] >) Array.newInstance(tc , 0).getClass();
Class<T[][]> tcAA = (Class<T[][]>) Array.newInstance(tcA, 0).getClass();
T[][][] ts = (T[][][]) Array.newInstance(tcAA, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getAA(tc);
return ts;
}
public AutoBuffer putAStr(String[] fs) {
_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putStr(fs[i]);
return this;
}
public String[] getAStr() {
_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
String[] ts = new String[x+y+z];
for( int i = x; i < x+y; ++i ) ts[i] = getStr();
return ts;
}
public AutoBuffer putAAStr(String[][] fs) {
_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAStr(fs[i]);
return this;
}
public String[][] getAAStr() {
_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
String[][] ts = new String[x+y+z][];
for( int i = x; i < x+y; ++i ) ts[i] = getAStr();
return ts;
}
// Read the smaller of _bb.remaining() and len into buf.
// Return bytes read, which could be zero.
public int read( byte[] buf, int off, int len ) {
int sz = Math.min(_bb.remaining(),len);
_bb.get(buf,off,sz);
return sz;
}
// -----------------------------------------------
// Utility functions to handle common UDP packet tasks.
// Get the 1st control byte
int getCtrl( ) { return getSz(1).get(0)&0xFF; }
// Get the port in next 2 bytes
int getPort( ) { return getSz(1+2).getChar(1); }
// Get the task# in the next 4 bytes
int getTask( ) { return getSz(1+2+4).getInt(1+2); }
// Get the flag in the next 1 byte
int getFlag( ) { return getSz(1+2+4+1).get(1+2+4); }
// Set the ctrl, port, task. Ready to write more bytes afterwards
AutoBuffer putUdp (UDP.udp type) {
assert _bb.position()==0;
putSp(1+2);
_bb.put ((byte)type.ordinal());
_bb.putChar((char)H2O.UDP_PORT ); // Outgoing port is always the sender's (me) port
assert _bb.position()==1+2;
return this;
}
AutoBuffer putTask(UDP.udp type, int tasknum) {
return putUdp(type).put4(tasknum);
}
AutoBuffer putTask(int ctrl, int tasknum) {
assert _bb.position()==0;
putSp(1+2+4);
_bb.put((byte)ctrl).putChar((char)H2O.UDP_PORT).putInt(tasknum);
return this;
}
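  // Resulting packet header layout (byte offsets), matching the getCtrl/getPort/
  // getTask/getFlag readers above:
  //   offset 0 : 1 byte  - control/type byte (the UDP.udp ordinal)
  //   offset 1 : 2 bytes - the sender's UDP port
  //   offset 3 : 4 bytes - task number (putTask variants only)
  //   offset 7 : 1 byte  - optional flag byte
  // Any payload written afterwards simply follows the header.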
// -----------------------------------------------
// Utility functions to read & write arrays
public byte[] getA1( ) {
_arys++;
int len = getInt();
return len == -1 ? null : getA1(len);
}
public byte[] getA1( int len ) {
byte[] buf = MemoryManager.malloc1(len);
int sofar = 0;
while( sofar < len ) {
int more = Math.min(_bb.remaining(), len - sofar);
_bb.get(buf, sofar, more);
sofar += more;
if( sofar < len ) getSp(Math.min(_bb.capacity(), len-sofar));
}
return buf;
}
public short[] getA2( ) {
_arys++;
int len = getInt(); if( len == -1 ) return null;
short[] buf = MemoryManager.malloc2(len);
int sofar = 0;
while( sofar < buf.length ) {
ShortBuffer as = _bb.asShortBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*2);
if( sofar < len ) getSp(Math.min(_bb.capacity()-1, (len-sofar)*2));
}
return buf;
}
public int[] getA4( ) {
_arys++;
int len = getInt(); if( len == -1 ) return null;
int[] buf = MemoryManager.malloc4(len);
int sofar = 0;
while( sofar < buf.length ) {
IntBuffer as = _bb.asIntBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*4);
if( sofar < len ) getSp(Math.min(_bb.capacity()-3, (len-sofar)*4));
}
return buf;
}
public float[] getA4f( ) {
_arys++;
int len = getInt(); if( len == -1 ) return null;
float[] buf = MemoryManager.malloc4f(len);
int sofar = 0;
while( sofar < buf.length ) {
FloatBuffer as = _bb.asFloatBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*4);
if( sofar < len ) getSp(Math.min(_bb.capacity()-3, (len-sofar)*4));
}
return buf;
}
public long[] getA8( ) {
_arys++;
// Get the lengths of lead & trailing zero sections, and the non-zero
// middle section.
int x = getInt(); if( x == -1 ) return null;
int y = getInt(); // Non-zero in the middle
int z = y==0 ? 0 : getInt();// Trailing zeros
long[] buf = MemoryManager.malloc8(x+y+z);
switch( get1() ) { // 1,2,4 or 8 for how the middle section is passed
case 1: for( int i=x; i<x+y; i++ ) buf[i] = get1(); return buf;
case 2: for( int i=x; i<x+y; i++ ) buf[i] = (short)get2(); return buf;
case 4: for( int i=x; i<x+y; i++ ) buf[i] = get4(); return buf;
case 8: break;
default: throw H2O.fail();
}
int sofar = x;
while( sofar < x+y ) {
LongBuffer as = _bb.asLongBuffer();
int more = Math.min(as.remaining(), x+y - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*8);
if( sofar < x+y ) getSp(Math.min(_bb.capacity()-7, (x+y-sofar)*8));
}
return buf;
}
public double[] getA8d( ) {
_arys++;
int len = getInt(); if( len == -1 ) return null;
double[] buf = MemoryManager.malloc8d(len);
int sofar = 0;
while( sofar < len ) {
DoubleBuffer as = _bb.asDoubleBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*8);
if( sofar < len ) getSp(Math.min(_bb.capacity()-7, (len-sofar)*8));
}
return buf;
}
public byte[][] getAA1( ) {
_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
byte[][] ary = new byte[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA1();
return ary;
}
public short[][] getAA2( ) {
_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
short[][] ary = new short[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA2();
return ary;
}
public int[][] getAA4( ) {
_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
int[][] ary = new int[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA4();
return ary;
}
public float[][] getAA4f( ) {
_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
float[][] ary = new float[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA4f();
return ary;
}
public long[][] getAA8( ) {
_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
long[][] ary = new long[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA8();
return ary;
}
public double[][] getAA8d( ) {
_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
double[][] ary = new double[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA8d();
return ary;
}
public int[][][] getAAA4( ) {
_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
int[][][] ary = new int[x+y+z][][];
for( int i=x; i<x+y; i++ ) ary[i] = getAA4();
return ary;
}
public long[][][] getAAA8( ) {
_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
long[][][] ary = new long[x+y+z][][];
for( int i=x; i<x+y; i++ ) ary[i] = getAA8();
return ary;
}
public double[][][] getAAA8d( ) {
_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
double[][][] ary = new double[x+y+z][][];
for( int i=x; i<x+y; i++ ) ary[i] = getAA8d();
return ary;
}
public String getStr( ) {
int len = getInt();
return len == -1 ? null : new String(getA1(len));
}
public AutoBuffer putA1( byte[] ary ) {
_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
return putA1(ary,ary.length);
}
public AutoBuffer putA1( byte[] ary, int length ) { return putA1(ary,0,length); }
public AutoBuffer putA1( byte[] ary, int sofar, int length ) {
while( sofar < length ) {
int len = Math.min(length - sofar, _bb.remaining());
_bb.put(ary, sofar, len);
sofar += len;
if( sofar < length ) sendPartial();
}
return this;
}
AutoBuffer putA2( short[] ary ) {
_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
int sofar = 0;
while( sofar < ary.length ) {
ShortBuffer sb = _bb.asShortBuffer();
int len = Math.min(ary.length - sofar, sb.remaining());
sb.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + sb.position()*2);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putA4( int[] ary ) {
_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
int sofar = 0;
while( sofar < ary.length ) {
IntBuffer sb = _bb.asIntBuffer();
int len = Math.min(ary.length - sofar, sb.remaining());
sb.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + sb.position()*4);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putA8( long[] ary ) {
_arys++;
if( ary == null ) return putInt(-1);
// Trim leading & trailing zeros. Pass along the length of leading &
// trailing zero sections, and the non-zero section in the middle.
int x=0; for( ; x<ary.length; x++ ) if( ary[x ]!=0 ) break;
int y=ary.length; for( ; y>x; y-- ) if( ary[y-1]!=0 ) break;
int nzlen = y-x;
putInt(x);
putInt(nzlen);
    if( nzlen > 0 )             // Only if the middle is non-empty
      putInt(ary.length-y);     // Trailing zeros
// Size trim the NZ section: pass as bytes or shorts if possible.
long min=Long.MAX_VALUE, max=Long.MIN_VALUE;
for( int i=x; i<y; i++ ) { if( ary[i]<min ) min=ary[i]; if( ary[i]>max ) max=ary[i]; }
if( 0 <= min && max < 256 ) { // Ship as unsigned bytes
put1(1); for( int i=x; i<y; i++ ) put1((int)ary[i]);
return this;
}
if( Short.MIN_VALUE <= min && max < Short.MAX_VALUE ) { // Ship as shorts
put1(2); for( int i=x; i<y; i++ ) put2((short)ary[i]);
return this;
}
if( Integer.MIN_VALUE <= min && max < Integer.MAX_VALUE ) { // Ship as ints
put1(4); for( int i=x; i<y; i++ ) put4((int)ary[i]);
return this;
}
put1(8); // Ship as full longs
int sofar = x;
while( sofar < y ) {
LongBuffer sb = _bb.asLongBuffer();
int len = Math.min(y - sofar, sb.remaining());
sb.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + sb.position()*8);
if( sofar < y ) sendPartial();
}
return this;
}
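  // For example, putA8(new long[]{0, 0, 7, 300, 0}) writes the zero-compression counts
  // (2 leading, 2 non-zero, 1 trailing), the width byte 2, and then 7 and 300 as
  // shorts; getA8() above reads the width byte back and widens each element to a long.
  // An all-zero array ships only the leading/middle counts plus a width byte, with no
  // element data at all.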
public AutoBuffer putA4f( float[] ary ) {
_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
int sofar = 0;
while( sofar < ary.length ) {
FloatBuffer sb = _bb.asFloatBuffer();
int len = Math.min(ary.length - sofar, sb.remaining());
sb.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + sb.position()*4);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putA8d( double[] ary ) {
_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
int sofar = 0;
while( sofar < ary.length ) {
DoubleBuffer sb = _bb.asDoubleBuffer();
int len = Math.min(ary.length - sofar, sb.remaining());
sb.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + sb.position()*8);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putAA1( byte[][] ary ) {
_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA1(ary[i]);
return this;
}
AutoBuffer putAA2( short[][] ary ) {
_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA2(ary[i]);
return this;
}
public AutoBuffer putAA4( int[][] ary ) {
_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA4(ary[i]);
return this;
}
public AutoBuffer putAA4f( float[][] ary ) {
_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA4f(ary[i]);
return this;
}
public AutoBuffer putAA8( long[][] ary ) {
_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA8(ary[i]);
return this;
}
public AutoBuffer putAA8d( double[][] ary ) {
_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA8d(ary[i]);
return this;
}
AutoBuffer putAAA4( int[][][] ary ) {
_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA4(ary[i]);
return this;
}
public AutoBuffer putAAA8( long[][][] ary ) {
_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA8(ary[i]);
return this;
}
public AutoBuffer putAAA8d( double[][][] ary ) {
_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA8d(ary[i]);
return this;
}
// Put a String as bytes (not chars!)
@SuppressWarnings("deprecation")
public AutoBuffer putStr( String s ) {
if( s==null ) return putInt(-1);
// Use the explicit getBytes instead of the default no-arg one, to avoid
    // the overhead of going in and out of a charset decoder.
byte[] buf = MemoryManager.malloc1(s.length());
s.getBytes(0,buf.length,buf,0);
return putA1(buf);
}
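  // Note: the deprecated getBytes(int,int,byte[],int) call above keeps only the low
  // byte of each char, so putStr/getStr round-trip cleanly for ASCII content but are
  // lossy for other characters. For example, putStr("abc") writes putInt(3) followed
  // by the bytes 'a','b','c', and getStr() rebuilds the String from those bytes.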
public AutoBuffer putEnum( Enum x ) {
return put1(x==null ? -1 : x.ordinal());
}
public AutoBuffer copyArrayFrom(int offset, AutoBuffer ab, int abOff, int len) {
byte[] dst = _bb.array();
offset += _bb.arrayOffset();
byte[] src = ab._bb.array();
abOff += ab._bb.arrayOffset();
System.arraycopy(src, abOff, dst, offset, len);
_bb.position(_bb.position()+len); // Bump dest buffer offset
return this;
}
public void shift(int source, int target, int length) {
System.arraycopy(_bb.array(), source, _bb.array(), target, length);
}
// ==========================================================================
// JSON AutoBuffer printers
public AutoBuffer putStr2( String s ) {
byte[] b = s.getBytes();
int off=0;
for( int i=0; i<b.length; i++ ) {
if( b[i] == '\\' || b[i] == '"') { // Double up backslashes, escape quotes
putA1(b,off,i); // Everything so far (no backslashes)
put1('\\'); // The extra backslash
off=i; // Advance the "so far" variable
}
// Replace embedded newline & tab with quoted newlines
if( b[i] == '\n' ) { putA1(b,off,i); put1('\\'); put1('n'); off=i+1; }
if( b[i] == '\t' ) { putA1(b,off,i); put1('\\'); put1('t'); off=i+1; }
}
return putA1(b,off,b.length);
}
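  // For example, putStr2("say \"hi\"\n") emits the characters: say \"hi\"\n  - each
  // embedded double quote gains a leading backslash and the newline becomes the two
  // characters backslash and 'n', so the result can be dropped directly inside a JSON
  // string literal.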
public AutoBuffer putNULL( ) { return put1('n').put1('u').put1('l').put1('l'); }
public AutoBuffer putJSONStr( String s ) {
return s==null ? putNULL() : put1('"').putStr2(s).put1('"');
}
public AutoBuffer putJSONStr( String name, String value ) {
return putJSONStr(name).put1(':').putJSONStr(value);
}
public AutoBuffer putJSONAStr(String name, String[] fs) {
putJSONStr(name).put1(':');
return putJSONAStr(fs);
}
public AutoBuffer putJSONAStr(String[] fs) {
if( fs == null ) return putNULL();
put1('[');
for( int i=0; i<fs.length; i++ ) {
if( i>0 ) put1(',');
putJSONStr(fs[i]);
}
return put1(']');
}
public AutoBuffer putJSONAAStr( String name, String[][] a ) {
putJSONStr(name).put1(':');
if( a == null ) return putNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONAStr(a[i]);
}
return put1(']');
}
public AutoBuffer putJSON( Iced ice ) {
return ice == null ? putNULL() : ice.writeJSON(this);
}
public AutoBuffer putJSONA( Iced fs[] ) {
if( fs == null ) return putNULL();
put1('[');
for( int i=0; i<fs.length; i++ ) {
if( i>0 ) put1(',');
putJSON(fs[i]);
}
return put1(']');
}
public AutoBuffer putJSONAA( Iced fs[][] ) {
if( fs == null ) return putNULL();
put1('[');
for( int i=0; i<fs.length; i++ ) {
if( i>0 ) put1(',');
putJSONA(fs[i]);
}
return put1(']');
}
public AutoBuffer putJSONZ( String name, boolean value ) {
putJSONStr(name).put1(':');
putJSONStr("" + value);
return this;
}
public AutoBuffer putJSON1( byte b ) { return putJSON4(b); }
public AutoBuffer putJSONA1( byte ary[] ) {
if( ary == null ) return putNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSON1(ary[i]);
}
return put1(']');
}
public AutoBuffer putJSONAA1(byte ary[][]) {
if( ary == null ) return putNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONA1(ary[i]);
}
return put1(']');
}
public AutoBuffer putJSONAA1(String name,byte ary[][]) {
return putJSONStr(name).put1(':').putJSONAA1(ary);
}
public AutoBuffer putJSON8 ( long l ) { return putStr2(Long.toString(l)); }
public AutoBuffer putJSONA8( long ary[] ) {
if( ary == null ) return putNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSON8(ary[i]);
}
return put1(']');
}
public AutoBuffer putJSONAA8( long ary[][] ) {
if( ary == null ) return putNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONA8(ary[i]);
}
return put1(']');
}
public AutoBuffer putJSONAAA8( long ary[][][] ) {
if( ary == null ) return putNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONAA8(ary[i]);
}
return put1(']');
}
public AutoBuffer putEnumJSON( Enum e ) {
return e==null ? putNULL() : put1('"').putStr2(e.toString()).put1('"');
}
public AutoBuffer putJSON ( String name, Iced f ) { return putJSONStr(name).put1(':').putJSON (f); }
public AutoBuffer putJSONA ( String name, Iced f[] ) { return putJSONStr(name).put1(':').putJSONA(f); }
public AutoBuffer putJSONAA( String name, Iced f[][]){ return putJSONStr(name).put1(':').putJSONAA(f); }
public AutoBuffer putJSON8 ( String name, long l ) { return putJSONStr(name).put1(':').putJSON8(l); }
public AutoBuffer putEnumJSON( String name, Enum e ) { return putJSONStr(name).put1(':').putEnumJSON(e); }
public AutoBuffer putJSONA8( String name, long ary[] ) { return putJSONStr(name).put1(':').putJSONA8(ary); }
public AutoBuffer putJSONAA8( String name, long ary[][] ) { return putJSONStr(name).put1(':').putJSONAA8(ary); }
public AutoBuffer putJSONAAA8( String name, long ary[][][] ) { return putJSONStr(name).put1(':').putJSONAAA8(ary); }
public AutoBuffer putJSON4 ( int i ) { return putStr2(Integer.toString(i)); }
public AutoBuffer putJSON4 ( String name, int i ) { return putJSONStr(name).put1(':').putJSON4(i); }
public AutoBuffer putJSONA4( int[] a) {
if( a == null ) return putNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSON4(a[i]);
}
return put1(']');
}
public AutoBuffer putJSONA4(String name, int[] a) {
putJSONStr(name).put1(':');
return putJSONA4(a);
}
public AutoBuffer putJSONAA4(String name, int[][] a) {
putJSONStr(name).put1(':');
if( a == null ) return putNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONA4(a[i]);
}
return put1(']');
}
public AutoBuffer putJSON4f ( float f ) { return f==Float.POSITIVE_INFINITY?putJSONStr(JSON_POS_INF):(f==Float.NEGATIVE_INFINITY?putJSONStr(JSON_NEG_INF):(Float.isNaN(f)?putJSONStr(JSON_NAN):putStr2(Float .toString(f)))); }
public AutoBuffer putJSON4f ( String name, float f ) { return putJSONStr(name).put1(':').putJSON4f(f); }
public AutoBuffer putJSONA4f( float[] a ) {
if( a == null ) return putNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSON4f(a[i]);
}
return put1(']');
}
public AutoBuffer putJSONA4f(String name, float[] a) {
putJSONStr(name).put1(':');
return putJSONA4f(a);
}
public AutoBuffer putJSONAA4f(String name, float[][] a) {
putJSONStr(name).put1(':');
if( a == null ) return putNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONA4f(a[i]);
}
return put1(']');
}
public AutoBuffer putJSON8d( double d ) { return d==Double.POSITIVE_INFINITY?putJSONStr(JSON_POS_INF):(d==Double.NEGATIVE_INFINITY?putJSONStr(JSON_NEG_INF):(Double.isNaN(d)?putJSONStr(JSON_NAN):putStr2(Double.toString(d)))); }
public AutoBuffer putJSON8d( String name, double d ) { return putJSONStr(name).put1(':').putJSON8d(d); }
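  // Because JSON has no literal for non-finite numbers, putJSON4f and putJSON8d emit
  // the quoted strings "NaN", "Infinity" and "-Infinity" (the JSON_* constants at the
  // bottom of this class) instead of a bare number; finite values go through
  // Float.toString/Double.toString, e.g. putJSON8d(0.5) emits the characters 0.5.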
public AutoBuffer putJSONA8d( double[] a ) {
if( a == null ) return putNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSON8d(a[i]);
}
return put1(']');
}
public AutoBuffer putJSONA8d( String name, double[] a ) {
putJSONStr(name).put1(':');
return putJSONA8d(a);
}
public AutoBuffer putJSONAA8d( String name, double[][] a ) {
putJSONStr(name).put1(':');
return putJSONAA8d(a);
}
public AutoBuffer putJSONAA8d(double[][] a ) {
if( a == null ) return putNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONA8d(a[i]);
}
return put1(']');
}
public AutoBuffer putJSONAAA8d( String name, double[][][] a ) {
putJSONStr(name).put1(':');
return putJSONAAA8d(a);
}
public AutoBuffer putJSONAAA8d( double[][][] a ) {
if( a == null ) return putNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONAA8d(a[i]);
}
return put1(']');
}
static final String JSON_NAN = "NaN";
static final String JSON_POS_INF = "Infinity";
static final String JSON_NEG_INF = "-Infinity";
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Boot.java
|
package water;
import java.io.*;
import java.lang.management.ManagementFactory;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.*;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import com.google.common.io.CharStreams;
import com.google.common.io.Closeables;
import water.util.Log;
import water.util.Utils;
/** Initializer class for H2O.
*
* Unpacks all the dependencies and H2O implementation from the jar file, sets
* the loader to be able to load all the classes properly and then executes the
* main method of the H2O class.
*
 * Does nothing if H2O is not run from a jar archive. (This *is* a feature,
 * at least for the time being, so that we can continue using different IDEs.)
*/
public class Boot extends ClassLoader {
public static final Boot _init;
public final byte[] _jarHash;
public String loadContent(String fromFile) {
BufferedReader reader = null;
StringBuilder sb = new StringBuilder();
try {
InputStream is = getResource2(fromFile);
reader = new BufferedReader(new InputStreamReader(is));
CharStreams.copy(reader, sb);
} catch( IOException e ){
Log.err(e);
} finally {
Closeables.closeQuietly(reader);
}
return sb.toString();
}
private final String _jarPath;
private final ZipFile _h2oJar;
private File _parentDir;
private Weaver _weaver;
static {
try { _init = new Boot(); }
catch( Exception e ) { throw new RuntimeException(e); } // Do not attempt logging: no boot-loader
}
public boolean fromJar() { return _h2oJar != null; }
public String jarPath() { return _jarPath; }
private byte[] getMD5(InputStream is) throws IOException {
try {
MessageDigest md5 = MessageDigest.getInstance("MD5");
byte[] buf = new byte[4096];
int pos;
while( (pos = is.read(buf)) > 0 ) md5.update(buf, 0, pos);
return md5.digest();
} catch( NoSuchAlgorithmException e ) {
throw Log.errRTExcept(e);
} finally {
Utils.close(is);
}
}
private Boot() throws IOException {
super(Thread.currentThread().getContextClassLoader());
final String ownJar = getClass().getProtectionDomain().getCodeSource().getLocation().getPath();
Log.POST(2000, "ownJar is " + ownJar);
ZipFile jar = null;
// do nothing if not run from jar
if( ownJar.endsWith(".jar") ) {
Log.POST(2001, "");
_jarPath = URLDecoder.decode(ownJar, "UTF-8");
}
else if ( ownJar.endsWith(".jar/") ) {
Log.POST(2002, "");
// Some hadoop versions (like Hortonworks) will unpack the jar
// file on their own.
String stem = "h2o.jar";
File f = new File (ownJar + stem);
if (f.exists()) {
Log.POST(2003, "");
_jarPath = URLDecoder.decode(ownJar + stem, "UTF-8");
}
else {
_jarPath = null;
}
}
else {
_jarPath = null;
}
if (_jarPath == null) {
Log.POST(2004, "");
this._jarHash = new byte[16];
Arrays.fill(this._jarHash, (byte)0xFF);
_h2oJar = null;
}
else {
Log.POST(2005, "");
InputStream is = new FileInputStream(_jarPath);
_jarHash = getMD5(is);
is.close();
_h2oJar = new ZipFile(_jarPath);
}
Log.POST(2010, "_h2oJar is null: " + ((_h2oJar == null) ? "true" : "false"));
}
public static void main(String[] args) throws Exception { _init.boot(args); }
// NOTE: This method cannot be run from jar
public static void main(Class main, String[] args) throws Exception {
String[] packageNamesToWeave = { main.getPackage().getName()} ;
main(main, args, packageNamesToWeave);
}
// NOTE: This method cannot be run from jar
public static void main(Class main, String[] args, String[] packageNamesToWeave) throws Exception{
for (String packageName : packageNamesToWeave) {
weavePackage(packageName);
}
ArrayList<String> l = new ArrayList<String>(Arrays.asList(args));
l.add(0, "-mainClass");
l.add(1, main.getName());
_init.boot2(l.toArray(new String[0]));
}
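  // Usage sketch for IDE mode (MyDriver and its arguments are hypothetical
  // placeholders, not part of the original sources):
  //   Boot.main(MyDriver.class, new String[]{"-name", "dev"});
  // registers MyDriver's package with the Weaver, prepends "-mainClass MyDriver" to
  // the arguments, and then boots H2O via boot2().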
public static void weavePackage(String name) {
Weaver.registerPackage(name);
}
public static String[] wovenPackages() {
return Weaver._packages;
}
private URLClassLoader _systemLoader;
private Method _addUrl;
public void boot( String[] args ) throws Exception {
try {
boot2(args);
}
catch (Exception e) {
Log.POST(119, e);
throw (e);
}
}
/**
* Shutdown hook to delete tmp directory on exit.
* Intent is to delete the unpacked jar files, not the log files or ICE files.
*/
class DeleteDirHandler extends Thread {
final String _dir;
DeleteDirHandler(String dir) { _dir=dir; }
void delete(File f) throws IOException {
if (f.isDirectory())
for (File c : f.listFiles())
delete(c);
if (!f.delete())
throw new FileNotFoundException("Failed to delete file: " + f);
}
@Override
public void run() {
try { delete (new File (_dir)); }
catch (Exception e) { /* silent lossage because we tried but cannot help */ }
}
}
public void boot2( String[] args ) throws Exception {
// Catch some log setup stuff before anything else can happen.
boolean help = false;
boolean version = false;
for (int i = 0; i < args.length; i++) {
String arg = args[i];
Log.POST(110, arg == null ? "(arg is null)" : "arg is: " + arg);
if (arg.equals("-h") || arg.equals("--h") || arg.equals("-help") || arg.equals ("--help")) {
help = true;
}
if (arg.equals("-version") || arg.equals ("--version")) {
version = true;
}
}
if (help) {
H2O.printHelp();
H2O.exit (0);
}
if (version) {
H2O.printAndLogVersion();
H2O.exit (0);
}
_systemLoader = (URLClassLoader) getSystemClassLoader();
_addUrl = URLClassLoader.class.getDeclaredMethod("addURL", URL.class);
_addUrl.setAccessible(true);
if( fromJar() ) {
// Calculate directory name of where to unpack JAR file stuff.
String tmproottmpdir;
{
// Get --ice_root.
String ice_root;
{
ice_root = H2O.DEFAULT_ICE_ROOT();
for( int i=0; i<args.length; i++ )
if( args[i].startsWith("--ice_root=") ) ice_root = args[i].substring(11);
else if( args[i].startsWith("-ice_root=") ) ice_root = args[i].substring(10);
else if( (args[i].equals("--ice_root") || args[i].equals("-ice_root")) && (i < args.length-1) )
ice_root = args[i+1];
}
// Make a tmp directory in ice_root.
File tmproot = new File(ice_root);
if( !tmproot.mkdirs() && !tmproot.isDirectory() ) throw new IOException("Unable to create ice root: " + tmproot.getAbsolutePath());
long now;
String randomChars;
String pid;
{
now = System.currentTimeMillis();
pid = "unknown";
Random r = new Random();
byte[] bytes = new byte[4];
r.nextBytes(bytes);
randomChars = String.format("%02x%02x%02x%02x", bytes[0], bytes[1], bytes[2], bytes[3]);
try {
String s = ManagementFactory.getRuntimeMXBean().getName();
Pattern p = Pattern.compile("([0-9]*).*");
Matcher m = p.matcher(s);
boolean b = m.matches();
            if (b) {
pid = m.group(1);
}
}
catch (Exception xe) {}
}
tmproottmpdir = tmproot + File.separator + "h2o-temp-" + now + "-" + randomChars + "-" + pid;
}
File dir = new File (tmproottmpdir);
if (dir.exists()) {
if( !dir.delete() ) throw new IOException("Failed to remove tmp file: " + dir.getAbsolutePath());
}
if( !dir.mkdir() ) throw new IOException("Failed to create tmp dir: " + dir.getAbsolutePath());
// This causes the tmp JAR unpack dir to delete on exit.
// It does not delete logs or ICE stuff.
Runtime.getRuntime().addShutdownHook(new DeleteDirHandler(dir.toString()));
_parentDir = dir; // Set a global instead of passing the dir about?
Log.debug("Extracting jar into " + _parentDir);
// Make all the embedded jars visible to the custom class loader
extractInternalFiles(); // Resources
addInternalJars("apache");
addInternalJars("gson");
addInternalJars("junit");
addInternalJars("jama");
addInternalJars("poi");
addInternalJars("s3");
addInternalJars("jets3t");
addInternalJars("log4j");
addInternalJars("joda");
addInternalJars("json");
addInternalJars("tachyon");
}
run(args);
}
public static void run(String[] args) throws Exception {
// Figure out the correct main class to call
String mainClass = "water.H2O";
if(args != null) {
int index = Arrays.asList(args).indexOf("-mainClass");
if( index >= 0 && args.length > index + 1 ) {
mainClass = args[index + 1]; // Swap out for requested main
args = Arrays.copyOfRange(args, index + 2, args.length);
}
}
Class mainClazz = _init.loadClass(mainClass,true);
Log.POST(20, "before (in run) mainClass invoke " + mainClazz.getName());
Method main = null;
try {
// First look for 'userMain', so that user code only exposes one 'main'
// method. Problem showed up on samples where users launched the wrong one.
main = mainClazz.getMethod("userMain", String[].class);
} catch(NoSuchMethodException ex) {}
if(main == null) main = mainClazz.getMethod("main", String[].class);
main.invoke(null,(Object)args);
Log.POST(20, "after (in run) mainClass invoke "+ mainClazz.getName());
int index = Arrays.asList(args).indexOf("-runClass");
if( index >= 0 && args.length > index + 1 ) {
String className = args[index + 1]; // Swap out for requested main
args = Arrays.copyOfRange(args, index + 2, args.length);
Class clazz = _init.loadClass(className,true);
Log.POST(21, "before (in run) runClass invoke " + clazz.getName() + " main");
clazz.getMethod("main",String[].class).invoke(null,(Object)args);
Log.POST(21, "after (in run) runClass invoke " + clazz.getName() + " main");
}
}
/** Returns an external File for the internal file name. */
public File internalFile(String name) { return new File(_parentDir, name); }
/** Add a jar to the system classloader */
public void addInternalJars(String name) throws IllegalAccessException, InvocationTargetException, MalformedURLException {
addExternalJars(internalFile(name));
}
/** Adds all jars in given directory to the classpath. */
public void addExternalJars(File file) throws IllegalAccessException, InvocationTargetException, MalformedURLException {
assert file.exists() : "Unable to find external file: " + file.getAbsolutePath();
if( file.isDirectory() ) {
for( File f : file.listFiles() ) addExternalJars(f);
} else if( file.getName().endsWith(".jar") ) {
Log.POST(22, "before (in addExternalJars) invoke _addUrl " + file.toURI().toURL());
_addUrl.invoke(_systemLoader, file.toURI().toURL());
Log.POST(22, "after (in addExternalJars) invoke _addUrl " + file.toURI().toURL());
}
}
/** Extracts the libraries from the jar file to given local path. */
private void extractInternalFiles() throws IOException {
Enumeration entries = _h2oJar.entries();
while( entries.hasMoreElements() ) {
ZipEntry e = (ZipEntry) entries.nextElement();
String name = e.getName();
if( e.isDirectory() ) continue; // mkdirs() will handle these
if(! name.endsWith(".jar") ) continue;
// extract the entry
File out = internalFile(name);
out.getParentFile().mkdirs();
try {
FileOutputStream fos = new FileOutputStream(out);
BufferedInputStream is = new BufferedInputStream (_h2oJar.getInputStream(e));
BufferedOutputStream os = new BufferedOutputStream(fos);
int read;
byte[] buffer = new byte[4096];
while( (read = is.read(buffer)) != -1 ) os.write(buffer,0,read);
os.flush();
fos.getFD().sync(); // Force the output; throws SyncFailedException if full
os.close();
is.close();
} catch( FileNotFoundException ex ) {
// Expected FNF if 2 H2O instances are attempting to unpack in the same directory
} catch( IOException ex ) {
Log.die("Unable to extract file "+name+" because of "+ex+". Make sure that directory " + _parentDir + " contains at least 50MB of free space to unpack H2O libraries.");
throw ex; // dead code
}
}
}
public InputStream getResource2(String uri) {
if( fromJar() ) {
InputStream is = _systemLoader.getResourceAsStream("resources"+uri);
if (is==null) is = this.getClass().getClassLoader().getResourceAsStream("resources"+uri);
if (is==null) is = Thread.currentThread().getContextClassLoader().getResourceAsStream("resources"+uri);
return is;
} else {
try {
File resources = new File("lib/resources");
if(!resources.exists()) {
// IDE mode assumes classes are in target/classes. Not using current path
// to allow running from other locations.
String h2oClasses = getClass().getProtectionDomain().getCodeSource().getLocation().getPath();
resources = new File(h2oClasses + "/../../lib/resources");
}
return new FileInputStream(new File(resources, uri));
} catch (FileNotFoundException e) {
Log.err("Trying system loader because : ", e);
return _systemLoader.getResourceAsStream("resources"+uri);
}
}
}
// --------------------------------------------------------------------------
//
// Auto-Serialization!
//
// At Class-load-time, insert serializers for all subclasses of Iced & DTask
// that do not already contain serializers. We are limited to serializing
// primitives, arrays of primitives, Keys, and Strings.
//
// --------------------------------------------------------------------------
// Intercept class loads that would otherwise go to the parent loader
// (probably the System loader) and try to auto-add e.g. serialization
// methods to classes that inherit from DTask & Iced. Notice that this
// changes the default search order: existing classes first, then my class
// search, THEN the System or parent loader.
@Override public synchronized Class loadClass( String name, boolean resolve ) throws ClassNotFoundException {
assert !name.equals(Weaver.class.getName());
Class z = loadClass2(name); // Do all the work in here
if( resolve ) resolveClass(z); // Resolve here instead in the work method
return z;
}
// Run the class lookups in my favorite non-default order.
private final Class loadClass2( String name ) throws ClassNotFoundException {
Class z = findLoadedClass(name); // Look for pre-existing class
if( z != null ) return z;
if( _weaver == null ) _weaver = new Weaver();
z = _weaver.weaveAndLoad(name, this); // Try the Happy Class Loader
if( z != null ) {
// Occasionally it's useful to print out class names that are actually Weaved.
// Leave this commented out println here so I can easily find it for next time.
// System.out.println("WEAVED: " + name);
return z;
}
    return getParent().loadClass(name); // Try the parent loader. Probably the System loader.
}
// --------------------------------------------------------------------------
//
// Lists H2O classes
//
// --------------------------------------------------------------------------
public static List<String> getClasses() {
ArrayList<String> names = new ArrayList<String>();
if(_init._h2oJar != null) {
for( Enumeration<ZipEntry> e = (Enumeration) _init._h2oJar.entries(); e.hasMoreElements(); ) {
String name = e.nextElement().getName();
if( name.endsWith(".class") )
names.add(name);
}
} else
findClasses(new File(CLASSES), names);
for( int i = 0; i < names.size(); i++ ) {
String n = names.get(i);
names.set(i, Utils.className(n));
}
return names;
}
private static final String CLASSES = "target/classes";
private static void findClasses(File folder, ArrayList<String> names) {
for( File file : folder.listFiles() ) {
if( file.isDirectory() )
findClasses(file, names);
else if( file.getPath().endsWith(".class") )
names.add(file.getPath().substring(CLASSES.length() + 1));
}
}
// --------------------------------------------------------------------------
// Some global static variables used to pass state between System threads and
// H2O threads, such as the GC call-back thread and the MemoryManager threads.
static public volatile long HEAP_USED_AT_LAST_GC;
static public volatile long TIME_AT_LAST_GC=System.currentTimeMillis();
static private final Object _store_cleaner_lock = new Object();
static public void kick_store_cleaner() {
synchronized(_store_cleaner_lock) { _store_cleaner_lock.notifyAll(); }
}
static public void block_store_cleaner() {
synchronized( _store_cleaner_lock ) {
try { _store_cleaner_lock.wait(5000); } catch (InterruptedException ie) { }
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/BuildVersion.java
|
package water;
public class BuildVersion extends AbstractBuildVersion {
public String branchName() { return "master"; }
public String lastCommitHash() { return "d8ee05301370ef192a2668ce9f021209d35e7e95"; }
public String describe() { return "jenkins-master-1558-5-gd8ee053-dirty"; }
public String projectVersion() { return "2.9.0.99999"; }
public String compiledOn() { return "Fri Oct 24 11:21:17 PDT 2014"; }
public String compiledBy() { return "michal"; }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/DException.java
|
package water;
/** A Distributed Exception - an exception originally thrown on one node
* and passed to another.
*/
public class DException extends Iced {
final H2ONode _h2o; // Original throwing node
final String _exClass; // Structural breakdown of the original exception
final DException _cause;
final String _msg;
final Stk[] _stk;
DException( Throwable ex ) {
_h2o = H2O.SELF;
Throwable cex = ex.getCause();
while( ex instanceof DistributedException && cex != null )
{ ex = cex; cex = ex.getCause(); }
_exClass = ex.getClass().toString();
_cause = cex==null ? null : new DException(cex);
_msg = ex.getMessage();
StackTraceElement stk[] = ex.getStackTrace();
_stk = new Stk[stk.length];
for( int i=0; i<stk.length; i++ )
_stk[i] = new Stk(stk[i]);
}
DistributedException toEx() {
String msg = "from "+_h2o+"; "+_exClass+": "+_msg;
DistributedException e = new DistributedException(msg,_cause==null ? null : _cause.toEx());
StackTraceElement stk[] = new StackTraceElement[_stk.length];
for( int i=0; i<_stk.length; i++ )
stk[i] = _stk[i].toSTE();
e.setStackTrace(stk);
return e;
}
private static class Stk extends Iced {
String _cls, _mth, _fname;
int _line;
Stk( StackTraceElement stk ) {
_cls = stk.getClassName();
_mth = stk.getMethodName();
_fname = stk.getFileName();
_line = stk.getLineNumber();
}
public StackTraceElement toSTE() { return new StackTraceElement(_cls,_mth,_fname,_line); }
}
public static class DistributedException extends RuntimeException {
DistributedException( String msg, Throwable cause ) { super(msg,cause); }
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/DKV.java
|
package water;
/**
* Distributed Key/Value Store
*
* This class handles the distribution pattern.
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public abstract class DKV {
// This put is a top-level user-update, and not a reflected or retried
// update. i.e., The User has initiated a change against the K/V store.
// This is a WEAK update: it is not strongly ordered with other updates
static public Value put( Key key, Value val ) { return put(key,val,null); }
static public Value put( Key key, Value val, Futures fs ) { return put(key,val,fs,false);}
static public Value put( Key key, Value val, Futures fs, boolean dontCache ) {
assert key != null;
assert val==null || val._key == key:"non-matching keys " + ((Object)key).toString() + " != " + ((Object)val._key).toString();
while( true ) {
Value old = H2O.raw_get(key); // Raw-get: do not lazy-manifest if overwriting
Value res = DputIfMatch(key,val,old,fs,dontCache);
if( res == old ) return old; // PUT is globally visible now?
if( val != null && val._key != key ) key = val._key;
}
}
static public Value put( Key key, Iced v ) { return put(key,v,null); }
static public Value put( Key key, Iced v, Futures fs ) {
return put(key,new Value(key,v),fs);
}
static public Value put( Key key, Iced v, Futures fs,boolean donCache ) {
return put(key,new Value(key,v),fs,donCache);
}
// Remove this Key
static public Value remove( Key key ) { return remove(key,null); }
static public Value remove( Key key, Futures fs ) { return put(key,null,fs); }
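  // Typical usage sketch (SomeIced is an illustrative Iced subclass, not part of the
  // original sources):
  //   Key k = Key.make("my-key");
  //   DKV.put(k, new SomeIced()); // publish the value into the distributed store
  //   Value v = DKV.get(k);       // weak get; may fetch from the key's home node
  //   SomeIced pojo = v.get();    // inflate the POJO from the Value wrapper
  //   DKV.remove(k);              // drop the mapping cloud-wide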
// Do a PUT, and on success trigger replication. Some callers need the old
// value, and some callers need the Futures so we can block later to ensure
// the result is there. Many callers don't need either value. So rather
// than making a special object to return the pair of values, I've settled
// for a "callers pay" model with a more complex return setup. The return
// value is a Futures if one is needed, or the old Value if not. If a
// Futures is returned the old Value is stashed inside of it for the caller
// to consume.
static public Value DputIfMatch( Key key, Value val, Value old, Futures fs) {
return DputIfMatch(key, val, old, fs, false);
}
static public Value DputIfMatch( Key key, Value val, Value old, Futures fs, boolean dontCache ) {
// First: I must block repeated remote PUTs to the same Key until all prior
// ones complete - the home node needs to see these PUTs in order.
// Repeated PUTs on the home node are already ordered.
if( old != null && !key.home() ) old.startRemotePut();
// local update first, since this is a weak update
Value res = H2O.putIfMatch(key,val,old);
if( res != old ) // Failed?
return res; // Return fail value
// Check for trivial success: no need to invalidate remotes if the new
// value equals the old.
if( old != null && old == val ) return old; // Trivial success?
if( old != null && val != null && val.equals(old) )
return old; // Less trivial success, but no network i/o
// Before we start doing distributed writes... block until the cloud
    // stabilizes. After we start doing distributed writes, it is an error to
// change cloud shape - the distributed writes will be in the wrong place.
Paxos.lockCloud();
// The 'D' part of DputIfMatch: do Distribution.
// If PUT is on HOME, invalidate remote caches
// If PUT is on non-HOME, replicate/push to HOME
if( key.home() ) { // On HOME?
if( old != null ) old.lockAndInvalidate(H2O.SELF,fs);
} else { // On non-HOME?
// Start a write, but do not block for it
TaskPutKey.put(key.home_node(),key,val,fs, dontCache);
}
return old;
}
// Stall until all existing writes have completed.
// Used to order successive writes.
static public void write_barrier() {
for( H2ONode h2o : H2O.CLOUD._memary )
for( RPC rpc : h2o.tasks() )
if( rpc._dt instanceof TaskPutKey || rpc._dt instanceof Atomic )
rpc.get();
}
// User-Weak-Get a Key from the distributed cloud.
static public Value get( Key key, int len, int priority ) {
while( true ) {
// Read the Cloud once per put-attempt, to keep a consistent snapshot.
H2O cloud = H2O.CLOUD;
Value val = H2O.get(key);
// Hit in local cache?
if( val != null ) {
if( len > val._max ) len = val._max; // See if we have enough data cached locally
if( len == 0 || val.rawMem() != null || val.rawPOJO() != null || val.isPersisted() ) return val;
assert !key.home(); // Master must have *something*; we got nothing & need to fetch
}
// While in theory we could read from any replica, we always need to
// inform the home-node that his copy has been Shared... in case it
// changes and he needs to issue an invalidate. For now, always and only
// fetch from the Home node.
H2ONode home = cloud._memary[key.home(cloud)];
// If we missed in the cache AND we are the home node, then there is
// no V for this K (or we have a disk failure).
if( home == H2O.SELF ) return null;
// Pending write to same key from this node? Take that write instead.
// Moral equivalent of "peeking into the cpu store buffer". Can happen,
// e.g., because a prior 'put' of a null (i.e. a remove) is still mid-
// send to the remote, so the local get has missed above, but a remote
// get still might 'win' because the remote 'remove' is still in-progress.
for( RPC<?> rpc : home.tasks() )
if( rpc._dt instanceof TaskPutKey ) {
assert rpc._target == home;
TaskPutKey tpk = (TaskPutKey)rpc._dt;
Key k = tpk._key;
if( k != null && key.equals(k) )
return tpk._xval;
}
return TaskGetKey.get(home,key,priority);
}
}
static public Value get( Key key ) { return get(key,Integer.MAX_VALUE,H2O.GET_KEY_PRIORITY); }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/DRemoteTask.java
|
package water;
import java.util.ArrayList;
import java.util.concurrent.*;
import jsr166y.CountedCompleter;
import jsr166y.ForkJoinPool;
import water.DException.DistributedException;
import water.Job.JobCancelledException;
import water.util.Log;
/** A Distributed DTask.
* Execute a set of Keys on the home for each Key.
* Limited to doing a map/reduce style.
*/
public abstract class DRemoteTask<T extends DRemoteTask> extends DTask<T> implements Cloneable, ForkJoinPool.ManagedBlocker {
// Keys to be worked over
protected Key[] _keys;
// One-time flips from false to true
transient protected boolean _is_local, _top_level;
// Other RPCs we are waiting on
transient private RPC<T> _lo, _hi;
// Local work we are waiting on
transient private T _local;
// We can add more things to block on - in case we want a bunch of lazy tasks
// produced by children to all end before this top-level task ends.
// Semantically, these will all complete before we return from the top-level
// task. Pragmatically, we block on a finer grained basis.
transient protected volatile Futures _fs; // More things to block on
// Combine results from 'drt' into 'this' DRemoteTask
abstract public void reduce( T drt );
// Support for fluid-programming with strong types
private final T self() { return (T)this; }
// Super-class init on the 1st remote instance of this object. Caller may
  // choose to clone/fork new instances, but then is responsible for setting up
// those instances.
public void init() { }
// Invokes the task on all nodes
public T invokeOnAllNodes() {
H2O cloud = H2O.CLOUD;
Key[] args = new Key[cloud.size()];
String skey = "RunOnAll"+Key.rand();
for( int i = 0; i < args.length; ++i )
args[i] = Key.make(skey,(byte)0,Key.DFJ_INTERNAL_USER,cloud._memary[i]);
invoke(args);
for( Key arg : args ) DKV.remove(arg);
return self();
}
// Invoked with a set of keys
public T dfork ( Key... keys ) { keys(keys); _top_level=true; compute2(); return self(); }
public void keys( Key... keys ) { _keys = flatten(keys); }
public T invoke( Key... keys ) {
try {
ForkJoinPool.managedBlock(dfork(keys));
} catch(InterruptedException iex) { Log.errRTExcept(iex); }
    // Intent was to quietlyJoin(): fork, then QUIETLY join so that local
    // exceptions do not propagate out.
return self();
}
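  // Minimal subclass sketch (illustrative only, not from the original sources):
  //   class CountRows extends DRemoteTask<CountRows> {
  //     long _n; // per-node partial result
  //     @Override public void lcompute() { /* count rows for the local _keys */ tryComplete(); }
  //     @Override public void reduce(CountRows other) { _n += other._n; }
  //   }
  // new CountRows().invoke(keys) splits the keys by home node, runs lcompute() on each
  // node's subset, and folds the per-node results back together via reduce().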
// Return true if blocking is unnecessary, which is true if the Task isDone.
@Override public boolean isReleasable() { return isDone(); }
// Possibly blocks the current thread. Returns true if isReleasable would
// return true. Used by the FJ Pool management to spawn threads to prevent
  // deadlock if otherwise all threads would block on waits.
@Override public boolean block() throws InterruptedException {
while( !isDone() ) {
try { get(); }
catch(ExecutionException eex) { // skip the execution part
Throwable tex = eex.getCause();
if( tex instanceof Error) throw ( Error)tex;
if( tex instanceof DistributedException) throw ( DistributedException)tex;
if( tex instanceof JobCancelledException) throw (JobCancelledException)tex;
throw new RuntimeException(tex);
}
catch(CancellationException cex) { Log.errRTExcept(cex); }
}
return true;
}
// Decide to do local-work or remote-work
@Override public final void compute2() {
if( _is_local )
lcompute();
else
dcompute();
}
// Decide to do local-completion or remote-completion
@Override public final void onCompletion( CountedCompleter caller ) {
if( _is_local ) lonCompletion(caller);
else donCompletion(caller);
}
// Real Work(tm)!
public abstract void lcompute(); // Override to specify local work
private final void dcompute() {// Work to do the distribution
// Split out the keys into disjointly-homed sets of keys.
// Find the split point. First find the range of home-indices.
H2O cloud = H2O.CLOUD;
int lo=cloud._memary.length, hi=-1;
for( Key k : _keys ) {
int i = k.home(cloud);
if( i<lo ) lo=i;
if( i>hi ) hi=i; // lo <= home(keys) <= hi
}
// Classic fork/join, but on CPUs.
// Split into 3 arrays of keys: lo keys, hi keys and self keys
final ArrayList<Key> locals = new ArrayList<Key>();
final ArrayList<Key> lokeys = new ArrayList<Key>();
final ArrayList<Key> hikeys = new ArrayList<Key>();
int self_idx = cloud.nidx(H2O.SELF);
int mid = (lo+hi)>>>1; // Mid-point
for( Key k : _keys ) {
int idx = k.home(cloud);
if( idx == self_idx ) locals.add(k);
else if( idx < mid ) lokeys.add(k);
else hikeys.add(k);
}
// Launch off 2 tasks for the other sets of keys, and get a place-holder
// for results to block on.
_lo = remote_compute(lokeys);
_hi = remote_compute(hikeys);
// Setup for local recursion: just use the local keys.
if( locals.size() != 0 ) { // Shortcut for no local work
_local = clone(); // 'this' is completer for '_local', so awaits _local completion
_local._is_local = true;
_local._keys = locals.toArray(new Key[locals.size()]); // Keys, including local keys (if any)
_local.init(); // One-time top-level init
H2O.submitTask(_local); // Begin normal execution on a FJ thread
} else {
tryComplete(); // No local work, so just immediate tryComplete
}
}
// Real Completion(tm)!
public void lonCompletion( CountedCompleter caller ) { } // Override for local completion
private final void donCompletion( CountedCompleter caller ) { // Distributed completion
assert _lo == null || _lo.isDone();
assert _hi == null || _hi.isDone();
// Fold up results from left & right subtrees
if( _lo != null ) reduce2(_lo.get());
if( _hi != null ) reduce2(_hi.get());
if( _local != null ) reduce2(_local );
// Note: in theory (valid semantics) we could push these "over the wire"
// and block for them as we're blocking for the top-level initial split.
// However, that would require sending "isDone" flags over the wire also.
// MUCH simpler to just block for them all now, and send over the empty set
// of not-yet-blocked things.
if(_local != null && _local._fs != null )
_local._fs.blockForPending(); // Block on all other pending tasks, also
_keys = null; // Do not return _keys over wire
if( _top_level ) postGlobal();
};
// Override to do work after all the forks have returned
protected void postGlobal(){}
// 'Reduce' left and right answers. Gather exceptions
private void reduce2( T drt ) {
if( drt == null ) return;
reduce(drt);
}
private final RPC<T> remote_compute( ArrayList<Key> keys ) {
if( keys.size() == 0 ) return null;
DRemoteTask rpc = clone();
rpc.setCompleter(null);
rpc._keys = keys.toArray(new Key[keys.size()]);
addToPendingCount(1); // Block until the RPC returns
// Set self up as needing completion by this RPC: when the ACK comes back
// we'll get a wakeup.
return new RPC(keys.get(0).home_node(), rpc).addCompleter(this).call();
}
private static Key[] flatten( Key[] args ) { return args; }
public Futures getFutures() {
if( _fs == null ) synchronized(this) { if( _fs == null ) _fs = new Futures(); }
return _fs;
}
public void alsoBlockFor( Future f ) {
if( f == null ) return;
getFutures().add(f);
}
public void alsoBlockFor( Futures fs ) {
if( fs == null ) return;
getFutures().add(fs);
}
protected void reduceAlsoBlock( T drt ) {
reduce(drt);
alsoBlockFor(drt._fs);
}
@Override public T clone() {
T dt = (T)super.clone();
dt.setCompleter(this); // Set completer, what used to be a final field
    dt._fs = null; // Clone does not depend on extant futures
dt.setPendingCount(0); // Volatile write for completer field; reset pending count also
return dt;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/DTask.java
|
package water;
import jsr166y.CountedCompleter;
import water.DException.DistributedException;
import water.H2O.H2OCountedCompleter;
/** Objects which are passed and remotely executed.<p>
* <p>
* Efficient serialization methods for subclasses will be automatically
* generated, but explicit ones can be provided. Transient fields will
* <em>not</em> be mirrored between the VMs.
* <ol>
* <li>On the local vm, this task will be serialized and sent to a remote.</li>
* <li>On the remote, the task will be deserialized.</li>
* <li>On the remote, the H2ONode invoke method will be executed.</li>
* <li>On the remote, the task will be serialized and sent to the local vm</li>
* <li>On the local vm, the task will be deserialized
* <em>into the original instance</em></li>
* <li>On the local vm, the {@link #onAck()} method will be executed.</li>
* <li>On the remote, the {@link #onAckAck()} method will be executed.</li>
* </ol>
*
*/
public abstract class DTask<T extends DTask> extends H2OCountedCompleter implements Freezable {
protected DTask(){}
public DTask(H2OCountedCompleter completer){super(completer);}
// Return a distributed-exception
protected DException _ex;
public final boolean hasException() { return _ex != null; }
public synchronized void setException(Throwable ex) { if( _ex==null ) _ex = new DException(ex); }
public DistributedException getDException() { return _ex==null ? null : _ex.toEx(); }
// Track if the reply came via TCP - which means a timeout on ACKing the TCP
// result does NOT need to get the entire result again, just that the client
// needs more time to process the TCP result.
transient boolean _repliedTcp; // Any return/reply/result was sent via TCP
/** Top-level remote execution hook. Called on the <em>remote</em>. */
public void dinvoke( H2ONode sender ) { compute2(); }
/** 2nd top-level execution hook. After the primary task has received a
* result (ACK) and before we have sent an ACKACK, this method is executed on
* the <em>local vm</em>. Transients from the local vm are available here. */
public void onAck() {}
/** 3rd top-level execution hook. After the original vm sent an ACKACK, this
* method is executed on the <em>remote</em>. Transients from the remote vm
* are available here. */
public void onAckAck() {}
/** Override to remove 2 lines of logging per RPC. 0.5M RPC's will lead to
* 1M lines of logging at about 50 bytes/line produces 50M of log file,
* which will swamp all other logging output. */
public boolean logVerbose() { return false; }
@Override public AutoBuffer write(AutoBuffer bb) { return bb.put(_ex); }
@Override public <T extends Freezable> T read(AutoBuffer bb) { _ex = bb.get(); return (T)this; }
@Override public <F extends Freezable> F newInstance() { throw barf("newInstance"); }
@Override public int frozenType() {throw barf("frozenType");}
@Override public AutoBuffer writeJSONFields(AutoBuffer bb) { return bb; }
@Override public water.api.DocGen.FieldDoc[] toDocField() { return null; }
public void copyOver(Freezable other) {
DTask that = (DTask)other;
this._ex = that._ex; // Copy verbatim semantics, replacing all fields
}
private RuntimeException barf(String method) {
return new RuntimeException(H2O.SELF + ":" + getClass().toString()+ " " + method + " should be automatically overridden in the subclass by the auto-serialization code");
}
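  /** A minimal sketch, not part of the original source, of how a DTask
   *  subclass plugs into the lifecycle documented above: compute2() runs on
   *  the remote (invoked via dinvoke), onAck() runs on the local vm once the
   *  reply arrives, and onAckAck() runs on the remote after the local vm
   *  confirms.  The class name and fields are hypothetical; serialization of
   *  the fields is assumed to come from the auto-generated read/write code. */
  static class ExampleEchoTask extends DTask<ExampleEchoTask> {
    String _msg;   // request payload, filled in on the local vm before sending
    String _echo;  // reply payload, filled in on the remote
    ExampleEchoTask(String msg) { _msg = msg; }
    @Override public void compute2() {    // remote side: do the work
      _echo = "echo: " + _msg;
      tryComplete();                       // allows the reply (ACK) to be sent back
    }
    @Override public void onAck()    { /* local vm: _echo has been deserialized here */ }
    @Override public void onAckAck() { /* remote: the local vm has acknowledged receipt */ }
  }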
/**
* Task to be executed at home of the given key.
* Basically a wrapper around DTask which enables us to bypass
* remote/local distinction (RPC versus submitTask).
*/
public static abstract class DKeyTask<T extends DKeyTask,V extends Iced> extends DTask<DKeyTask>{
private final Key _key;
public DKeyTask(final Key k) {this(null,k);}
public DKeyTask(H2OCountedCompleter cmp,final Key k) {
super(cmp);
_key = k;
}
    // Override this: the work to run at the key's home node.
protected abstract void map(V v);
@Override public final void compute2(){
if(_key.home()){
Value val = H2O.get(_key);
if(val != null) {
V v = val.get();
map(v);
}
tryComplete();
} else new RPC(_key.home_node(),this).addCompleter(this).call();
}
    // onCompletion must be empty here; it may be invoked twice (on the remote and on the local node)
@Override public void onCompletion(CountedCompleter cc){}
public void submitTask() {H2O.submitTask(this);}
public void forkTask() {fork();}
public T invokeTask() {
H2O.submitTask(this);
join();
return (T)this;
}
}
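  /** Hypothetical usage sketch, not in the original source: a DKeyTask that
   *  logs the row count of a Frame at the Frame's home node.  The class name
   *  and the log message are invented for illustration; it assumes
   *  water.fvec.Frame is an Iced subclass as in the rest of this codebase. */
  static class ExampleFrameRowCounter extends DKeyTask<ExampleFrameRowCounter, water.fvec.Frame> {
    public ExampleFrameRowCounter(Key frameKey) { super(frameKey); }
    @Override protected void map(water.fvec.Frame fr) {
      water.util.Log.info("frame has " + fr.numRows() + " rows");
    }
    // Typical call site (also hypothetical): new ExampleFrameRowCounter(key).invokeTask();
  }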
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/External.java
|
package water;
import java.io.InputStream;
import water.util.Log;
import dontweave.gson.JsonObject;
/**
* Expose internal H2O API calls to the External World
*
* H2O innards need to go through the H2O ClassLoader to get bytecode
* reweaving done - e.g. adding serialization methods, or auto-distribution
 * code.  The outside world probably does not want to use the H2O ClassLoader,
 * so this class bridges the gap by delegating all calls through a singleton
* interface object loaded by H2O.
*/
public class External {
public final static ExternalInterface API;
static {
ExternalInterface api = null;
try { api = (ExternalInterface)Boot._init.loadClass("water.InternalInterface").newInstance(); }
catch( ClassNotFoundException e ) { Log.err(e); }
catch( InstantiationException e ) { Log.err(e); }
catch( IllegalAccessException e ) { Log.err(e); }
API = api;
}
public static Object makeKey ( String key_name ) { return API.makeKey(key_name); }
public static Object makeValue(Object key,byte[] bits){ return API.makeValue(key,bits); }
public static void put( Object key, Object val) { API.put(key,val); }
public static Object getValue( Object key ) { return API.getValue(key); }
public static Object getBytes( Object val ) { return API.getBytes(val); }
public static Object ingestRFModelFromR(Object key,InputStream is){ return API.ingestRFModelFromR(key,is); }
public static float[] scoreKey ( Object modelKey, String [] colNames, String domains[][], double[] row ) { return API.scoreKey (modelKey,colNames,domains,row); }
public static float[] scoreModel( Object model , String [] colNames, String domains[][], double[] row ) { return API.scoreModel(model ,colNames,domains,row); }
public static JsonObject cloudStatus( ) { return API.cloudStatus(); }
}
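/** Hypothetical usage sketch, not part of the original source: an external
 *  caller storing raw bytes in the H2O K/V store and reading them back
 *  through the bridge above.  The key name and payload are invented; only
 *  methods declared on External are used. */
class ExternalUsageSketch {
  public static void main(String[] args) {
    Object key = External.makeKey("example.bytes");               // opaque Key handle
    Object val = External.makeValue(key, new byte[] { 1, 2, 3 }); // wrap raw bits as a Value
    External.put(key, val);                                       // publish into the store
    Object fetched = External.getValue(key);                      // fetch it back through the bridge
    System.out.println("fetched value: " + fetched);
  }
}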
|