// File: java-sources/ai/h2o/h2o-clustering/3.46.0.7/water/clustering/api/H2OClusterStatusEndpoint.java
package water.clustering.api;
import water.H2O;
import water.H2ONode;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.util.Arrays;
import java.util.Set;
import static water.clustering.api.HttpResponses.*;
public class H2OClusterStatusEndpoint implements HttpHandler {
@Override
public void handle(HttpExchange httpExchange) throws IOException {
if (!GET_METHOD.equals(httpExchange.getRequestMethod())) {
newResponseCodeOnlyResponse(httpExchange, HttpURLConnection.HTTP_BAD_METHOD);
return;
}
// An H2O cluster grows over time, even when a flat file is used. The H2O.CLOUD property might be updated with new
// nodes while clustering is in progress, and doesn't necessarily contain all the nodes from the very beginning of the
// clustering process. From this endpoint's point of view, H2O is clustered if and only if the H2O cloud members
// contain all nodes defined in the flat file.
if (!H2O.isFlatfileEnabled()) {
newResponseCodeOnlyResponse(httpExchange, HttpURLConnection.HTTP_NO_CONTENT);
return;
}
final Set<H2ONode> flatFile = H2O.getFlatfile();
final H2ONode[] cloudMembers = H2O.CLOUD.members();
final boolean clustered = flatFile != null && cloudMembers != null && cloudMembers.length == flatFile.size()
&& flatFile.containsAll(Arrays.asList(cloudMembers));
if (!clustered) {
// If there is no cluster, there is no content to report.
newResponseCodeOnlyResponse(httpExchange, HttpURLConnection.HTTP_NO_CONTENT);
} else {
newFixedLengthResponse(httpExchange, HttpURLConnection.HTTP_OK, MIME_TYPE_JSON, nodesListJson());
}
}
/**
* Construct a JSON representation of healthy and unhealthy H2O nodes.
* No external libraries are used, as those would make this otherwise independent embedded config heavier.
* No transitive dependencies from the h2o-core module are used, as that would create an indirect dependency
* and prevent future upgrades at the API level.
* <p>
* Example output:
* {
* "leader_node": "192.168.0.149:54321",
* "healthy_nodes": ["192.168.0.149:54321"],
* "unhealthy_nodes": []
* }
*
* @return A String with JSON representation of healthy and unhealthy H2O Nodes. Never null.
*/
private String nodesListJson() {
final H2ONode[] cloudMembers = H2O.CLOUD.members();
final StringBuilder healthyNodesStringArray = new StringBuilder();
final StringBuilder unhealthyNodesStringArray = new StringBuilder();
int healthyNodeCount = 0;
int unhealthyNodeCount = 0;
for (final H2ONode node : cloudMembers) {
if (node.isHealthy()) {
healthyNodesStringArray.append('"');
healthyNodesStringArray.append(node.getIpPortString());
healthyNodesStringArray.append("\",");
healthyNodeCount++;
} else {
unhealthyNodesStringArray.append('"');
unhealthyNodesStringArray.append(node.getIpPortString());
unhealthyNodesStringArray.append("\",");
unhealthyNodeCount++;
}
}
if (healthyNodeCount > 0) {
healthyNodesStringArray.deleteCharAt(healthyNodesStringArray.length() - 1);
}
if (unhealthyNodeCount > 0) {
unhealthyNodesStringArray.deleteCharAt(unhealthyNodesStringArray.length() - 1);
}
return String.format("{\n" +
"\"leader_node\": \"%s\",\n" +
"\"healthy_nodes\": [%s],\n" +
"\"unhealthy_nodes\": [%s]\n" +
"}", H2O.CLOUD.leader().getIpPortString(),
healthyNodesStringArray.toString(),
unhealthyNodesStringArray.toString());
}
}
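
Usage sketch (not part of the module sources): the handler plugs into the JDK's built-in com.sun.net.httpserver server. The port and context path below are illustrative assumptions, not values taken from this module.

import com.sun.net.httpserver.HttpServer;
import java.net.InetSocketAddress;
import water.clustering.api.H2OClusterStatusEndpoint;

public class ClusterStatusServerSketch {
    public static void main(String[] args) throws Exception {
        HttpServer server = HttpServer.create(new InetSocketAddress(8080), 0); // port is an assumption
        server.createContext("/cluster/status", new H2OClusterStatusEndpoint()); // path is an assumption
        server.start();
        // GET /cluster/status returns 200 with the JSON node list once the cloud
        // matches the flat file, and 204 while the cluster is still forming.
    }
}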
// File: java-sources/ai/h2o/h2o-clustering/3.46.0.7/water/clustering/api/HttpResponses.java
package water.clustering.api;
import com.sun.net.httpserver.HttpExchange;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
public class HttpResponses {
public static final String MIME_TYPE_TEXT_PLAIN = "text/plain";
public static final String MIME_TYPE_JSON = "application/json";
public static final String GET_METHOD = "GET";
public static final String POST_METHOD = "POST";
public static void newResponseCodeOnlyResponse(HttpExchange httpExchange, int responseCode) throws IOException {
httpExchange.getResponseHeaders().set("Content-Type", MIME_TYPE_TEXT_PLAIN);
httpExchange.sendResponseHeaders(responseCode, -1);
httpExchange.close();
}
public static void newFixedLengthResponse(HttpExchange httpExchange, int responseCode, String mimeType, String response) throws IOException {
byte[] responseBytes = response.getBytes(StandardCharsets.UTF_8);
httpExchange.getResponseHeaders().set("Content-Type", mimeType);
httpExchange.sendResponseHeaders(responseCode, responseBytes.length);
try (OutputStream os = httpExchange.getResponseBody()) {
os.write(responseBytes);
}
}
}
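
A minimal sketch of a handler built from these helpers; the endpoint path and payload are illustrative, not part of the module.

import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import java.io.IOException;
import java.net.HttpURLConnection;
import water.clustering.api.HttpResponses;

class PingHandler implements HttpHandler {
    @Override
    public void handle(HttpExchange exchange) throws IOException {
        if (!HttpResponses.GET_METHOD.equals(exchange.getRequestMethod())) {
            // Anything but GET gets a body-less 405.
            HttpResponses.newResponseCodeOnlyResponse(exchange, HttpURLConnection.HTTP_BAD_METHOD);
            return;
        }
        HttpResponses.newFixedLengthResponse(exchange, HttpURLConnection.HTTP_OK,
                HttpResponses.MIME_TYPE_JSON, "{\"status\": \"ok\"}");
    }
}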
// File: java-sources/ai/h2o/h2o-core/3.46.0.7/hex/AUC2.java
package hex;
import water.Iced;
import water.MRTask;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.Chunk;
import water.fvec.Vec;
import water.util.Log;
import water.util.fp.Function;
import java.util.Arrays;
import static hex.AUC2.ThresholdCriterion.*;
/** One-pass approximate AUC
*
* This algorithm can compute the AUC in 1-pass with good resolution. During
* the pass, it builds an online histogram of the probabilities up to the
* resolution (number of bins) asked for. It also computes the true-positive
* and false-positive counts for the histogrammed thresholds. With these in
* hand, we can compute the TPR (True Positive Rate) and the FPR for the given
* thresholds; these define the (X,Y) coordinates of the AUC.
*/
public class AUC2 extends Iced {
public final int _nBins; // Max number of bins; can be less if there are fewer points
public final double[] _ths; // Thresholds
public final double[] _tps; // True Positives
public final double[] _fps; // False Positives
public final double _p, _n; // Actual trues, falses
public double _auc, _gini, _pr_auc; // Actual AUC value
public final int _max_idx; // Threshold that maximizes the default criterion
public static final ThresholdCriterion DEFAULT_CM = ThresholdCriterion.f1;
// Default number of bins; gives good answers on highly unbalanced sorted
// (and reverse-sorted) datasets
public static final int NBINS = 400;
/** Criteria for 2-class Confusion Matrices
*
* This is an Enum class, with an exec() function to compute the criteria
* from the basic parts, and from an AUC2 at a given threshold index.
*/
public enum ThresholdCriterion {
f1(false) { @Override double exec( double tp, double fp, double fn, double tn ) {
final double prec = precision.exec(tp,fp,fn,tn);
final double recl = tpr .exec(tp,fp,fn,tn);
return 2. * (prec * recl) / (prec + recl);
} },
f2(false) { @Override double exec( double tp, double fp, double fn, double tn ) {
final double prec = precision.exec(tp,fp,fn,tn);
final double recl = tpr .exec(tp,fp,fn,tn);
return 5. * (prec * recl) / (4. * prec + recl);
} },
f0point5(false) { @Override double exec( double tp, double fp, double fn, double tn ) {
final double prec = precision.exec(tp,fp,fn,tn);
final double recl = tpr .exec(tp,fp,fn,tn);
return 1.25 * (prec * recl) / (.25 * prec + recl);
} },
accuracy(false) { @Override double exec( double tp, double fp, double fn, double tn ) { return (tn+tp)/(tp+fn+tn+fp); } },
precision(false) { @Override double exec( double tp, double fp, double fn, double tn ) { return tp/(tp+fp); } },
recall(false) { @Override double exec( double tp, double fp, double fn, double tn ) { return tp/(tp+fn); } },
specificity(false) { @Override double exec( double tp, double fp, double fn, double tn ) { return tn/(tn+fp); } },
absolute_mcc(false) { @Override double exec( double tp, double fp, double fn, double tn ) {
double mcc = (tp*tn - fp*fn);
if (mcc == 0) return 0;
mcc /= Math.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn));
// Because tp and tn are doubles, the MCC can come out slightly greater than 1, e.g. 1.000000000000002
double eps = 1e-10;
double absMcc = Math.abs(mcc);
assert(absMcc <= 1. + eps) : "Absolute mcc is greater than 1: mcc="+absMcc+" tp="+tp + " fp=" + fp + " fn=" + fn + " tn=" + tn;
if(absMcc > 1){
return 1;
}
return absMcc;
} },
// minimize max-per-class-error by maximizing min-per-class-accuracy.
// Report from max_criterion is the smallest correct rate for both classes.
// The max min-error-rate is 1.0 minus that.
min_per_class_accuracy(false) { @Override double exec( double tp, double fp, double fn, double tn ) {
return Math.min(tp/(tp+fn),tn/(tn+fp));
} },
mean_per_class_accuracy(false) { @Override double exec( double tp, double fp, double fn, double tn ) {
return 0.5*(tp/(tp+fn) + tn/(tn+fp));
} },
tns(true ) { @Override double exec( double tp, double fp, double fn, double tn ) { return tn; } },
fns(true ) { @Override double exec( double tp, double fp, double fn, double tn ) { return fn; } },
fps(true ) { @Override double exec( double tp, double fp, double fn, double tn ) { return fp; } },
tps(true ) { @Override double exec( double tp, double fp, double fn, double tn ) { return tp; } },
tnr(false) { @Override double exec( double tp, double fp, double fn, double tn ) { return tn/(fp+tn); } },
fnr(false) { @Override double exec( double tp, double fp, double fn, double tn ) { return fn/(fn+tp); } },
fpr(false) { @Override double exec( double tp, double fp, double fn, double tn ) { return fp/(fp+tn); } },
tpr(false) { @Override double exec( double tp, double fp, double fn, double tn ) { return tp/(tp+fn); } },
;
public final boolean _isInt; // Integral-Valued data vs Real-Valued
ThresholdCriterion(boolean isInt) { _isInt = isInt; }
/** @param tp True Positives (predicted true, actual true )
* @param fp False Positives (predicted true, actual false)
* @param fn False Negatives (predicted false, actual true )
* @param tn True Negatives (predicted false, actual false)
* @return criteria */
abstract double exec( double tp, double fp, double fn, double tn );
public double exec( AUC2 auc, int idx ) { return exec(auc.tp(idx),auc.fp(idx),auc.fn(idx),auc.tn(idx)); }
public double max_criterion( AUC2 auc ) { return exec(auc,max_criterion_idx(auc)); }
/** Convert a criterion into a threshold index that maximizes the criterion
* @return Threshold index that maximizes the criterion
*/
public int max_criterion_idx( AUC2 auc ) {
double md = -Double.MAX_VALUE;
int mx = -1;
for( int i=0; i<auc._nBins; i++ ) {
double d = exec(auc,i);
if( d > md ) {
md = d;
mx = i;
}
}
return mx;
}
public static final ThresholdCriterion[] VALUES = values();
public static AUC2.ThresholdCriterion fromString(String strRepr) {
for (ThresholdCriterion tc : ThresholdCriterion.values()) {
if (tc.toString().equalsIgnoreCase(strRepr)) {
return tc;
}
}
return null;
}
} // public enum ThresholdCriterion
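// A quick worked example of the criteria above, with illustrative counts tp=40, fp=10, fn=20, tn=30:
//   precision    = 40/(40+10) = 0.80
//   recall (tpr) = 40/(40+20) ~= 0.667
//   f1           = 2 * (0.80 * 0.667) / (0.80 + 0.667) ~= 0.727
//   accuracy     = (40+30)/(40+10+20+30) = 0.70
// e.g. ThresholdCriterion.precision.exec(40, 10, 20, 30) returns 0.80.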
public double threshold( int idx ) { return _ths[idx]; }
public double tp( int idx ) { return _tps[idx]; }
public double fp( int idx ) { return _fps[idx]; }
public double tn( int idx ) { return _n-_fps[idx]; }
public double fn( int idx ) { return _p-_tps[idx]; }
/** @return maximum F1 */
public double maxF1() { return ThresholdCriterion.f1.max_criterion(this); }
public Function<Integer, Double> forCriterion(final ThresholdCriterion tc) {
return new Function<Integer, Double>() {
public Double apply(Integer i) {
return tc.exec(AUC2.this, i);
}
};
}
/** Default number of bins; gives good answers on highly unbalanced sorted
* (and reverse-sorted) datasets */
public AUC2( Vec probs, Vec actls ) { this(NBINS,probs,actls); }
/** User-specified bin limits. Time taken is product of nBins and rows;
* large nBins can be very slow. */
AUC2( int nBins, Vec probs, Vec actls ) { this(new AUC_Impl(nBins).doAll(probs,actls)._bldr); }
public AUC2( AUCBuilder bldr ) {
this(bldr, true);
}
private AUC2( AUCBuilder bldr, boolean trueProbabilities ) {
// Copy result arrays into base object, shrinking to match actual bins
_nBins = bldr._n;
assert _nBins >= 1 : "Must have >= 1 bins for AUC calculation, but got " + _nBins;
assert trueProbabilities || bldr._ths[_nBins - 1] == 1 : "Bins need to contain pred = 1 when 0-1 probabilities are used";
_ths = Arrays.copyOf(bldr._ths,_nBins);
_tps = Arrays.copyOf(bldr._tps,_nBins);
_fps = Arrays.copyOf(bldr._fps,_nBins);
// Reverse everybody; thresholds from 1 down to 0, easier to read
for( int i=0; i<((_nBins)>>1); i++ ) {
double tmp= _ths[i]; _ths[i] = _ths[_nBins-1-i]; _ths[_nBins-1-i] = tmp ;
double tmpt = _tps[i]; _tps[i] = _tps[_nBins-1-i]; _tps[_nBins-1-i] = tmpt;
double tmpf = _fps[i]; _fps[i] = _fps[_nBins-1-i]; _fps[_nBins-1-i] = tmpf;
}
// Rollup counts, so that computing the rates is easier.
// The AUC is (TPR,FPR) as the thresholds roll about
double p=0, n=0;
for( int i=0; i<_nBins; i++ ) {
p += _tps[i]; _tps[i] = p;
n += _fps[i]; _fps[i] = n;
}
_p = p; _n = n;
if (trueProbabilities) {
_auc = compute_auc();
_pr_auc = pr_auc();
_gini = 2 * _auc - 1;
_max_idx = DEFAULT_CM.max_criterion_idx(this);
} else {
_auc = Double.NaN;
_pr_auc = Double.NaN;
_gini = Double.NaN;
_max_idx = 0;
}
}
private AUC2( AUC2 auc, int idx) {
_nBins = 1;
_ths = new double[]{auc._ths[idx]};
_tps = new double[]{auc._tps[idx]};
_fps = new double[]{auc._fps[idx]};
_p = auc._p;
_n = auc._n;
_auc = auc._auc;
_pr_auc = auc._pr_auc;
_gini = auc._gini;
_max_idx = auc._max_idx >= 0 ? 0 : -1;
}
/**
* Subsets the AUC values to a single bin corresponding to the threshold that maximizes the default criterion.
* @return AUC2 instance if there is a threshold that maximizes the default criterion, null otherwise
*/
AUC2 restrictToMaxCriterion() {
return _max_idx >= 0 ? new AUC2(this, _max_idx) : null;
}
/**
* Creates an instance of AUC2 for classifiers that do not return probabilities, only 0-1.
* AUC, PR_AUC, and Gini index will be undefined in this case.
* @param bldr AUCBuilder
* @return instance of AUC2 restricted to a single bin; can be used to create a confusion matrix for the classifier
* and allows ThresholdCriterion to calculate metrics.
*/
public static AUC2 make01AUC(AUCBuilder bldr) {
bldr.perRow(1, 0, 0); // trick: add a dummy prediction with 0 weight to make sure the bins always contain pred = 1
return new AUC2(bldr, false).restrictToMaxCriterion();
}
// empty AUC, helps avoid NPE in edge cases
AUC2() {
_nBins = 0;
_ths = _tps = _fps = new double[0];
_p =_n = 0;
_auc = _gini = _pr_auc = Double.NaN;
_max_idx = -1;
}
/**
* Creates a dummy AUC2 instance with no metrics, meant to prevent possible NPEs
* @return a valid AUC2 instance
*/
public static AUC2 emptyAUC() {
return new AUC2();
}
public boolean isEmpty() {
return _nBins == 0;
}
// Checks that recall is a non-decreasing function
void checkRecallValidity() {
double x0 = recall.exec(this, 0);
for (int i = 1; i < _nBins; i++) {
double x1 = recall.exec(this, i);
if (x0 > x1)
throw new H2OIllegalArgumentException(String.valueOf(i), "recall", x0 + " > " + x1);
x0 = x1;
}
}
// Compute the Area Under the Curve, where the curve is defined by (TPR,FPR)
// points. TPR and FPR are monotonically increasing from 0 to 1.
private double compute_auc() {
if (_fps[_nBins-1] == 0) return 1.0; //special case
if (_tps[_nBins-1] == 0) return 0.0; //special case
// All math is computed scaled by TP and FP. We'll descale once at the
// end. Trapezoids from (tps[i-1],fps[i-1]) to (tps[i],fps[i])
double tp0 = 0, fp0 = 0;
double area = 0;
for( int i=0; i<_nBins; i++ ) {
area += (_fps[i]-fp0)*(_tps[i]+tp0)/2.0; // Trapezoid
tp0 = _tps[i]; fp0 = _fps[i];
}
// Descale
return area/_p/_n;
}
/**
* Compute the Area under Precision-Recall Curves using TPs and FPs.
* TPs and FPs are monotonically increasing.
* Calculation inspired by the XGBoost implementation:
* https://github.com/dmlc/xgboost/blob/master/src/metric/rank_metric.cc#L566-L591
* @return area under the Precision-Recall curve
*/
public double pr_auc() {
if (isEmpty()) {
return Double.NaN;
}
checkRecallValidity();
if (_fps[_nBins-1] == 0) return 1.0;
if (_tps[_nBins-1] == 0) return 0.0;
double area = 0.0;
assert _p > 0 && _n > 0 : "AUC-PR calculation error, sum of positives and sum of negatives should be greater than zero.";
double tp, prevtp = 0.0, fp, prevfp = 0.0, tpp, prevtpp, h, a, b;
for (int j = 0; j < _nBins; j++) {
tp = _tps[j];
fp = _fps[j];
if (tp == prevtp) {
a = 1.0;
b = 0.0;
} else {
h = (fp - prevfp) / (tp - prevtp);
a = 1.0 + h;
b = (prevfp - h * prevtp) / _p;
}
tpp = tp / _p;
prevtpp = prevtp / _p;
if (0.0 != b) {
area += (tpp - prevtpp -
b / a * (Math.log(a * tpp + b) -
Math.log(a * prevtpp + b))) / a;
} else {
area += (tpp - prevtpp) / a;
}
prevtp = tp;
prevfp = fp;
}
return area;
}
// Build a CM for a threshold index. - typed as doubles because of double observation weights
public double[/*actual*/][/*predicted*/] buildCM( int idx ) {
// \ predicted: 0 1
// actual 0: TN FP
// 1: FN TP
return new double[][]{{tn(idx),fp(idx)},{fn(idx),tp(idx)}};
}
/** @return the default CM, or null for an empty AUC */
public double[/*actual*/][/*predicted*/] defaultCM( ) { return _max_idx == -1 ? null : buildCM(_max_idx); }
/** @return the CM that corresponds to a bin's index which brings max value for a given {@code criterion} */
public double[/*actual*/][/*predicted*/] cmByCriterion( ThresholdCriterion criterion) {
int maxIdx = criterion.max_criterion_idx(this);
return buildCM(maxIdx);
}
/** @return the default threshold; threshold that maximizes the default criterion */
public double defaultThreshold( ) { return _max_idx == -1 ? 0.5 : _ths[_max_idx]; }
/** @return the error of the default CM */
public double defaultErr( ) { return _max_idx == -1 ? Double.NaN : (fp(_max_idx)+fn(_max_idx))/(_p+_n); }
// Compute an online histogram of the predicted probabilities, along with
// true positive and false positive totals in each histogram bin.
private static class AUC_Impl extends MRTask<AUC_Impl> {
final int _nBins;
AUCBuilder _bldr;
AUC_Impl( int nBins ) { _nBins = nBins; }
@Override public void map( Chunk ps, Chunk as ) {
AUCBuilder bldr = _bldr = new AUCBuilder(_nBins);
for( int row = 0; row < ps._len; row++ )
if( !ps.isNA(row) && !as.isNA(row) )
bldr.perRow(ps.atd(row),(int)as.at8(row),1);
}
@Override public void reduce( AUC_Impl auc ) { _bldr.reduce(auc._bldr); }
}
public static class AUCBuilder extends Iced {
final int _nBins;
int _n; // Current number of bins
final double _ths[]; // Histogram bins, center
final double _sqe[]; // Histogram bins, squared error
final double _tps[]; // Histogram bins, true positives
final double _fps[]; // Histogram bins, false positives
// Merging this bin with the next gives the least increase in squared
// error, or -1 if not known. Requires a linear scan to find.
int _ssx;
private boolean _useFastPath = true; // disabled only in unit tests, to exercise the slow insert path as well
public AUCBuilder(int nBins) {
_nBins = nBins;
_ths = new double[nBins<<1]; // Threshold; also the mean for this bin
_sqe = new double[nBins<<1]; // Squared error (variance) in this bin
_tps = new double[nBins<<1]; // True positives
_fps = new double[nBins<<1]; // False positives
_ssx = -1; // Unknown best merge bin
}
// Intended for unit tests only
AUCBuilder(int nBins, boolean useFastPath) {
this(nBins);
_useFastPath = useFastPath;
}
public void perRow(double pred, int act, double w ) {
// Insert the prediction into the set of histogram bins in sorted order, as
// if it's a new histogram bin with a count of 1.
assert !Double.isNaN(pred);
assert act==0 || act==1; // Actual better be 0 or 1
assert !Double.isNaN(w) && !Double.isInfinite(w);
int idx = Arrays.binarySearch(_ths,0,_n,pred);
if( idx >= 0 ) { // Found already in histogram; merge results
if( act==0 ) _fps[idx]+=w; else _tps[idx]+=w; // One more count; no change in squared error
_ssx = -1; // Blows the known best merge
return;
}
idx = -idx-1; // Get index to insert at
// If already full bins, try to instantly merge into an existing bin
if (_n == _nBins &&
_useFastPath && // Optimization enabled
idx > 0 && idx < _n && // Give up for the corner cases
_ths[idx - 1] != _ths[idx]) // Histogram has duplicates (mergeOneBin will get rid of them)
{ // Need to merge to shrink things
final int ssx = find_smallest();
double dssx = _sqe[ssx] + _sqe[ssx+1] + compute_delta_error(_ths[ssx+1], k(ssx+1), _ths[ssx], k(ssx));
// See if this point will fold into either the left or right bin
// immediately. This is the desired fast-path.
double d0 = _sqe[idx-1] + compute_delta_error(pred,w,_ths[idx-1],k(idx-1));
double d1 = _sqe[idx] + compute_delta_error(_ths[idx],k(idx),pred,w);
if (d0 < dssx || d1 < dssx) {
if (d0 <= d1) idx--; // Pick correct bin
if (ssx == idx-1 || ssx == idx)
_ssx = -1; // We don't know the minimum anymore
double k = k(idx);
if (act == 0) _fps[idx] += w; else _tps[idx] += w;
_sqe[idx] = _sqe[idx] + compute_delta_error(pred, w, _ths[idx], k);
_ths[idx] = combine_centers(_ths[idx], k, pred, w);
return;
}
}
// Must insert this point as its own threshold (the slow path, rather than an
// instant merge), either because we have too few bins or because we cannot
// instantly merge the new point into an existing bin.
if (idx == 0 || idx == _n || // Just because we didn't bother to deal with the corner cases ^^^
idx == _ssx) _ssx = -1; // Smallest error becomes one of the splits
else if( idx < _ssx ) _ssx++; // Smallest error will slide right 1
// Slide over to do the insert. Horrible slowness.
System.arraycopy(_ths,idx,_ths,idx+1,_n-idx);
System.arraycopy(_sqe,idx,_sqe,idx+1,_n-idx);
System.arraycopy(_tps,idx,_tps,idx+1,_n-idx);
System.arraycopy(_fps,idx,_fps,idx+1,_n-idx);
// Insert into the histogram
_ths[idx] = pred; // New histogram center
_sqe[idx] = 0; // Only 1 point, so no squared error
if( act==0 ) { _tps[idx]=0; _fps[idx]=w; }
else { _tps[idx]=w; _fps[idx]=0; }
_n++;
if( _n > _nBins ) // Merge as needed back down to nBins
mergeOneBin(); // Merge best pair of bins
}
public void reduce( AUCBuilder bldr ) {
// Merge sort the 2 sorted lists into the double-sized arrays. The tail
// half of the double-sized array is unused, but the front half is
// probably a source. Merge into the back.
int x= _n-1;
int y=bldr._n-1;
while( x+y+1 >= 0 ) {
boolean self_is_larger = y < 0 || (x >= 0 && _ths[x] >= bldr._ths[y]);
AUCBuilder b = self_is_larger ? this : bldr;
int idx = self_is_larger ? x : y ;
_ths[x+y+1] = b._ths[idx];
_sqe[x+y+1] = b._sqe[idx];
_tps[x+y+1] = b._tps[idx];
_fps[x+y+1] = b._fps[idx];
if( self_is_larger ) x--; else y--;
}
_n += bldr._n;
_ssx = -1; // We no longer know what bin has the smallest error
// Merge elements with least squared-error increase until we get fewer
// than _nBins and no duplicates. May require many merges.
while( _n > _nBins || dups() )
mergeOneBin();
}
static double combine_centers(double ths1, double n1, double ths0, double n0) {
double center = (ths0 * n0 + ths1 * n1) / (n0 + n1);
if (Double.isNaN(center) || Double.isInfinite(center)) {
// use a simple average as a fallback
return (ths0 + ths1) / 2;
}
return center;
}
private void mergeOneBin( ) {
// Too many bins; must merge bins. Merge into bins with least total
// squared error. Horrible slowness linear arraycopy.
int ssx = find_smallest();
// Merge two bins. Classic bins merging by averaging the histogram
// centers based on counts.
double k0 = k(ssx);
double k1 = k(ssx+1);
_sqe[ssx] = _sqe[ssx]+_sqe[ssx+1]+compute_delta_error(_ths[ssx+1],k1,_ths[ssx],k0);
_ths[ssx] = combine_centers(_ths[ssx], k0, _ths[ssx+1], k1);
_tps[ssx] += _tps[ssx+1];
_fps[ssx] += _fps[ssx+1];
// Slide over to crush the removed bin at index (ssx+1)
System.arraycopy(_ths,ssx+2,_ths,ssx+1,_n-ssx-2);
System.arraycopy(_sqe,ssx+2,_sqe,ssx+1,_n-ssx-2);
System.arraycopy(_tps,ssx+2,_tps,ssx+1,_n-ssx-2);
System.arraycopy(_fps,ssx+2,_fps,ssx+1,_n-ssx-2);
_n--;
_ssx = -1;
}
// Find the pair of bins that when combined give the smallest increase in
// squared error. Dups never increase squared error.
//
// I tried code for merging bins with keeping the bins balanced in size,
// but this leads to bad errors if the probabilities are sorted. Also
// tried the original: merge bins with the least distance between bin
// centers. Same problem for sorted data.
private int find_smallest() {
if( _ssx == -1 ) {
_ssx = find_smallest_impl();
assert _ssx != -1 : toDebugString();
}
return _ssx;
}
private String toDebugString() {
return "_ssx = " + _ssx +
"; n = " + _n +
"; ths = " + Arrays.toString(_ths) +
"; tps = " + Arrays.toString(_tps) +
"; fps = " + Arrays.toString(_fps) +
"; sqe = " + Arrays.toString(_sqe);
}
private int find_smallest_impl() {
if (_n == 1)
return 0;
double minSQE = Double.MAX_VALUE;
int minI = -1;
int n = _n;
for( int i=0; i<n-1; i++ ) {
double derr = compute_delta_error(_ths[i+1],k(i+1),_ths[i],k(i));
if( derr == 0 ) return i; // Dup; no increase in SQE so return immediately
double sqe = _sqe[i]+_sqe[i+1]+derr;
if( sqe < minSQE ) {
minI = i; minSQE = sqe;
}
}
if (minI == -1) {
// we couldn't find any bins to merge based on SE (the math can produce Double.POSITIVE_INFINITY or Double.NaN);
// fall back to a simple distance between the bin centers
minI = 0;
double minDist = _ths[1] - _ths[0];
for (int i = 1; i < n - 1; i++) {
double dist = _ths[i + 1] - _ths[i];
if (dist < minDist) {
minDist = dist;
minI = i;
}
}
}
return minI;
}
private boolean dups() {
int n = _n;
for( int i=0; i<n-1; i++ ) {
double derr = compute_delta_error(_ths[i+1],k(i+1),_ths[i],k(i));
if( derr == 0 ) { _ssx = i; return true; }
}
return false;
}
private double compute_delta_error( double ths1, double n1, double ths0, double n0 ) {
// If thresholds vary by less than a float ULP, treat them as the same.
// Some models only output predictions to within float accuracy (so a
// variance here is junk), and also it's not statistically sane to have
// a model which varies predictions by such a tiny change in thresholds.
double delta = (float)ths1-(float)ths0;
if (delta == 0)
return 0;
// Parallel equation drawn from:
// http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
return delta*delta*n0*n1 / (n0+n1);
}
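// Worked example (illustrative weights): merging a bin centered at 0.30 carrying weight 5
// into a neighbor at 0.31 carrying weight 3 costs
//   delta^2 * n0*n1/(n0+n1) = (0.01)^2 * (5*3)/(5+3) = 1.875e-4
// in added squared error; identical centers (delta == 0) merge for free.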
private double k( int idx ) { return _tps[idx]+_fps[idx]; }
}
// ==========
// Given the probabilities of a 1, and the actuals (0/1) report the perfect
// AUC found by sorting the entire dataset. Expensive, and only works for
// smaller data (hundreds of millions of observations).
public static double perfectAUC( Vec vprob, Vec vacts ) {
if( vacts.min() < 0 || vacts.max() > 1 || !vacts.isInt() )
throw new IllegalArgumentException("Actuals must be either 0 or 1");
if( vprob.min() < 0 || vprob.max() > 1 )
throw new IllegalArgumentException("Probabilities must be between 0 and 1");
Vec.Reader rprob = vprob.new Reader();
Vec.Reader racts = vacts.new Reader();
final int posCnt = (int) vacts.nzCnt();
final int negCnt = (int) (vacts.length() - posCnt);
double[] posProbs = new double[posCnt];
double[] negProbs = new double[negCnt];
int pc = 0;
int nc = 0;
for (int i = 0; i < posCnt + negCnt; i++) {
byte actual = (byte) racts.at8(i);
double prob = rprob.at(i);
if (actual == 1) {
posProbs[pc++] = prob;
} else {
negProbs[nc++] = prob;
}
}
assert pc == posProbs.length;
assert nc == negProbs.length;
return perfectAUCFromComponents(negProbs, posProbs);
}
static double perfectAUC(double ds[], double[] acts) {
int posCnt = 0;
for (double act : acts) {
if (act == 1.0d)
posCnt++;
}
double[] posProbs = new double[posCnt];
double[] negProbs = new double[acts.length - posCnt];
int pi = 0;
int ni = 0;
for (int i = 0; i < acts.length; i++) {
if (acts[i] == 1.0d)
posProbs[pi++] = ds[i];
else
negProbs[ni++] = ds[i];
}
return perfectAUCFromComponents(negProbs, posProbs);
}
private static double perfectAUCFromComponents(double[] negProbs, double[] posProbs) {
Arrays.sort(posProbs);
Arrays.sort(negProbs);
double[] probs = new double[negProbs.length + posProbs.length];
byte[] acts = new byte[probs.length];
int pi = 0;
int ni = 0;
for (int i = 0; i < probs.length; i++) {
boolean takeNeg = pi == posProbs.length || (ni < negProbs.length && negProbs[ni] <= posProbs[pi]);
if (takeNeg) {
probs[i] = negProbs[ni++];
acts[i] = 0;
} else {
probs[i] = posProbs[pi++];
acts[i] = 1;
}
}
return perfectAUC(probs, acts);
}
private static double perfectAUC(double[] sortedProbs, byte[] sortedActs) {
// Compute Area Under Curve.
// All math is computed scaled by TP and FP. We'll descale once at the
// end. Trapezoids from (tps[i-1],fps[i-1]) to (tps[i],fps[i])
int tp0=0, fp0=0, tp1=0, fp1=0;
double prob = 1.0;
double area = 0;
for (int i = sortedProbs.length - 1; i >= 0; i--) {
if( sortedProbs[i]!=prob ) { // Tied probabilities: build a diagonal line
area += (fp1-fp0)*(tp1+tp0)/2.0; // Trapezoid
tp0 = tp1; fp0 = fp1;
prob = sortedProbs[i];
}
if( sortedActs[i]==1 ) tp1++; else fp1++;
}
area += (double)tp0*(fp1-fp0); // Trapezoid: Rectangle +
area += (double)(tp1-tp0)*(fp1-fp0)/2.0; // Right Triangle
// Descale
return area/tp1/fp1;
}
}
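
A minimal sketch of driving AUC2 directly, without the MRTask wrapper; the predictions and labels are made-up numbers.

import hex.AUC2;

public class AUC2Sketch {
    public static void main(String[] args) {
        double[] preds = {0.1, 0.4, 0.35, 0.8};
        int[] acts = {0, 0, 1, 1};
        AUC2.AUCBuilder bldr = new AUC2.AUCBuilder(AUC2.NBINS);
        for (int i = 0; i < preds.length; i++)
            bldr.perRow(preds[i], acts[i], 1.0); // weight 1 per row
        AUC2 auc = new AUC2(bldr);
        System.out.println("AUC               = " + auc._auc);
        System.out.println("max F1            = " + auc.maxF1());
        System.out.println("default threshold = " + auc.defaultThreshold());
    }
}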
// File: java-sources/ai/h2o/h2o-core/3.46.0.7/hex/AUUC.java
package hex;
import hex.quantile.Quantile;
import hex.quantile.QuantileModel;
import water.DKV;
import water.Iced;
import water.Key;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.ArrayUtils;
import java.util.Arrays;
import java.util.Iterator;
import java.util.TreeSet;
/**
* Object to calculate uplift curve and area under uplift curve
*/
public class AUUC extends Iced {
public final int _nBins; // max number of bins; can be less if there are fewer points
public final int _maxIdx; // index of the bin that maximizes uplift
public final double[] _ths; // threshold of predictions created based on quantile computation
public final long[] _treatment; // treatments
public final long[] _control; // controls
public final long[] _yTreatment; // treatment group and y==1
public final long[] _yControl; // control group and y==1
public final long[] _frequency; // number of rows in each bin
public final long[] _frequencyCumsum; // cumulative sum of frequency to plot AUUC
public double[][] _uplift; // output uplift values
public double[][] _upliftRandom; // output random uplift values
public double[][] _upliftNormalized; // output normalized uplift values
public final long _n; // total number of rows
public static final int NBINS = 1000;
public final AUUCType _auucType; // default auuc metric
public final int _auucTypeIndx; // default auuc metric index
public double[] _auucs; // areas under the uplift curve for all metrics
public double[] _auucsRandom; // areas under random uplift curve for all metrics
public double[] _aecu; // average excess cumulative uplift (auuc - auuc random)
public double[] _auucsNormalized; // normalized auuc
public double threshold( int idx ) { return _ths[idx]; }
public long treatment( int idx ) { return _treatment[idx]; }
public long control( int idx ) { return _control[idx]; }
public long yTreatment( int idx ) { return _yTreatment[idx]; }
public long yControl( int idx ) { return _yControl[idx]; }
public long frequency( int idx ) { return _frequency[idx]; }
public double uplift( int idx) { return _uplift[_auucTypeIndx][idx]; }
private int getIndexByAUUCType(AUUCType type){
return ArrayUtils.find(AUUC.AUUCType.VALUES, type);
}
public double[] upliftByType(AUUCType type){
int idx = getIndexByAUUCType(type);
return idx < 0 ? null : _uplift[idx];
}
public double[] upliftNormalizedByType(AUUCType type){
int idx = getIndexByAUUCType(type);
return idx < 0 ? null : _upliftNormalized[idx];
}
public double[] upliftRandomByType(AUUCType type){
int idx = getIndexByAUUCType(type);
return idx < 0 ? null : _upliftRandom[idx];
}
public AUUC(Vec probs, Vec y, Vec uplift, AUUCType auucType, int nbins) {
this(new AUUCImpl(calculateQuantileThresholds(nbins, probs)).doAll(probs, y, uplift)._bldr, auucType);
}
public AUUC(AUUCBuilder bldr, AUUCType auucType) {
this(bldr, true, auucType);
}
public AUUC(double[] customThresholds, Vec probs, Vec y, Vec uplift, AUUCType auucType) {
this(new AUUCImpl(customThresholds).doAll(probs, y, uplift)._bldr, auucType);
}
public AUUC(AUUCBuilder bldr, boolean trueProbabilities, AUUCType auucType) {
_auucType = auucType;
_auucTypeIndx = getIndexByAUUCType(_auucType);
_nBins = bldr._nBins;
//assert _nBins >= 1 : "Must have >= 1 bins for AUUC calculation, but got " + _nBins;
if (_nBins > 0) {
assert trueProbabilities || bldr._thresholds[_nBins - 1] == 1 : "Bins need to contain pred = 1 when 0-1 probabilities are used";
_n = bldr._n;
_ths = Arrays.copyOf(bldr._thresholds, _nBins);
_treatment = Arrays.copyOf(bldr._treatment, _nBins);
_control = Arrays.copyOf(bldr._control, _nBins);
_yTreatment = Arrays.copyOf(bldr._yTreatment, _nBins);
_yControl = Arrays.copyOf(bldr._yControl, _nBins);
_frequency = Arrays.copyOf(bldr._frequency, _nBins);
_frequencyCumsum = Arrays.copyOf(bldr._frequency, _nBins);
_uplift = new double[AUUCType.values().length][_nBins];
_upliftRandom = new double[AUUCType.values().length][_nBins];
_upliftNormalized = new double[AUUCType.values().length][_nBins];
// Rollup counts
long tmpt = 0, tmpc = 0, tmptp = 0, tmpcp = 0, tmpf = 0;
for (int i = 0; i < _nBins; i++) {
tmpt += _treatment[i];
_treatment[i] = tmpt;
tmpc += _control[i];
_control[i] = tmpc;
tmptp += _yTreatment[i];
_yTreatment[i] = tmptp;
tmpcp += _yControl[i];
_yControl[i] = tmpcp;
tmpf += _frequencyCumsum[i];
_frequencyCumsum[i] = tmpf;
}
// these methods need to be called in this order
setUplift();
setUpliftRandom();
setUpliftNormalized();
if (trueProbabilities) {
_auucs = computeAuucs();
_auucsRandom = computeAuucsRandom();
_aecu = computeAecu();
_auucsNormalized = computeAuucsNormalized();
_maxIdx = _auucType.maxCriterionIdx(this);
} else {
_maxIdx = 0;
}
} else {
_maxIdx = -1;
_n = 0;
_ths = null;
_treatment = null;
_control = null;
_yTreatment = null;
_yControl = null;
_frequency = null;
_frequencyCumsum = null;
_uplift = null;
_upliftRandom = null;
_upliftNormalized = null;
}
}
public void setUplift(){
for(int i=0; i<AUUCType.VALUES.length; i++) {
for (int j = 0; j < _nBins; j++) {
_uplift[i][j] = AUUCType.VALUES[i].exec(this, j);
}
}
for(int i=0; i<AUUCType.VALUES.length; i++) {
if (_uplift[i].length == 1 && Double.isNaN(_uplift[i][0])) {
_uplift[i][0] = 0;
} else {
ArrayUtils.interpolateLinear(_uplift[i]);
}
}
}
public void setUpliftRandom(){
for(int i=0; i<AUUCType.VALUES.length; i++) {
int maxIndex = _nBins-1;
double a = _uplift[i][maxIndex]/_frequencyCumsum[maxIndex];
for (int j = 0; j < _nBins; j++) {
_upliftRandom[i][j] = a * _frequencyCumsum[j];
}
}
}
public void setUpliftNormalized(){
for(int i=0; i<AUUCType.VALUES.length; i++) {
int maxIndex = _nBins - 1;
int liftIndex = getIndexByAUUCType(AUUCType.lift);
double a = i == liftIndex || _uplift[i][maxIndex] == 0 ? 1 : Math.abs(_uplift[i][maxIndex]);
for (int j = 0; j < _nBins; j++) {
_upliftNormalized[i][j] = _uplift[i][j] / a;
}
}
}
public AUUC() {
_nBins = 0;
_n = 0;
_ths = new double[0];
_treatment = _control = _yTreatment = _yControl = _frequency = _frequencyCumsum = new long[0];
_auucs = new double[AUUCType.VALUES.length];
Arrays.fill(_auucs, Double.NaN);
_auucsNormalized = new double[AUUCType.VALUES.length];
Arrays.fill(_auucsNormalized, Double.NaN);
_auucsRandom = new double[AUUCType.VALUES.length];
Arrays.fill(_auucsRandom, Double.NaN);
_aecu = new double[AUUCType.VALUES.length];
Arrays.fill(_aecu, Double.NaN);
_maxIdx = -1;
_auucType = AUUCType.AUTO;
_auucTypeIndx = getIndexByAUUCType(_auucType);
_uplift = new double[AUUCType.values().length][];
_upliftNormalized = new double[AUUCType.values().length][];
_upliftRandom = new double[AUUCType.values().length][];
}
public AUUC(double[] ths, long[] freq, double[] auuc, double[] auucNorm, double[] auucRand, double[] aecu,
AUUCType auucType, double[][] uplift, double[][] upliftNorm, double[][] upliftRand) {
_nBins = ths.length;
_n = freq[freq.length-1];
_ths = ths;
_frequencyCumsum = freq;
_treatment = _control = _yTreatment = _yControl = _frequency = new long[0];
_auucs = auuc;
_auucsNormalized = auucNorm;
_auucsRandom = auucRand;
_aecu = aecu;
_maxIdx = -1;
_auucType = auucType;
_auucTypeIndx = getIndexByAUUCType(_auucType);
_uplift = uplift;
_upliftNormalized = upliftNorm;
_upliftRandom = upliftRand;
}
public static double[] calculateQuantileThresholds(int groups, Vec preds) {
Frame fr = null;
QuantileModel qm = null;
double[] quantiles;
try {
QuantileModel.QuantileParameters qp = new QuantileModel.QuantileParameters();
qp._seed = 42;
fr = new Frame(Key.<Frame>make(), new String[]{"predictions"}, new Vec[]{preds});
DKV.put(fr);
qp._train = fr._key;
assert groups > 0;
qp._probs = new double[groups];
for (int i = 0; i < groups; ++i) {
qp._probs[i] = (groups - i - 1.) / groups; // This is 0.9, 0.8, 0.7, 0.6, ..., 0.1, 0 for 10 groups
}
qm = new Quantile(qp).trainModel().get();
quantiles = qm._output._quantiles[0];
// find uniques
TreeSet<Double> hs = new TreeSet<>();
for (double d : quantiles) hs.add(d);
quantiles = new double[hs.size()];
Iterator<Double> it = hs.descendingIterator();
int i = 0;
while (it.hasNext()) quantiles[i++] = it.next();
} finally {
if (qm != null) qm.remove();
if (fr != null) DKV.remove(fr._key);
}
if(quantiles == null){
quantiles = new double[]{0};
} else if(Double.isNaN(quantiles[0])){
quantiles[0] = 0;
}
return quantiles;
}
private double[] computeAuucs(){
return computeAuucs(_uplift);
}
private double[] computeAuucsRandom(){
return computeAuucs(_upliftRandom);
}
private double[] computeAuucsNormalized() {
return computeAuucs(_upliftNormalized);
}
private double[] computeAuucs(double[][] uplift){
AUUCType[] auucTypes = AUUCType.VALUES;
double[] auucs = new double[auucTypes.length];
for(int i = 0; i < auucTypes.length; i++ ) {
if(_n == 0){
auucs[i] = Double.NaN;
} else {
double area = 0;
for (int j = 0; j < _nBins; j++) {
area += uplift[i][j] * frequency(j);
}
auucs[i] = area / (_n + 1);
}
}
return auucs;
}
private double[] computeAecu(){
double[] aecu = new double[_auucs.length];
for(int i = 0; i < _auucs.length; i++){
aecu[i] = auuc(i) - auucRandom(i);
}
return aecu;
}
public double auucByType(AUUCType type){
int idx = getIndexByAUUCType(type);
return auuc(idx);
}
public double auucRandomByType(AUUCType type){
int idx = getIndexByAUUCType(type);
return auucRandom(idx);
}
public double aecuByType(AUUCType type){
int idx = getIndexByAUUCType(type);
return aecu(idx);
}
public double auucNormalizedByType(AUUCType type){
int idx = getIndexByAUUCType(type);
return auucNormalized(idx);
}
public double auuc (int idx){
return _n == 0 || idx < 0 ? Double.NaN : _auucs[idx];
}
public double auuc(){ return auuc(_auucTypeIndx); }
public double auucRandom(int idx){
return _n == 0 || idx < 0 ? Double.NaN : _auucsRandom[idx];
}
public double auucRandom(){ return auucRandom(_auucTypeIndx); }
public double aecu(int idx) { return _n == 0 || idx < 0 ? Double.NaN : _aecu[idx];}
public double qini(){ return aecuByType(AUUCType.qini);}
public double auucNormalized(int idx){ return _n == 0 || idx < 0 ? Double.NaN : _auucsNormalized[idx]; }
public double auucNormalized(){ return auucNormalized(_auucTypeIndx); }
public static class AUUCImpl extends MRTask<AUUCImpl> {
final double[] _thresholds;
AUUCBuilder _bldr;
public AUUCImpl(double[] thresholds) {
_thresholds = thresholds;
}
@Override public void map(Chunk ps, Chunk actuals, Chunk treatment) {
AUUCBuilder bldr = _bldr = new AUUCBuilder(_thresholds);
for( int row = 0; row < ps._len; row++ )
if( !ps.isNA(row) && !treatment.isNA(row) )
bldr.perRow(ps.atd(row),1, actuals.atd(row), (float) treatment.atd(row));
}
@Override public void reduce( AUUCImpl auuc ) { _bldr.reduce(auuc._bldr); }
}
/**
* Builder that processes input data in parallel to build the histogram used to calculate the AUUC quickly.
*/
public static class AUUCBuilder extends Iced {
final int _nBins;
final double[]_thresholds; // thresholds
final long[] _treatment; // number of data from treatment group
final long[] _control; // number of data from control group
final long[] _yTreatment; // number of data from treatment group with prediction = 1
final long[] _yControl; // number of data from control group with prediction = 1
final long[] _frequency; // frequency of data in each bin
long _n;
public AUUCBuilder(double[] thresholds) {
int nBins = thresholds.length;
_nBins = nBins;
_thresholds = thresholds;
_treatment = new long[nBins];
_control = new long[nBins];
_yTreatment = new long[nBins];
_yControl = new long[nBins];
_frequency = new long[nBins];
}
public void perRow(double pred, double w, double y, float treatment) {
if (w == 0) {return;}
for(int t = 0; t < _thresholds.length; t++) {
if (pred >= _thresholds[t] && (t == 0 || pred <_thresholds[t-1])) {
_n++;
_frequency[t]++;
if(treatment == 1){
_treatment[t]++;
if(y == 1){
_yTreatment[t]++;
}
} else {
_control[t]++;
if(y == 1){
_yControl[t]++;
}
}
break;
}
}
}
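// Example: with descending thresholds {0.75, 0.5, 0.25, 0.0} (as produced by the quantile
// step), pred = 0.9 lands in bin 0 (0.9 >= 0.75, t == 0) and pred = 0.6 lands in bin 1
// (0.6 >= 0.5 and 0.6 < 0.75). Each row is counted in exactly one bin.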
public void reduce( AUUCBuilder bldr) {
_n += bldr._n;
ArrayUtils.add(_treatment, bldr._treatment);
ArrayUtils.add(_control, bldr._control);
ArrayUtils.add(_yTreatment, bldr._yTreatment);
ArrayUtils.add(_yControl, bldr._yControl);
ArrayUtils.add(_frequency, bldr._frequency);
}
private String toDebugString() {
return "n = " + _n +
"; nBins = " + _nBins +
"; ths = " + Arrays.toString(_thresholds) +
"; treatment = " + Arrays.toString(_treatment) +
"; control = " + Arrays.toString(_control) +
"; yTreatment = " + Arrays.toString(_yTreatment) +
"; yControl = " + Arrays.toString(_yControl) +
"; frequency = " + Arrays.toString(_frequency);
}
}
/** AUUC type enum
*
* This is an Enum class, with an exec() function to compute the criteria
* from the basic parts, and from an AUUC at a given threshold index.
*/
public enum AUUCType {
AUTO() {
@Override
double exec(long treatment, long control, long yTreatment, long yControl) {
return qini.exec(treatment, control, yTreatment, yControl);
}
},
qini() {
@Override
double exec(long treatment, long control, long yTreatment, long yControl) {
double norm = treatment / (double)control;
return yTreatment - yControl * norm;
}
},
lift() {
@Override
double exec(long treatment, long control, long yTreatment, long yControl) {
return yTreatment / (double) treatment - yControl / (double)control;
}
},
gain() {
@Override
double exec(long treatment, long control, long yTreatment, long yControl) {
return lift.exec(treatment, control, yTreatment, yControl) * (double)(treatment + control);}
};
/** @param treatment
* @param control
* @param yTreatment
* @param yControl
* @return metric value */
abstract double exec(long treatment, long control, long yTreatment, long yControl );
public double exec(AUUC auc, int idx) { return exec(auc.treatment(idx),auc.control(idx),auc.yTreatment(idx),auc.yControl(idx)); }
public static final AUUCType[] VALUES = values();
public static final AUUCType[] VALUES_WITHOUT_AUTO = ArrayUtils.remove(values().clone(), ArrayUtils.find(AUUCType.values(), AUTO));
public static String nameAuto(){
return qini.name();
}
/** Convert a criterion into a threshold index that maximizes the criterion
* @return Threshold index that maximizes the criterion
*/
public int maxCriterionIdx(AUUC auuc) {
double md = -Double.MAX_VALUE;
int mx = -1;
for( int i=0; i<auuc._nBins; i++) {
double d = exec(auuc,i);
if( d > md ) {
md = d;
mx = i;
}
}
return mx;
}
}
}
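
A small sketch of building an AUUC from pre-computed thresholds, bypassing the quantile step; all numbers are illustrative.

import hex.AUUC;

public class AUUCSketch {
    public static void main(String[] args) {
        double[] thresholds = {0.75, 0.5, 0.25, 0.0}; // descending, as the quantile step produces
        AUUC.AUUCBuilder bldr = new AUUC.AUUCBuilder(thresholds);
        // perRow(prediction, weight, response y, treatment flag)
        bldr.perRow(0.80, 1, 1, 1); // treated, responded   -> bin 0
        bldr.perRow(0.76, 1, 0, 0); // control, no response -> bin 0
        bldr.perRow(0.60, 1, 0, 1); // treated, no response -> bin 1
        bldr.perRow(0.30, 1, 1, 0); // control, responded   -> bin 2
        AUUC auuc = new AUUC(bldr, AUUC.AUUCType.qini);
        System.out.println("qini AUUC = " + auuc.auuc());
    }
}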
// File: java-sources/ai/h2o/h2o-core/3.46.0.7/hex/CMetricScoringTask.java
package hex;
import water.udf.CFuncRef;
import water.udf.CFuncTask;
import water.udf.CMetricFunc;
/**
* Custom metric scoring task.
*
* The task provides support to load and invoke custom model metric
* defined via {@link water.udf.CFuncTask}.
*
* @param <T> self type
*/
public class CMetricScoringTask<T extends CMetricScoringTask<T>> extends CFuncTask<CMetricFunc, T> {
/** Internal parameter to preserve workspace for custom metric computation */
protected double[] customMetricWs;
private transient CustomMetric result;
public CMetricScoringTask(CFuncRef cFuncRef) {
super(cFuncRef);
}
@Override
protected final Class<CMetricFunc> getFuncType() {
return CMetricFunc.class;
}
protected final void customMetricPerRow(double preds[], float yact[],double weight, double offset, Model m) {
if (func != null) {
double[] rowR = func.map(preds, yact, weight, offset, m);
if (customMetricWs != null) {
customMetricWs = func.reduce(customMetricWs, rowR);
} else {
customMetricWs = rowR;
}
}
}
@Override
public void reduce(T t) {
super.reduce(t);
reduceCustomMetric(t);
}
public void reduceCustomMetric(T t) {
if (func != null) {
if (customMetricWs == null) {
customMetricWs = t.customMetricWs;
} else if (t.customMetricWs == null) {
// nop
} else {
customMetricWs = func.reduce(this.customMetricWs, t.customMetricWs);
}
}
}
@Override
protected void postGlobal() {
super.postGlobal();
result = computeCustomMetric();
}
public CustomMetric computeCustomMetric() {
if (func != null) {
return CustomMetric.from(cFuncRef.getName(),
customMetricWs != null ? func.metric(customMetricWs)
: Double.NaN);
}
return null;
}
public CustomMetric getComputedCustomMetric() {
return result;
}
}
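
A sketch of a custom metric in the map/reduce/metric shape this task invokes. The exact CMetricFunc method signatures are assumed from the calls visible above (func.map, func.reduce, func.metric), so treat this as illustrative rather than a definitive implementation.

import hex.Model;
import water.udf.CMetricFunc;

public class RmseMetricSketch implements CMetricFunc {
    @Override
    public double[] map(double[] preds, float[] yact, double weight, double offset, Model m) {
        double err = preds[0] - yact[0];                 // per-row residual
        return new double[]{weight * err * err, weight}; // workspace: {weighted SE, total weight}
    }

    @Override
    public double[] reduce(double[] l, double[] r) {
        return new double[]{l[0] + r[0], l[1] + r[1]};   // combine two workspaces
    }

    @Override
    public double metric(double[] ws) {
        return Math.sqrt(ws[0] / ws[1]);                 // weighted RMSE
    }
}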
// File: java-sources/ai/h2o/h2o-core/3.46.0.7/hex/CVModelBuilder.java
package hex;
import org.apache.log4j.Logger;
import hex.ModelBuilder.TrainModelTaskController;
import water.Job;
/**
* Execute Cross-Validation model build in parallel
*/
public class CVModelBuilder {
private static final Logger LOG = Logger.getLogger(CVModelBuilder.class);
private final Job job;
private final ModelBuilder<?, ?, ?>[] modelBuilders;
private final int parallelization;
/**
* @param job parent job (processing will be stopped if stop of a parent job was requested)
* @param modelBuilders list of model builders to run in bulk
* @param parallelization level of parallelization (how many models can be built at the same time)
*/
public CVModelBuilder(
Job job, ModelBuilder<?, ?, ?>[] modelBuilders, int parallelization
) {
this.job = job;
this.modelBuilders = modelBuilders;
this.parallelization = parallelization;
}
protected void prepare(ModelBuilder<?, ?, ?> m) {}
protected void finished(ModelBuilder<?, ?, ?> m) {}
public void bulkBuildModels() {
final int N = modelBuilders.length;
TrainModelTaskController[] submodel_tasks = new TrainModelTaskController[N];
int nRunning = 0;
RuntimeException rt = null;
for (int i = 0; i < N; ++i) {
if (job.stop_requested()) {
LOG.info("Skipping build of last " + (N - i) + " out of " + N + " cross-validation models");
stopAll(submodel_tasks);
throw new Job.JobCancelledException(job);
}
LOG.info("Building cross-validation model " + (i + 1) + " / " + N + ".");
prepare(modelBuilders[i]);
modelBuilders[i].startClock();
submodel_tasks[i] = modelBuilders[i].submitTrainModelTask();
if (++nRunning == parallelization) { //piece-wise advance in training the models
while (nRunning > 0) {
final int waitForTaskIndex = i + 1 - nRunning;
try {
submodel_tasks[waitForTaskIndex].join();
finished(modelBuilders[waitForTaskIndex]);
} catch (RuntimeException t) {
if (rt == null) {
LOG.info("Exception from CV model #" + waitForTaskIndex + " will be reported as main exception.");
rt = t;
} else {
LOG.warn("CV model #" + waitForTaskIndex + " failed, the exception will not be reported", t);
}
} finally {
LOG.info("Completed cross-validation model " + waitForTaskIndex + " / " + N + ".");
nRunning--; // need to decrement regardless even if there is an exception, otherwise looping...
}
}
if (rt != null) throw rt;
}
}
for (int i = 0; i < N; ++i) //all sub-models must be completed before the main model can be built
try {
final TrainModelTaskController task = submodel_tasks[i];
assert task != null;
task.join();
} catch (RuntimeException t) {
if (rt == null) {
LOG.info("Exception from CV model #" + i + " will be reported as main exception.");
rt = t;
} else {
LOG.warn("CV model #" + i + " failed, the exception will not be reported", t);
}
} finally {
LOG.info("Completed cross-validation model " + i + " / " + N + ".");
}
if (rt != null) throw rt;
}
private void stopAll(TrainModelTaskController[] tasks) {
for (TrainModelTaskController task : tasks) {
if (task != null) {
task.cancel(true);
}
}
}
protected Job getJob() {
return job;
}
}
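
A sketch of the intended call pattern; the fold builders themselves are constructed by the caller and elided here.

// ModelBuilder<?, ?, ?>[] foldBuilders = ...;                   // one builder per CV fold
// CVModelBuilder cv = new CVModelBuilder(job, foldBuilders, 2); // at most 2 models in flight
// cv.bulkBuildModels(); // joins all tasks; rethrows the first fold failure, if any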
// File: java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ClusteringModel.java
package hex;
import water.Key;
/** Clustering Model
* Generates a 2-D array of clusters.
*/
public abstract class ClusteringModel<M extends ClusteringModel<M,P,O>, P extends ClusteringModel.ClusteringParameters, O extends ClusteringModel.ClusteringOutput> extends Model<M,P,O> {
public ClusteringModel( Key selfKey, P parms, O output ) { super(selfKey,parms,output); }
/** Clustering Model Parameters includes the number of clusters desired */
public abstract static class ClusteringParameters extends Model.Parameters {
/** Clustering models must specify the number of clusters to generate */
public int _k = 1;
}
/** Output from all Clustering Models, includes generated clusters */
public abstract static class ClusteringOutput extends Model.Output {
/** Cluster centers_raw. During model init, might be null or might have a "k"
* which is oversampled a lot. Not standardized (although if standardization
* is used during the building process, the *builder's* cluster centers_raw are standardized). */
public double[/*k*/][/*features*/] _centers_raw;
public double[/*k*/][/*features*/] _centers_std_raw;
// For internal use only: means and 1/(std dev) of each training col
public double[] _normSub;
public double[] _normMul;
public int [] _mode;
// Cluster size. Defined as the number of rows in each cluster.
public long[/*k*/] _size;
public ClusteringOutput() {
this(null);
}
/** Any final prep-work just before model-building starts, but after the
* user has clicked "go". */
public ClusteringOutput(ClusteringModelBuilder b) { super(b); }
@Override public boolean isSupervised() { return false; }
// The notion of output classes is odd for clustering - as with regression, there is a single output
public int nclasses() { return 1; }
@Override public ModelCategory getModelCategory() { return ModelCategory.Clustering; }
}
}
// File: java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ClusteringModelBuilder.java
package hex;
import water.Job;
import water.fvec.Frame;
abstract public class ClusteringModelBuilder<M extends ClusteringModel<M,P,O>, P extends ClusteringModel.ClusteringParameters, O extends ClusteringModel.ClusteringOutput> extends ModelBuilder<M,P,O> {
public boolean isSupervised() { return false; }
/** Constructor called from an http request; MUST override in subclasses. */
public ClusteringModelBuilder(P parms ) { super(parms ); /*only call init in leaf classes*/ }
public ClusteringModelBuilder(P parms, Job job) { super(parms,job); /*only call init in leaf classes*/ }
public ClusteringModelBuilder(P parms, boolean startup_once) { super(parms,startup_once); /*only call init in leaf classes*/ }
/** Initialize the ModelBuilder, validating all arguments and preparing the
* training frame. This call is expected to be overridden in the subclasses
* and each subclass will start with "super.init();". This call is made
* by the front-end whenever the GUI is clicked, and needs to be fast;
* heavy-weight prep needs to wait for the trainModel() call. */
@Override public void init(boolean expensive) {
super.init(expensive);
if( _parms._k < 1 || _parms._k > 1e7 ) error("_k", "k must be between 1 and 1e7");
if( _train != null && _train.numRows() < _parms._k ) error("_k","Cannot make " + _parms._k + " clusters out of " + _train.numRows() + " rows");
}
}
// File: java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ConfusionMatrix.java
package hex;
import water.H2O;
import water.Iced;
import water.util.ArrayUtils;
import water.util.Log;
import water.util.TwoDimTable;
import java.util.Arrays;
public class ConfusionMatrix extends Iced {
private static final String MAX_CM_CLASSES_KEY = H2O.OptArgs.SYSTEM_PROP_PREFIX + "cm.maxClasses";
private static final int MAX_CM_CLASSES_DEFAULT = 1000;
private TwoDimTable _table;
public final double[][] _cm; // [actual][predicted], typed as double because of observation weights (which can be doubles)
public final String[] _domain;
/**
* Constructor for Confusion Matrix
* @param value 2D square matrix with co-occurrence counts for actual vs predicted class membership
* @param domain class labels (unified domain between actual and predicted class labels)
*/
public ConfusionMatrix(double[][] value, String[] domain) { _cm = value; _domain = domain; }
public void add(int i, int j) { _cm[i][j]++; }
public final int size() { return _domain.length; }
boolean tooLarge() { return size() > maxClasses(); }
static int maxClasses() {
String maxClassesSpec = System.getProperty(MAX_CM_CLASSES_KEY);
if (maxClassesSpec == null)
return MAX_CM_CLASSES_DEFAULT;
return parseMaxClasses(maxClassesSpec);
}
static int parseMaxClasses(String maxClassesSpec) {
try {
int maxClasses = Integer.parseInt(maxClassesSpec);
if (maxClasses <= 0) {
Log.warn("Using default limit of max classes in a confusion matrix (" + MAX_CM_CLASSES_DEFAULT + ", user specification is invalid: " + maxClasses + ")");
return MAX_CM_CLASSES_DEFAULT;
} else
return maxClasses;
} catch (NumberFormatException e) {
Log.warn("Using default limit of max classes in a confusion matrix (" + MAX_CM_CLASSES_DEFAULT + ", user specification is invalid: " + maxClassesSpec + ")", e);
return MAX_CM_CLASSES_DEFAULT;
}
}
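// Example: the limit can be raised at JVM startup via the system property above, i.e.
//   java -D<prefix>cm.maxClasses=2000 ...
// where <prefix> is the concrete value of H2O.OptArgs.SYSTEM_PROP_PREFIX (not spelled out here).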
public final double mean_per_class_error() {
if(tooLarge())throw new UnsupportedOperationException("mean per class error cannot be computed: too many classes");
double err = 0;
for( int d = 0; d < _cm.length; ++d )
err += class_error(d); //can be 0 if no actuals, but we're still dividing by the total count of classes
return err / _cm.length;
}
// mean(accuracy) = mean(1-error) = 1-mean(error)
public final double mean_per_class_accuracy() {
return 1-mean_per_class_error();
}
public final double class_error(int c) {
if(tooLarge())throw new UnsupportedOperationException("class errors cannot be computed: too many classes");
double s = ArrayUtils.sum(_cm[c]);
if( s == 0 ) return 0.0; // Either 0 or NaN, but 0 is nicer
return (s - _cm[c][c]) / s;
}
public double total_rows() {
double n = 0;
for (double[] a_arr : _cm)
n += ArrayUtils.sum(a_arr);
return n;
}
public void add(ConfusionMatrix other) {
if (_cm != null && other._cm != null)
ArrayUtils.add(_cm, other._cm);
}
/**
* @return overall classification error
*/
public double err() {
if(tooLarge())throw new UnsupportedOperationException("error cannot be computed: too many classes");
double n = total_rows();
double err = n;
for( int d = 0; d < _cm.length; ++d )
err -= _cm[d][d];
return err / n;
}
public double err_count() {
if(tooLarge())throw new UnsupportedOperationException("error count cannot be computed: too many classes");
double err = total_rows();
for( int d = 0; d < _cm.length; ++d )
err -= _cm[d][d];
assert(err >= 0);
return err;
}
/**
* The percentage of predictions that are correct.
*/
public double accuracy() { return 1-err(); }
/**
* The percentage of negatively labeled instances that were predicted as negative.
* @return TNR / Specificity
*/
public double specificity() {
if(!isBinary())throw new UnsupportedOperationException("specificity is only implemented for 2 class problems.");
if(tooLarge())throw new UnsupportedOperationException("specificity cannot be computed: too many classes");
double tn = _cm[0][0];
double fp = _cm[0][1];
return tn / (tn + fp);
}
/**
* The percentage of positively labeled instances that were predicted as positive.
* @return Recall / TPR / Sensitivity
*/
public double recall() {
if(!isBinary())throw new UnsupportedOperationException("recall is only implemented for 2 class problems.");
if(tooLarge())throw new UnsupportedOperationException("recall cannot be computed: too many classes");
double tp = _cm[1][1];
double fn = _cm[1][0];
return tp / (tp + fn);
}
/**
* The percentage of positive predictions that are correct.
* @return Precision
*/
public double precision() {
if(!isBinary())throw new UnsupportedOperationException("precision is only implemented for 2 class problems.");
if(tooLarge())throw new UnsupportedOperationException("precision cannot be computed: too many classes");
double tp = _cm[1][1];
double fp = _cm[0][1];
return tp / (tp + fp);
}
/**
* The Matthews Correlation Coefficient takes true negatives into account, in contrast to the F-score.
* See <a href="http://en.wikipedia.org/wiki/Matthews_correlation_coefficient">MCC</a>
* MCC = Correlation between observed and predicted binary classification
* @return mcc ranges from -1 (total disagreement) ... 0 (no better than random) ... 1 (perfect)
*/
public double mcc() {
if(!isBinary())throw new UnsupportedOperationException("mcc is only implemented for 2 class problems.");
if(tooLarge())throw new UnsupportedOperationException("mcc cannot be computed: too many classes");
double tn = _cm[0][0];
double fp = _cm[0][1];
double tp = _cm[1][1];
double fn = _cm[1][0];
return (tp*tn - fp*fn)/Math.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn));
}
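// Worked example (illustrative counts): tn=30, fp=10, fn=20, tp=40 gives
//   (40*30 - 10*20) / sqrt((40+10)*(40+20)*(30+10)*(30+20))
//   = 1000 / sqrt(6000000) ~= 0.408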
/**
* The maximum per-class error
* @return max[classErr(i)]
*/
public double max_per_class_error() {
int n = nclasses();
if(n == 0)throw new UnsupportedOperationException("max per class error is only defined for classification problems");
if(tooLarge())throw new UnsupportedOperationException("max per class error cannot be computed: too many classes");
double res = class_error(0);
for(int i = 1; i < n; ++i)
res = Math.max(res, class_error(i));
return res;
}
public final int nclasses(){return _domain == null ? 0: _domain.length;}
public final boolean isBinary(){return nclasses() == 2;}
/**
* Returns the F-measure which combines precision and recall. <br>
* Cf. the end of http://en.wikipedia.org/wiki/Precision_and_recall
*/
public double f1() {
final double precision = precision();
final double recall = recall();
return 2. * (precision * recall) / (precision + recall);
}
/**
* Returns the F-measure which combines precision and recall and weights recall higher than precision. <br>
* See <a href="http://en.wikipedia.org/wiki/F1_score">F1 score</a>
*/
public double f2() {
final double precision = precision();
final double recall = recall();
return 5. * (precision * recall) / (4. * precision + recall);
}
/**
* Returns the F-measure which combines precision and recall and weights precision higher than recall. <br>
* See <a href="http://en.wikipedia.org/wiki/F1_score">F1 score</a>
*/
public double f0point5() {
final double precision = precision();
final double recall = recall();
return 1.25 * (precision * recall) / (.25 * precision + recall);
}
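/**
 * Illustrative sketch (not part of the original API): f1(), f2() and f0point5() above are
 * all instances of the general F-beta measure with beta = 1, 2 and 0.5 respectively:
 * F_beta = (1 + beta^2) * (precision * recall) / (beta^2 * precision + recall).
 */
private double fBetaSketch(double beta) {
  final double precision = precision();
  final double recall = recall();
  final double b2 = beta * beta;
  return (1. + b2) * (precision * recall) / (b2 * precision + recall);
}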
@Override public String toString() {
StringBuilder sb = new StringBuilder();
for( double[] r : _cm)
sb.append(Arrays.toString(r)).append('\n');
return sb.toString();
}
private static String[] createConfusionMatrixHeader( double xs[], String ds[] ) {
String ss[] = new String[xs.length]; // the same length
for( int i=0; i<xs.length; i++ )
if( xs[i] >= 0 || (ds[i] != null && ds[i].length() > 0) && !Double.toString(i).equals(ds[i]) )
ss[i] = ds[i];
if( ds.length == xs.length-1 && xs[xs.length-1] > 0 )
ss[xs.length-1] = "NA";
return ss;
}
public String toASCII() { return table() == null ? "" : _table.toString(); }
/** Convert this ConfusionMatrix into a fully annotated TwoDimTable
* @return TwoDimTable */
public TwoDimTable table() { return _table == null ? (_table=toTable()) : _table; }
// Do the work making a TwoDimTable
private TwoDimTable toTable() {
if (tooLarge()) return null;
if (_cm == null || _domain == null) return null;
for( double cm[] : _cm ) assert(_cm.length == cm.length);
// Sum up predicted & actuals
double acts [] = new double[_cm.length];
double preds[] = new double[_cm[0].length];
boolean isInt = true;
for( int a=0; a< _cm.length; a++ ) {
double sum=0;
for( int p=0; p< _cm[a].length; p++ ) {
sum += _cm[a][p];
preds[p] += _cm[a][p];
isInt &= (_cm[a][p] == (long)_cm[a][p]);
}
acts[a] = sum;
}
String adomain[] = createConfusionMatrixHeader(acts , _domain);
String pdomain[] = createConfusionMatrixHeader(preds, _domain);
assert adomain.length == pdomain.length : "The confusion matrix should have the same length for both directions.";
String[] rowHeader = Arrays.copyOf(adomain,adomain.length+1);
rowHeader[adomain.length] = "Totals";
String[] colHeader = Arrays.copyOf(pdomain,pdomain.length+2);
colHeader[colHeader.length-2] = "Error";
colHeader[colHeader.length-1] = "Rate";
String[] colType = new String[colHeader.length];
String[] colFormat = new String[colHeader.length];
for (int i=0; i<colFormat.length-1; ++i) {
colType[i] = isInt ? "long":"double";
colFormat[i] = isInt ? "%d":"%.2f";
}
colType[colFormat.length-2] = "double";
colFormat[colFormat.length-2] = "%.4f";
colType[colFormat.length-1] = "string";
// pass 1: compute width of last column
double terr = 0;
int width = 0;
for (int a = 0; a < _cm.length; a++) {
if (adomain[a] == null) continue;
double correct = 0;
for (int p = 0; p < pdomain.length; p++) {
if (pdomain[p] == null) continue;
boolean onDiag = adomain[a].equals(pdomain[p]);
if (onDiag) correct = _cm[a][p];
}
double err = acts[a] - correct;
terr += err;
width = isInt ?
Math.max(width, String.format("%,d / %,d", (long)err, (long)acts[a]).length()):
Math.max(width, String.format("%.4f / %.4f", err, acts[a]).length());
}
double nrows = 0;
for (double n : acts) nrows += n;
width = isInt?
Math.max(width, String.format("%,d / %,d", (long)terr, (long)nrows).length()):
Math.max(width, String.format("%.4f / %.4f", terr, nrows).length());
// set format width
colFormat[colFormat.length-1] = "= %" + width + "s";
TwoDimTable table = new TwoDimTable("Confusion Matrix", "Row labels: Actual class; Column labels: Predicted class", rowHeader, colHeader, colType, colFormat, null);
// Main CM Body
for (int a = 0; a < _cm.length; a++) {
if (adomain[a] == null) continue;
double correct = 0;
for (int p = 0; p < pdomain.length; p++) {
if (pdomain[p] == null) continue;
boolean onDiag = adomain[a].equals(pdomain[p]);
if (onDiag) correct = _cm[a][p];
if (isInt)
table.set(a, p, (long)_cm[a][p]);
else
table.set(a, p, _cm[a][p]);
}
double err = acts[a] - correct;
table.set(a, pdomain.length, err / acts[a]);
table.set(a, pdomain.length + 1,
isInt ? String.format("%,d / %,d", (long)err, (long)acts[a]):
String.format("%.4f / %.4f", err, acts[a])
);
}
// Last row of CM
for (int p = 0; p < pdomain.length; p++) {
if (pdomain[p] == null) continue;
if (isInt)
table.set(adomain.length, p, (long)preds[p]);
else
table.set(adomain.length, p, preds[p]);
}
table.set(adomain.length, pdomain.length, (float) terr / nrows);
table.set(adomain.length, pdomain.length + 1,
isInt ? String.format("%,d / %,d", (long)terr, (long)nrows):
String.format("%.2f / %.2f", terr, nrows));
return table;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/CreateFrame.java
|
package hex;
import water.H2O;
import water.Iced;
import water.Job;
import water.Key;
import water.fvec.Frame;
import water.fvec.FrameCreator;
import water.util.Log;
import water.util.PrettyPrint;
import java.util.Random;
/**
* Create a Frame from scratch
* If randomize = true, then the frame is filled with Random values.
*/
public class CreateFrame extends Iced {
public final Job<Frame> _job;
public long rows = 10000;
public int cols = 10;
public long seed = -1;
public long seed_for_column_types = -1;
public boolean randomize = true;
public long value = 0;
public long real_range = 100;
public double categorical_fraction = 0.2;
public int factors = 100;
public double integer_fraction = 0.2;
public double time_fraction = 0.0;
public double string_fraction = 0.0;
public long integer_range = 100;
public double binary_fraction = 0.1;
public double binary_ones_fraction = 0.02;
public double missing_fraction = 0.01;
public int response_factors = 2;
public boolean positive_response; // only for response_factors=1
public boolean has_response = false;
public CreateFrame(Key<Frame> key) { _job = new Job<>(key,Frame.class.getName(),"CreateFrame"); }
public CreateFrame() { this(Key.<Frame>make()); }
public Job<Frame> execImpl() {
if (seed == -1) seed = new Random().nextLong();
if (seed_for_column_types == -1) seed_for_column_types = seed;
if (integer_fraction + binary_fraction + categorical_fraction + time_fraction + string_fraction > 1.00000001)
throw new IllegalArgumentException("Integer, binary, categorical, time and string fractions must add up to <= 1.");
if (missing_fraction < 0 || missing_fraction > 1) throw new IllegalArgumentException("Missing fraction must be between 0 and 1.");
if (integer_fraction < 0 || integer_fraction > 1) throw new IllegalArgumentException("Integer fraction must be between 0 and 1.");
if (binary_fraction < 0 || binary_fraction > 1) throw new IllegalArgumentException("Binary fraction must be between 0 and 1.");
if (time_fraction <0 || time_fraction > 1) throw new IllegalArgumentException("Time fraction must be between 0 and 1.");
if (string_fraction <0 || string_fraction > 1) throw new IllegalArgumentException("String fraction must be between 0 and 1.");
if (binary_ones_fraction < 0 || binary_ones_fraction > 1) throw new IllegalArgumentException("Binary ones fraction must be between 0 and 1.");
if (categorical_fraction < 0 || categorical_fraction > 1) throw new IllegalArgumentException("Categorical fraction must be between 0 and 1.");
if (categorical_fraction > 0 && factors <= 1) throw new IllegalArgumentException("Factors must be at least 2 for categorical data.");
if (response_factors < 1) throw new IllegalArgumentException("Response factors must be either 1 (real-valued response), or >=2 (factor levels).");
if (response_factors > Model.Parameters.MAX_SUPPORTED_LEVELS) throw new IllegalArgumentException("Response factors must be <= " + Model.Parameters.MAX_SUPPORTED_LEVELS + ".");
if (factors > 10000000) throw new IllegalArgumentException("Number of factors must be <= 10,000,000.");
if (cols <= 0 || rows <= 0) throw new IllegalArgumentException("Must have number of rows > 0 and columns > 0.");
// estimate byte size of the frame
double byte_estimate = randomize ? rows * cols * (
binary_fraction * 1./8 //bits
+ categorical_fraction * (factors < 128 ? 1 : factors < 32768 ? 2 : 4)
+ integer_fraction * (integer_range < 128 ? 1 : integer_range < 32768 ? 2 : integer_range < (1<<31) ? 4 : 8)
+ time_fraction * 8
+ (1-integer_fraction - binary_fraction - categorical_fraction - time_fraction - string_fraction) * 8 ) //reals
+ rows // response column (roughly 1 byte per row)
: 0; // all constants - should be small
long cluster_free_mem = H2O.CLOUD.free_mem();
if (byte_estimate > cluster_free_mem)
throw new IllegalArgumentException("Frame is expected to require " + PrettyPrint.bytes((long) byte_estimate) + ", won't fit into H2O's free memory of "+ cluster_free_mem);
if (!randomize) {
if (integer_fraction != 0 || categorical_fraction != 0 || time_fraction != 0 || string_fraction != 0)
throw new IllegalArgumentException("Cannot have integer, categorical or time fractions > 0 unless randomize=true.");
} else {
if (value != 0)
throw new IllegalArgumentException("Cannot set data to a constant value if randomize=true.");
}
Log.info("Generated seed: " + seed);
FrameCreator fc = new FrameCreator(this);
return _job.start(fc,fc.nChunks()*7); // And start FrameCreator
}
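/*
 * Illustrative usage sketch (assumes a running H2O cloud; not part of the original file):
 *   CreateFrame cf = new CreateFrame();
 *   cf.rows = 1000; cf.cols = 5; cf.seed = 42;
 *   Frame f = cf.execImpl().get(); // blocks until the random frame is built
 */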
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/CustomMetric.java
|
package hex;
import water.Iced;
public class CustomMetric extends Iced<CustomMetric> {
public static final CustomMetric EMPTY = new CustomMetric(null, Double.NaN);
public final String name;
public final double value;
public CustomMetric(String name, double value) {
this.name = name;
this.value = value;
}
public static CustomMetric from(String name, double value) {
return new CustomMetric(name, value);
}
public boolean isValid() {
return name != null;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/DMatrix.java
|
package hex;
import water.*;
import water.H2O.FJWThr;
import water.H2O.H2OCallback;
import water.H2O.H2OCountedCompleter;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.NewChunk.Value;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.Log;
import water.util.StringUtils;
import java.util.Arrays;
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Created by tomasnykodym on 11/13/14.
*
* Distributed matrix operations such as (sparse) multiplication and transpose.
*/
public class DMatrix {
/**
* Transpose the Frame as if it was a matrix (i.e. rows become columns).
* Must be all numeric; currently will fail if there are too many rows ( >= ~1M).
* Result will be put into a new Vector Group and will be balanced so that each vec will have
* (4*num cpus in the cluster) chunks.
*
* @param src frame to transpose (must be all-numeric)
* @return the transposed frame
*/
public static Frame transpose(Frame src){
if(src.numRows() != (int)src.numRows())
throw H2O.unimpl();
int nchunks = Math.max(1,src.numCols()/10000);
long [] espc = new long[nchunks+1];
int rpc = (src.numCols() / nchunks);
int rem = (src.numCols() % nchunks);
Arrays.fill(espc, rpc);
for (int i = 0; i < rem; ++i) ++espc[i];
long sum = 0;
for (int i = 0; i < espc.length; ++i) {
long s = espc[i];
espc[i] = sum;
sum += s;
}
Key key = Vec.newKey();
int rowLayout = Vec.ESPC.rowLayout(key,espc);
return transpose(src, new Frame(new Vec(key,rowLayout).makeZeros((int)src.numRows())));
}
/**
* Transpose the Frame as if it was a matrix (rows <-> columns).
* Must be all numeric, will fail if there are too many rows ( >= ~1M).
*
* Result is made to be compatible (i.e. the same vector group and chunking) with the target frame.
*
* @param src frame to transpose (must be all-numeric)
* @param tgt target frame, pre-created with the transposed shape
* @return the target frame, filled with the transposed data
*/
public static Frame transpose(Frame src, Frame tgt){
if(src.numRows() != tgt.numCols() || src.numCols() != tgt.numRows())
throw new IllegalArgumentException("dimension do not match!");
for(Vec v:src.vecs()) {
if (v.isCategorical())
throw new IllegalArgumentException("transpose can only be applied to all-numeric frames (representing a matrix)");
if(v.length() > 1000000)
throw new IllegalArgumentException("too many rows, transpose only works for frames with < 1M rows.");
}
new TransposeTsk(tgt).doAll(src);
return tgt;
}
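/*
 * Illustrative sketch (assumes an all-numeric Frame "fr"; names are hypothetical):
 *   Frame tr   = DMatrix.transpose(fr); // tr.numRows() == fr.numCols() and vice versa
 *   Frame prod = DMatrix.mmul(fr, tr);  // distributed product fr * t(fr)
 */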
/**
* (MR)Task performing the matrix transpose.
* It is to be applied to the source frame.
* Target frame must be created up front (e.g. via Vec.makeZeros() call)
* and passed in as an argument.
*
* Task will utilize sparsity and will preserve compression if possible
* (compression may differ because of switching from column compressed to row-compressed form)
*/
public static class TransposeTsk extends MRTask<TransposeTsk> {
final Frame _tgt; // Target dataset, should be created up front, e.g. via Vec.makeZeros(n) call.
public TransposeTsk(Frame tgt){ _tgt = tgt;}
public void map(final Chunk[] chks) {
final Frame tgt = _tgt;
final long [] espc = tgt.anyVec().espc();
final int colStart = (int)chks[0].start();
for (int i = 0; i < espc.length - 1; ++i) {
final int fi = i;
final NewChunk[] tgtChunks = new NewChunk[chks[0]._len];
for (int j = 0; j < tgtChunks.length; ++j)
tgtChunks[j] = new NewChunk(tgt.vec(j + colStart), fi);
for (int c = ((int) espc[fi]); c < (int) espc[fi + 1]; ++c) {
Chunk nc = chks[c];
if(nc.isSparseZero()) {
for (int k = nc.nextNZ(-1); k < nc._len; k = nc.nextNZ(k)) {
tgtChunks[k].addZeros((int) (c - espc[fi]) - tgtChunks[k]._len);
nc.extractRows(tgtChunks[k], k);
}
} else
for(int k = 0; k < nc._len; k++) {
tgtChunks[k].addZeros((int) (c - espc[fi]) - tgtChunks[k]._len);
nc.extractRows(tgtChunks[k], k);
}
}
for (int j = 0; j < tgtChunks.length; ++j) { // finalize the target chunks and close them
final int fj = j;
tgtChunks[fj].addZeros((int) (espc[fi + 1] - espc[fi]) - tgtChunks[fj]._len);
tgtChunks[fj].close(_fs);
tgtChunks[fj] = null;
}
}
}
}
/**
* Info about matrix multiplication currently in progress.
*
* Contains runtime and (already computed) chunk stats
*
*/
public static class MatrixMulStats extends Iced {
public final Key jobKey;
public final long chunksTotal;
public final long _startTime;
public long lastUpdateAt;
public long chunksDone;
public long size;
public int [] chunkTypes = new int[0];
public long [] chunkCnts = new long[0];
public MatrixMulStats(long n, Key jobKey){chunksTotal = n; _startTime = System.currentTimeMillis(); this.jobKey = jobKey;}
public float progress(){ return (float)((double)chunksDone/chunksTotal);}
}
public static Frame mmul(Frame x, Frame y) {
MatrixMulTsk t = new MatrixMulTsk(null,null,x,y);
if(Thread.currentThread() instanceof FJWThr)
t.fork().join();
else
H2O.submitTask(t).join();
return t._z;
}
public static class MatrixMulTsk extends H2OCountedCompleter {
final transient Frame _x;
Frame _y;
Frame _z;
final Key _progressKey;
AtomicInteger _cntr;
public MatrixMulTsk(H2OCountedCompleter cmp, Key progressKey, Frame x, Frame y) {
super(cmp);
if(x.numCols() != y.numRows())
throw new IllegalArgumentException("dimensions do not match! x.numcols = " + x.numCols() + ", y.numRows = " + y.numRows());
_x = x;
_y = y;
_progressKey = progressKey;
}
@Override
public void compute2() {
_z = new Frame(_x.anyVec().makeZeros(_y.numCols()));
int total_cores = H2O.CLOUD.size()*H2O.NUMCPUS;
int chunksPerCol = _y.anyVec().nChunks();
int maxP = 256*total_cores/chunksPerCol;
Log.info("maxP = " + maxP);
_cntr = new AtomicInteger(maxP-1);
addToPendingCount(2*_y.numCols()-1);
for(int i = 0; i < Math.min(_y.numCols(),maxP); ++i)
forkVecTask(i);
}
private void forkVecTask(final int i) {
new GetNonZerosTsk(new H2OCallback<GetNonZerosTsk>(this) {
@Override
public void callback(GetNonZerosTsk gnz) {
new VecTsk(new Callback(), _progressKey, gnz._vals).dfork(ArrayUtils.append(_x.vecs(gnz._idxs), _z.vec(i)));
}
}).dfork(_y.vec(i));
}
private class Callback extends H2OCallback{
public Callback(){super(MatrixMulTsk.this);}
@Override
public void callback(H2OCountedCompleter h2OCountedCompleter) {
int i = _cntr.incrementAndGet();
if(i < _y.numCols())
forkVecTask(i);
}
}
}
static int cnt = 0;
// to be invoked from R expression
private static class GetNonZerosTsk extends MRTask<GetNonZerosTsk>{
final int _maxsz;
int [] _idxs;
double [] _vals;
public GetNonZerosTsk(H2OCountedCompleter cmp){super(cmp);_maxsz = 10000000;}
public GetNonZerosTsk(H2OCountedCompleter cmp, int maxsz){super(cmp); _maxsz = maxsz;}
@Override public void map(Chunk c){
int istart = (int)c.start();
assert (c.start() + c._len) == (istart + c._len);
final int n = c.sparseLenZero();
_idxs = MemoryManager.malloc4(n);
_vals = MemoryManager.malloc8d(n);
int j = 0;
for(int i = c.nextNZ(-1); i < c._len; i = c.nextNZ(i),++j) {
_idxs[j] = i + istart;
_vals[j] = c.atd(i);
}
assert j == n;
if(_idxs.length > _maxsz)
throw new RuntimeException("too many nonzeros! found at least " + _idxs.length + " nonzeros.");
}
@Override public void reduce(GetNonZerosTsk gnz){
if(_idxs.length + gnz._idxs.length > _maxsz)
throw new RuntimeException("too many nonzeros! found at least " + (_idxs.length + gnz._idxs.length) + " nonzeros.");
int [] idxs = MemoryManager.malloc4(_idxs.length + gnz._idxs.length);
double [] vals = MemoryManager.malloc8d(_vals.length + gnz._vals.length);
ArrayUtils.sortedMerge(_idxs,_vals,gnz._idxs,gnz._vals,idxs,vals);
_idxs = idxs;
_vals = vals;
}
}
// compute single vec of the output in matrix multiply
private static class VecTsk extends MRTask<VecTsk> {
double [] _y;
Key _progressKey;
public VecTsk(H2OCountedCompleter cmp, Key progressKey, double [] y){
super(cmp);
_progressKey = progressKey;
_y = y;
}
@Override public void setupLocal(){_fr.lastVec().preWriting();}
@Override public void map(Chunk [] chks) {
Chunk zChunk = chks[chks.length-1];
double [] res = MemoryManager.malloc8d(chks[0]._len);
for(int i = 0; i < _y.length; ++i) {
final double yVal = _y[i];
final Chunk xChunk = chks[i];
for (int k = xChunk.nextNZ(-1); k < res.length; k = xChunk.nextNZ(k))
res[k] += yVal * xChunk.atd(k);
}
Chunk modChunk = new NewChunk(res).setSparseRatio(2).compress();
if(_progressKey != null)
new UpdateProgress(modChunk.getBytes().length,modChunk.frozenType()).fork(_progressKey);
DKV.put(zChunk.vec().chunkKey(zChunk.cidx()),modChunk,_fs);
}
@Override public void closeLocal(){
_y = null; // drop inputs
_progressKey = null;
}
}
private static class UpdateProgress extends TAtomic<MatrixMulStats> {
final int _chunkSz;
final int _chunkType;
public UpdateProgress(int sz, int type) {
_chunkSz = sz;
_chunkType = type;
}
@Override
public MatrixMulStats atomic(MatrixMulStats old) {
old.chunkCnts = old.chunkCnts.clone();
int j = -1;
for(int i = 0; i < old.chunkTypes.length; ++i) {
if(_chunkType == old.chunkTypes[i]) {
j = i;
break;
}
}
if(j == -1) {
old.chunkTypes = Arrays.copyOf(old.chunkTypes,old.chunkTypes.length+1);
old.chunkCnts = Arrays.copyOf(old.chunkCnts,old.chunkCnts.length+1);
old.chunkTypes[old.chunkTypes.length-1] = _chunkType;
j = old.chunkTypes.length-1;
}
old.chunksDone++;
old.chunkCnts[j]++;
old.lastUpdateAt = System.currentTimeMillis();
old.size += _chunkSz;
return old;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/DefaultPojoWriter.java
|
package hex;
import water.Key;
import water.Lockable;
import water.codegen.CodeGeneratorPipeline;
import water.util.Log;
import water.util.SBPrintStream;
class DefaultPojoWriter<M extends Lockable<M>> extends Lockable<M> {
public DefaultPojoWriter(Key<M> key) {
super(key);
}
protected boolean toJavaCheckTooBig() {
Log.warn("toJavaCheckTooBig must be overridden for this model type to render it in the browser");
return true;
}
// Override in subclasses to provide some top-level model-specific goodness
protected SBPrintStream toJavaInit(SBPrintStream sb, CodeGeneratorPipeline fileContext) { return sb; }
// Override in subclasses to provide some inside 'predict' call goodness
// Method returns code which should be appended into generated top level class after
// predict method.
protected void toJavaPredictBody(SBPrintStream body,
CodeGeneratorPipeline classCtx,
CodeGeneratorPipeline fileCtx,
boolean verboseCode) {
throw new UnsupportedOperationException("This model type does not support conversion to Java");
}
// Generates optional "transform" method, transform method will have a different signature depending on the algo
// Empty by default - can be overridden by Model implementation
protected SBPrintStream toJavaTransform(SBPrintStream ccsb,
CodeGeneratorPipeline fileCtx,
boolean verboseCode) { // ccsb = classContext
return ccsb;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/DelegatingPojoWriter.java
|
package hex;
import water.codegen.CodeGeneratorPipeline;
import water.util.SBPrintStream;
public class DelegatingPojoWriter implements PojoWriter {
private final DefaultPojoWriter<?> _builder;
DelegatingPojoWriter(DefaultPojoWriter<?> builder) {
_builder = builder;
}
@Override
public boolean toJavaCheckTooBig() {
return _builder.toJavaCheckTooBig();
}
@Override
public SBPrintStream toJavaInit(SBPrintStream sb, CodeGeneratorPipeline fileContext) {
return _builder.toJavaInit(sb, fileContext);
}
@Override
public void toJavaPredictBody(SBPrintStream body, CodeGeneratorPipeline classCtx, CodeGeneratorPipeline fileCtx, boolean verboseCode) {
_builder.toJavaPredictBody(body, classCtx, fileCtx, verboseCode);
}
@Override
public SBPrintStream toJavaTransform(SBPrintStream ccsb, CodeGeneratorPipeline fileCtx, boolean verboseCode) {
return _builder.toJavaTransform(ccsb, fileCtx, verboseCode);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/Distribution.java
|
package hex;
import water.H2O;
import water.Iced;
import hex.genmodel.utils.DistributionFamily;
/**
* Distribution functions to be used by ML Algos
*/
public abstract class Distribution extends Iced<Distribution> {
public final double _tweediePower; // tweedie power
public final double _quantileAlpha; // for quantile regression
public double _huberDelta; // should be updated to huber_alpha quantile of absolute error of predictions via setter
public LinkFunction _linkFunction; // link function to use mainly for GLM
public final DistributionFamily _family; // distribution name, important for some algos to decide what to do
public Distribution(DistributionFamily family, LinkFunction lf) {
_tweediePower = 1.5;
_quantileAlpha = 0.5;
_huberDelta = Double.NaN;
_linkFunction = lf;
_family = family;
}
public Distribution(DistributionFamily family) {
_tweediePower = 1.5;
_quantileAlpha = 0.5;
_huberDelta = Double.NaN;
_linkFunction = new IdentityFunction();
_family = family;
}
public Distribution(Model.Parameters params, LinkFunction lf) {
_tweediePower = params._tweedie_power;
_quantileAlpha = params._quantile_alpha;
_huberDelta = 1;
assert (_tweediePower > 1 && _tweediePower < 2);
_linkFunction = lf;
_family = params._distribution;
}
public Distribution(Model.Parameters params) {
_tweediePower = params._tweedie_power;
_quantileAlpha = params._quantile_alpha;
_huberDelta = 1;
assert (_tweediePower > 1 && _tweediePower < 2);
_linkFunction = new IdentityFunction();
_family = params._distribution;
}
/**
* Setter of huber delta. Required for Huber aka M-regression.
*
* @param huberDelta
*/
public void setHuberDelta(double huberDelta) {
this._huberDelta = huberDelta;
}
/**
* Canonical link
*
* @param f value in original space, to be transformed to link space
* @return link(f)
*/
public double link(double f) { return _linkFunction.link(f); }
/**
* Canonical link inverse
*
* @param f value in link space, to be transformed back to original space
* @return linkInv(f)
*/
public double linkInv(double f) { return _linkFunction.linkInv(f); }
/**
* String version of link inverse (for POJO scoring code generation)
*
* @param f value to be transformed by link inverse
* @return String that turns into compilable expression of linkInv(f)
*/
public String linkInvString(String f) {
return _linkFunction.linkInvString(f);
}
/**
* Deviance of given distribution function at predicted value f
*
* @param w observation weight
* @param y (actual) response
* @param f (predicted) response in original response space (including offset)
* @return deviance
*/
public double deviance(double w, double y, double f) {
throw H2O.unimpl();
}
/**
* (Negative half) Gradient of deviance function at predicted value f, for actual response y
* This assumes that the deviance(w,y,f) is w*deviance(y,f), so the gradient is w * d/df deviance(y,f)
*
* @param y (actual) response
* @param f (predicted) response in link space (including offset)
* @return negative half gradient
*/
public double negHalfGradient(double y, double f) { throw H2O.unimpl(); }
/**
* (Negative half) Gradient of deviance function at predicted value f, for actual response y
* This assumes that the deviance(w,y,f) is w*deviance(y,f), so the gradient is w * d/df deviance(y,f)
*
* @param y (actual) response
* @param f (predicted) response in link space (including offset)
* @param l (class label) label of a class (labels are converted lexicographically from the original labels to 0 .. nclasses-1)
* @return negative half gradient
*/
public double negHalfGradient(double y, double f, int l) { throw H2O.unimpl(); }
/**
* Contribution to numerator for initial value computation
*
* @param w weight
* @param o offset
* @param y response
* @return weighted contribution to numerator
*/
public double initFNum(double w, double o, double y) {
throw H2O.unimpl();
}
/**
* Contribution to denominator for initial value computation
*
* @param w weight
* @param o offset
* @param y response
* @return weighted contribution to denominator
*/
public double initFDenom(double w, double o, double y) {
throw H2O.unimpl();
}
/**
* Contribution to numerator for GBM's leaf node prediction
*
* @param w weight
* @param y response
* @param z residual
* @param f predicted value (including offset)
* @return weighted contribution to numerator
*/
public double gammaNum(double w, double y, double z, double f) {
throw H2O.unimpl();
}
/**
* Contribution to denominator for GBM's leaf node prediction
*
* @param w weight
* @param y response
* @param z residual
* @param f predicted value (including offset)
* @return weighted contribution to denominator
*/
public double gammaDenom(double w, double y, double z, double f) {
throw H2O.unimpl();
}
/**
* Method useful for custom distribution only.
* It resets custom function to be loaded again.
*/
public void reset(){}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/DistributionFactory.java
|
package hex;
import hex.genmodel.utils.DistributionFamily;
import water.H2O;
import water.udf.CDistributionFunc;
import water.udf.CFuncObject;
import water.udf.CFuncRef;
/**
* Factory to get distribution based on Model.Parameters or DistributionFamily.
*
*/
public class DistributionFactory {
public static Distribution getDistribution(DistributionFamily family) {
switch (family) {
case bernoulli:
return new BernoulliDistribution(family);
case quasibinomial:
return new QuasibinomialDistribution(family);
case modified_huber:
return new ModifiedHuberDistribution(family);
case multinomial:
return new MultinomialDistribution(family);
case AUTO:
case gaussian:
return new GaussianDistribution(family);
case poisson:
return new PoissonDistribution(family);
case gamma:
return new GammaDistribution(family);
case laplace:
return new LaplaceDistribution(family);
default:
throw H2O.unimpl("Try to get "+family+" which is not supported.");
}
}
public static Distribution getDistribution(Model.Parameters params) {
DistributionFamily family = params._distribution;
switch (family) {
case bernoulli:
return new BernoulliDistribution(family);
case quasibinomial:
return new QuasibinomialDistribution(family);
case modified_huber:
return new ModifiedHuberDistribution(family);
case multinomial:
return new MultinomialDistribution(family);
case AUTO:
case gaussian:
return new GaussianDistribution(family);
case poisson:
return new PoissonDistribution(family);
case gamma:
return new GammaDistribution(family);
case laplace:
return new LaplaceDistribution(family);
case tweedie:
return new TweedieDistribution(params);
case huber:
return new HuberDistribution(params);
case quantile:
return new QuantileDistribution(params);
case custom:
return CustomDistribution.getCustomDistribution(params);
default:
throw H2O.unimpl("Try to get "+family+" which is not supported.");
}
}
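// Illustrative usage sketch (values are hypothetical, not part of the original file):
//   Distribution d = DistributionFactory.getDistribution(DistributionFamily.gaussian);
//   double dev = d.deviance(/*w*/ 1, /*y*/ 3.0, /*f*/ 2.5); // w * (y-f)^2 == 0.25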
/**
* Util class to calculate log and exp function for distribution and link function identically
*/
final public static class LogExpUtil {
final static public double MIN_LOG = -19;
final static public double MAX = 1e19;
/**
* Sanitized exponential function - helper function.
*
* @param x value to be transformed
* @return result of exp function
*/
public static double exp(double x) { return Math.min(MAX, Math.exp(x)); }
/**
* Sanitized log function - helper function
*
* @param x value to be transformed
* @return result of log function
*/
public static double log(double x) {
x = Math.max(0, x);
return x == 0 ? MIN_LOG : Math.max(MIN_LOG, Math.log(x));
}
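// Example (illustrative): log(0) returns MIN_LOG == -19 instead of -Infinity,
// and exp(100) is capped at MAX == 1e19, keeping downstream arithmetic finite.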
}
}
class GaussianDistribution extends Distribution {
public GaussianDistribution(DistributionFamily family){
super(family);
}
@Override
public double deviance(double w, double y, double f) {
return w * (y - f) * (y - f); // leads to wMSE
}
@Override
public double negHalfGradient(double y, double f) {
return y - linkInv(f);
}
@Override
public double initFNum(double w, double o, double y) {
return w * (y - o);
}
@Override
public double initFDenom(double w, double o, double y) {
return w;
}
@Override
public double gammaNum(double w, double y, double z, double f) {
return w * z;
}
@Override
public double gammaDenom(double w, double y, double z, double f) {
return w;
}
}
class BernoulliDistribution extends Distribution {
public BernoulliDistribution(DistributionFamily family){ super(family, new LogitFunction()); }
@Override
public double deviance(double w, double y, double f) { return -2 * w * (y * DistributionFactory.LogExpUtil.log(f) + (1 - y) * DistributionFactory.LogExpUtil.log(1 - f)); }
@Override
public double negHalfGradient(double y, double f) { return y - linkInv(f); }
@Override
public double initFNum(double w, double o, double y) { return w * (y - o); }
@Override
public double initFDenom(double w, double o, double y) { return w; }
@Override
public double gammaNum(double w, double y, double z, double f) { return w * z; }
@Override
public double gammaDenom(double w, double y, double z, double f) {
double ff = y - z;
return w * ff * (1 - ff);
}
}
class QuasibinomialDistribution extends Distribution {
public QuasibinomialDistribution(DistributionFamily family){
super(family, new LogitFunction());
}
@Override
public double deviance(double w, double y, double f) {
if (y == f) return 0;
if (f > 1)
return -2 * w * y * DistributionFactory.LogExpUtil.log(f);
else if (f < 0)
return -2 * w * (1 - y) * DistributionFactory.LogExpUtil.log(1 - f);
else
return -2 * w * (y * DistributionFactory.LogExpUtil.log(f) + (1 - y) * DistributionFactory.LogExpUtil.log(1 - f));
}
@Override
public double negHalfGradient(double y, double f) {
double ff = linkInv(f);
if (ff == y)
return 0;
else if (ff > 1)
return y / ff;
else if (ff < 0)
return (1 - y) / (ff - 1);
else
return y - ff;
}
@Override
public double initFNum(double w, double o, double y) {
return w * (y - o);
}
@Override
public double initFDenom(double w, double o, double y) {
return w;
}
@Override
public double gammaNum(double w, double y, double z, double f) {
return w * z;
}
@Override
public double gammaDenom(double w, double y, double z, double f) {
double ff = y - z;
return w * ff * (1 - ff);
}
}
class ModifiedHuberDistribution extends Distribution {
public ModifiedHuberDistribution(DistributionFamily family){
super(family, new LogitFunction());
}
@Override
public double deviance(double w, double y, double f) {
double yf = (2 * y - 1) * f;
if (yf < -1)
return -w * 4 * yf;
else if (yf > 1)
return 0;
else
return w * yf * yf;
}
@Override
public double negHalfGradient(double y, double f) {
double yf = (2 * y - 1) * f;
if (yf < -1)
return 2 * (2 * y - 1);
else if (yf > 1)
return 0;
else
return -f * (2 * y - 1) * (2 * y - 1);
}
@Override
public double initFNum(double w, double o, double y) {
return y == 1 ? w : 0;
}
@Override
public double initFDenom(double w, double o, double y) {
return y == 1 ? 0 : w;
}
@Override
public double gammaNum(double w, double y, double z, double f) {
double yf = (2 * y - 1) * f;
if (yf < -1) return w * 4 * (2 * y - 1);
else if (yf > 1) return 0;
else return w * 2 * (2 * y - 1) * (1 - yf);
}
@Override
public double gammaDenom(double w, double y, double z, double f) {
double yf = (2 * y - 1) * f;
if (yf < -1) return -w * 4 * yf;
else if (yf > 1) return 0;
else return w * (1 - yf) * (1 - yf);
}
}
class MultinomialDistribution extends Distribution {
public MultinomialDistribution(DistributionFamily family){ super(family, new LogFunction()); }
@Override
public double initFNum(double w, double o, double y) {
return w * (y - o);
}
@Override
public double initFDenom(double w, double o, double y) {
return w;
}
@Override
public double gammaNum(double w, double y, double z, double f) {
return w * z;
}
@Override
public double gammaDenom(double w, double y, double z, double f) {
double absz = Math.abs(z);
return w * (absz * (1 - absz));
}
@Override
public double negHalfGradient(double y, double f, int l) {
return ((int) y == l ? 1f : 0f) - f;
}
}
class PoissonDistribution extends Distribution {
public PoissonDistribution(DistributionFamily family){
super(family, new LogFunction());
}
@Override
public double deviance(double w, double y, double f) {
return 2 * w * (y * DistributionFactory.LogExpUtil.log(y / f) - y + f);
}
@Override
public double negHalfGradient(double y, double f) {
return y - linkInv(f);
}
@Override
public double initFNum(double w, double o, double y) {
return w * y;
}
@Override
public double initFDenom(double w, double o, double y) {
return w * linkInv(o);
}
@Override
public double gammaNum(double w, double y, double z, double f) {
return w * y;
}
@Override
public double gammaDenom(double w, double y, double z, double f) { return w * (y - z); } // y - z == LogExpUtil.exp(f)
}
class GammaDistribution extends Distribution {
public GammaDistribution(DistributionFamily family){
super(family, new LogFunction());
}
@Override
public double deviance(double w, double y, double f) {
return 2 * w * (DistributionFactory.LogExpUtil.log(f / y) + ((y == 0 && f == 0) ? 1 : y / f) - 1);
}
@Override
public double negHalfGradient(double y, double f) {
return y * DistributionFactory.LogExpUtil.exp(-f) - 1;
}
@Override
public double initFNum(double w, double o, double y) {
return w * y * linkInv(-o);
}
@Override
public double initFDenom(double w, double o, double y) {
return w;
}
@Override
public double gammaNum(double w, double y, double z, double f) { return w * (z + 1); } // z + 1 == y * LogExpUtil.exp(-f)
@Override
public double gammaDenom(double w, double y, double z, double f) { return w; }
}
class TweedieDistribution extends Distribution {
public TweedieDistribution(Model.Parameters params){
super(params, new LogFunction());
}
@Override
public double deviance(double w, double y, double f) {
f = link(f); // bring back f to link space
assert (_tweediePower > 1 && _tweediePower < 2);
return 2 * w * (Math.pow(y, 2 - _tweediePower) / ((1 - _tweediePower) * (2 - _tweediePower)) -
y * DistributionFactory.LogExpUtil.exp(f * (1 - _tweediePower)) / (1 - _tweediePower) +
DistributionFactory.LogExpUtil.exp(f * (2 - _tweediePower)) / (2 - _tweediePower));
}
@Override
public double negHalfGradient(double y, double f) {
assert (_tweediePower > 1 && _tweediePower < 2);
return y * DistributionFactory.LogExpUtil.exp(f * (1 - _tweediePower)) - DistributionFactory.LogExpUtil.exp(f * (2 - _tweediePower));
}
@Override
public double initFNum(double w, double o, double y) {
return w * y * DistributionFactory.LogExpUtil.exp(o * (1 - _tweediePower));
}
@Override
public double initFDenom(double w, double o, double y) {
return w * DistributionFactory.LogExpUtil.exp(o * (2 - _tweediePower));
}
@Override
public double gammaNum(double w, double y, double z, double f) { return w * y * DistributionFactory.LogExpUtil.exp(f * (1 - _tweediePower)); }
@Override
public double gammaDenom(double w, double y, double z, double f) { return w * DistributionFactory.LogExpUtil.exp(f * (2 - _tweediePower)); }
}
class HuberDistribution extends Distribution {
public HuberDistribution(Model.Parameters params){
super(params);
}
@Override
public double deviance(double w, double y, double f) {
if (Math.abs(y - f) <= _huberDelta) {
return w * (y - f) * (y - f); // same as wMSE
} else {
return w * (2 * Math.abs(y - f) - _huberDelta) * _huberDelta; // w * (2 * MAE - delta) * delta
}
}
@Override
public double negHalfGradient(double y, double f) {
if (Math.abs(y - f) <= _huberDelta) {
return y - f;
} else {
return f >= y ? -_huberDelta : _huberDelta;
}
}
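// Example (illustrative): with _huberDelta = 1 and w = 1, a residual of 0.5 costs
// 0.25 (quadratic regime) while a residual of 3 costs (2*3 - 1) * 1 = 5 (linear regime).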
}
class LaplaceDistribution extends Distribution {
public LaplaceDistribution(DistributionFamily family){
super(family);
}
@Override
public double deviance(double w, double y, double f) {
return w * Math.abs(y - f);
}
@Override
public double negHalfGradient(double y, double f) {
return f > y ? -0.5 : 0.5;
}
}
class QuantileDistribution extends Distribution {
public QuantileDistribution(Model.Parameters params){
super(params);
}
@Override
public double deviance(double w, double y, double f) { return y > f ? w * _quantileAlpha * (y - f) : w * (1 - _quantileAlpha) * (f - y); }
@Override
public double negHalfGradient(double y, double f) { return y > f ? 0.5 * _quantileAlpha : 0.5 * (_quantileAlpha - 1); }
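// Example (illustrative): with _quantileAlpha = 0.9, under-prediction (y > f) costs
// w * 0.9 * (y - f) while over-prediction costs only w * 0.1 * (f - y), pulling
// predictions toward the 90th percentile of the response.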
}
/**
* Custom distribution class for customized loss and prediction calculation.
* Currently supported only for GBM algorithm.
*/
class CustomDistribution extends Distribution {
private CustomDistributionWrapper _wrapper;
private static CustomDistribution _instance;
private String _distributionDef;
private CustomDistribution(Model.Parameters params){
super(params);
_distributionDef = params._custom_distribution_func;
_wrapper = new CustomDistributionWrapper(CFuncRef.from(params._custom_distribution_func));
assert _wrapper != null;
assert _wrapper.getFunc() != null;
super._linkFunction = LinkFunctionFactory.getLinkFunction(_wrapper.getFunc().link());
}
public static CustomDistribution getCustomDistribution(Model.Parameters params){
if(_instance == null || !params._custom_distribution_func.equals(_instance._distributionDef)){
_instance = new CustomDistribution(params);
}
return _instance;
}
@Override
public double deviance(double w, double y, double f) { throw H2O.unimpl("Deviance is not supported in Custom Distribution."); }
@Override
public double negHalfGradient(double y, double f) { return _wrapper.getFunc().gradient(y, f); }
@Override
public double negHalfGradient(double y, double f, int l) { return _wrapper.getFunc().gradient(y, f, l); }
@Override
public double initFNum(double w, double o, double y) {
double[] init = _wrapper.getFunc().init(w, o, y);
assert init.length == 2;
return init[0];
}
@Override
public double initFDenom(double w, double o, double y) {
double[] init = _wrapper.getFunc().init(w, o, y);
assert init.length == 2;
return init[1];
}
@Override
public double gammaNum(double w, double y, double z, double f) {
double[] gamma = _wrapper.getFunc().gamma(w, y, z, f);
assert gamma.length == 2;
return gamma[0];
}
@Override
public double gammaDenom(double w, double y, double z, double f) {
double[] gamma = _wrapper.getFunc().gamma(w, y, z, f);
assert gamma.length == 2;
return gamma[1];
}
@Override
public void reset() { _wrapper.setupLocal(); }
}
/**
* Custom distribution wrapper to get user custom functions to H2O Java code.
*/
class CustomDistributionWrapper extends CFuncObject<CDistributionFunc> {
CustomDistributionWrapper(CFuncRef ref){
super(ref);
}
@Override
protected Class<CDistributionFunc> getFuncType() {
return CDistributionFunc.class;
}
@Override
protected void setupLocal() { super.setupLocal(); }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/FeatureInteraction.java
|
package hex;
import hex.genmodel.algos.tree.SharedTreeNode;
import java.util.Comparator;
import java.util.List;
public class FeatureInteraction {
public String name;
public int depth;
public double gain;
public double cover;
public double fScore;
public double fScoreWeighted;
public double averageFScoreWeighted;
public double averageGain;
public double expectedGain;
public double treeIndex;
public double treeDepth;
public double averageTreeIndex;
public double averageTreeDepth;
public boolean hasLeafStatistics;
public double sumLeafValuesLeft;
public double sumLeafCoversLeft;
public double sumLeafValuesRight;
public double sumLeafCoversRight;
public SplitValueHistogram splitValueHistogram;
public FeatureInteraction(List<SharedTreeNode> interactionPath, double gain, double cover, double pathProba, double depth, double fScore, double treeIndex) {
this.name = interactionPathToStr(interactionPath, false, true);
this.depth = interactionPath.size() - 1;
this.gain = gain;
this.cover = cover;
this.fScore = fScore;
this.fScoreWeighted = pathProba;
this.averageFScoreWeighted = this.fScoreWeighted / this.fScore;
this.averageGain = this.gain / this.fScore;
this.expectedGain = this.gain * pathProba;
this.treeIndex = treeIndex;
this.treeDepth = depth;
this.averageTreeIndex = this.treeIndex / this.fScore;
this.averageTreeDepth = this.treeDepth / this.fScore;
this.hasLeafStatistics = false;
this.splitValueHistogram = new SplitValueHistogram();
if (this.depth == 0) {
splitValueHistogram.addValue(interactionPath.get(0).getSplitValue(), 1);
}
}
public static String interactionPathToStr(final List<SharedTreeNode> interactionPath, final boolean encodePath, final boolean sortByFeature) {
if (sortByFeature && !encodePath) {
interactionPath.sort(Comparator.comparing(SharedTreeNode::getColName));
}
StringBuilder sb = new StringBuilder();
String delim = encodePath ? "-" : "|";
for (SharedTreeNode node : interactionPath) {
if (node != interactionPath.get(0)) {
sb.append(delim);
}
sb.append(encodePath ? node.getNodeNumber() : node.getColName());
}
return sb.toString();
}
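// Example (illustrative; node numbers are hypothetical): a depth-1 path over columns
// {income, age} renders as "age|income" (sorted by feature name), while
// encodePath=true renders node numbers with "-" instead, e.g. "3-7".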
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/FeatureInteractions.java
|
package hex;
import hex.genmodel.algos.tree.SharedTreeNode;
import org.apache.commons.lang.mutable.MutableInt;
import water.util.TwoDimTable;
import java.util.*;
import java.util.stream.Collectors;
public class FeatureInteractions {
private final HashMap<String, FeatureInteraction> map;
public FeatureInteractions() {
this.map = new HashMap<>();
}
public void mergeWith(FeatureInteractions featureInteractions) {
for (Map.Entry<String,FeatureInteraction> currEntry : featureInteractions.entrySet()) {
if (this.map.containsKey(currEntry.getKey())) {
FeatureInteraction leftFeatureInteraction = this.get(currEntry.getKey());
FeatureInteraction rightFeatureInteraction = currEntry.getValue();
leftFeatureInteraction.gain += rightFeatureInteraction.gain;
leftFeatureInteraction.cover += rightFeatureInteraction.cover;
leftFeatureInteraction.fScore += rightFeatureInteraction.fScore;
leftFeatureInteraction.fScoreWeighted += rightFeatureInteraction.fScoreWeighted;
leftFeatureInteraction.averageFScoreWeighted = leftFeatureInteraction.fScoreWeighted / leftFeatureInteraction.fScore;
leftFeatureInteraction.averageGain = leftFeatureInteraction.gain / leftFeatureInteraction.fScore;
leftFeatureInteraction.expectedGain += rightFeatureInteraction.expectedGain;
leftFeatureInteraction.treeIndex += rightFeatureInteraction.treeIndex;
leftFeatureInteraction.averageTreeIndex = leftFeatureInteraction.treeIndex / leftFeatureInteraction.fScore;
leftFeatureInteraction.treeDepth += rightFeatureInteraction.treeDepth;
leftFeatureInteraction.averageTreeDepth = leftFeatureInteraction.treeDepth / leftFeatureInteraction.fScore;
leftFeatureInteraction.sumLeafCoversRight += rightFeatureInteraction.sumLeafCoversRight;
leftFeatureInteraction.sumLeafCoversLeft += rightFeatureInteraction.sumLeafCoversLeft;
leftFeatureInteraction.sumLeafValuesRight += rightFeatureInteraction.sumLeafValuesRight;
leftFeatureInteraction.sumLeafValuesLeft += rightFeatureInteraction.sumLeafValuesLeft;
leftFeatureInteraction.splitValueHistogram.merge(rightFeatureInteraction.splitValueHistogram);
} else {
this.put(currEntry.getKey(), currEntry.getValue());
}
}
}
public boolean isEmpty(){
return entrySet().isEmpty();
}
public int maxDepth() {
if(isEmpty()) return 0;
return Collections.max(this.entrySet(), Comparator.comparingInt(entry -> entry.getValue().depth)).getValue().depth;
}
public TwoDimTable[] getAsTable() {
if(isEmpty()) return null;
int maxDepth = maxDepth();
TwoDimTable[] twoDimTables = new TwoDimTable[maxDepth + 1];
for (int depth = 0; depth < maxDepth + 1; depth++) {
twoDimTables[depth] = constructFeatureInteractionsTable(depth);
}
return twoDimTables;
}
private List<FeatureInteraction> getFeatureInteractionsOfDepth(int depthRequired) {
return this.entrySet()
.stream()
.filter(entry -> entry.getValue().depth == depthRequired)
.map(Map.Entry::getValue)
.collect(Collectors.toList());
}
private List<FeatureInteraction> getFeatureInteractionsWithLeafStatistics() {
return this.entrySet()
.stream()
.filter(entry -> entry.getValue().hasLeafStatistics)
.map(Map.Entry::getValue)
.collect(Collectors.toList());
}
private TwoDimTable constructFeatureInteractionsTable(int depth) {
assert depth >= 0 : "Depth has to be >= 0.";
String[] colHeaders = new String[] {"Interaction", "Gain", "FScore", "wFScore", "Average wFScore", "Average Gain",
"Expected Gain", "Gain Rank", "FScore Rank", "wFScore Rank", "Avg wFScore Rank", "Avg Gain Rank",
"Expected Gain Rank", "Average Rank", "Average Tree Index", "Average Tree Depth"};
String[] colTypes = new String[] {"string", "double", "double", "double", "double", "double",
"double", "int", "int", "int", "int", "int",
"int", "double", "double", "double"};
String[] colFormat = new String[] {"%s", "%.5f", "%.5f", "%.5f", "%.5f", "%.5f",
"%.5f", "%d", "%d", "%d", "%d", "%d",
"%d", "%.5f", "%.5f", "%.5f"};
List<FeatureInteraction> featureInteractions = getFeatureInteractionsOfDepth(depth);
int numRows = featureInteractions.size();
List<FeatureInteraction> gainSorted = new ArrayList<>(featureInteractions);
gainSorted.sort(Comparator.comparing(entry -> -entry.gain));
List<FeatureInteraction> fScoreSorted = new ArrayList<>(featureInteractions);
fScoreSorted.sort(Comparator.comparing(entry -> -entry.fScore));
List<FeatureInteraction> fScoreWeightedSorted = new ArrayList<>(featureInteractions);
fScoreWeightedSorted.sort(Comparator.comparing(entry -> -entry.fScoreWeighted));
List<FeatureInteraction> averagefScoreWeightedSorted = new ArrayList<>(featureInteractions);
averagefScoreWeightedSorted.sort(Comparator.comparing(entry -> -entry.averageFScoreWeighted));
List<FeatureInteraction> averageGainSorted = new ArrayList<>(featureInteractions);
averageGainSorted.sort(Comparator.comparing(entry -> -entry.averageGain));
List<FeatureInteraction> expectedGainSorted = new ArrayList<>(featureInteractions);
expectedGainSorted.sort(Comparator.comparing(entry -> -entry.expectedGain));
TwoDimTable table = new TwoDimTable(
"Interaction Depth " + depth, null,
new String[numRows],
colHeaders,
colTypes,
colFormat,
"");
for (int i = 0; i < numRows; i++) {
String name = featureInteractions.get(i).name;
table.set(i, 0, name);
table.set(i, 1, featureInteractions.get(i).gain);
table.set(i, 2, featureInteractions.get(i).fScore);
table.set(i, 3, featureInteractions.get(i).fScoreWeighted);
table.set(i, 4, featureInteractions.get(i).averageFScoreWeighted);
table.set(i, 5, featureInteractions.get(i).averageGain);
table.set(i, 6, featureInteractions.get(i).expectedGain);
double gainRank = indexOfInteractionWithName(name, gainSorted) + 1;
table.set(i, 7, gainRank);
double fScoreRank = indexOfInteractionWithName(name, fScoreSorted) + 1;
table.set(i, 8, fScoreRank);
double fScoreWeightedRank = indexOfInteractionWithName(name, fScoreWeightedSorted) + 1;
table.set(i, 9, fScoreWeightedRank);
double avgFScoreWeightedRank = indexOfInteractionWithName(name, averagefScoreWeightedSorted) + 1;
table.set(i, 10, avgFScoreWeightedRank);
double avgGainRank = indexOfInteractionWithName(name, averageGainSorted) + 1;
table.set(i, 11, avgGainRank);
double expectedGainRank = indexOfInteractionWithName(name, expectedGainSorted) + 1;
table.set(i, 12, expectedGainRank);
table.set(i, 13, (gainRank + fScoreRank + fScoreWeightedRank + avgFScoreWeightedRank + avgGainRank + expectedGainRank) / 6);
table.set(i, 14, featureInteractions.get(i).averageTreeIndex);
table.set(i, 15, featureInteractions.get(i).averageTreeDepth);
}
return table;
}
private int indexOfInteractionWithName(String name, List<FeatureInteraction> featureInteractions) {
for (int i = 0; i < featureInteractions.size(); i++)
if (featureInteractions.get(i).name.equals(name))
return i;
return -1;
}
public TwoDimTable getLeafStatisticsTable() {
String[] colHeaders = new String[] {"Interaction", "Sum Leaf Values Left", "Sum Leaf Values Right", "Sum Leaf Covers Left", "Sum Leaf Covers Right"};
String[] colTypes = new String[] {"string", "double", "double", "double", "double"};
String[] colFormat = new String[] {"%s", "%.5f", "%.5f", "%.5f", "%.5f"};
List<FeatureInteraction> featureInteractions = getFeatureInteractionsWithLeafStatistics();
int numRows = featureInteractions.size();
TwoDimTable table = new TwoDimTable(
"Leaf Statistics", null,
new String[numRows],
colHeaders,
colTypes,
colFormat,
"");
for (int i = 0; i < numRows; i++) {
table.set(i, 0, featureInteractions.get(i).name);
table.set(i, 1, featureInteractions.get(i).sumLeafValuesLeft);
table.set(i, 2, featureInteractions.get(i).sumLeafValuesRight);
table.set(i, 3, featureInteractions.get(i).sumLeafCoversLeft);
table.set(i, 4, featureInteractions.get(i).sumLeafCoversRight);
}
return table;
}
public TwoDimTable[] getSplitValueHistograms() {
List<FeatureInteraction> featureInteractions = getFeatureInteractionsOfDepth(0);
int numHistograms = featureInteractions.size();
TwoDimTable[] splitValueHistograms = new TwoDimTable[numHistograms];
for (int i = 0; i < numHistograms; i++) {
splitValueHistograms[i] = constructHistogramForFeatureInteraction(featureInteractions.get(i));
}
return splitValueHistograms;
}
private TwoDimTable constructHistogramForFeatureInteraction(FeatureInteraction featureInteraction) {
String[] colHeaders = new String[] {"Split Value", "Count"};
String[] colTypes = new String[] {"double", "int"};
String[] colFormat = new String[] {"%.5f", "%d"};
int N = featureInteraction.splitValueHistogram.entrySet().size();
TwoDimTable table = new TwoDimTable(
featureInteraction.name + " Split Value Histogram", null,
new String[N],
colHeaders,
colTypes,
colFormat,
"");
int i = 0;
for (Map.Entry<Double, MutableInt> entry : featureInteraction.splitValueHistogram.entrySet()) {
table.set(i, 0, entry.getKey());
table.set(i, 1, entry.getValue().intValue());
i++;
}
return table;
}
public int size() {
return map.size();
}
public FeatureInteraction get(String key) {
return map.get(key);
}
public FeatureInteraction put(String key, FeatureInteraction value) {
return map.put(key, value);
}
public Set<Map.Entry<String, FeatureInteraction>> entrySet() {
return map.entrySet();
}
public static void collectFeatureInteractions(SharedTreeNode node, List<SharedTreeNode> interactionPath,
double currentGain, double currentCover, double pathProba, int depth, int deepening,
FeatureInteractions featureInteractions, Set<String> memo, int maxInteractionDepth,
int maxTreeDepth, int maxDeepening, int treeIndex, boolean useSquaredErrorForGain) {
if (node.isLeaf() || depth == maxTreeDepth) {
return;
}
interactionPath.add(node);
currentGain += node.getGain(useSquaredErrorForGain);
currentCover += node.getWeight();
double ppl = pathProba * (node.getLeftChild().getWeight() / node.getWeight());
double ppr = pathProba * (node.getRightChild().getWeight() / node.getWeight());
FeatureInteraction featureInteraction = new FeatureInteraction(interactionPath, currentGain, currentCover, pathProba, depth, 1, treeIndex);
if ((depth < maxDeepening) || (maxDeepening < 0)) {
collectFeatureInteractions(node.getLeftChild(), new ArrayList<>(), 0, 0, ppl, depth + 1,
deepening + 1, featureInteractions, memo, maxInteractionDepth, maxTreeDepth, maxDeepening, treeIndex, useSquaredErrorForGain);
collectFeatureInteractions(node.getRightChild(), new ArrayList<>(), 0, 0, ppr, depth + 1,
deepening + 1, featureInteractions, memo, maxInteractionDepth, maxTreeDepth, maxDeepening, treeIndex, useSquaredErrorForGain);
}
String path = FeatureInteraction.interactionPathToStr(interactionPath, true, true);
FeatureInteraction foundFI = featureInteractions.get(featureInteraction.name);
if (foundFI == null) {
featureInteractions.put(featureInteraction.name, featureInteraction);
memo.add(path);
} else {
if (memo.contains(path)) {
return;
}
memo.add(path);
foundFI.gain += currentGain;
foundFI.cover += currentCover;
foundFI.fScore += 1;
foundFI.fScoreWeighted += pathProba;
foundFI.averageFScoreWeighted = foundFI.fScoreWeighted / foundFI.fScore;
foundFI.averageGain = foundFI.gain / foundFI.fScore;
foundFI.expectedGain += currentGain * pathProba;
foundFI.treeDepth += depth;
foundFI.averageTreeDepth = foundFI.treeDepth / foundFI.fScore;
foundFI.treeIndex += treeIndex;
foundFI.averageTreeIndex = foundFI.treeIndex / foundFI.fScore;
foundFI.splitValueHistogram.merge(featureInteraction.splitValueHistogram);
}
if (interactionPath.size() - 1 == maxInteractionDepth)
return;
foundFI = featureInteractions.get(featureInteraction.name);
SharedTreeNode leftChild = node.getLeftChild();
if (leftChild.isLeaf() && deepening == 0) {
foundFI.sumLeafValuesLeft += leftChild.getLeafValue();
foundFI.sumLeafCoversLeft += leftChild.getWeight();
foundFI.hasLeafStatistics = true;
}
SharedTreeNode rightChild = node.getRightChild();
if (rightChild.isLeaf() && deepening == 0) {
foundFI.sumLeafValuesRight += rightChild.getLeafValue();
foundFI.sumLeafCoversRight += rightChild.getWeight();
foundFI.hasLeafStatistics = true;
}
collectFeatureInteractions(leftChild, new ArrayList<>(interactionPath), currentGain, currentCover, ppl,
depth + 1, deepening, featureInteractions, memo, maxInteractionDepth, maxTreeDepth, maxDeepening, treeIndex, useSquaredErrorForGain);
collectFeatureInteractions(rightChild, new ArrayList<>(interactionPath), currentGain, currentCover, ppr,
depth + 1, deepening, featureInteractions, memo, maxInteractionDepth, maxTreeDepth, maxDeepening, treeIndex, useSquaredErrorForGain);
}
public static TwoDimTable[][] getFeatureInteractionsTable(FeatureInteractions featureInteractions) {
if(featureInteractions == null) {
return null;
}
TwoDimTable[][] table = new TwoDimTable[3][];
table[0] = featureInteractions.getAsTable();
table[1] = new TwoDimTable[]{featureInteractions.getLeafStatisticsTable()};
table[2] = featureInteractions.getSplitValueHistograms();
return table;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/FeatureInteractionsCollector.java
|
package hex;
import water.util.TwoDimTable;
/**
* Implementors of this interface have feature interactions calculation implemented.
*/
public interface FeatureInteractionsCollector {
TwoDimTable[][] getFeatureInteractionsTable(int maxInteractionDepth, int maxTreeDepth, int maxDeepening);
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/FoldAssignment.java
|
package hex;
import water.Iced;
import water.Key;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.Frame;
import water.fvec.TransformWrappedVec;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.VecUtils;
import java.util.Arrays;
public class FoldAssignment extends Iced<FoldAssignment> {
protected final Vec _fold;
FoldAssignment(Vec fold) {
_fold = fold;
}
Frame toFrame(Key<Frame> key) {
return new Frame(key, new String[]{"fold_assignment"}, new Vec[]{_fold});
}
Vec getAdaptedFold() {
return _fold;
}
void remove(boolean keepOriginalFold) {
if (!keepOriginalFold)
_fold.remove();
}
static FoldAssignment fromUserFoldSpecification(int N, Vec fold) {
int[] foldValues = findActualFoldValues(fold);
if ( ! (fold.isCategorical()
|| (fold.isInt()
&& foldValues.length == N // No holes in the sequence
&& ((fold.min() == 0 && fold.max() == N-1) || (fold.min() == 1 && fold.max() == N)))) ) // Allow 0 to N-1, or 1 to N
throw new H2OIllegalArgumentException("Fold column must be either categorical or contiguous integers from 0..N-1 or 1..N");
return new TransformFoldAssignment(fold, foldValues);
}
static FoldAssignment fromInternalFold(int N, Vec fold) {
assert fold.isInt();
assert fold.min() == 0 && fold.max() == N-1;
return new FoldAssignment(fold);
}
static int nFoldWork(Vec fold) {
return findActualFoldValues(fold).length;
}
/**
* For a given fold Vec finds the actual used fold values (only used levels).
*
* @param f input Vec
* @return indices of the used domain levels (for categorical fold) or the used values (for a numerical fold)
*/
static int[] findActualFoldValues(Vec f) {
Vec fc = VecUtils.toCategoricalVec(f);
final String[] actualDomain;
try {
if (!f.isCategorical()) {
actualDomain = fc.domain();
} else {
actualDomain = VecUtils.collectDomainFast(fc);
}
} finally {
fc.remove();
}
int N = actualDomain.length;
if (Arrays.equals(actualDomain, fc.domain())) {
int offset = f.isCategorical() ? 0 : (int) f.min();
return ArrayUtils.seq(offset, N + offset);
} else {
int[] mapping = new int[N];
String[] fullDomain = fc.domain();
for (int i = 0; i < N; i++) {
int pos = ArrayUtils.find(fullDomain, actualDomain[i]);
assert pos >= 0;
mapping[i] = pos;
}
return mapping;
}
}
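// Worked example (illustrative): for a categorical fold Vec whose full domain is
// {"a","b","c","d"} but where only "b" and "d" actually occur, findActualFoldValues returns
// {1, 3}; TransformFoldAssignment below then remaps raw value 1 to fold 0 and raw value 3 to fold 1.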
}
class TransformFoldAssignment extends FoldAssignment {
private final Vec _adaptedFold;
TransformFoldAssignment(Vec fold, int[] usedFoldValues) {
super(fold);
_adaptedFold = makeAdaptedFold(usedFoldValues);
}
Vec getAdaptedFold() {
return _adaptedFold;
}
final Vec makeAdaptedFold(int[] usedFoldValues) {
int[] foldValuesToFoldIndices = foldValuesToFoldIndices(usedFoldValues);
return new TransformWrappedVec(new Vec[]{_fold}, new MappingTransformFactory(foldValuesToFoldIndices));
}
static int[] foldValuesToFoldIndices(int[] usedFoldValues) {
int max = ArrayUtils.maxValue(usedFoldValues);
final int[] valueToFoldIndex = new int[max + 1];
Arrays.fill(valueToFoldIndex, -1);
for (int i = 0; i < usedFoldValues.length; i++) {
valueToFoldIndex[usedFoldValues[i]] = i;
}
return valueToFoldIndex;
}
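// Worked example (illustrative): usedFoldValues = {1, 3} yields valueToFoldIndex = {-1, 0, -1, 1},
// i.e. raw fold value 1 maps to fold index 0, raw value 3 to fold index 1, and unused values to -1.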
@Override
void remove(boolean keepOriginalFold) {
_adaptedFold.remove();
}
}
class MappingTransformFactory extends Iced<MappingTransformFactory>
implements TransformWrappedVec.TransformFactory<MappingTransformFactory> {
final int[] _mapping;
public MappingTransformFactory(int[] mapping) {
_mapping = mapping;
}
@Override
public TransformWrappedVec.Transform create(int n_inputs) {
assert n_inputs == 1;
return new TransformWrappedVec.Function1DTransform() {
@Override
public double apply(double x) {
return _mapping[(int) x];
}
};
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/FrameSplitter.java
|
package hex;
import java.util.Arrays;
import jsr166y.CountedCompleter;
import water.*;
import water.H2O.H2OCountedCompleter;
import water.fvec.*;
/**
* Frame splitter function to divide given frame into
* multiple partitions based on given ratios.
*
* <p>The task creates <code>ratios.length+1</code> output frames, each containing a
* demanded fraction of rows from the source dataset.</p>
*
* <p>The task internally extracts data from source chunks and creates output chunks, preserving the order of parts.
* I.e., the 1st partition contains the first P1 rows, the 2nd partition contains the following P2 rows, ...
* </p>
*
* <p>Assumptions and invariants</p>
* <ul>
* <li>the number of demanded split parts is reasonable, i.e., <10. The task is not designed to split into many small parts.</li>
* <li>the worker DOES NOT preserve the distribution of new chunks over the cloud according to the source dataset chunks.</li>
* <li>rows inside one output chunk are not shuffled, they are extracted deterministically in the same order as they appear in source chunk.</li>
* <li>workers can enforce data transfers if they need to obtain data from remote chunks.</li>
* </ul>
*
* <p>NOTE: the implementation is data-transfer expensive and in some cases it would be beneficial to use the original
* implementation from <a href="https://github.com/0xdata/h2o/commits/9af3f4e">9af3f4e</a>.</p>
*/
public class FrameSplitter extends H2OCountedCompleter<FrameSplitter> {
/** Dataset to split */
final Frame dataset;
/** Split ratios - resulting number of split is ratios.length+1 */
final double[] ratios;
/** Destination keys for each output frame split. */
final Key<Frame>[] destKeys;
/** Optional job key */
final Key<Job> jobKey;
/** Output frames for each output split part */
private Frame[] splits;
public FrameSplitter(Frame dataset, double[] ratios, Key<Frame>[] destKeys, Key<Job> jobKey) {
this(null, dataset, ratios,destKeys,jobKey);
}
public FrameSplitter(H2OCountedCompleter cc, Frame dataset, double[] ratios, Key<Frame>[] destKeys, Key<Job> jobKey) {
super(cc);
assert ratios.length > 0 : "No ratio specified!";
assert ratios.length < 100 : "Too many frame splits demanded!";
assert destKeys!=null : "Destination keys are not specified!";
assert destKeys.length == ratios.length+1 : "Unexpected number of destination keys.";
this.dataset = dataset;
this.ratios = ratios;
this.jobKey = jobKey;
this.destKeys = destKeys;
}
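// Usage sketch (hypothetical keys `k1`/`k2`, not part of the original source):
//   FrameSplitter fs = new FrameSplitter(data, new double[]{0.7}, new Key[]{k1, k2}, null);
//   H2O.submitTask(fs);
//   Frame[] parts = fs.getResult(); // parts[0] holds ~70% of the rows, parts[1] the remaining ~30%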
@Override public void compute2() {
// Lock all possible data
dataset.read_lock(jobKey);
// Create a template vector for each segment
final Vec[][] templates = makeTemplates(dataset, ratios);
final int nsplits = templates.length;
assert nsplits == ratios.length+1 : "Unexpected number of split templates!";
// Launch number of distributed FJ for each split part
final Vec[] datasetVecs = dataset.vecs();
splits = new Frame[nsplits];
for (int s=0; s<nsplits; s++) {
Frame split = new Frame(destKeys[s], dataset.names(), templates[s] );
split.delete_and_lock(jobKey);
splits[s] = split;
}
setPendingCount(nsplits);
for (int s=0; s<nsplits; s++)
new FrameSplitTask(this,datasetVecs, ratios, s).dfork(splits[s]);
tryComplete(); // complete the computation of thrown tasks
}
/** Blocking call to obtain a result of computation. */
public Frame[] getResult() {
join();
return splits;
}
@Override public void onCompletion(CountedCompleter caller) {
dataset.unlock(jobKey);
if (splits!=null)
for (Frame s : splits)
if (s!=null)
s.update(jobKey).unlock(jobKey);
}
@Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) {
dataset.unlock(jobKey);
Futures fs = new Futures();
if (splits!=null)
for (Frame s : splits)
if (s!=null)
s.unlock(jobKey).delete(jobKey,fs, true);
fs.blockForPending();
return true;
}
// Make vector templates for all output frame vectors
private Vec[][] makeTemplates(Frame dataset, double[] ratios) {
Vec anyVec = dataset.anyVec();
final long[][] espcPerSplit = computeEspcPerSplit(anyVec.espc(), anyVec.length(), ratios);
final int num = dataset.numCols(); // number of columns in input frame
final int nsplits = espcPerSplit.length; // number of splits
final String[][] domains = dataset.domains(); // domains
final byte[] types = new byte[num];
int j=0;
for (Vec v : dataset.vecs()) types[j++] = v.get_type();
Vec[][] t = new Vec[nsplits][/*num*/]; // resulting vectors for all
for (int i=0; i<nsplits; i++) {
// vectors for j-th split
Key vkey = Vec.newKey();
int rowLayout = Vec.ESPC.rowLayout(vkey,espcPerSplit[i]);
t[i] = new Vec(vkey,rowLayout).makeCons(num, 0, domains, types);
}
return t;
}
// The task computes ESPC per split
static long[/*nsplits*/][/*nchunks*/] computeEspcPerSplit(long[] espc, long len, double[] ratios) {
assert espc.length>0 && espc[0] == 0;
assert espc[espc.length-1] == len;
long[] partSizes = partitione(len, ratios); // Split of whole vector
int nparts = ratios.length+1;
long[][] r = new long[nparts][espc.length]; // espc for each partition
long nrows = 0;
long start = 0;
for (int p=0,c=0; p<nparts; p++) {
int nc = 0; // number of chunks for this partition
for(;c<espc.length-1 && (espc[c+1]-start) <= partSizes[p];c++) r[p][++nc] = espc[c+1]-start;
if (r[p][nc] < partSizes[p]) r[p][++nc] = partSizes[p]; // last item in espc contains number of rows
r[p] = Arrays.copyOf(r[p], nc+1);
// Transfer rest of lines to the next part
nrows = nrows-partSizes[p];
start += partSizes[p];
}
return r;
}
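// Worked example (illustrative): espc = {0, 4, 8, 12}, len = 12, ratios = {0.5} gives
// partSizes = {6, 6} and returns {{0, 4, 6}, {0, 2, 6}}: the first part spans chunks of
// 4 and 2 rows, the second part chunks of 2 and 4 rows.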
/** MR task extract specified part of <code>_srcVecs</code>
* into output chunk.*/
private static class FrameSplitTask extends MRTask<FrameSplitTask> {
final Vec [] _srcVecs; // a source frame given by list of its columns
final double[] _ratios; // split ratios
final int _partIdx; // part index
transient int _pcidx; // Start chunk index for this partition
transient int _psrow; // Start row in chunk for this partition
public FrameSplitTask(H2OCountedCompleter completer, Vec[] srcVecs, double[] ratios, int partIdx) {
super(completer);
_srcVecs = srcVecs;
_ratios = ratios;
_partIdx = partIdx;
}
@Override protected void setupLocal() {
// Precompute the first input chunk index and start row inside that chunk for this partition
Vec anyInVec = _srcVecs[0];
long[] partSizes = partitione(anyInVec.length(), _ratios);
long pnrows = 0;
for (int p=0; p<_partIdx; p++) pnrows += partSizes[p];
long[] espc = anyInVec.espc();
while (_pcidx < espc.length-1 && (pnrows -= (espc[_pcidx+1]-espc[_pcidx])) >= 0 ) _pcidx++;
assert pnrows <= 0;
_psrow = (int) (pnrows + espc[_pcidx+1]-espc[_pcidx]);
}
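// Illustrative trace: for espc = {0, 4, 8, 12} and a partition starting at global row 6, the
// loop above leaves _pcidx = 1 and _psrow = 2, i.e. extraction starts at the third row of chunk 1.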
@Override public void map(Chunk[] cs) { // Output chunks
int coutidx = cs[0].cidx(); // Index of output Chunk
int cinidx = _pcidx + coutidx;
int startRow = coutidx > 0 ? 0 : _psrow; // where to start extracting
int nrows = cs[0]._len;
// For each output chunk extract appropriate rows for partIdx-th part
for (int i=0; i<cs.length; i++) {
// WARNING: this implementation does not preserve co-location of chunks so we are forcing here network transfer!
ChunkSplitter.extractChunkPart(_srcVecs[i].chunkForChunkIdx(cinidx), cs[i], startRow, nrows, _fs);
}
}
}
static final long[] partitione(long len, double[] ratio) {
long[] r = new long[ratio.length+1];
long sum = 0;
int i = 0;
float sr = 0;
for (i=0; i<ratio.length; i++) {
r[i] = (long) (ratio[i]*len); // use a long cast to avoid overflow for very long Vecs
sum += r[i];
sr += ratio[i];
}
if (sr<1f) r[i] = len - sum;
else r[i-1] += (len-sum);
return r;
}
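// Worked example (illustrative): partitione(10, new double[]{0.7}) returns {7, 3}; because the
// ratios sum to less than 1, the remaining rows are assigned to the implicit last part.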
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/FriedmanPopescusHCollector.java
|
package hex;
import water.fvec.Frame;
/**
* Implementors of this interface provide Friedman and Popescu's H calculation.
*/
public interface FriedmanPopescusHCollector {
double getFriedmanPopescusH(Frame frame, String[] vars);
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/GLMMetrics.java
|
package hex;
/**
* Created by tomasnykodym on 1/5/16.
*/
public interface GLMMetrics {
double residual_deviance(); // naming is pythonic because it's user-facing via the grid search sort criterion
double null_deviance();
long residual_degrees_of_freedom();
long null_degrees_of_freedom();
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/GainsLift.java
|
package hex;
import hex.quantile.Quantile;
import hex.quantile.QuantileModel;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.PrettyPrint;
import water.util.TwoDimTable;
import java.util.Arrays;
import java.util.Iterator;
import java.util.TreeSet;
public class GainsLift extends Iced {
private double[] _quantiles;
//INPUT
public int _groups = -1;
public Vec _labels;
public Vec _preds; //of length N, n_i = N/GROUPS
public Vec _weights;
//OUTPUT
public double[] response_rates; // p_i = e_i/n_i
public double[] avg_scores; // s_i
public double avg_response_rate; // P
public double avg_score; // S
public long[] events; // e_i
public long[] observations; // n_i
TwoDimTable table;
public GainsLift(Vec preds, Vec labels) {
this(preds, labels, null);
}
public GainsLift(Vec preds, Vec labels, Vec weights) {
_preds = preds;
_labels = labels;
_weights = weights;
}
private void init(Job job) throws IllegalArgumentException {
_labels = _labels.toCategoricalVec();
if( _labels ==null || _preds ==null )
throw new IllegalArgumentException("Missing actualLabels or predictedProbs!");
if (_labels.length() != _preds.length())
throw new IllegalArgumentException("Both arguments must have the same length ("+ _labels.length()+"!="+ _preds.length()+")!");
if (!_labels.isInt())
throw new IllegalArgumentException("Actual column must be integer class labels!");
if (_labels.cardinality() != -1 && _labels.cardinality() != 2)
throw new IllegalArgumentException("Actual column must contain binary class labels, but found cardinality " + _labels.cardinality() + "!");
if (_preds.isCategorical())
throw new IllegalArgumentException("Predicted probabilities cannot be class labels, expect probabilities.");
if (_weights != null && !_weights.isNumeric())
throw new IllegalArgumentException("Observation weights must be numeric.");
// The vectors are from different groups => align them, and properly delete the aligned copies after computation
if (!_labels.group().equals(_preds.group())) {
_preds = _labels.align(_preds);
Scope.track(_preds);
if (_weights !=null) {
_weights = _labels.align(_weights);
Scope.track(_weights);
}
}
boolean fast = false;
if (fast) {
// FAST VERSION: single-pass, only works with the specific pre-computed quantiles from rollupstats
assert(_groups == 10);
assert(Arrays.equals(Vec.PERCENTILES,
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15, 16
new double[]{0.001, 0.01, 0.1, 0.2, 0.25, 0.3, 1.0 / 3.0, 0.4, 0.5, 0.6, 2.0 / 3.0, 0.7, 0.75, 0.8, 0.9, 0.99, 0.999}));
//HACK: hardcoded quantiles for simplicity (0.9,0.8,...,0.1,0)
double[] rq = _preds.pctiles(); //might do a full pass over the Vec
_quantiles = new double[]{
rq[14], rq[13], rq[11], rq[9], rq[8], rq[7], rq[5], rq[3], rq[2], 0 /*ignored*/
};
} else {
// ACCURATE VERSION: multi-pass
Frame fr = null;
QuantileModel qm = null;
try {
QuantileModel.QuantileParameters qp = new QuantileModel.QuantileParameters();
if (_weights==null) {
fr = new Frame(Key.<Frame>make(), new String[]{"predictions"}, new Vec[]{_preds});
} else {
fr = new Frame(Key.<Frame>make(), new String[]{"predictions", "weights"}, new Vec[]{_preds, _weights});
qp._weights_column = "weights";
}
DKV.put(fr);
qp._train = fr._key;
if (_groups > 0) {
qp._probs = new double[_groups];
for (int i = 0; i < _groups; ++i) {
qp._probs[i] = (_groups - i - 1.) / _groups; // This is 0.9, 0.8, 0.7, 0.6, ..., 0.1, 0 for 10 groups
}
} else {
qp._probs = new double[]{0.99, 0.98, 0.97, 0.96, 0.95, 0.9, 0.85, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0};
}
qm = job != null && !job.isDone() ? new Quantile(qp, job).trainModelNested(null) : new Quantile(qp).trainModel().get();
_quantiles = qm._output._quantiles[0];
// find uniques (is there a more elegant way?)
TreeSet<Double> hs = new TreeSet<>();
for (double d : _quantiles) hs.add(d);
_quantiles = new double[hs.size()];
Iterator<Double> it = hs.descendingIterator();
int i = 0;
while (it.hasNext()) _quantiles[i++] = it.next();
} finally {
if (qm!=null) qm.remove();
if (fr!=null) DKV.remove(fr._key);
}
}
}
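// Illustrative note: with _groups == 10 the requested probabilities are (10-i-1)/10, i.e.
// {0.9, 0.8, ..., 0.1, 0.0}, so after deduplication _quantiles holds the distinct group
// thresholds in descending order.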
public void exec() {
exec(null);
}
public void exec(Job job) {
Scope.enter();
init(job); //check parameters and obtain _quantiles from _preds
try {
GainsLiftBuilder gt = new GainsLiftBuilder(_quantiles);
gt = (_weights != null) ? gt.doAll(_labels, _preds, _weights) : gt.doAll(_labels, _preds);
response_rates = gt.response_rates();
avg_scores = gt.avg_scores();
avg_response_rate = gt.avg_response_rate();
avg_score = gt.avg_score();
events = gt.events();
observations = gt.observations();
} finally { // Delete adaptation vectors
Scope.exit();
}
}
@Override public String toString() {
TwoDimTable t = createTwoDimTable();
return t==null ? "" : t.toString();
}
public TwoDimTable createTwoDimTable() {
if (response_rates == null || Double.isNaN(avg_response_rate)) return null;
TwoDimTable table = new TwoDimTable(
"Gains/Lift Table",
"Avg response rate: " + PrettyPrint.formatPct(avg_response_rate) + ", avg score: " + PrettyPrint.formatPct(avg_score),
new String[events.length],
new String[]{"Group", "Cumulative Data Fraction", "Lower Threshold", "Lift", "Cumulative Lift", "Response Rate", "Score", "Cumulative Response Rate", "Cumulative Score", "Capture Rate", "Cumulative Capture Rate", "Gain", "Cumulative Gain", "Kolmogorov Smirnov"},
new String[]{"int", "double", "double", "double", "double", "double", "double", "double", "double", "double", "double", "double", "double", "double"},
new String[]{"%d", "%.8f", "%5f", "%5f", "%5f", "%5f", "%5f", "%5f", "%5f", "%5f", "%5f", "%5f", "%5f","%5f"},
null);
long sum_e_i = 0;
long sum_n_i = 0;
double sum_s_i = 0;
double P = avg_response_rate; // E/N
long N = ArrayUtils.sum(observations);
long E = Math.round(N * P);
for (int i = 0; i < events.length; ++i) {
long e_i = events[i];
long n_i = observations[i];
double p_i = response_rates[i];
double s_i = avg_scores[i];
sum_e_i += e_i;
sum_n_i += n_i;
sum_s_i += n_i * s_i;
double lift=p_i/P; //can be NaN if P==0
double sum_lift=(double)sum_e_i/sum_n_i/P; //can be NaN if P==0
final double cum_event = sum_e_i / (double)E;
final double total_non_event = (double) (N - E);
// If the response rate is 1, there are no non-events and the cumulative count will always be zero
final double cum_non_event = total_non_event == 0 ? 0 : (sum_n_i - sum_e_i) / total_non_event;
table.set(i,0,i+1); //group
table.set(i,1,(double)sum_n_i/N); //cumulative_data_fraction
table.set(i,2,_quantiles[i]); //lower_threshold
table.set(i,3,lift); //lift
table.set(i,4,sum_lift); //cumulative_lift
table.set(i,5,p_i); //response_rate
table.set(i,6,s_i); //score
table.set(i,7,(double)sum_e_i/sum_n_i); //cumulative_response_rate
table.set(i,8,(double)sum_s_i/sum_n_i); //cumulative_score
table.set(i,9,(double)e_i/E); //capture_rate
table.set(i,10,(double)sum_e_i/E); //cumulative_capture_rate
table.set(i,11,100*(lift-1)); //gain
table.set(i,12,100*(sum_lift-1)); //cumulative gain
table.set(i,13,cum_event - cum_non_event); //Kolmogorov-Smirnov metric
if (i== events.length-1) {
assert(sum_n_i == N) : "Cumulative data fraction must be 1.0, but is " + (double)sum_n_i/N;
assert(sum_e_i == E) : "Cumulative capture rate must be 1.0, but is " + (double)sum_e_i/E;
if (!Double.isNaN(sum_lift)) assert(Math.abs(sum_lift - 1.0) < 1e-8) : "Cumulative lift must be 1.0, but is " + sum_lift;
assert(Math.abs((double)sum_e_i/sum_n_i - avg_response_rate) < 1e-8) : "Cumulative response rate must be " + avg_response_rate + ", but is " + (double)sum_e_i/sum_n_i;
}
}
return this.table = table;
}
// Compute Gains table via MRTask
public static class GainsLiftBuilder extends MRTask<GainsLiftBuilder> {
/* @OUT response_rates */
public final double[] response_rates() { return _response_rates; }
public final double avg_response_rate() { return _avg_response_rate; }
public final double avg_score() { return _avg_score; }
public final long[] events(){ return _events; }
public final long[] observations(){ return _observations; }
public final double[] avg_scores() { return _avg_scores; }
/* @IN quantiles/thresholds */
final private double[] _thresh;
private long[] _events;
private long[] _observations;
private long _avg_response;
private double _avg_response_rate;
private double _avg_score;
private double[] _response_rates;
private double[] _avg_scores;
public GainsLiftBuilder(double[] thresh) {
_thresh = thresh.clone();
}
@Override public void map( Chunk ca, Chunk cp) { map(ca, cp, (Chunk)null); }
@Override public void map( Chunk ca, Chunk cp, Chunk cw) {
_events = new long[_thresh.length];
_observations = new long[_thresh.length];
_avg_scores = new double[_thresh.length];
_avg_response = 0;
_avg_score = 0;
final int len = Math.min(ca._len, cp._len);
for( int i=0; i < len; i++ ) {
if (ca.isNA(i)) continue;
final int a = (int)ca.at8(i);
if (a != 0 && a != 1) throw new IllegalArgumentException("Invalid values in actualLabels: must be binary (0 or 1).");
if (cp.isNA(i)) continue;
final double pr = cp.atd(i);
final double w = cw!=null?cw.atd(i):1;
perRow(pr, a, w);
}
}
public void perRow(double pr, int a, double w) {
if (w==0) return;
assert (!Double.isNaN(pr));
assert (!Double.isNaN(a));
assert (!Double.isNaN(w));
//for-loop is faster than binary search for small number of thresholds
for( int t=0; t < _thresh.length; t++ ) {
if (pr >= _thresh[t] && (t==0 || pr <_thresh[t-1])) {
_observations[t]+=w;
_avg_scores[t]+=w*pr;
if (a == 1) _events[t]+=w;
break;
}
}
if (a == 1) _avg_response+=w;
_avg_score += w*pr;
}
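// Illustrative note: _thresh is sorted descending, e.g. {0.9, 0.5, 0.1}; a prediction of 0.6
// lands in bucket t = 1 because 0.6 >= 0.5 and 0.6 < 0.9. Predictions below the last threshold
// fall through the loop and only contribute to the global averages.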
@Override public void reduce(GainsLiftBuilder other) {
ArrayUtils.add(_events, other._events);
ArrayUtils.add(_observations, other._observations);
ArrayUtils.add(_avg_scores, other._avg_scores);
_avg_response += other._avg_response;
_avg_score += other._avg_score;
}
@Override public void postGlobal(){
_response_rates = new double[_thresh.length];
for (int i=0; i<_response_rates.length; ++i) {
_response_rates[i] = _observations[i] == 0 ? 0 : (double) _events[i] / _observations[i];
_avg_scores[i] = _observations[i] == 0 ? 0 : _avg_scores[i] / (double)_observations[i];
}
_avg_response_rate = (double)_avg_response / ArrayUtils.sum(_observations);
_avg_score /= ArrayUtils.sum(_observations);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/Interaction.java
|
package hex;
import water.DKV;
import water.Iced;
import water.Job;
import water.Key;
import water.fvec.CreateInteractions;
import water.fvec.Frame;
import water.util.Log;
import water.util.PrettyPrint;
import java.util.Arrays;
/**
* Create new factors that represent interactions of the given factors
*/
public class Interaction extends Iced {
public Job<Frame> _job;
public Key<Frame> _source_frame;
public String[] _factor_columns;
public boolean _pairwise = false;
public int _max_factors = 100;
public int _min_occurrence = 1;
public boolean _interactOnNA = true;
transient public int[] _factors = new int[0];
public Job<Frame> execImpl(Key<Frame> dest ) {
_job = new Job(dest == null ? Key.make() : dest, Frame.class.getName(), "CreateFrame");
Frame source_frame = DKV.getGet(_source_frame);
assert(source_frame != null);
if (_factor_columns == null || _factor_columns.length == 0) throw new IllegalArgumentException("factor_columns must be specified.");
if (_pairwise && _factor_columns.length < 3) Log.info("Ignoring the pairwise option, requires 3 or more factors.");
_factors = new int[_factor_columns.length];
int count=0;
for (String v: _factor_columns) {
int idx = source_frame.find(v);
if (idx >= 0) {
if (!source_frame.vecs()[idx].isCategorical()) {
throw new IllegalArgumentException("Column " + v + " is not categorical.");
}
_factors[count++] = idx;
} else {
throw new IllegalArgumentException("Column " + v + " not found.");
}
}
CreateInteractions in = new CreateInteractions(this);
return _job.start(in, in.work());
}
@Override public String toString() {
Frame res = _job.get();
if (res == null) return "Output frame not found";
if (!_pairwise)
return "Created interaction feature " + res.names()[0]
+ " (order: " + _factors.length + ") with " + res.lastVec().domain().length + " factor levels"
+ " in" + PrettyPrint.msecs(_job.msec(), true);
else
return "Created " + res.numCols() + " pair-wise interaction features " + Arrays.deepToString(res.names())
+ " (order: 2) in" + PrettyPrint.msecs(_job.msec(), true);
}
public static Interaction getInteraction(Key<Frame> key, String[] names, int maxLevels) {
Interaction inter = new Interaction();
inter._source_frame = key;
inter._max_factors = maxLevels; // keep only this many most frequent levels
inter._min_occurrence = 2; // but need at least 2 observations for a level to be kept
inter._pairwise = false;
inter._factor_columns = names;
return inter;
}
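// Usage sketch (hypothetical frame key, not part of the original source):
//   Interaction in = Interaction.getInteraction(frameKey, new String[]{"cat1", "cat2"}, 100);
//   Frame interactions = in.execImpl(Key.make("interactions")).get();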
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/KeyValue.java
|
package hex;
import water.Iced;
public class KeyValue extends Iced<KeyValue> {
public KeyValue() {}
public KeyValue(String key, double value) {
_key = key;
_value = value;
}
String _key;
double _value;
public String getKey() {
return _key;
}
public double getValue() {
return _value;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/LinkFunction.java
|
package hex;
import hex.genmodel.utils.LinkFunctionType;
import org.apache.commons.math3.distribution.NormalDistribution;
import water.Iced;
/**
* Link function class to calculate link, link inverse and string link inverse functions.
*
*/
public abstract class LinkFunction extends Iced<LinkFunction> {
public LinkFunctionType linkFunctionType;
/**
* Helper function: wrap the expression x into a capped e^x string
* @param x expression to exponentiate
* @return x converted to a capped e^x string
*/
public static String expString(String x) {
return "Math.min(1e19, Math.exp(" + x + "))";
}
/**
* Canonical link
*
* @param f value in original space, to be transformed to link space
* @return link(f)
*/
public abstract double link(double f);
/**
* Canonical link inverse
*
* Be careful if you are changing code here - you have to change it in DeeplearningMojoModel and GbmMojoModel too
* @param f value in link space, to be transformed back to original space
* @return linkInv(f)
*/
public abstract double linkInv(double f);
/**
* String version of link inverse (for POJO scoring code generation)
*
* @param f value to be transformed by link inverse
* @return String that turns into compilable expression of linkInv(f)
*/
public abstract String linkInvString(String f);
public String linkInvStringFloat(String f) {
return linkInvString(f);
}
}
class IdentityFunction extends LinkFunction {
public IdentityFunction(){
linkFunctionType = LinkFunctionType.identity;
}
@Override
public double link(double f) {
return f;
}
@Override
public double linkInv(double f) {
return f;
}
@Override
public String linkInvString(String f) {
return f;
}
}
class InverseFunction extends LinkFunction {
public InverseFunction(){
linkFunctionType = LinkFunctionType.inverse;
}
@Override
public double link(double f) {
double xx = f < 0 ? Math.min(-1e-5, f) : Math.max(1e-5, f); // clamp away from zero, matching linkInvString below
return 1.0/xx;
}
@Override
public double linkInv(double f) {
return link(f);
}
@Override
public String linkInvString(String f) {
if(Integer.parseInt(f) < 0){
return "1.0/Math.min(-1e-5, "+f+")";
}
return "1.0/Math.max(1e-5, "+f+")";
}
}
class LogFunction extends LinkFunction {
public LogFunction(){
linkFunctionType = LinkFunctionType.log;
}
@Override
public double link(double f) {
return DistributionFactory.LogExpUtil.log(f);
}
@Override
public double linkInv(double f) {
return DistributionFactory.LogExpUtil.exp(f);
}
@Override
public String linkInvString(String f) {
return expString(f);
}
}
class LogitFunction extends LinkFunction {
public LogitFunction(){
linkFunctionType = LinkFunctionType.logit;
}
@Override
public double link(double f) { return DistributionFactory.LogExpUtil.log(f / (1 - f)); }
@Override
public double linkInv(double f) {
return 1 / (1 + DistributionFactory.LogExpUtil.exp(-f));
}
@Override
public String linkInvString(String f) {
return "1./(1. + " + expString("-(" + f + ")") + ")";
}
@Override
public String linkInvStringFloat(String f) {
return "1f/(1f + " + "(float)" + expString("-("+f+")") + ")";
}
}
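// Illustrative round trip: for p = 0.8, link(p) = log(0.8/0.2) ~= 1.386 and
// linkInv(1.386) ~= 0.8; linkInvString("x") emits "1./(1. + Math.min(1e19, Math.exp(-(x))))".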
class OlogitFunction extends LinkFunction {
public OlogitFunction(){
linkFunctionType = LinkFunctionType.ologit;
}
@Override
public double link(double f) { return DistributionFactory.LogExpUtil.log(f / (1 - f)); }
@Override
public double linkInv(double f) {
return 1 / (1 + DistributionFactory.LogExpUtil.exp(-f));
}
@Override
public String linkInvString(String f) {
return "1./(1. + " + expString("-("+f+")") + ")";
}
}
class OloglogFunction extends LinkFunction {
public OloglogFunction(){
linkFunctionType = LinkFunctionType.ologlog;
}
@Override
public double link(double f) { return DistributionFactory.LogExpUtil.log(-1 * DistributionFactory.LogExpUtil.log(1-f) ); }
@Override
public double linkInv(double f) { return 1 - DistributionFactory.LogExpUtil.exp(-1 * DistributionFactory.LogExpUtil.exp(f)); }
@Override
public String linkInvString(String f) { return "1. - " + expString("-1. * " + expString("(" + f + ")")); }
}
class OprobitFunction extends LinkFunction {
org.apache.commons.math3.distribution.NormalDistribution normalDistribution;
public OprobitFunction(){
linkFunctionType = LinkFunctionType.oprobit;
normalDistribution = new NormalDistribution(0, 1);
}
@Override
public double link(double f) { return normalDistribution.inverseCumulativeProbability(f); }
@Override
public double linkInv(double f) { return normalDistribution.cumulativeProbability(f); }
@Override
public String linkInvString(String f) {
return "new org.apache.commons.math3.distribution.NormalDistribution(0, 1).cumulativeProbability("+f+");";
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/LinkFunctionFactory.java
|
package hex;
import hex.genmodel.utils.LinkFunctionType;
import water.H2O;
public class LinkFunctionFactory {
public static LinkFunction getLinkFunction(String type) {
return getLinkFunction(LinkFunctionType.valueOf(type));
}
public static LinkFunction getLinkFunction(LinkFunctionType type) {
switch (type) {
case log:
return new LogFunction();
case logit:
return new LogitFunction();
case identity:
return new IdentityFunction();
case ologit:
return new OlogitFunction();
case ologlog:
return new OloglogFunction();
case oprobit:
return new OprobitFunction();
case inverse:
return new InverseFunction();
default:
throw H2O.unimpl("The" + type + " link function is not implemented.");
}
}
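// Usage sketch (illustrative):
//   LinkFunction logit = LinkFunctionFactory.getLinkFunction(LinkFunctionType.logit);
//   double p = logit.linkInv(0.0); // 0.5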
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/MeanResidualDeviance.java
|
package hex;
import water.Iced;
import water.MRTask;
import water.Scope;
import water.fvec.Chunk;
import water.fvec.Vec;
public class MeanResidualDeviance extends Iced {
//INPUT
public Vec _actuals;
public Vec _preds;
public Vec _weights;
public Distribution _dist;
//OUTPUT
public double meanResidualDeviance;
public MeanResidualDeviance(Distribution dist, Vec preds, Vec actuals, Vec weights) {
_preds = preds;
_actuals = actuals;
_weights = weights;
_dist = dist;
}
private void init() throws IllegalArgumentException {
if( _actuals ==null || _preds ==null )
throw new IllegalArgumentException("Missing actual targets or predicted values!");
if (_actuals.length() != _preds.length())
throw new IllegalArgumentException("Both arguments must have the same length ("+ _actuals.length()+"!="+ _preds.length()+")!");
if (!_actuals.isNumeric())
throw new IllegalArgumentException("Actual target column must be numeric!");
if (_preds.isCategorical())
throw new IllegalArgumentException("Predicted targets cannot be class labels, expect continuous values.");
if (_weights != null && !_weights.isNumeric())
throw new IllegalArgumentException("Observation weights must be numeric.");
// The vectors are from different groups => align them, and properly delete the aligned copies after computation
if (!_actuals.group().equals(_preds.group())) {
_preds = _actuals.align(_preds);
Scope.track(_preds);
if (_weights !=null) {
_weights = _actuals.align(_weights);
Scope.track(_weights);
}
}
}
public MeanResidualDeviance exec() {
Scope.enter();
init();
try {
MeanResidualBuilder gt = new MeanResidualBuilder(_dist);
gt = (_weights != null) ? gt.doAll(_actuals, _preds, _weights) : gt.doAll(_actuals, _preds);
meanResidualDeviance=gt._mean_residual_deviance;
} finally {
Scope.exit();
}
return this;
}
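// Usage sketch (hypothetical Vecs, not part of the original source):
//   double mrd = new MeanResidualDeviance(dist, preds, actuals, null).exec().meanResidualDeviance;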
// Compute Mean Residual Deviance table via MRTask
public static class MeanResidualBuilder extends MRTask<MeanResidualBuilder> {
public double _mean_residual_deviance;
private double _wcount;
private Distribution _dist;
MeanResidualBuilder(Distribution dist) { _dist = dist; }
@Override public void map(Chunk ca, Chunk cp) { map(ca, cp, (Chunk)null); }
@Override public void map(Chunk ca, Chunk cp, Chunk cw) {
_mean_residual_deviance=0;
_wcount=0;
final int len = Math.min(ca._len, cp._len);
for( int i=0; i < len; i++ ) {
if (ca.isNA(i)) continue;
if (cp.isNA(i)) continue;
final double a = ca.atd(i);
final double pr = cp.atd(i);
final double w = cw!=null?cw.atd(i):1;
perRow(pr, a, w);
}
}
public void perRow(double pr, double a, double w) {
if (w==0) return;
assert (!Double.isNaN(pr));
assert (!Double.isNaN(a));
assert (!Double.isNaN(w));
_mean_residual_deviance+=_dist.deviance(w,a,pr);
_wcount+=w;
}
@Override public void reduce(MeanResidualBuilder other) {
_mean_residual_deviance += other._mean_residual_deviance;
_wcount += other._wcount;
}
@Override public void postGlobal(){
_mean_residual_deviance/=_wcount;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/Model.java
|
package hex;
import hex.genmodel.*;
import hex.genmodel.algos.glrm.GlrmMojoModel;
import hex.genmodel.algos.tree.ContributionComposer;
import hex.genmodel.algos.tree.SharedTreeGraph;
import hex.genmodel.algos.tree.SharedTreeMojoModel;
import hex.genmodel.algos.tree.SharedTreeNode;
import hex.genmodel.attributes.ModelAttributes;
import hex.genmodel.descriptor.ModelDescriptor;
import hex.genmodel.easy.EasyPredictModelWrapper;
import hex.genmodel.easy.RowData;
import hex.genmodel.easy.exception.PredictException;
import hex.genmodel.easy.prediction.*;
import hex.genmodel.utils.DistributionFamily;
import hex.quantile.QuantileModel;
import org.joda.time.DateTime;
import water.*;
import water.api.ModelsHandler;
import water.api.StreamWriteOption;
import water.api.StreamWriter;
import water.api.StreamingSchema;
import water.api.schemas3.KeyV3;
import water.codegen.CodeGenerator;
import water.codegen.CodeGeneratorPipeline;
import water.exceptions.JCodeSB;
import water.fvec.*;
import water.parser.BufferedString;
import water.persist.Persist;
import water.udf.CFuncRef;
import water.util.*;
import java.io.*;
import java.lang.reflect.Field;
import java.net.URI;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static water.util.FrameUtils.categoricalEncoder;
import static water.util.FrameUtils.cleanUp;
/**
* A Model models reality (hopefully).
* A model can be used to 'score' a row (make a prediction), or a collection of
* rows on any compatible dataset - meaning the row has all the columns with the
* same names as used to build the mode and any categorical columns can
* be adapted.
*/
public abstract class Model<M extends Model<M,P,O>, P extends Model.Parameters, O extends Model.Output>
extends DefaultPojoWriter<M>
implements StreamWriter {
public static final String EVAL_AUTO_PARAMS_ENABLED = H2O.OptArgs.SYSTEM_PROP_PREFIX + "algos.evaluate_auto_model_parameters";
public P _parms; // TODO: move things around so that this can be protected
public P _input_parms;
public O _output; // TODO: move things around so that this can be protected
public String[] _warnings = new String[0]; // warnings associated with model building
public transient String[] _warningsP; // warnings associated with prediction only (transient, not persisted)
public Distribution _dist;
protected ScoringInfo[] scoringInfo;
public IcedHashMap<Key, String> _toDelete = new IcedHashMap<>();
public boolean evalAutoParamsEnabled;
public static Model[] fetchAll() {
final Key[] modelKeys = KeySnapshot.globalSnapshot().filter(new KeySnapshot.KVFilter() {
@Override
public boolean filter(KeySnapshot.KeyInfo k) {
return Value.isSubclassOf(k._type, Model.class) && !Value.isSubclassOf(k._type, QuantileModel.class);
}
}).keys();
Model[] models = new Model[modelKeys.length];
for (int i = 0; i < modelKeys.length; i++) {
Model model = ModelsHandler.getFromDKV("(none)", modelKeys[i]);
models[i] = model;
}
return models;
}
/**
* Whether to evaluate input parameters of value AUTO.
*/
public static boolean evaluateAutoModelParameters() {
return Boolean.parseBoolean(System.getProperty(EVAL_AUTO_PARAMS_ENABLED, "true"));
}
public void setInputParms(P _input_parms) {
this._input_parms = _input_parms;
}
public interface DeepFeatures {
Frame scoreAutoEncoder(Frame frame, Key destination_key, boolean reconstruction_error_per_feature);
Frame scoreDeepFeatures(Frame frame, final int layer);
Frame scoreDeepFeatures(Frame frame, final int layer, final Job j); //for Deep Learning
Frame scoreDeepFeatures(Frame frame, final String layer, final Job j); //for Deep Water
}
public interface GLRMArchetypes {
Frame scoreReconstruction(Frame frame, Key<Frame> destination_key, boolean reverse_transform);
Frame scoreArchetypes(Frame frame, Key<Frame> destination_key, boolean reverse_transform);
}
public interface LeafNodeAssignment {
enum LeafNodeAssignmentType {Path, Node_ID}
Frame scoreLeafNodeAssignment(Frame frame, LeafNodeAssignmentType type, Key<Frame> destination_key);
}
public interface FeatureFrequencies {
Frame scoreFeatureFrequencies(Frame frame, Key<Frame> destination_key);
}
public interface StagedPredictions {
Frame scoreStagedPredictions(Frame frame, Key<Frame> destination_key);
}
public interface UpdateAuxTreeWeights {
UpdateAuxTreeWeightsReport updateAuxTreeWeights(Frame frame, String weightsColumn);
class UpdateAuxTreeWeightsReport {
public int[] _warn_trees;
public int[] _warn_classes;
public boolean hasWarnings() {
return _warn_trees != null && _warn_trees.length > 0;
}
}
}
public interface Contributions {
enum ContributionsOutputFormat {Original, Compact}
class ContributionsOptions {
public ContributionsOutputFormat _outputFormat = ContributionsOutputFormat.Original;
public int _topN;
public int _bottomN;
public boolean _compareAbs;
public boolean _outputSpace; // Used only if SHAP is computed in link space
public boolean _outputPerReference; // If T, return contributions against each background sample (aka reference), i.e. phi(feature, x, bg), otherwise return contributions averaged over the background sample (phi(feature, x) = E_{bg} phi(feature, x, bg))
public ContributionsOptions setOutputFormat(ContributionsOutputFormat outputFormat) {
_outputFormat = outputFormat;
return this;
}
public ContributionsOptions setTopN(int topN) {
_topN = topN;
return this;
}
public ContributionsOptions setBottomN(int bottomN) {
_bottomN = bottomN;
return this;
}
public ContributionsOptions setCompareAbs(boolean compareAbs) {
_compareAbs = compareAbs;
return this;
}
public ContributionsOptions setOutputSpace(boolean outputSpace) {
_outputSpace = outputSpace;
return this;
}
public ContributionsOptions setOutputPerReference(boolean perReference) {
_outputPerReference = perReference;
return this;
}
public boolean isSortingRequired() {
return _topN != 0 || _bottomN != 0;
}
}
default Frame scoreContributions(Frame frame, Key<Frame> destination_key) {
throw H2O.unimpl("Calculating SHAP is not supported.");
}
default Frame scoreContributions(Frame frame, Key<Frame> destination_key, Job<Frame> j) {
return scoreContributions(frame, destination_key, j, new ContributionsOptions());
}
default Frame scoreContributions(Frame frame, Key<Frame> destination_key, Job<Frame> j, ContributionsOptions options) {
return scoreContributions(frame, destination_key);
}
default Frame scoreContributions(Frame frame, Key<Frame> destination_key, Job<Frame> j, ContributionsOptions options, Frame backgroundFrame) {
if (backgroundFrame != null) {
throw H2O.unimpl("Calculating SHAP with background frame is not supported for this model.");
}
return scoreContributions(frame, destination_key, j, options);
}
default void composeScoreContributionTaskMetadata(final String[] names, final byte[] types, final String[][] domains, final String[] originalFrameNames, final Contributions.ContributionsOptions options) {
final String[] contribNames = hex.genmodel.utils.ArrayUtils.append(originalFrameNames, "BiasTerm");
final ContributionComposer contributionComposer = new ContributionComposer();
int topNAdjusted = contributionComposer.checkAndAdjustInput(options._topN, originalFrameNames.length);
int bottomNAdjusted = contributionComposer.checkAndAdjustInput(options._bottomN, originalFrameNames.length);
int outputSize = Math.min((topNAdjusted+bottomNAdjusted)*2, originalFrameNames.length*2);
for (int i = 0; i < outputSize; i+=2) {
types[i] = Vec.T_CAT;
domains[i] = Arrays.copyOf(contribNames, contribNames.length);
domains[i+1] = null;
types[i+1] = Vec.T_NUM;
}
int topFeatureIterator = 1;
for (int i = 0; i < topNAdjusted*2; i+=2) {
names[i] = "top_feature_" + topFeatureIterator;
names[i+1] = "top_value_" + topFeatureIterator;
topFeatureIterator++;
}
int bottomFeatureIterator = 1;
for (int i = topNAdjusted*2; i < outputSize; i+=2) {
names[i] = "bottom_feature_" + bottomFeatureIterator;
names[i+1] = "bottom_value_" + bottomFeatureIterator;
bottomFeatureIterator++;
}
names[outputSize] = "BiasTerm";
types[outputSize] = Vec.T_NUM;
domains[outputSize] = null;
}
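// Illustrative layout (derived from the code above): with topN = 2 and bottomN = 1 the output
// columns are top_feature_1, top_value_1, top_feature_2, top_value_2, bottom_feature_1,
// bottom_value_1, BiasTerm.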
default long scoreContributionsWorkEstimate(Frame frame, Frame backgroundFrame, boolean outputPerReference) {
long frameNRows = frame.numRows();
long bgFrameNRows = backgroundFrame.numRows();
long workAmount = Math.max(frameNRows, bgFrameNRows); // Maps over the bigger frame while the smaller is sent across the cluster
if (!outputPerReference)
workAmount += frameNRows * bgFrameNRows; // Aggregating over the baselines
return workAmount;
}
}
public interface RowToTreeAssignment {
Frame rowToTreeAssignment(Frame frame, Key<Frame> destination_key, Job<Frame> j);
}
public interface ExemplarMembers {
Frame scoreExemplarMembers(Key<Frame> destination_key, int exemplarIdx);
}
public interface GetMostImportantFeatures {
String[] getMostImportantFeatures(int n);
}
public interface GetNTrees {
int getNTrees();
}
/**
* Default threshold for assigning class labels to the target class (for binomial models)
* @return threshold in 0...1
*/
public double defaultThreshold() {
return _output.defaultThreshold();
}
public void resetThreshold(double value){
_output.resetThreshold(value);
}
/**
* @deprecated use {@link Output#defaultThreshold()} instead.
*/
@Deprecated
public static <O extends Model.Output> double defaultThreshold(O output) {
return output.defaultThreshold();
}
public final boolean isSupervised() { return _output.isSupervised(); }
public boolean isGeneric() {
return false;
}
public boolean havePojo() {
if (_parms._preprocessors != null) return false; // TE processor not included to current POJO (see PUBDEV-8508 for potential fix)
final String algoName = _parms.algoName();
return ModelBuilder.getRegisteredBuilder(algoName)
.map(ModelBuilder::havePojo)
.orElseGet(() -> {
Log.warn("Model Builder for algo = " + algoName + " is not registered. " +
"Unable to determine if Model has a POJO. Please override method havePojo().");
return false;
});
}
public boolean haveMojo() {
if (_parms._preprocessors != null) return false; // until PUBDEV-7799, disable model MOJO if it was trained with embedded TE.
final String algoName = _parms.algoName();
return ModelBuilder.getRegisteredBuilder(algoName)
.map(ModelBuilder::haveMojo)
.orElseGet(() -> {
Log.warn("Model Builder for algo = " + algoName + " is not registered. " +
"Unable to determine if Model has a MOJO. Please override method haveMojo().");
return false;
});
}
/**
* Identifies the default ordering method for models returned from Grid Search
* @return default sort-by
*/
public GridSortBy getDefaultGridSortBy() {
if (! isSupervised())
return null;
else if (_output.hasTreatment()){
return GridSortBy.AUUC;
} else if (_output.nclasses() > 1)
return GridSortBy.LOGLOSS;
else
return GridSortBy.RESDEV;
}
public static class GridSortBy { // intentionally not an enum to allow 3rd party extensions
public static final GridSortBy LOGLOSS = new GridSortBy("logloss", false);
public static final GridSortBy RESDEV = new GridSortBy("residual_deviance", false);
public static final GridSortBy R2 = new GridSortBy("r2", true);
public static final GridSortBy AUUC = new GridSortBy("auuc", false);
public final String _name;
public final boolean _decreasing;
GridSortBy(String name, boolean decreasing) { _name = name; _decreasing = decreasing; }
}
public ToEigenVec getToEigenVec() { return null; }
/** Model-specific parameter class. Each model sub-class contains
* instance of one of these containing its builder parameters, with
* model-specific parameters. E.g. KMeansModel extends Model and has a
* KMeansParameters extending Model.Parameters; sample parameters include K,
* whether or not to normalize, max iterations and the initial random seed.
*
* <p>The non-transient fields are input parameters to the model-building
* process, and are considered "first class citizens" by the front-end - the
* front-end will cache Parameters (in the browser, in JavaScript, on disk)
* and rebuild Parameter instances from those caches.
*
* WARNING: Model Parameters are not immutable objects and ModelBuilder can modify
* them!
*/
public abstract static class Parameters extends Iced<Parameters> implements AdaptFrameParameters {
/** Maximal number of supported levels in response. */
public static final int MAX_SUPPORTED_LEVELS = 1<<20;
/** The short name, used in making Keys. e.g. "GBM" */
abstract public String algoName();
/** The pretty algo name for this Model (e.g., Gradient Boosting Machine, rather than GBM).*/
abstract public String fullName();
/** The Java class name for this Model (e.g., hex.tree.gbm.GBM, rather than GBM).*/
abstract public String javaName();
/** Default relative tolerance for convergence-based early stopping */
protected double defaultStoppingTolerance() { return 1e-3; }
/** How much work will be done for this model? */
abstract public long progressUnits();
public Key<Frame> _train; // User-Key of the Frame the Model is trained on
public Key<Frame> _valid; // User-Key of the Frame the Model is validated on, if any
public int _nfolds = 0;
public boolean _keep_cross_validation_models = true;
public boolean _keep_cross_validation_predictions = false;
/**
* What precision to use for storing holdout predictions (the number of decimal places stored)?
* Special values:
* -1 == AUTO; use precision=8 for classification, precision=unlimited for everything else
* 0 == disabled
*
* for classification problems consider e.g.:
* 4 to keep only the first 4 decimal places (consumes 75% less memory)
* or 8 to keep 8 decimal places (consumes 50% less memory)
*/
public int _keep_cross_validation_predictions_precision = -1;
public boolean _keep_cross_validation_fold_assignment = false;
public boolean _parallelize_cross_validation = true;
public boolean _auto_rebalance = true;
public void setTrain(Key<Frame> train) {
this._train = train;
}
public enum FoldAssignmentScheme {
AUTO, Random, Modulo, Stratified
}
public enum CategoricalEncodingScheme {
AUTO(false),
OneHotInternal(false),
OneHotExplicit(false),
Enum(false),
Binary(false),
Eigen(false),
LabelEncoder(false),
SortByResponse(true),
EnumLimited(false)
;
CategoricalEncodingScheme(boolean needResponse) { _needResponse = needResponse; }
final boolean _needResponse;
boolean needsResponse() { return _needResponse; }
public static CategoricalEncodingScheme fromGenModel(CategoricalEncoding encoding) {
if (encoding == null)
return null;
try {
return Enum.valueOf(CategoricalEncodingScheme.class, encoding.name());
} catch (IllegalArgumentException iae) {
throw new UnsupportedOperationException("Unknown encoding " + encoding);
}
}
}
public Key<ModelPreprocessor>[] _preprocessors;
public long _seed = -1;
public long getOrMakeRealSeed(){
while (_seed==-1) {
_seed = RandomUtils.getRNG(System.nanoTime()).nextLong();
Log.debug("Auto-generated time-based seed for pseudo-random number generator (because it was set to -1): " + _seed);
}
return _seed;
}
public FoldAssignmentScheme _fold_assignment = FoldAssignmentScheme.AUTO;
public CategoricalEncodingScheme _categorical_encoding = CategoricalEncodingScheme.AUTO;
public int _max_categorical_levels = 10;
public DistributionFamily _distribution = DistributionFamily.AUTO;
public double _tweedie_power = 1.5;
public double _quantile_alpha = 0.5;
public double _huber_alpha = 0.9;
// TODO: This field belongs in the front-end column-selection process and
// NOT in the parameters - because this requires all model-builders to have
// column strip/ignore code.
public String[] _ignored_columns; // column names to ignore for training
public boolean _ignore_const_cols; // True if dropping constant cols
public String _weights_column;
public String _offset_column;
public String _fold_column;
public String _treatment_column;
// Check for constant response
public boolean _check_constant_response = true;
public boolean _is_cv_model; //internal helper
public int _cv_fold = -1; //internal use
// Scoring a model on a dataset is not free; sometimes it is THE limiting
// factor to model building. By default, partially built models are only
// scored every so many major model iterations - throttled to limit scoring
// costs to less than 10% of the build time. This flag forces scoring for
// every iteration, allowing e.g. more fine-grained progress reporting.
public boolean _score_each_iteration;
/**
* Maximum allowed runtime in seconds for model training. Use 0 to disable.
*/
public double _max_runtime_secs = 0;
/** Using _main_model_time_budget_factor to determine if and how we should restrict the time for the main model.
* Value 0 means do not use time constraint for the main model.
* More details in {@link ModelBuilder#setMaxRuntimeSecsForMainModel()}.
*/
public double _main_model_time_budget_factor = 0;
/**
* Early stopping based on convergence of stopping_metric.
* Stop if simple moving average of the stopping_metric does not improve by stopping_tolerance for
* k scoring events.
* Can only trigger after at least 2k scoring events. Use 0 to disable.
*/
public int _stopping_rounds = 0;
/**
* Metric to use for convergence checking, only for _stopping_rounds > 0.
*/
public ScoreKeeper.StoppingMetric _stopping_metric = ScoreKeeper.StoppingMetric.AUTO;
/**
* Relative tolerance for metric-based stopping criterion: stop if relative improvement is not at least this much.
*/
public double _stopping_tolerance = defaultStoppingTolerance();
/** Supervised models have an expected response they get to train with! */
public String _response_column; // response column name
/** Should all classes be over/under-sampled to balance the class
* distribution? */
public boolean _balance_classes = false;
/** When classes are being balanced, limit the resulting dataset size to
* the specified multiple of the original dataset size. Maximum relative
* size of the training data after balancing class counts (can be less
* than 1.0) */
public float _max_after_balance_size = 5.0f;
/**
* Desired over/under-sampling ratios per class (lexicographic order).
* Only when balance_classes is enabled.
* If not specified, they will be automatically computed to obtain class balance during training.
*/
public float[] _class_sampling_factors;
/** For classification models, the maximum size (in terms of classes) of
* the confusion matrix for it to be printed. This option is meant to
* avoid printing extremely large confusion matrices. */
public int _max_confusion_matrix_size = 20;
/**
* A model key associated with a previously trained Deep Learning
* model. This option allows users to build a new model as a
* continuation of a previously generated model.
*/
public Key<? extends Model> _checkpoint;
/**
* A pretrained Autoencoder DL model with matching inputs and hidden layers
* can be used to initialize the weights and biases (excluding the output layer).
*/
public Key<? extends Model> _pretrained_autoencoder;
/**
* Reference to custom metric function.
*/
public String _custom_metric_func = null;
/**
* Reference to custom distribution function.
*/
public String _custom_distribution_func = null;
/**
* Directory where generated models will be exported
*/
public String _export_checkpoints_dir;
/**
* Bins for Gains/Lift table, if applicable. Ignored if G/L are not calculated.
*/
public int _gainslift_bins = -1;
public MultinomialAucType _auc_type = MultinomialAucType.AUTO;
/**
* Type used to calculate the default AUUC value. Ignored for non-uplift models.
*/
public AUUC.AUUCType _auuc_type = AUUC.AUUCType.AUTO;
/**
* Bins for calculating AUUC, if applicable. Ignored for non-uplift models.
*/
public int _auuc_nbins = -1;
// Public no-arg constructor for reflective creation
public Parameters() { _ignore_const_cols = defaultDropConsCols(); }
/** @return the training frame instance */
public final Frame train() { return _train==null ? null : _train.get(); }
/** @return the validation frame instance, or null
* if a validation frame was not specified */
public final Frame valid() { return _valid==null ? null : _valid.get(); }
public String[] getNonPredictors() {
return Arrays.stream(new String[]{_weights_column, _offset_column, _fold_column, _response_column, _treatment_column})
.filter(Objects::nonNull)
.toArray(String[]::new);
}
/** Read-Lock both training and validation User frames. */
public void read_lock_frames(Job job) {
@SuppressWarnings("unchecked")
Key<Job> jobKey = job._key;
Frame tr = train();
if (tr != null)
read_lock_frame(tr, jobKey);
if (_valid != null && !_train.equals(_valid))
read_lock_frame(_valid.get(), jobKey);
}
private void read_lock_frame(Frame fr, Key<Job> jobKey) {
if (_is_cv_model)
fr.write_lock_to_read_lock(jobKey);
else
fr.read_lock(jobKey);
}
/** Read-UnLock both training and validation User frames. This method is
* called on crashing cleanup pathes, so handles the case where the frames
* are not actually locked. */
public void read_unlock_frames(Job job) {
Frame tr = train();
if( tr != null ) tr.unlock(job._key,false);
if( _valid != null && !_train.equals(_valid) )
valid().unlock(job._key,false);
}
// Override in subclasses to change the default; e.g. true in GLM
protected boolean defaultDropConsCols() { return true; }
/** Type of missing columns during adaptation between train/test datasets
* Overload this method for models that have sparse data handling - a zero
* will preserve the sparseness. Otherwise, NaN is used.
* @return real-valued number (can be NaN) */
@Override
public double missingColumnsType() { return Double.NaN; }
public boolean hasCheckpoint() { return _checkpoint != null; }
public boolean hasCustomMetricFunc() { return _custom_metric_func != null; }
public long checksum() {
return checksum(null);
}
/**
* Compute a checksum based on all non-transient non-static ice-able assignable fields (incl. inherited ones) which have @API annotations.
* Sort the fields first, since reflection gives us the fields in random order and we don't want the checksum to be affected by the field order.
* NOTE: if a field is added to a Parameters class the checksum will differ even when all the previous parameters have the same value. If
* a client wants backward compatibility they will need to compare parameter values explicitly.
*
* The method is motivated by standard hash implementation `hash = hash * P + value` but we use high prime numbers in random order.
* @param ignoredFields A {@link Set} of fields to ignore. Can be empty or null.
* @return checksum A 64-bit long representing the checksum of the {@link Parameters} object
*/
public long checksum(final Set<String> ignoredFields) {
long xs = 0x600DL;
int count = 0;
Field[] fields = Weaver.getWovenFields(this.getClass());
Arrays.sort(fields, Comparator.comparing(Field::getName));
for (Field f : fields) {
if (ignoredFields != null && ignoredFields.contains(f.getName())) {
// Do not include ignored fields in the final hash
continue;
}
final long P = MathUtils.PRIMES[count % MathUtils.PRIMES.length];
Class<?> c = f.getType();
if (c.isArray()) {
try {
f.setAccessible(true);
if (f.get(this) != null) {
if (c.getComponentType() == Integer.TYPE){
int[] arr = (int[]) f.get(this);
xs = xs * P + (long) Arrays.hashCode(arr);
} else if (c.getComponentType() == Float.TYPE) {
float[] arr = (float[]) f.get(this);
xs = xs * P + (long) Arrays.hashCode(arr);
} else if (c.getComponentType() == Double.TYPE) {
double[] arr = (double[]) f.get(this);
xs = xs * P + (long) Arrays.hashCode(arr);
} else if (c.getComponentType() == Long.TYPE){
long[] arr = (long[]) f.get(this);
xs = xs * P + (long) Arrays.hashCode(arr);
} else if (c.getComponentType() == Boolean.TYPE){
boolean[] arr = (boolean[]) f.get(this);
xs = xs * P + (long) Arrays.hashCode(arr);
} else {
Object[] arr = (Object[]) f.get(this);
xs = xs * P + (long) Arrays.deepHashCode(arr);
} //else lead to ClassCastException
} else {
xs = xs * P;
}
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
} catch (ClassCastException t) {
throw H2O.fail("Failed to calculate checksum for the parameter object", t); //no support yet for int[][] etc.
}
} else {
try {
f.setAccessible(true);
Object value = f.get(this);
if (value instanceof Enum) {
// use string hashcode for enums, otherwise the checksum would be different each run
xs = xs * P + (long)(value.toString().hashCode());
} else if (value != null) {
xs = xs * P + (long)(value.hashCode());
} else {
xs = xs * P + P;
}
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
count++;
}
xs ^= (train() == null ? 43 : train().checksum()) * (valid() == null ? 17 : valid().checksum());
return xs;
}
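  // Illustrative sketch (not part of the API): comparing two Parameters objects while ignoring a
  // field; the field name "_seed" is a hypothetical example.
  //   Set<String> ignored = new HashSet<>(Arrays.asList("_seed"));
  //   boolean sameEffectiveParams = parmsA.checksum(ignored) == parmsB.checksum(ignored);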
private void addToUsedIfColumn(Set<String> usedColumns, Set<String> allColumns, String value) {
if (value == null) return;
if (allColumns.contains(value)) {
usedColumns.add(value);
}
}
  /**
   * Looks for all String and String[] parameters whose name contains the word 'column';
   * if a parameter value is present in the supplied array of training column names,
   * it is added to the returned set of used columns.
   *
   * @param trainNames names of columns in the training frame
   * @return set of column names present both in the params and in the training frame
   */
public Set<String> getUsedColumns(final String[] trainNames) {
final Set<String> trainColumns = new HashSet<>(Arrays.asList(trainNames));
final Set<String> usedColumns = new HashSet<>();
final Field[] fields = Weaver.getWovenFields(this.getClass());
for (Field f : fields) {
if (f.getName().equals("_ignored_columns") || !f.getName().toLowerCase().contains("column")) continue;
Class<?> c = f.getType();
if (c.isArray()) {
try {
f.setAccessible(true);
if (f.get(this) != null && c.getComponentType() == String.class) {
String[] values = (String[]) f.get(this);
for (String v : values) {
addToUsedIfColumn(usedColumns, trainColumns, v);
}
}
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
} else {
try {
f.setAccessible(true);
Object value = f.get(this);
if (value instanceof String) {
addToUsedIfColumn(usedColumns, trainColumns, (String) value);
}
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
}
return usedColumns;
}
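  // Illustrative sketch (hypothetical values): for a model with _response_column = "y" and
  // _weights_column = "w", getUsedColumns(new String[]{"x1", "y", "w"}) returns {"y", "w"} -
  // both parameter names contain "column" and both values name existing training columns.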
@SuppressWarnings("rawtypes")
public Set<Key<?>> getDependentKeys() {
Field[] fields = Weaver.getWovenFields(getClass());
Set<Key<?>> values = new HashSet<>();
for (Field f : fields) {
f.setAccessible(true);
Class<?> c = f.getType();
try {
Object value = f.get(this);
if (value instanceof Key) {
values.add((Key) value);
} else if (value != null && c.isArray() && c.getComponentType() == Key.class) {
Key[] arr = (Key[]) value;
for (Key k : arr)
if (k != null) values.add(k);
}
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
return values;
}
@Override
public final CategoricalEncodingScheme getCategoricalEncoding() {
return _categorical_encoding;
}
@Override
public final String getWeightsColumn() {
return _weights_column;
}
@Override
public final String getOffsetColumn() {
return _offset_column;
}
@Override
public final String getFoldColumn() {
return _fold_column;
}
@Override
public final String getResponseColumn() {
return _response_column;
}
@Override
public final String getTreatmentColumn(){
return _treatment_column;
}
@Override
public final int getMaxCategoricalLevels() {
return _max_categorical_levels;
}
public void setDistributionFamily(DistributionFamily distributionFamily){
_distribution = distributionFamily;
}
public DistributionFamily getDistributionFamily() {
return _distribution;
}
}
public ModelMetrics addModelMetrics(final ModelMetrics mm) {
DKV.put(mm);
incrementModelMetrics(_output, mm._key);
return mm;
}
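  // Grow-by-copy under the Output's monitor: the key array is replaced rather than mutated in
  // place, so the metrics list is never observed in a partially updated state.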
static void incrementModelMetrics(Output out, Key k) {
synchronized(out) {
for (Key key : out._model_metrics)
if (k.equals(key)) return;
out._model_metrics = Arrays.copyOf(out._model_metrics, out._model_metrics.length + 1);
out._model_metrics[out._model_metrics.length - 1] = k;
}
}
public void addWarning(String s){
_warnings = Arrays.copyOf(_warnings,_warnings.length+1);
_warnings[_warnings.length-1] = s;
}
public interface InteractionBuilder {
Frame makeInteractions(Frame f);
}
public static class InteractionSpec extends Iced {
private final String[] _columns;
private final StringPair[] _pairs;
private final String[] _interactionsOnly;
private String[] _ignored; // list of columns that can be dropped if they are not used in any interaction
private InteractionSpec(String[] columns, StringPair[] pairs, String[] interactionsOnly, String[] ignored) {
_columns = columns;
_pairs = pairs;
_interactionsOnly = interactionsOnly;
if (ignored != null) {
_ignored = ignored.clone();
Arrays.sort(_ignored);
}
}
public String[] getInteractionsOnly() {
return _interactionsOnly;
}
public static InteractionSpec allPairwise(String[] columns) {
return columns != null ? new InteractionSpec(columns, null, null, null) : null;
}
public static InteractionSpec create(String[] columns, StringPair[] pairs, String[] interactionsOnly, String[] ignored) {
return columns == null && pairs == null ?
null : new InteractionSpec(columns, pairs, interactionsOnly, ignored);
}
public static InteractionSpec create(String[] columns, StringPair[] pairs, String[] interactionsOnly) {
return columns == null && pairs == null ?
null : new InteractionSpec(columns, pairs, interactionsOnly, null);
}
public static InteractionSpec create(String[] columns, StringPair[] pairs) {
return columns == null && pairs == null ?
null : new InteractionSpec(columns, pairs, null, null);
}
public boolean isEmpty() {
return _columns == null && _pairs == null;
}
private boolean isUsed(String col) {
if (_columns != null) {
for (String usedCol : _columns) {
if (usedCol.equals(col))
return true;
}
}
if (_pairs != null) {
for (StringPair colPair : _pairs) {
if (col.equals(colPair._a) || col.equals(colPair._b))
return true;
}
}
return false;
}
    /**
     * Reorders columns of a Frame so that columns that are only used to make interactions
     * are at the end of the Frame. Only Vecs that will actually be used are kept in the frame.
     * @param f frame to adjust
     * @return reordered frame
     */
public Frame reorderColumns(Frame f) {
if ((_interactionsOnly == null) || (f == null))
return f;
Vec[] interOnlyVecs = f.vecs(_interactionsOnly);
f.remove(_interactionsOnly);
for (int i = 0; i < _interactionsOnly.length; i++) {
if (isUsed(_interactionsOnly[i])) {
f.add(_interactionsOnly[i], interOnlyVecs[i]);
} else if (! isIgnored(_interactionsOnly[i])) {
Log.warn("Column '" + _interactionsOnly[i] + "' was marked to be used for interactions only " +
"but it is not actually required in any interaction.");
}
}
return f;
}
private boolean isIgnored(String column) {
return _ignored != null && Arrays.binarySearch(_ignored, column) >= 0;
}
public Frame removeInteractionOnlyColumns(Frame f) {
if ((_interactionsOnly == null) || (f == null))
return f;
return f.remove(_interactionsOnly);
}
public Model.InteractionPair[] makeInteractionPairs(Frame f) {
if (isEmpty())
return null;
InteractionPair[] allPairwise = null;
InteractionPair[] allExplicit = null;
int[] interactionIDs = new int[0];
if (_columns != null) {
interactionIDs = new int[_columns.length];
for (int i = 0; i < _columns.length; ++i) {
interactionIDs[i] = f.find(_columns[i]);
if (interactionIDs[i] == -1)
throw new IllegalArgumentException("missing column from the dataset, could not make interaction: " + interactionIDs[i]);
}
allPairwise = Model.InteractionPair.generatePairwiseInteractionsFromList(f, interactionIDs);
}
if (_pairs != null) {
Arrays.sort(interactionIDs);
allExplicit = new InteractionPair[_pairs.length];
int n = 0;
for (StringPair p : _pairs) {
int aIdx = f.find(p._a);
if (aIdx == -1)
throw new IllegalArgumentException("Invalid interactions specified (first column is missing): " + p.toJsonString() + " in " + Arrays.toString(f.names()));
int bIdx = f.find(p._b);
if (bIdx == -1)
throw new IllegalArgumentException("Invalid interactions specified (second column is missing): " + p.toJsonString() + " in " + Arrays.toString(f.names()));
if (Arrays.binarySearch(interactionIDs, aIdx) >= 0 && Arrays.binarySearch(interactionIDs, bIdx) >= 0)
continue; // This interaction is already included in set of all pairwise interactions
allExplicit[n++] = new InteractionPair(f, aIdx, bIdx, f.vec(aIdx).domain(), f.vec(bIdx).domain());
}
if (n != allExplicit.length) {
InteractionPair[] resized = new InteractionPair[n];
System.arraycopy(allExplicit, 0, resized, 0, resized.length);
allExplicit = resized;
}
}
InteractionPair[] pairs = allExplicit == null ? allPairwise : ArrayUtils.append(allPairwise, allExplicit);
if (pairs != null) {
pairs = flagAllFactorInteractionPairs(f, pairs);
}
return pairs;
}
private InteractionPair[] flagAllFactorInteractionPairs(Frame f, InteractionPair[] pairs) {
if (_interactionsOnly == null || _interactionsOnly.length == 0)
return pairs;
final String[] interOnly = _interactionsOnly.clone();
Arrays.sort(interOnly);
for (InteractionPair p : pairs) {
boolean v1num = f.vec(p._v1).isNumeric();
boolean v2num = f.vec(p._v2).isNumeric();
if (v1num == v2num)
continue;
// numerical-categorical interaction
String numVecName = v1num ? f.name(p._v1) : f.name(p._v2);
boolean needsAllFactorColumns = Arrays.binarySearch(interOnly, numVecName) >= 0;
p.setNeedsAllFactorLevels(needsAllFactorColumns);
}
return pairs;
}
}
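  // Illustrative sketch (hypothetical column names; assumes StringPair offers a (String, String)
  // constructor): all pairwise interactions among {"a", "b"} plus one explicit pair.
  //   InteractionSpec spec = InteractionSpec.create(new String[]{"a", "b"},
  //                                                 new StringPair[]{new StringPair("a", "c")});
  //   Model.InteractionPair[] pairs = spec.makeInteractionPairs(trainFrame);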
/** Model-specific output class. Each model sub-class contains an instance
* of one of these containing its "output": the pieces of the model needed
* for scoring. E.g. KMeansModel has a KMeansOutput extending Model.Output
* which contains the cluster centers. The output also includes the names,
* domains and other fields which are determined at training time. */
public abstract static class Output extends Iced {
/** Columns used in the model and are used to match up with scoring data
* columns. The last name is the response column name (if any). */
public String _names[];
public String _column_types[];
    /**
     * @deprecated as of March 6, 2019, replaced by {@link #setNames(String[], String[])}
     */
@Deprecated
public void setNames(String[] names) {
_names = names;
_column_types = new String[names.length];
Arrays.fill(_column_types, "NA");
}
    public void setNames(String[] names, String[] columnTypes) {
      _names = names;
      _column_types = columnTypes;
}
public String _origNames[]; // only set if ModelBuilder.encodeFrameCategoricals() changes the training frame
/** Categorical/factor mappings, per column. Null for non-categorical cols.
* Columns match the post-init cleanup columns. The last column holds the
* response col categoricals for SupervisedModels. */
public String _domains[][];
public String _origDomains[][]; // only set if ModelBuilder.encodeFrameCategoricals() changes the training frame
public double[] _orig_projection_array;// only set if ModelBuilder.encodeFrameCategoricals() changes the training frame
/** List of Keys to cross-validation models (non-null iff _parms._nfolds > 1 or _parms._fold_column != null) **/
public Key _cross_validation_models[];
/** List of Keys to cross-validation predictions (if requested) **/
public Key _cross_validation_predictions[];
public Key<Frame> _cross_validation_holdout_predictions_frame_id;
public Key<Frame> _cross_validation_fold_assignment_frame_id;
// Model-specific start/end/run times
// Each individual model's start/end/run time is reported here, not the total time to build N+1 cross-validation models, or all grid models
public long _start_time;
public long _end_time;
public long _run_time;
public long _total_run_time; // includes building of cv models
protected void startClock() { _start_time = System.currentTimeMillis(); }
protected void stopClock() {
_end_time = System.currentTimeMillis();
_total_run_time = _run_time = _end_time - _start_time;
}
public Output(){this(false,false,false);}
public Output(boolean hasWeights, boolean hasOffset, boolean hasFold) {
_hasWeights = hasWeights;
_hasOffset = hasOffset;
_hasFold = hasFold;
}
    /** Any final prep-work just before model-building starts, but after the
     *  user has clicked "go". E.g., converting a response column to a categorical
     *  touches the entire column (can be expensive), makes a parallel vec
     *  (Key/Data leak management issues), and might throw IAE if there are too
     *  many classes. */
public Output(ModelBuilder b) {
this(b, b._train);
}
protected Output(ModelBuilder b, Frame train) {
if (b.error_count() > 0)
throw new IllegalArgumentException(b.validationErrors());
// Capture the data "shape" the model is valid on
setNames(train != null ? train.names() : new String[0], train!=null?train.typesStr():new String[0]);
_domains = train != null ? train.domains() : new String[0][];
_origNames = b._origNames;
_origDomains = b._origDomains;
_orig_projection_array = b._orig_projection_array;
_isSupervised = b.isSupervised();
_hasOffset = b.hasOffsetCol();
_hasWeights = b.hasWeightCol();
_hasFold = b.hasFoldCol();
_hasTreatment = b.hasTreatmentCol();
_distribution = b._distribution;
_priorClassDist = b._priorClassDist;
_reproducibility_information_table = createReproducibilityInformationTable(b);
assert(_job==null); // only set after job completion
_defaultThreshold = -1;
}
/** Returns number of input features (OK for most supervised methods, need to override for unsupervised!) */
public int nfeatures() {
return _names.length - (_hasOffset?1:0) - (_hasWeights?1:0) - (_hasFold?1:0) - (_hasTreatment ?1:0) - (isSupervised()?1:0);
}
/** Returns features used by the model */
public String[] features() {
return Arrays.copyOf(_names, nfeatures());
}
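    // Illustrative sketch (hypothetical layout): with _names = {"x1", "x2", "w", "y"},
    // _hasWeights = true and a supervised model, nfeatures() = 4 - 1 - 1 = 2,
    // so features() returns {"x1", "x2"}.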
/** List of all the associated ModelMetrics objects, so we can delete them
* when we delete this model. */
Key<ModelMetrics>[] _model_metrics = new Key[0];
/** Job info: final status (canceled, crashed), build time */
public Job _job;
/**
* Training set metrics obtained during model training
*/
public ModelMetrics _training_metrics;
/**
* Validation set metrics obtained during model training (if a validation data set was specified)
*/
public ModelMetrics _validation_metrics;
/**
* Cross-Validation metrics obtained during model training
*/
public ModelMetrics _cross_validation_metrics;
/**
* Summary of cross-validation metrics of all k-fold models
*/
public TwoDimTable _cross_validation_metrics_summary;
/**
* User-facing model summary - Display model type, complexity, size and other useful stats
*/
public TwoDimTable _model_summary;
/**
* Reproducibility information describing the current cluster configuration, each node configuration
* and checksums for each frame used on the input of the algorithm
*/
public TwoDimTable[] _reproducibility_information_table;
/**
* User-facing model scoring history - 2D table with modeling accuracy as a function of time/trees/epochs/iterations, etc.
*/
public TwoDimTable _scoring_history;
public TwoDimTable[] _cv_scoring_history;
public double[] _distribution;
public double[] _modelClassDist;
public double[] _priorClassDist;
protected boolean _isSupervised;
    /**
     * Default threshold used to make a decision about binomial predictions.
     * -1 if not set by the user; in that case the effective default is 0.5 when no metrics are available.
     * A value in (0, 1] is either a custom default threshold, or the threshold derived from the
     * validation metrics (preferred) or the training metrics.
     */
    public double _defaultThreshold;
public boolean isSupervised() { return _isSupervised; }
    // Note: for supervised models the response column is always the last column in _names.
    protected boolean _hasOffset; // weights and offset are kept at designated positions in the names array
protected boolean _hasWeights;// only need to know if we have them
protected boolean _hasFold;// only need to know if we have them
protected boolean _hasTreatment;
public boolean hasOffset () { return _hasOffset;}
public boolean hasWeights () { return _hasWeights;}
public boolean hasFold () { return _hasFold;}
public boolean hasTreatment() { return _hasTreatment;}
public boolean hasResponse() { return isSupervised(); }
public String responseName() { return isSupervised()?_names[responseIdx()]:null;}
public String weightsName () { return _hasWeights ?_names[weightsIdx()]:null;}
public String offsetName () { return _hasOffset ?_names[offsetIdx()]:null;}
public String foldName () { return _hasFold ?_names[foldIdx()]:null;}
public String treatmentName() { return _hasTreatment ? _names[treatmentIdx()]: null;}
public InteractionBuilder interactionBuilder() { return null; }
    // Vec layout is [c1,c2,...,cn, w?, o?, f?, t?, r]
    // cn are predictor cols, r is response, w is weights, o is offset, f is fold and t is treatment - w, o, f and t are optional
protected int lastSpecialColumnIdx() {
return _names.length - 1 - (isSupervised()?1:0);
}
public int weightsIdx() {
if(!_hasWeights) return -1;
return lastSpecialColumnIdx() - (hasOffset()?1:0) - (hasFold()?1:0) - (hasTreatment()?1:0);
}
public int offsetIdx() {
if(!_hasOffset) return -1;
return lastSpecialColumnIdx() - (hasFold()?1:0) - (hasTreatment()?1:0);
}
public int foldIdx() {
if(!_hasFold) return -1;
return lastSpecialColumnIdx() - (hasTreatment()?1:0);
}
public int responseIdx() {
if(!isSupervised()) return -1;
return _names.length-1;
}
public int treatmentIdx() {
if(!_hasTreatment) return -1;
return _names.length - (isSupervised()?1:0) - 1;
}
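    // Illustrative sketch (hypothetical layout): with _names = {"x1", "x2", "w", "f", "y"},
    // weights and fold present, supervised, no offset/treatment: lastSpecialColumnIdx() = 3,
    // foldIdx() = 3, weightsIdx() = 2 and responseIdx() = 4, matching the layout comment above.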
/** Names of levels for a categorical response column. */
public String[] classNames() {
if (_domains == null || _domains.length == 0 || !isSupervised()) return null;
return _domains[_domains.length - 1];
}
/** Is this model a classification model? (v. a regression or clustering model) */
public boolean isClassifier() { return isSupervised() && nclasses() > 1; }
/** Is this model a binomial classification model? (v. a regression or clustering model) */
public boolean isBinomialClassifier() { return isSupervised() && nclasses() == 2; }
    /** Is this model a multinomial classification model? (supervised and nclasses() > 2) */
public boolean isMultinomialClassifier() { return isSupervised() && nclasses() > 2; }
/** Number of classes in the response column if it is categorical and the model is supervised. */
public int nclasses() {
String cns[] = classNames();
return cns == null ? 1 : cns.length;
}
// Note: some algorithms MUST redefine this method to return other model categories
public ModelCategory getModelCategory() {
if (isSupervised())
return (isClassifier() ?
(nclasses() > 2 ? ModelCategory.Multinomial : ModelCategory.Binomial) :
ModelCategory.Regression);
return ModelCategory.Unknown;
}
public boolean isAutoencoder() { return false; } // Override in DeepLearning and so on.
/**
* Retrieves variable importances
* @return instance of TwoDimTable if model supports variable importances, null otherwise
*/
public TwoDimTable getVariableImportances() {
return null;
}
public synchronized Key<ModelMetrics>[] clearModelMetrics(boolean keepModelTrainingMetrics) {
Key<ModelMetrics>[] removed;
if (keepModelTrainingMetrics) {
Key<ModelMetrics>[] kept = new Key[0];
if (_training_metrics != null) kept = ArrayUtils.append(kept, _training_metrics._key);
if (_validation_metrics != null) kept = ArrayUtils.append(kept, _validation_metrics._key);
if (_cross_validation_metrics != null) kept = ArrayUtils.append(kept, _cross_validation_metrics._key);
removed = new Key[0];
for (Key<ModelMetrics> k : _model_metrics) {
if (!ArrayUtils.contains(kept, k))
removed = ArrayUtils.append(removed, k);
}
_model_metrics = kept;
} else {
removed = Arrays.copyOf(_model_metrics, _model_metrics.length);
_model_metrics = new Key[0];
}
return removed;
}
public synchronized Key<ModelMetrics>[] getModelMetrics() { return Arrays.copyOf(_model_metrics, _model_metrics.length); }
public synchronized void changeModelMetricsKey(Key modelkey) {
for (Key<ModelMetrics> modelMetrics : _model_metrics) {
modelMetrics.get().setModelKey(modelkey);
}
}
protected long checksum_impl() {
return (null == _names ? 13 : Arrays.hashCode(_names)) *
(null == _domains ? 17 : Arrays.deepHashCode(_domains)) *
getModelCategory().ordinal();
}
public double defaultThreshold() {
if (nclasses() != 2 || _training_metrics == null || _training_metrics instanceof ModelMetricsBinomialUplift)
return 0.5;
if(_defaultThreshold == -1) {
if (_validation_metrics != null && ((ModelMetricsBinomial) _validation_metrics)._auc != null)
return ((ModelMetricsBinomial) _validation_metrics)._auc.defaultThreshold();
if (((ModelMetricsBinomial) _training_metrics)._auc != null)
return ((ModelMetricsBinomial) _training_metrics)._auc.defaultThreshold();
} else {
return _defaultThreshold;
}
return 0.5;
}
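    // Resolution order: an explicitly set threshold wins; otherwise the validation AUC's default
    // threshold is used if available, then the training AUC's, and finally the 0.5 fallback.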
public void resetThreshold(double value){
      assert value > 0 && value <= 1: "Reset threshold should be a value from (0, 1]. Got "+value+".";
_defaultThreshold = value;
}
    public void printTwoDimTables(StringBuilder sb, Object o) {
      for (Field f : Weaver.getWovenFields(o.getClass())) {
        Class<?> c = f.getType();
        if (c.isAssignableFrom(TwoDimTable.class)) {
          try {
            f.setAccessible(true); // must be set before the field is read
            TwoDimTable t = (TwoDimTable) f.get(o); // read from the passed-in object, not `this`
            if (t != null) sb.append(t.toString(1,false /*don't print the full table if too long*/));
          } catch (IllegalAccessException e) {
            Log.err(e);
            sb.append("Failed to print table ").append(f.getName()).append("\n");
          }
        }
      }
    }
@Override public String toString() {
StringBuilder sb = new StringBuilder();
if (_training_metrics!=null) sb.append(_training_metrics.toString());
if (_validation_metrics!=null) sb.append(_validation_metrics.toString());
if (_cross_validation_metrics!=null) sb.append(_cross_validation_metrics.toString());
printTwoDimTables(sb, this);
return sb.toString();
}
private TwoDimTable[] createReproducibilityInformationTable(ModelBuilder modelBuilder) {
TwoDimTable nodeInformation = ReproducibilityInformationUtils.createNodeInformationTable();
TwoDimTable clusterConfiguration = ReproducibilityInformationUtils.createClusterConfigurationTable();
TwoDimTable inputFramesInformation = createInputFramesInformationTable(modelBuilder);
return new TwoDimTable[] {nodeInformation, clusterConfiguration, inputFramesInformation};
}
public TwoDimTable createInputFramesInformationTable(ModelBuilder modelBuilder) {
String[] colHeaders = new String[] {"Input Frame", "Checksum", "ESPC"};
String[] colTypes = new String[] {"string", "long", "string"};
      String[] colFormat = new String[] {"%s", "%d", "%s"}; // ESPC column holds a string, not a long
final int rows = getInformationTableNumRows();
TwoDimTable table = new TwoDimTable(
"Input Frames Information", null,
new String[rows],
colHeaders,
colTypes,
colFormat,
"");
table.set(0, 0, "training_frame");
table.set(1, 0, "validation_frame");
table.set(0, 1, modelBuilder.train() != null ? modelBuilder.train().checksum() : -1);
table.set(1, 1, modelBuilder._valid != null ? modelBuilder.valid().checksum() : -1);
table.set(0, 2, modelBuilder.train() != null ? Arrays.toString(modelBuilder.train().anyVec().espc()) : -1);
table.set(1, 2, modelBuilder._valid != null ? Arrays.toString(modelBuilder.valid().anyVec().espc()) : -1);
return table;
}
public int getInformationTableNumRows() {
      return 2; // one row per input frame (training frame, validation frame)
}
} // Output
protected String[][] scoringDomains() { return _output._domains; }
public ModelMetrics addMetrics(ModelMetrics mm) { return addModelMetrics(mm); }
public abstract ModelMetrics.MetricBuilder makeMetricBuilder(String[] domain);
/** Full constructor */
public Model(Key<M> selfKey, P parms, O output) {
super(selfKey);
assert parms != null;
_parms = parms;
evalAutoParamsEnabled = evaluateAutoModelParameters();
if (evalAutoParamsEnabled) {
initActualParamValues();
}
    _output = output;  // Output may be null here; subclasses may assign it after construction
if (_output != null)
_output.startClock();
_dist = isSupervised() && _output.nclasses() == 1 ? DistributionFactory.getDistribution(_parms) : null;
Log.info("Starting model "+ selfKey);
}
public void initActualParamValues() {}
  /**
   * Deviance of the model's distribution function at the predicted value f
   * @param w observation weight
   * @param y (actual) response
   * @param f (predicted) response in original response space
   * @return deviance
   */
public double deviance(double w, double y, double f) {
return _dist.deviance(w, y, f);
}
public double likelihood(double w, double y, double[] f) {
return Double.NaN; // placeholder. This function is overridden in GLM and GenericModel.
}
public double aic(double likelihood) {
return Double.NaN; // placeholder. This function is overridden in GenericModel.
}
public ScoringInfo[] scoring_history() { return scoringInfo; }
  /**
   * Fill a ScoringInfo with data from the ModelMetrics for this model.
   * @param scoringInfo the ScoringInfo instance to fill
   */
public void fillScoringInfo(ScoringInfo scoringInfo) {
scoringInfo.is_classification = this._output.isClassifier();
scoringInfo.is_autoencoder = _output.isAutoencoder();
scoringInfo.scored_train = new ScoreKeeper(this._output._training_metrics);
scoringInfo.scored_valid = new ScoreKeeper(this._output._validation_metrics);
scoringInfo.scored_xval = new ScoreKeeper(this._output._cross_validation_metrics);
scoringInfo.validation = _output._validation_metrics != null;
scoringInfo.cross_validation = _output._cross_validation_metrics != null;
}
// return the most up-to-date model metrics
public ScoringInfo last_scored() {
return scoringInfo == null ? null : scoringInfo[scoringInfo.length-1];
}
// Lower is better
public float loss() {
switch (Optional.ofNullable(_parms._stopping_metric).orElse(ScoreKeeper.StoppingMetric.AUTO)) {
case MSE:
return (float) mse();
case MAE:
return (float) mae();
case RMSLE:
return (float) rmsle();
case logloss:
return (float) logloss();
case deviance:
return (float) deviance();
case misclassification:
return (float) classification_error();
case AUC:
return (float)(1-auc());
case AUCPR:
return (float)(1-AUCPR());
/* case r2:
return (float)(1-r2());*/
case mean_per_class_error:
return (float)mean_per_class_error();
case lift_top_group:
return (float)lift_top_group();
case AUTO:
default:
return (float) (_output.isClassifier() ? logloss() : _output.isAutoencoder() ? mse() : deviance());
}
} // loss()
public int compareTo(M o) {
if (o._output.isClassifier() != _output.isClassifier())
throw new UnsupportedOperationException("Cannot compare classifier against regressor.");
if (o._output.isClassifier()) {
if (o._output.nclasses() != _output.nclasses())
throw new UnsupportedOperationException("Cannot compare models with different number of classes.");
}
return (loss() < o.loss() ? -1 : loss() > o.loss() ? 1 : 0);
}
public double classification_error() {
if (scoringInfo != null)
return last_scored().cross_validation ? last_scored().scored_xval._classError : last_scored().validation ? last_scored().scored_valid._classError : last_scored().scored_train._classError;
ModelMetrics mm = _output._cross_validation_metrics != null ? _output._cross_validation_metrics : _output._validation_metrics != null ? _output._validation_metrics : _output._training_metrics;
if (mm == null) return Double.NaN;
if (mm instanceof ModelMetricsBinomial) {
return ((ModelMetricsBinomial)mm)._auc.defaultErr();
} else if (mm instanceof ModelMetricsMultinomial) {
return ((ModelMetricsMultinomial)mm)._cm.err();
}
return Double.NaN;
}
public double mse() {
if (scoringInfo != null)
return last_scored().cross_validation ? last_scored().scored_xval._mse : last_scored().validation ? last_scored().scored_valid._mse : last_scored().scored_train._mse;
ModelMetrics mm = _output._cross_validation_metrics != null ? _output._cross_validation_metrics : _output._validation_metrics != null ? _output._validation_metrics : _output._training_metrics;
if (mm == null) return Double.NaN;
return mm.mse();
}
  public double r2() {
    if (scoringInfo != null)
      return last_scored().cross_validation ? last_scored().scored_xval._r2 : last_scored().validation ? last_scored().scored_valid._r2 : last_scored().scored_train._r2;
    ModelMetrics mm = _output._cross_validation_metrics != null ? _output._cross_validation_metrics : _output._validation_metrics != null ? _output._validation_metrics : _output._training_metrics;
    if (mm == null) return Double.NaN;
    // was `mm.mse()`, an apparent copy-paste bug; assumes supervised metrics expose r2()
    return mm instanceof ModelMetricsSupervised ? ((ModelMetricsSupervised) mm).r2() : Double.NaN;
  }
public double mae() {
if (scoringInfo != null)
return last_scored().cross_validation ? last_scored().scored_xval._mae : last_scored().validation ? last_scored().scored_valid._mae : last_scored().scored_train._mae;
ModelMetrics mm = _output._cross_validation_metrics != null ? _output._cross_validation_metrics : _output._validation_metrics != null ? _output._validation_metrics : _output._training_metrics;
if (mm == null) return Double.NaN;
return ((ModelMetricsRegression)mm).mae();
}
public double rmsle() {
if (scoringInfo != null)
return last_scored().cross_validation ? last_scored().scored_xval._rmsle : last_scored().validation ? last_scored().scored_valid._rmsle : last_scored().scored_train._rmsle;
ModelMetrics mm = _output._cross_validation_metrics != null ? _output._cross_validation_metrics : _output._validation_metrics != null ? _output._validation_metrics : _output._training_metrics;
if (mm == null) return Double.NaN;
return ((ModelMetricsRegression)mm).rmsle();
}
public double auc() {
if (scoringInfo != null)
return last_scored().cross_validation ? last_scored().scored_xval._AUC : last_scored().validation ? last_scored().scored_valid._AUC : last_scored().scored_train._AUC;
ModelMetrics mm = _output._cross_validation_metrics != null ? _output._cross_validation_metrics : _output._validation_metrics != null ? _output._validation_metrics : _output._training_metrics;
if (mm == null) return Double.NaN;
if(mm instanceof ModelMetricsBinomial) {
return ((ModelMetricsBinomial) mm)._auc._auc;
} else if(mm instanceof ModelMetricsMultinomial) {
return ((ModelMetricsMultinomial) mm).auc();
}
return Double.NaN;
}
public double AUCPR() {
if (scoringInfo != null)
return last_scored().cross_validation ? last_scored().scored_xval._pr_auc : last_scored().validation ? last_scored().scored_valid._pr_auc : last_scored().scored_train._pr_auc;
ModelMetrics mm = _output._cross_validation_metrics != null ? _output._cross_validation_metrics : _output._validation_metrics != null ? _output._validation_metrics : _output._training_metrics;
if (mm == null) return Double.NaN;
if(mm instanceof ModelMetricsBinomial) {
return ((ModelMetricsBinomial) mm)._auc._pr_auc;
} else if(mm instanceof ModelMetricsMultinomial) {
return ((ModelMetricsMultinomial) mm).pr_auc();
}
return Double.NaN;
}
public double deviance() {
if (scoringInfo != null)
return last_scored().cross_validation ? last_scored().scored_xval._mean_residual_deviance: last_scored().validation ? last_scored().scored_valid._mean_residual_deviance : last_scored().scored_train._mean_residual_deviance;
ModelMetrics mm = _output._cross_validation_metrics != null ? _output._cross_validation_metrics : _output._validation_metrics != null ? _output._validation_metrics : _output._training_metrics;
if (mm == null) return Double.NaN;
return ((ModelMetricsRegression)mm)._mean_residual_deviance;
}
public double logloss() {
if (scoringInfo != null)
return last_scored().cross_validation ? last_scored().scored_xval._logloss : last_scored().validation ? last_scored().scored_valid._logloss : last_scored().scored_train._logloss;
ModelMetrics mm = _output._cross_validation_metrics != null ? _output._cross_validation_metrics : _output._validation_metrics != null ? _output._validation_metrics : _output._training_metrics;
if (mm == null) return Double.NaN;
if (mm instanceof ModelMetricsBinomial) {
return ((ModelMetricsBinomial)mm).logloss();
} else if (mm instanceof ModelMetricsMultinomial) {
return ((ModelMetricsMultinomial)mm).logloss();
}
return Double.NaN;
}
public double mean_per_class_error() {
if (scoringInfo != null)
return last_scored().cross_validation ? last_scored().scored_xval._mean_per_class_error : last_scored().validation ? last_scored().scored_valid._mean_per_class_error : last_scored().scored_train._mean_per_class_error;
ModelMetrics mm = _output._cross_validation_metrics != null ? _output._cross_validation_metrics : _output._validation_metrics != null ? _output._validation_metrics : _output._training_metrics;
if (mm == null) return Double.NaN;
if (mm instanceof ModelMetricsBinomial) {
return ((ModelMetricsBinomial)mm).mean_per_class_error();
} else if (mm instanceof ModelMetricsMultinomial) {
return ((ModelMetricsMultinomial)mm).mean_per_class_error();
}
return Double.NaN;
}
public double lift_top_group() {
if (scoringInfo != null)
return last_scored().cross_validation ? last_scored().scored_xval._lift : last_scored().validation ? last_scored().scored_valid._lift : last_scored().scored_train._lift;
ModelMetrics mm = _output._cross_validation_metrics != null ? _output._cross_validation_metrics : _output._validation_metrics != null ? _output._validation_metrics : _output._training_metrics;
if (mm == null) return Double.NaN;
if (mm instanceof ModelMetricsBinomial) {
GainsLift gl = ((ModelMetricsBinomial)mm)._gainsLift;
if (gl != null && gl.response_rates != null && gl.response_rates.length > 0) {
return gl.response_rates[0] / gl.avg_response_rate;
}
}
return Double.NaN;
}
/** Adapt a Test/Validation Frame to be compatible for a Training Frame. The
* intention here is that ModelBuilders can assume the test set has the same
* count of columns, and within each factor column the same set of
* same-numbered levels. Extra levels are renumbered past those in the
* Train set but will still be present in the Test set, thus requiring
* range-checking.
*
* This routine is used before model building (with no Model made yet) to
* check for compatible datasets, and also used to prepare a large dataset
* for scoring (with a Model).
*
   * Adaptation does the following things:
* - Remove any "extra" Vecs appearing only in the test and not the train
* - Insert any "missing" Vecs appearing only in the train and not the test
   *    with all NAs ({@link #missingColumnsType()}). This will issue a warning,
* and if the "expensive" flag is false won't actually make the column
* replacement column but instead will bail-out on the whole adaption (but
* will continue looking for more warnings).
* - If all columns are missing, issue an error.
* - Renumber matching cat levels to match the Train levels; this might make
* "holes" in the Test set cat levels, if some are not in the Test set.
* - Extra Test levels are renumbered past the end of the Train set, hence
* the train and test levels match up to all the train levels; there might
* be extra Test levels past that.
* - For all mis-matched levels, issue a warning.
*
* The {@code test} frame is updated in-place to be compatible, by altering
* the names and Vecs; make a defensive copy if you do not want it modified.
* There is a fast-path cutout if the test set is already compatible. Since
   *  the test set is conditionally modified (extra CategoricalWrappedVecs may optionally be
   *  added), it is recommended to use a Scope enter/exit to track Vec lifetimes.
*
* @param test Testing Frame, updated in-place
* @param expensive Try hard to adapt; this might involve the creation of
* whole Vecs and thus get expensive. If {@code false}, then only adapt if
* no warnings and errors; otherwise just the messages are produced.
* Created Vecs have to be deleted by the caller (e.g. Scope.enter/exit).
* @return Array of warnings; zero length (never null) for no warnings.
* Throws {@code IllegalArgumentException} if no columns are in common, or
* if any factor column has no levels in common.
*/
public String[] adaptTestForTrain(Frame test, boolean expensive, boolean computeMetrics) {
return adaptTestForTrain(test, expensive, computeMetrics, false);
}
public String[] adaptTestForTrain(Frame test, boolean expensive, boolean computeMetrics, boolean catEncoded) {
return adaptTestForTrain(
test,
_output._origNames,
_output._origDomains,
_output._names,
_output._domains,
makeAdaptFrameParameters(),
expensive,
computeMetrics,
_output.interactionBuilder(),
getToEigenVec(),
_toDelete,
catEncoded
);
}
protected AdaptFrameParameters makeAdaptFrameParameters() {
return _parms;
}
public interface AdaptFrameParameters {
Parameters.CategoricalEncodingScheme getCategoricalEncoding();
String getWeightsColumn();
String getOffsetColumn();
String getFoldColumn();
String getResponseColumn();
String getTreatmentColumn();
double missingColumnsType();
int getMaxCategoricalLevels();
default String[] getNonPredictors() {
return Arrays.stream(new String[]{getWeightsColumn(), getOffsetColumn(), getFoldColumn(), getResponseColumn(), getTreatmentColumn()})
.filter(Objects::nonNull)
.toArray(String[]::new);
}
    }
  }
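  // Illustrative note: getNonPredictors() collects the non-null special column names; e.g. with
  // weights column "w" and response column "y" but no offset/fold/treatment it returns
  // {"w", "y"} (in the order weights, offset, fold, response, treatment).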
/**
* @param test Frame to be adapted
* @param origNames Training column names before categorical column encoding - can be the same as names
* @param origDomains Training column levels before categorical column encoding - can be the same as domains
* @param names Training column names
* @param domains Training column levels
* @param parms Model parameters
* @param expensive Whether to actually do the hard work
* @param computeMetrics Whether metrics can be (and should be) computed
   * @param interactionBldr Builder that creates interaction columns and adds them to the test frame
   * @param tev Converter used for Eigen categorical encoding (may be null if unused)
   * @param toDelete Map collecting the keys of temporary Vecs/Frames created during adaptation, for later cleanup
   * @param catEncoded Whether the categorical columns of the test frame were already transformed via categorical_encoding
*/
public static String[] adaptTestForTrain(final Frame test, final String[] origNames, final String[][] origDomains,
String[] names, String[][] domains, final AdaptFrameParameters parms,
final boolean expensive, final boolean computeMetrics,
final InteractionBuilder interactionBldr, final ToEigenVec tev,
final IcedHashMap<Key, String> toDelete, final boolean catEncoded)
throws IllegalArgumentException {
String[] msg = new String[0];
if (test == null) return msg;
if (catEncoded && origNames==null) return msg;
// test frame matches the training frame (after categorical encoding, if applicable)
String[][] tdomains = test.domains();
if (names == test._names && domains == tdomains || (Arrays.equals(names, test._names) && Arrays.deepEquals(domains, tdomains)) )
return msg;
String[] backupNames = names;
String[][] backupDomains = domains;
final String weights = parms.getWeightsColumn();
final String offset = parms.getOffsetColumn();
final String fold = parms.getFoldColumn();
final String response = parms.getResponseColumn();
final String treatment = parms.getTreatmentColumn();
// whether we need to be careful with categorical encoding - the test frame could be either in original state or in encoded state
// keep in sync with FrameUtils.categoricalEncoder: as soon as a categorical column has been encoded, we should check here.
final boolean checkCategoricals = !catEncoded && Arrays.asList(
Parameters.CategoricalEncodingScheme.Binary,
Parameters.CategoricalEncodingScheme.LabelEncoder,
Parameters.CategoricalEncodingScheme.Eigen,
Parameters.CategoricalEncodingScheme.EnumLimited,
Parameters.CategoricalEncodingScheme.OneHotExplicit
).indexOf(parms.getCategoricalEncoding()) >= 0;
// test frame matches the user-given frame (before categorical encoding, if applicable)
if (checkCategoricals && origNames != null) {
boolean match = Arrays.equals(origNames, test.names());
if (!match) {
// As soon as the test frame contains at least one original pre-encoding predictor,
// then we consider the frame as valid for predictions, and we'll later fill missing columns with NA
Set<String> required = new HashSet<>(Arrays.asList(origNames));
required.removeAll(Arrays.asList(response, weights, fold, treatment));
for (String name : test.names()) {
if (required.contains(name)) {
match = true;
break;
}
}
}
// still have work to do below, make sure we set the names/domains to the original user-given values
// such that we can do the int->enum mapping and cat. encoding below (from scratch)
if (match) {
names = origNames;
domains = origDomains;
}
}
// create the interactions now and bolt them on to the front of the test Frame
if (null != interactionBldr) {
interactionBldr.makeInteractions(test);
}
// Build the validation set to be compatible with the training set.
// Toss out extra columns, complain about missing ones, remap categoricals
ArrayList<String> msgs = new ArrayList<>();
Vec vvecs[] = new Vec[names.length];
int good = 0; // Any matching column names, at all?
int convNaN = 0; // count of columns that were replaced with NA
final Frame.FrameVecRegistry frameVecRegistry = test.frameVecRegistry();
for (int i = 0; i < names.length; i++) {
Vec vec = frameVecRegistry.findByColName(names[i]); // Search in the given validation set
boolean isResponse = response != null && names[i].equals(response);
boolean isWeights = weights != null && names[i].equals(weights);
boolean isOffset = offset != null && names[i].equals(offset);
boolean isFold = fold != null && names[i].equals(fold);
boolean isTreatment = treatment != null && names[i].equals(treatment);
// If a training set column is missing in the test set, complain (if it's ok, fill in with NAs (or 0s if it's a fold-column))
if (vec == null) {
if (isResponse && computeMetrics)
throw new IllegalArgumentException("Test/Validation dataset is missing response column '" + response + "'");
else if (isOffset)
throw new IllegalArgumentException(H2O.technote(12, "Test/Validation dataset is missing offset column '" + offset + "'. If your intention is to disable the effect of the offset add a zero offset column."));
        else if (isWeights && computeMetrics) {
          if (expensive) {
            vec = test.anyVec().makeCon(1);
            toDelete.put(vec._key, "adapted missing vectors");
            // cross-validation generated weights will not be found in the test/validation dataset,
            // so the warning below would be spurious; suppress it for internal CV weights.
            if (!names[i].contains("_internal_cv_weights_")) {
              msgs.add(H2O.technote(1, "Test/Validation dataset is missing weights column '" +
                      names[i] + "' (needed because a response was found and metrics are to be computed): " +
                      "substituting in a column of 1s"));
            }
          }
        }
        else if (isTreatment && computeMetrics) { // sibling branch: was unreachable when nested under the weights case
          throw new IllegalArgumentException("Test/Validation dataset is missing treatment column '" + treatment + "'");
        }
        else if (expensive) { // generate warning even for response columns. Other tests depended on this.
final double defval;
if (isWeights)
defval = 1; // note: even though computeMetrics is false we should still have sensible weights (GLM skips rows with NA weights)
else
if (isFold && domains[i] == null)
defval = 0;
else {
defval = parms.missingColumnsType();
convNaN++;
}
vec = test.anyVec().makeCon(defval);
toDelete.put(vec._key, "adapted missing vectors");
String str = "Test/Validation dataset is missing column '" + names[i] + "': substituting in a column of " + defval;
if (ArrayUtils.contains(parms.getNonPredictors(), names[i]))
Log.info(str); // we are doing a "pure" predict (computeMetrics is false), don't complain to the user
else
msgs.add(str);
}
}
if( vec != null) { // I have a column with a matching name
if( domains[i] != null) { // Model expects an categorical
if (vec.isString())
vec = VecUtils.stringToCategorical(vec); //turn a String column into a categorical column (we don't delete the original vec here)
if( expensive && !Arrays.equals(vec.domain(),domains[i]) ) { // Result needs to be the same categorical
Vec evec;
try {
evec = vec.adaptTo(domains[i]); // Convert to categorical or throw IAE
toDelete.put(evec._key, "categorically adapted vec");
} catch( NumberFormatException nfe ) {
throw new IllegalArgumentException("Test/Validation dataset has a non-categorical column '"+names[i]+"' which is categorical in the training data");
}
String[] ds = evec.domain();
assert ds != null && ds.length >= domains[i].length;
if( isResponse && vec.domain() != null && ds.length == domains[i].length+vec.domain().length )
throw new IllegalArgumentException("Test/Validation dataset has a categorical response column '"+names[i]+"' with no levels in common with the model");
if( isTreatment && vec.domain() != null && ds.length == domains[i].length+vec.domain().length)
throw new IllegalArgumentException("Test/Validation dataset has a categorical treatment column '"+names[i]+"' with no levels in common with the model");
if (ds.length > domains[i].length)
msgs.add("Test/Validation dataset column '" + names[i] + "' has levels not trained on: " + ArrayUtils.toStringQuotedElements(Arrays.copyOfRange(ds, domains[i].length, ds.length), 20));
vec = evec;
}
} else if(vec.isCategorical()) {
throw new IllegalArgumentException("Test/Validation dataset has categorical column '" + names[i] + "' which is real-valued in the training data");
}
good++; // Assumed compatible; not checking e.g. Strings vs UUID
}
vvecs[i] = vec;
}
if( good == convNaN )
throw new IllegalArgumentException("Test/Validation dataset has no columns in common with the training set");
if( good == names.length || (response != null && test.find(response) == -1 && good == names.length - 1) ) // Only update if got something for all columns
test.restructure(names, vvecs, good);
if (expensive && checkCategoricals) {
final boolean hasCategoricalPredictors = hasCategoricalPredictors(test, response, weights, offset, fold, treatment, names, domains);
// check if we first need to expand categoricals before calling this method again
if (hasCategoricalPredictors) {
Frame updated = categoricalEncoder(test, parms.getNonPredictors(), parms.getCategoricalEncoding(), tev, parms.getMaxCategoricalLevels());
toDelete.put(updated._key, "categorically encoded frame");
test.restructure(updated.names(), updated.vecs()); //updated in place
String[] msg2 = adaptTestForTrain(test, origNames, origDomains, backupNames, backupDomains, parms, expensive, computeMetrics, interactionBldr, tev, toDelete, true /*catEncoded*/);
msgs.addAll(Arrays.asList(msg2));
return msgs.toArray(new String[msgs.size()]);
}
}
return msgs.toArray(new String[msgs.size()]);
}
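  // Illustrative sketch (hypothetical frame): typical call pattern, using a Scope to clean up
  // temporary Vecs created during adaptation.
  //   Scope.enter();
  //   try {
  //     String[] warnings = model.adaptTestForTrain(testFrame, true /*expensive*/, true /*computeMetrics*/);
  //     for (String w : warnings) Log.warn(w);
  //     // ... score against the adapted testFrame ...
  //   } finally {
  //     Scope.exit();
  //   }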
  private static boolean hasCategoricalPredictors(final Frame frame, final String responseName,
                                                  final String weightsName, final String offsetName,
                                                  final String foldName, final String treatmentName, final String[] names,
                                                  final String[][] domains) {
boolean haveCategoricalPredictors = false;
final Map<String, Integer> namesIndicesMap = new HashMap<>(names.length);
for (int i = 0; i < names.length; i++) {
namesIndicesMap.put(names[i], i);
}
for (int i = 0; i < frame.numCols(); ++i) {
if (frame.names()[i].equals(responseName)) continue;
      if (frame.names()[i].equals(weightsName)) continue;
if (frame.names()[i].equals(offsetName)) continue;
if (frame.names()[i].equals(foldName)) continue;
if (frame.names()[i].equals(treatmentName)) continue;
// either the column of the test set is categorical (could be a numeric col that's already turned into a factor)
if (frame.vec(i).get_type() == Vec.T_CAT) {
haveCategoricalPredictors = true;
break;
}
      // or an equally named column of the training set is categorical, but the test column isn't (e.g., a numeric column provided to be converted to a factor)
      final Integer whichCol = namesIndicesMap.get(frame.name(i)); // may be null for columns that exist only in the test frame
      if (whichCol != null && domains[whichCol] != null) {
haveCategoricalPredictors = true;
break;
}
}
return haveCategoricalPredictors;
}
  /**
   * Bulk score the frame, and auto-name the resulting predictions frame.
   * @see #score(Frame, String)
   * @param fr frame which should be scored
   * @return A new frame containing the predicted values. For classification it
   *         contains a column with the prediction and the distribution over all
   *         response classes. For regression it contains only one column with
   *         the predicted values.
   * @throws IllegalArgumentException if the frame is not compatible with the model's training frame
   */
public Frame score(Frame fr) throws IllegalArgumentException {
return score(fr, null, null, true);
}
public Frame result() {
throw new UnsupportedOperationException("this model doesn't support constant frame results");
}
public Frame transform(Frame fr) {
    throw new UnsupportedOperationException("this model doesn't support frame transformation");
}
/** Bulk score the frame {@code fr}, producing a Frame result; the 1st
* Vec is the predicted class, the remaining Vecs are the probability
* distributions. For Regression (single-class) models, the 1st and only
* Vec is the prediction value. The result is in the DKV; caller is
* responsible for deleting.
*
   * @param fr frame which should be scored
   * @param destination_key store prediction frame under the given key
   * @param customMetricFunc function to produce ad-hoc scoring metrics if actuals are present
   * @return A new frame containing the predicted values. For classification it
   *         contains a column with the prediction and the distribution over all
   *         response classes. For regression it contains only one column with
   *         the predicted values.
   * @throws IllegalArgumentException if the frame is not compatible with the model's training frame
*/
public Frame score(Frame fr, String destination_key, CFuncRef customMetricFunc) throws IllegalArgumentException {
return score(fr, destination_key, null, true, customMetricFunc);
}
public Frame score(Frame fr, String destination_key) throws IllegalArgumentException {
return score(fr, destination_key, null, true);
}
public Frame score(Frame fr, String destination_key, Job j) throws IllegalArgumentException {
return score(fr, destination_key, j, true);
}
public Frame score(Frame fr, CFuncRef customMetricFunc) throws IllegalArgumentException {
return score(fr, null, null, true, customMetricFunc);
}
/**
* Adds a scoring-related warning.
*
* Note: The implementation might lose a warning if scoring is triggered in parallel
*
* @param s warning description
*/
private void addWarningP(String s) {
String[] warningsP = _warningsP;
warningsP = warningsP != null ? Arrays.copyOf(warningsP, warningsP.length + 1) : new String[1];
warningsP[warningsP.length - 1] = s;
_warningsP = warningsP;
}
public boolean containsResponse(String s, String responseName) {
Pattern pat = Pattern.compile("'(.*?)'");
Matcher match = pat.matcher(s);
if (match.find() && responseName.equals(match.group(1))) {
return true;
}
return false;
}
public Frame score(Frame fr, String destination_key, Job j, boolean computeMetrics) throws IllegalArgumentException {
return score(fr, destination_key, j, computeMetrics, CFuncRef.NOP);
}
protected Frame adaptFrameForScore(Frame fr, boolean computeMetrics) {
Frame adaptFr = new Frame(fr);
applyPreprocessors(adaptFr);
String[] msg = adaptTestForTrain(adaptFr,true, computeMetrics); // Adapt
if (msg.length > 0) {
for (String s : msg) {
if ((_output.responseName() == null) || !containsResponse(s, _output.responseName())) { // response column missing will not generate warning for prediction
addWarningP(s); // add warning string to model
Log.warn(s);
}
}
}
Scope.track(adaptFr);
return adaptFr;
}
public Frame score(Frame fr, String destination_key, Job j, boolean computeMetrics, CFuncRef customMetricFunc) throws IllegalArgumentException {
try (Scope.Safe s = Scope.safe(fr)) {
// Adapt frame, clean up the previous score warning messages
_warningsP = new String[0];
computeMetrics = computeMetrics &&
(!_output.hasResponse() || (fr.vec(_output.responseName()) != null && !fr.vec(_output.responseName()).isBad()));
Frame adaptFr = adaptFrameForScore(fr, computeMetrics);
// Predict & Score
PredictScoreResult result = predictScoreImpl(fr, adaptFr, destination_key, j, computeMetrics, customMetricFunc);
Frame output = result.getPredictions();
result.makeModelMetrics(fr, adaptFr);
Vec predicted = output.vecs()[0]; // Modeled/predicted response
String[] mdomain = predicted.domain(); // Domain of predictions (union of test and train)
// Output is in the model's domain, but needs to be mapped to the scored
// dataset's domain.
if (_output.isClassifier() && computeMetrics && !_output.hasTreatment()) {
Vec actual = fr.vec(_output.responseName());
if (actual != null) { // Predict does not have an actual, scoring does
String[] sdomain = actual.domain(); // Scored/test domain; can be null
if (sdomain != null && mdomain != sdomain && !Arrays.equals(mdomain, sdomain))
CategoricalWrappedVec.updateDomain(output.vec(0), sdomain);
}
}
return Scope.untrack(output);
}
}
private void applyPreprocessors(Frame fr) {
if (_parms._preprocessors == null) return;
for (Key<ModelPreprocessor> key : _parms._preprocessors) {
DKV.prefetch(key);
}
Frame result = fr;
for (Key<ModelPreprocessor> key : _parms._preprocessors) {
ModelPreprocessor preprocessor = key.get();
result = preprocessor.processScoring(result, this);
Scope.track(result);
}
fr.restructure(result.names(), result.vecs()); //inplace
}
/**
* Compute the deviances for each observation
* @param valid Validation Frame (must contain the response)
* @param predictions Predictions made by the model
* @param outputName Name of the output frame
* @return Frame containing 1 column with the per-row deviances
*/
public Frame computeDeviances(Frame valid, Frame predictions, String outputName) {
assert (_parms._response_column!=null) : "response column can't be null";
assert valid.find(_parms._response_column)>=0 : "validation frame must contain a response column";
predictions.add(_parms._response_column, valid.vec(_parms._response_column));
if (valid.find(_parms._weights_column)>=0)
predictions.add(_parms._weights_column, valid.vec(_parms._weights_column));
final int respIdx=predictions.find(_parms._response_column);
final int weightIdx=predictions.find(_parms._weights_column);
final Distribution myDist = _dist == null ? null : IcedUtils.deepCopy(_dist);
if (myDist != null && myDist._family == DistributionFamily.huber) {
myDist.setHuberDelta(hex.ModelMetricsRegression.computeHuberDelta(
valid.vec(_parms._response_column), //actual
predictions.vec(0), //predictions
valid.vec(_parms._weights_column), //weight
_parms._huber_alpha));
}
return new MRTask() {
@Override
public void map(Chunk[] cs, NewChunk[] nc) {
Chunk weight = weightIdx>=0 ? cs[weightIdx] : new C0DChunk(1, cs[0]._len);
Chunk response = cs[respIdx];
for (int i=0;i<cs[0]._len;++i) {
double w=weight.atd(i);
double y=response.atd(i);
if (_output.nclasses()==1) { //regression - deviance
double f=cs[0].atd(i);
if (myDist!=null && myDist._family == DistributionFamily.huber) {
nc[0].addNum(myDist.deviance(w, y, f)); //use above custom huber delta for this dataset
}
else {
nc[0].addNum(deviance(w, y, f));
}
} else {
int iact=(int)y;
double err = iact < _output.nclasses() ? 1-cs[1+iact].atd(i) : 1;
nc[0].addNum(w*MathUtils.logloss(err));
}
}
}
}.doAll(Vec.T_NUM, predictions).outputFrame(Key.<Frame>make(outputName), new String[]{"deviance"}, null);
}
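  // Illustrative sketch (hypothetical frames): computing per-row deviances after scoring.
  //   Frame preds = model.score(validFrame);
  //   Frame devs = model.computeDeviances(validFrame, preds, "per_row_deviances");
  //   // devs contains a single "deviance" column with one value per row of validFrame.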
protected String[] makeScoringNames(){
return makeScoringNames(_output);
}
protected String[][] makeScoringDomains(Frame adaptFrm, boolean computeMetrics, String[] names) {
String[][] domains = new String[names.length][];
Vec response = adaptFrm.lastVec();
domains[0] = names.length == 1 || _output.hasTreatment() ? null : ! computeMetrics ? _output._domains[_output._domains.length - 1] : response.domain();
if (_parms._distribution == DistributionFamily.quasibinomial) {
domains[0] = new VecUtils.CollectDoubleDomain(null,2).doAll(response).stringDomain(response.isInt());
}
return domains;
}
public static <O extends Model.Output> String [] makeScoringNames(O output){
final int nc = output.nclasses();
final int ncols = nc==1?1:nc+1; // Regression has 1 predict col; classification also has class distribution
String [] names = new String[ncols];
if(output.hasTreatment()){
names[0] = "uplift_predict";
names[1] = "p_y1_with_treatment";
names[2] = "p_y1_without_treatment";
} else {
names[0] = "predict";
for (int i = 1; i < names.length; ++i) {
names[i] = output.classNames()[i - 1];
// turn integer class labels such as 0, 1, etc. into p0, p1, etc.
try {
Integer.valueOf(names[i]);
names[i] = "p" + names[i];
} catch (Throwable t) {
// do nothing, non-integer names are fine already
}
}
}
return names;
}
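  // Illustrative note: a binomial model with class labels {"0", "1"} yields
  // {"predict", "p0", "p1"}; a regression model yields just {"predict"}; an uplift model yields
  // {"uplift_predict", "p_y1_with_treatment", "p_y1_without_treatment"}.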
/** Allow subclasses to define their own BigScore class. */
protected BigScore makeBigScoreTask(String[][] domains, String[] names ,
Frame adaptFrm, boolean computeMetrics,
boolean makePrediction, Job j,
CFuncRef customMetricFunc) {
return new BigScore(domains[0],
names != null ? names.length : 0,
adaptFrm.means(),
_output.hasWeights() && adaptFrm.find(_output.weightsName()) >= 0,
computeMetrics,
makePrediction,
j,
customMetricFunc);
}
/** Score an already adapted frame. Returns a new Frame with new result
* vectors, all in the DKV. Caller responsible for deleting. Input is
* already adapted to the Model's domain, so the output is also. Also
* computes the metrics for this frame.
*
* @param adaptFrm Already adapted frame
   * @param computeMetrics whether metrics should be computed during scoring
* @return A Frame containing the prediction column, and class distribution
*/
protected PredictScoreResult predictScoreImpl(Frame fr, Frame adaptFrm, String destination_key, Job j, boolean computeMetrics, CFuncRef customMetricFunc) {
// Build up the names & domains.
String[] names = makeScoringNames();
String[][] domains = makeScoringDomains(adaptFrm, computeMetrics, names);
// Score the dataset, building the class distribution & predictions
BigScore bs = makeBigScoreTask(domains,
names,
adaptFrm,
computeMetrics,
true,
j,
customMetricFunc).doAll(names.length, Vec.T_NUM, adaptFrm);
ModelMetrics.MetricBuilder<?> mb = null;
Frame rawPreds = null;
if (computeMetrics && bs._mb != null) {
rawPreds = bs.outputFrame();
mb = bs._mb;
}
Frame predictFr = bs.outputFrame(Key.make(destination_key), names, domains);
Frame outputPreds = postProcessPredictions(adaptFrm, predictFr, j);
return new PredictScoreResult(mb, rawPreds, outputPreds);
}
protected class PredictScoreResult {
private final ModelMetrics.MetricBuilder<?> _mb; // metric builder can be null if training was interrupted/cancelled even when metrics were requested
private final Frame _rawPreds;
private final Frame _outputPreds;
public PredictScoreResult(ModelMetrics.MetricBuilder<?> mb, Frame rawPreds, Frame outputPreds) {
_mb = mb;
_rawPreds = rawPreds;
_outputPreds = outputPreds;
}
public final Frame getPredictions() {
return _outputPreds;
}
public ModelMetrics.MetricBuilder<?> getMetricBuilder() {
return _mb;
}
public ModelMetrics makeModelMetrics(Frame fr, Frame adaptFrm) {
if (_mb == null)
return null;
return _mb.makeModelMetrics(Model.this, fr, adaptFrm, _rawPreds);
}
}
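// Usage sketch (hypothetical key and arguments): score an adapted frame, then consume the result:
//   PredictScoreResult r = predictScoreImpl(fr, adaptFrm, "my_preds", null, true, customMetricFunc);
//   Frame preds = r.getPredictions();                    // caller is responsible for deleting
//   ModelMetrics mm = r.makeModelMetrics(fr, adaptFrm);  // null if metrics were not computed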
/**
* Post-process the prediction frame. The default implementation is a no-op.
*
* @param adaptFrm the adapted input frame
* @param predictFr the raw prediction frame
* @return the (possibly modified) prediction frame
*/
protected Frame postProcessPredictions(Frame adaptFrm, Frame predictFr, Job j) {
return predictFr;
}
/** Score an already adapted frame. Returns a MetricBuilder that can be used to make a model metrics.
* @param adaptFrm Already adapted frame
* @return MetricBuilder
*/
protected ModelMetrics.MetricBuilder scoreMetrics(Frame adaptFrm) {
final boolean computeMetrics = (!isSupervised() || (adaptFrm.vec(_output.responseName()) != null && !adaptFrm.vec(_output.responseName()).isBad()));
// Build up the names & domains.
//String[] names = makeScoringNames();
String[][] domains = new String[1][];
Vec response = adaptFrm.lastVec();
domains[0] = _output.nclasses() == 1 ? null : !computeMetrics ? _output._domains[_output._domains.length-1] : response.domain();
if (_parms._distribution == DistributionFamily.quasibinomial) {
domains[0] = new VecUtils.CollectDoubleDomain(null,2).doAll(response).stringDomain(response.isInt());
}
// Score the dataset, building the class distribution & predictions
BigScore bs = makeBigScoreTask(domains, null, adaptFrm, computeMetrics, false, null, CFuncRef.from(_parms._custom_metric_func)).doAll(adaptFrm);
return bs._mb;
}
protected class BigScore extends CMetricScoringTask<BigScore> implements BigScorePredict, BigScoreChunkPredict {
final protected String[] _domain; // Prediction domain; union of test and train classes
final protected int _npredcols; // Number of columns in prediction; nclasses+1 - can be less than the prediction domain
final double[] _mean; // Column means of test frame
final public boolean _computeMetrics; // Whether to compute model metrics while scoring
final public boolean _hasWeights;
final public boolean _makePreds;
final public Job _j;
private transient BigScorePredict _localPredict;
/** Output parameter: Metric builder */
public ModelMetrics.MetricBuilder _mb;
public BigScore(String[] domain, int ncols, double[] mean, boolean testHasWeights,
boolean computeMetrics, boolean makePreds, Job j, CFuncRef customMetricFunc) {
super(customMetricFunc);
_j = j;
_domain = domain; _npredcols = ncols; _mean = mean; _computeMetrics = computeMetrics; _makePreds = makePreds;
if(_output._hasWeights && _computeMetrics && !testHasWeights)
throw new IllegalArgumentException("Missing weights when computing validation metrics.");
_hasWeights = testHasWeights;
}
@Override
protected void setupLocal() {
super.setupLocal();
_localPredict = setupBigScorePredict(this);
assert _localPredict != null;
}
@Override public void map(Chunk chks[], NewChunk cpreds[] ) {
if (isCancelled() || _j != null && _j.stop_requested()) return;
Chunk weightsChunk = _hasWeights && _computeMetrics ? chks[_output.weightsIdx()] : null;
Chunk offsetChunk = _output.hasOffset() ? chks[_output.offsetIdx()] : null;
Chunk treatmentChunk = _output.hasTreatment() ? chks[_output.treatmentIdx()] : null;
Chunk responseChunk = null;
float [] actual = null;
_mb = Model.this.makeMetricBuilder(_domain);
if (_computeMetrics) {
if (_output.hasTreatment()){
actual = new float[2];
responseChunk = chks[_output.responseIdx()];
treatmentChunk = chks[_output.treatmentIdx()];
} else if (_output.hasResponse()) {
actual = new float[1];
responseChunk = chks[_output.responseIdx()];
} else
actual = new float[chks.length];
}
int len = chks[0]._len;
try (BigScoreChunkPredict predict = _localPredict.initMap(_fr, chks)) {
double[] tmp = new double[_output.nfeatures()];
for (int row = 0; row < len; row++) {
double weight = weightsChunk != null ? weightsChunk.atd(row) : 1;
if (weight == 0) {
if (_makePreds) {
for (int c = 0; c < _npredcols; c++) // Output predictions; sized for train only (excludes extra test classes)
cpreds[c].addNum(0);
}
continue;
}
double offset = offsetChunk != null ? offsetChunk.atd(row) : 0;
double[] preds = predict.score0(chks, offset, row, tmp, _mb._work);
if (_computeMetrics) {
if (responseChunk != null) {
actual[0] = (float) responseChunk.atd(row);
} else {
for (int i = 0; i < actual.length; ++i)
actual[i] = (float) data(chks, row, i);
}
if (treatmentChunk != null) {
actual[1] = (float) treatmentChunk.atd(row);
}
_mb.perRow(preds, actual, weight, offset, Model.this);
// Handle custom metric
customMetricPerRow(preds, actual, weight, offset, Model.this);
}
if (_makePreds) {
for (int c = 0; c < _npredcols; c++) // Output predictions; sized for train only (excludes extra test classes)
cpreds[c].addNum(preds[c]);
}
}
}
}
@Override
public double[] score0(Chunk[] chks, double offset, int row_in_chunk, double[] tmp, double[] preds) {
return Model.this.score0(chks, offset, row_in_chunk, tmp, preds);
}
@Override
public BigScoreChunkPredict initMap(final Frame fr, final Chunk[] chks) {
return this;
}
@Override
public void close() {
// nothing to do - meant to be overridden
}
@Override public void reduce(BigScore bs ) {
super.reduce(bs);
if (_mb != null) _mb.reduce(bs._mb);
}
@Override protected void postGlobal() {
super.postGlobal();
if(_mb != null) {
_mb.postGlobal(getComputedCustomMetric());
if (null != cFuncRef)
_mb._CMetricScoringTask = (CMetricScoringTask) this;
}
}
}
public interface BigScorePredict {
BigScoreChunkPredict initMap(final Frame fr, final Chunk chks[]);
}
public interface BigScoreChunkPredict extends AutoCloseable {
double[] score0(Chunk chks[], double offset, int row_in_chunk, double[] tmp, double[] preds);
@Override
void close();
}
protected BigScorePredict setupBigScorePredict(BigScore bs) { return bs; };
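// Override sketch (hypothetical): a subclass can supply its own prediction path,
// e.g. one backed by a precomputed scoring structure:
//   @Override protected BigScorePredict setupBigScorePredict(BigScore bs) {
//     return (fr, chks) -> new BigScoreChunkPredict() {
//       @Override public double[] score0(Chunk[] c, double off, int row, double[] tmp, double[] p) {
//         return myFastScore0(c, off, row, tmp, p); // hypothetical custom routine
//       }
//       @Override public void close() { /* release per-chunk resources */ }
//     };
//   }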
// Override this if your model needs data preprocessing (on-the-fly standardization, NA handling)
protected double data(Chunk[] chks, int row, int col) {
return chks[col].atd(row);
}
/** Bulk scoring API for one row. Chunks are all compatible with the model,
 * and the last Chunks are expected to hold the final distribution and prediction.
* Default method is to just load the data into the tmp array, then call
* subclass scoring logic. */
public double[] score0( Chunk chks[], int row_in_chunk, double[] tmp, double[] preds ) {
return score0(chks, 0, row_in_chunk, tmp, preds);
}
public double[] score0( Chunk chks[], double offset, int row_in_chunk, double[] tmp, double[] preds ) {
assert(_output.nfeatures() == tmp.length);
for( int i=0; i< tmp.length; i++ )
tmp[i] = chks[i].atd(row_in_chunk);
double [] scored = score0(tmp, preds, offset);
if(needsPostProcess() && isSupervised())
score0PostProcessSupervised(scored, tmp);
return scored;
}
/**
 * Implementations can disable post-processing of predictions by overriding this method (e.g. GLM)
* @return true, if output of score0 needs post-processing, false if the output is final
*/
protected boolean needsPostProcess() {
return true;
}
protected final void score0PostProcessSupervised(double[] scored, double[] tmp) {
// Correct probabilities obtained from training on oversampled data back to original distribution
// C.f. http://gking.harvard.edu/files/0s.pdf Eq.(27)
if( _output.isClassifier()) {
if (_parms._balance_classes)
GenModel.correctProbabilities(scored, _output._priorClassDist, _output._modelClassDist);
//assign label at the very end (after potentially correcting probabilities)
if(!_output.hasTreatment()) {
scored[0] = hex.genmodel.GenModel.getPrediction(scored, _output._priorClassDist, tmp, defaultThreshold());
}
}
}
/** Subclasses implement the scoring logic. The data is pre-loaded into a
* re-used temp array, in the order the model expects. The predictions are
* loaded into the re-used temp array, which is also returned. */
protected abstract double[] score0(double data[/*ncols*/], double preds[/*nclasses+1*/]);
/**Override scoring logic for models that handle weight/offset**/
protected double[] score0(double data[/*ncols*/], double preds[/*nclasses+1*/], double offset) {
assert (offset == 0) : "Override this method for non-trivial offset!";
return score0(data, preds);
}
// Version where the user has just ponied-up an array of data to be scored.
// Data must be in proper order. Handy for JUnit tests.
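// Example (hypothetical feature values, ordered as the model expects):
//   double out = model.score(new double[]{5.1, 3.5, 1.4, 0.2});
//   // regression: the predicted value; classification: the index of the most probable class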
public double score(double[] data){
double[] pred = score0(data, new double[_output.nclasses()]);
return _output.nclasses() == 1 ? pred[0] /* regression */ : ArrayUtils.maxIndex(pred) /* classification */;
}
@Override protected Futures remove_impl(Futures fs, boolean cascade) {
if (_output._model_metrics != null)
for( Key k : _output._model_metrics )
Keyed.remove(k, fs, true);
if (cascade) {
deleteCrossValidationFoldAssignment();
deleteCrossValidationPreds();
deleteCrossValidationModels();
}
cleanUp(_toDelete);
return super.remove_impl(fs, cascade);
}
/** Write out K/V pairs, in this case model metrics. */
@Override protected AutoBuffer writeAll_impl(AutoBuffer ab) {
if (_output._model_metrics != null)
for( Key k : _output._model_metrics )
ab.putKey(k);
return super.writeAll_impl(ab);
}
@Override protected Keyed readAll_impl(AutoBuffer ab, Futures fs) {
if (_output._model_metrics != null)
for( Key k : _output._model_metrics )
ab.getKey(k,fs); // Load model metrics
return super.readAll_impl(ab,fs);
}
@Override protected long checksum_impl() {
return _parms.checksum(null) * _output.checksum_impl();
}
/**
* Override this in models that support serialization into the MOJO format.
* @return a class that inherits from ModelMojoWriter
*/
public ModelMojoWriter getMojo() {
throw H2O.unimpl("MOJO format is not available for " + _parms.fullName() + " models.");
}
/**
* Specify categorical encoding that should be applied before running score0 method of POJO/MOJO.
* Default: AUTO - POJO/MOJO handles encoding or no transformation of input is needed.
* @return instance of CategoricalEncoding supported by GenModel or null if encoding is not supported.
*/
protected CategoricalEncoding getGenModelEncoding() {
return CategoricalEncoding.AUTO;
}
// ==========================================================================
/** Return a String which is a valid Java program representing a class that
* implements the Model. The Java is of the form:
* <pre>
* class UUIDxxxxModel {
* public static final String NAMES[] = { ....column names... }
* public static final String DOMAINS[][] = { ....domain names... }
* // Pass in data in a double[], pre-aligned to the Model's requirements.
* // Jam predictions into the preds[] array; preds[0] is reserved for the
* // main prediction (class for classifiers or value for regression),
* // and remaining columns hold a probability distribution for classifiers.
* double[] predict( double data[], double preds[] );
* double[] map( HashMap < String,Double > row, double data[] );
* // Does the mapping lookup for every row, no allocation
* double[] predict( HashMap < String,Double > row, double data[], double preds[] );
* // Allocates a double[] for every row
* double[] predict( HashMap < String,Double > row, double preds[] );
* // Allocates a double[] and a double[] for every row
* double[] predict( HashMap < String,Double > row );
* }
* </pre>
*/
public final String toJava(boolean preview, boolean verboseCode) {
// 32k buffer by default
ByteArrayOutputStream os = new ByteArrayOutputStream(Short.MAX_VALUE);
// We do not need to close BAOS
/* ignore returned stream */ toJava(os, preview, verboseCode);
return os.toString();
}
public final SBPrintStream toJava(OutputStream os, boolean preview, boolean verboseCode) {
if (preview /* && toJavaCheckTooBig() */) {
os = new LineLimitOutputStreamWrapper(os, 1000);
}
return toJava(new SBPrintStream(os), preview, verboseCode);
}
protected SBPrintStream toJava(SBPrintStream sb, boolean isGeneratingPreview, boolean verboseCode) {
PojoWriter writer = makePojoWriter();
CodeGeneratorPipeline fileCtx = new CodeGeneratorPipeline(); // preserve file context
String modelName = JCodeGen.toJavaId(_key.toString());
// HEADER
sb.p("/*").nl();
sb.p(" Licensed under the Apache License, Version 2.0").nl();
sb.p(" http://www.apache.org/licenses/LICENSE-2.0.html").nl();
sb.nl();
sb.p(" AUTOGENERATED BY H2O at ").p(new DateTime().toString()).nl();
sb.p(" ").p(H2O.ABV.projectVersion()).nl();
sb.p(" ").nl();
sb.p(" Standalone prediction code with sample test data for ").p(toJavaModelClassName()).p(" named ").p(modelName)
.nl();
sb.nl();
sb.p(" How to download, compile and execute:").nl();
sb.p(" mkdir tmpdir").nl();
sb.p(" cd tmpdir").nl();
sb.p(" curl http:/").p(H2O.SELF.toString()).p("/3/h2o-genmodel.jar > h2o-genmodel.jar").nl();
sb.p(" curl http:/").p(H2O.SELF.toString()).p("/3/Models.java/").pobj(_key).p(" > ").p(modelName).p(".java").nl();
sb.p(" javac -cp h2o-genmodel.jar -J-Xmx2g -J-XX:MaxPermSize=128m ").p(modelName).p(".java").nl();
// Intentionally disabled since there is no main method in generated code
// sb.p("// java -cp h2o-genmodel.jar:. -Xmx2g -XX:MaxPermSize=256m -XX:ReservedCodeCacheSize=256m ").p(modelName).nl();
sb.nl();
sb.p(" (Note: Try java argument -XX:+PrintCompilation to show runtime JIT compiler behavior.)").nl();
if (_parms._offset_column != null) {
sb.nl();
sb.nl();
sb.nl();
sb.p(" NOTE: Java model export does not support offset_column.").nl();
sb.nl();
Log.warn("Java model export does not support offset_column.");
}
if (isGeneratingPreview && writer.toJavaCheckTooBig()) {
sb.nl();
sb.nl();
sb.nl();
sb.p(" NOTE: Java model is too large to preview, please download as shown above.").nl();
sb.nl();
return sb;
}
sb.p("*/").nl();
sb.p("import java.util.Map;").nl();
sb.p("import hex.genmodel.GenModel;").nl();
sb.p("import hex.genmodel.annotations.ModelPojo;").nl();
for (Class<?> clz : getPojoInterfaces())
sb.p("import ").p(clz.getName()).p(";").nl();
sb.nl();
sb.p("@ModelPojo(name=\"").p(modelName).p("\", algorithm=\"").p(toJavaAlgo()).p("\")").nl();
sb.p("public class ").p(modelName).p(" extends GenModel ").p(makeImplementsClause()).p("{").nl().ii(1);
sb.ip("public hex.ModelCategory getModelCategory() { return hex.ModelCategory." + _output
.getModelCategory() + "; }").nl();
writer.toJavaInit(sb, fileCtx).nl();
toJavaNAMES(sb, fileCtx);
CategoricalEncoding encoding = getGenModelEncoding();
assert encoding != null;
boolean writeOrigs = encoding != CategoricalEncoding.AUTO; // export orig names & domains if POJO/MOJO doesn't handle encoding itself
if (writeOrigs && _output._origNames != null)
toJavaOrigNAMES(sb, fileCtx);
toJavaNCLASSES(sb);
toJavaDOMAINS(sb, fileCtx);
if (writeOrigs && _output._origDomains != null)
toJavaOrigDOMAINS(sb, fileCtx);
toJavaPROB(sb);
toJavaSuper(modelName, sb);
sb.p(" public String getUUID() { return Long.toString("+toJavaUUID()+"L); }").nl();
toJavaPredict(writer, sb, fileCtx, verboseCode);
writer.toJavaTransform(sb, fileCtx, verboseCode);
sb.p("}").nl().di(1);
fileCtx.generate(sb); // Append file context
sb.nl();
return sb;
}
protected PojoWriter makePojoWriter() {
return new DelegatingPojoWriter(this);
}
protected String toJavaModelClassName() {
return this.getClass().getSimpleName();
}
protected String toJavaAlgo() {
return this.getClass().getSimpleName().toLowerCase().replace("model", "");
}
protected String toJavaUUID() {
return String.valueOf(checksum());
}
protected Class<?>[] getPojoInterfaces() { return new Class<?>[0]; }
private SB makeImplementsClause() {
SB sb = new SB();
Class<?>[] interfaces = getPojoInterfaces();
if (interfaces.length == 0)
return sb;
sb.p("implements ");
for (int i = 0; i < interfaces.length - 1; i++)
sb.p(interfaces[i].getSimpleName()).p(", ");
sb.p(interfaces[interfaces.length - 1].getSimpleName()).p(' ');
return sb;
}
/** Generate implementation for super class. */
private SBPrintStream toJavaSuper(String modelName, SBPrintStream sb) {
String responseName = isSupervised() ? '"' + _output.responseName() + '"': null;
return sb.nl().ip("public " + modelName + "() { super(NAMES,DOMAINS," + responseName + "); }").nl();
}
private SBPrintStream toJavaNAMES(SBPrintStream sb, CodeGeneratorPipeline fileCtx) {
final String modelName = JCodeGen.toJavaId(_key.toString());
final String namesHolderClassName = "NamesHolder_"+modelName;
sb.i().p("// ").p("Names of columns used by model.").nl();
sb.i().p("public static final String[] NAMES = "+namesHolderClassName+".VALUES;").nl();
// Generate class which fills the names into array
fileCtx.add(new CodeGenerator() {
@Override
public void generate(JCodeSB out) {
out.i().p("// The class representing training column names").nl();
JCodeGen.toClassWithArray(out, null, namesHolderClassName,
Arrays.copyOf(_output._names, _output.nfeatures()));
}
});
return sb;
}
private SBPrintStream toJavaOrigNAMES(SBPrintStream sb, CodeGeneratorPipeline fileCtx) {
final String modelName = JCodeGen.toJavaId(_key.toString());
final String namesHolderClassName = "OrigNamesHolder_"+modelName;
sb.i().p("// ").p("Original names of columns used by model.").nl();
sb.i().p("public static final String[] ORIG_NAMES = "+namesHolderClassName+".VALUES;").nl();
// Generate class which fills the names into array
fileCtx.add(new CodeGenerator() {
@Override
public void generate(JCodeSB out) {
out.i().p("// The class representing original training column names").nl();
int nResponse = _output._names.length - _output.nfeatures();
JCodeGen.toClassWithArray(out, null, namesHolderClassName,
Arrays.copyOf(_output._origNames, _output._origNames.length - nResponse));
}
});
sb.nl();
sb.ip("@Override").nl();
sb.ip("public String[] getOrigNames() {").nl();
sb.ii(1).ip("return ORIG_NAMES;").nl();
sb.di(1).ip("}").nl();
return sb;
}
private SBPrintStream toJavaNCLASSES(SBPrintStream sb ) {
return _output.isClassifier() ? JCodeGen.toStaticVar(sb, "NCLASSES",
_output.nclasses(),
"Number of output classes included in training data response column.")
: sb;
}
private SBPrintStream toJavaDOMAINS(SBPrintStream sb, CodeGeneratorPipeline fileCtx) {
String modelName = JCodeGen.toJavaId(_key.toString());
sb.nl();
sb.ip("// Column domains. The last array contains domain of response column.").nl();
sb.ip("public static final String[][] DOMAINS = new String[][] {").nl();
String [][] domains = scoringDomains();
for (int i=0; i< domains.length; i++) {
final int idx = i;
final String[] dom = domains[i];
final String colInfoClazz = modelName+"_ColInfo_"+i;
sb.i(1).p("/* ").p(_output._names[i]).p(" */ ");
if (dom != null) sb.p(colInfoClazz).p(".VALUES"); else sb.p("null");
if (i!=domains.length-1) sb.p(',');
sb.nl();
// A per-column class is only generated when the column has a domain;
// it holds nothing but the String array with the domain values
if (dom != null) {
fileCtx.add(new CodeGenerator() {
@Override
public void generate(JCodeSB out) {
out.ip("// The class representing column ").p(_output._names[idx]).nl();
JCodeGen.toClassWithArray(out, null, colInfoClazz, dom);
}
}
);
}
}
return sb.ip("};").nl();
}
private SBPrintStream toJavaOrigDOMAINS(SBPrintStream sb, CodeGeneratorPipeline fileCtx) {
String modelName = JCodeGen.toJavaId(_key.toString());
sb.nl();
sb.ip("// Original column domains. The last array contains domain of response column.").nl();
sb.ip("public static final String[][] ORIG_DOMAINS = new String[][] {").nl();
String [][] domains = _output._origDomains;
for (int i=0; i< domains.length; i++) {
final int idx = i;
final String[] dom = domains[i];
final String colInfoClazz = modelName+"_OrigColInfo_"+i;
sb.i(1).p("/* ").p(_output._origNames[i]).p(" */ ");
if (dom != null) sb.p(colInfoClazz).p(".VALUES"); else sb.p("null");
if (i!=domains.length-1) sb.p(',');
sb.nl();
// A per-column class is only generated when the column has a domain;
// it holds nothing but the String array with the domain values
if (dom != null) {
fileCtx.add(new CodeGenerator() {
@Override
public void generate(JCodeSB out) {
out.ip("// The class representing the original column ").p(_output._names[idx]).nl();
JCodeGen.toClassWithArray(out, null, colInfoClazz, dom);
}
}
);
}
}
sb.ip("};").nl();
sb.nl();
sb.ip("@Override").nl();
sb.ip("public String[][] getOrigDomainValues() {").nl();
sb.ii(1).ip("return ORIG_DOMAINS;").nl();
sb.di(1).ip("}").nl();
return sb;
}
private SBPrintStream toJavaPROB(SBPrintStream sb) {
if(isSupervised()) {
JCodeGen.toStaticVar(sb, "PRIOR_CLASS_DISTRIB", _output._priorClassDist, "Prior class distribution");
JCodeGen.toStaticVar(sb, "MODEL_CLASS_DISTRIB", _output._modelClassDist, "Class distribution used for model building");
}
return sb;
}
protected boolean toJavaCheckTooBig() {
Log.warn("toJavaCheckTooBig must be overridden for this model type to render it in the browser");
return true;
}
// Override in subclasses to provide some top-level model-specific goodness
protected SBPrintStream toJavaInit(SBPrintStream sb, CodeGeneratorPipeline fileContext) { return sb; }
// Override in subclasses to provide some inside 'predict' call goodness
// Method returns code which should be appended into generated top level class after
// predict method.
protected void toJavaPredictBody(SBPrintStream body,
CodeGeneratorPipeline classCtx,
CodeGeneratorPipeline fileCtx,
boolean verboseCode) {
throw new UnsupportedOperationException("This model type does not support conversion to Java");
}
// Generates optional "transform" method, transform method will have a different signature depending on the algo
// Empty by default - can be overriden by Model implementation
protected SBPrintStream toJavaTransform(SBPrintStream ccsb,
CodeGeneratorPipeline fileCtx,
boolean verboseCode) { // ccsb = classContext
return ccsb;
}
// Wrapper around the main predict call, including the signature and return value
private SBPrintStream toJavaPredict(PojoWriter builder,
SBPrintStream ccsb,
CodeGeneratorPipeline fileCtx,
boolean verboseCode) { // ccsb = classContext
ccsb.nl();
ccsb.ip("// Pass in data in a double[], pre-aligned to the Model's requirements.").nl();
ccsb.ip("// Jam predictions into the preds[] array; preds[0] is reserved for the").nl();
ccsb.ip("// main prediction (class for classifiers or value for regression),").nl();
ccsb.ip("// and remaining columns hold a probability distribution for classifiers.").nl();
ccsb.ip("public final double[] score0( double[] data, double[] preds ) {").nl();
CodeGeneratorPipeline classCtx = new CodeGeneratorPipeline(); //new SB().ii(1);
builder.toJavaPredictBody(ccsb.ii(1), classCtx, fileCtx, verboseCode);
ccsb.ip("return preds;").nl();
ccsb.di(1).ip("}").nl();
// Output class context
classCtx.generate(ccsb.ii(1));
ccsb.di(1);
return ccsb;
}
// Convenience method for testing: build Java, convert it to a class &
// execute it: compare the results of the new class's (JIT'd) scoring with
// the built-in (interpreted) scoring on this dataset. Returns true if all
// is well, false if there are any mismatches. Throws if there is any error
// (typically an AssertionError or unable to compile the POJO).
public boolean testJavaScoring(Frame data, Frame model_predictions, double rel_epsilon) {
return testJavaScoring(data, model_predictions, rel_epsilon,
JavaScoringOptions.DEFAULT._abs_epsilon, JavaScoringOptions.DEFAULT._fraction);
}
public boolean testJavaScoring(Frame data, Frame model_predictions, double rel_epsilon, double abs_epsilon) {
return testJavaScoring(data, model_predictions, rel_epsilon, abs_epsilon,
JavaScoringOptions.DEFAULT._fraction);
}
public boolean testJavaScoring(Frame data, Frame model_predictions, double rel_epsilon, double abs_epsilon, double fraction) {
return testJavaScoring(data, model_predictions, new EasyPredictModelWrapper.Config(), rel_epsilon, abs_epsilon, fraction);
}
public boolean testJavaScoring(Frame data, Frame model_predictions, EasyPredictModelWrapper.Config config,
double rel_epsilon, double abs_epsilon, double fraction) {
JavaScoringOptions options = new JavaScoringOptions();
options._abs_epsilon = abs_epsilon;
options._fraction = fraction;
options._config = config;
return testJavaScoring(data, model_predictions, rel_epsilon, options);
}
public static class JavaScoringOptions {
private static final JavaScoringOptions DEFAULT = new JavaScoringOptions();
public double _abs_epsilon = 1e-15;
public double _fraction = 1;
public boolean _disable_pojo = false;
public boolean _disable_mojo = false;
EasyPredictModelWrapper.Config _config = new EasyPredictModelWrapper.Config();
}
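// Usage sketch (hypothetical frames and tolerances):
//   Model.JavaScoringOptions opts = new Model.JavaScoringOptions();
//   opts._abs_epsilon = 1e-10;
//   opts._fraction = 0.1;        // spot-check ~10% of the rows
//   boolean ok = model.testJavaScoring(data, predictions, 1e-6, opts);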
public boolean testJavaScoring(Frame data, Frame model_predictions, double rel_epsilon, JavaScoringOptions options) {
ModelBuilder<?, P, ?> mb = ModelBuilder.make(_parms.algoName().toLowerCase(), null, null);
mb._parms = _parms;
boolean havePojo = mb.havePojo() && !options._disable_pojo;
boolean haveMojo = mb.haveMojo() && !options._disable_mojo;
Random rnd = RandomUtils.getRNG(data.byteSize());
assert data.numRows() == model_predictions.numRows();
Frame fr = new Frame(data);
boolean computeMetrics = data.vec(_output.responseName()) != null && !data.vec(_output.responseName()).isBad();
try {
String[] warns = adaptTestForJavaScoring(fr, computeMetrics);
if( warns.length > 0 )
System.err.println(Arrays.toString(warns));
// Output is in the model's domain, but needs to be mapped to the scored
// dataset's domain.
int[] omap = null;
if( _output.isClassifier() && model_predictions.vec(0).domain() != null) {
Vec actual = fr.vec(_output.responseName());
String[] sdomain = actual == null ? null : actual.domain(); // Scored/test domain; can be null
String[] mdomain = model_predictions.vec(0).domain(); // Domain of predictions (union of test and train)
if( sdomain != null && !Arrays.equals(mdomain, sdomain)) {
omap = CategoricalWrappedVec.computeMap(mdomain,sdomain); // Map from model-domain to scoring-domain
}
}
String modelName = JCodeGen.toJavaId(_key.toString());
boolean preview = false;
GenModel genmodel = null;
Vec[] dvecs = fr.vecs();
Vec[] pvecs = model_predictions.vecs();
double[] features = null;
int num_errors = 0;
int num_total = 0;
// First try internal POJO via fast double[] API
if (havePojo) {
try {
String java_text = toJava(preview, true);
Class clz = JCodeGen.compile(modelName,java_text);
genmodel = (GenModel)clz.newInstance();
} catch (Exception e) {
e.printStackTrace();
throw new IllegalStateException("Internal POJO compilation failed",e);
}
// Check that POJO has the expected interfaces
for (Class<?> clz : getPojoInterfaces())
if (! clz.isInstance(genmodel))
throw new IllegalStateException("POJO is expected to implement interface " + clz.getName());
// Check some model metadata
assert _output.responseName() == null || _output.responseName().equals(genmodel.getResponseName());
features = MemoryManager.malloc8d(genmodel.nfeatures());
double[] predictions = MemoryManager.malloc8d(genmodel.nclasses() + 1);
// Compare predictions, counting mis-predicts
final int compVecLen = _output.isBinomialClassifier() ? 3 : pvecs.length; // POJO doesn't have calibrated probs
for (int row=0; row<fr.numRows(); row++) { // For all rows, single-threaded
if (rnd.nextDouble() >= options._fraction) continue;
num_total++;
// Native Java API
for (int col = 0; col < features.length; col++) // Build feature set
features[col] = dvecs[col].at(row);
genmodel.score0(features, predictions); // POJO predictions
for (int col = _output.isClassifier() ? 1 : 0; col < compVecLen; col++) { // Compare predictions
double d = pvecs[col].at(row); // Load internal scoring predictions
if (col == 0 && omap != null) d = omap[(int) d]; // map categorical response to scoring domain
if (!MathUtils.compare(predictions[col], d, options._abs_epsilon, rel_epsilon)) {
if (num_errors++ < 10)
System.err.println("Predictions mismatch, row " + row + ", col " + model_predictions._names[col] + ", internal prediction=" + d + ", POJO prediction=" + predictions[col]);
break;
}
}
}
}
// EasyPredict API with POJO and/or MOJO
for (int i = 0; i < 2; ++i) {
if (i == 0 && !havePojo) continue;
if (i == 1 && !haveMojo) continue;
if (i == 1) { // MOJO
final String filename = modelName + ".zip";
StreamingSchema ss = new StreamingSchema(getMojo(), filename);
try {
FileOutputStream os = new FileOutputStream(ss.getFilename());
ss.getStreamWriter().writeTo(os);
os.close();
genmodel = MojoModel.load(filename, true);
checkSerializable((MojoModel) genmodel);
features = MemoryManager.malloc8d(genmodel._names.length);
} catch (IOException e1) {
e1.printStackTrace();
throw new IllegalStateException("Internal MOJO loading failed", e1);
} finally {
boolean deleted = new File(filename).delete();
if (!deleted) Log.warn("Failed to delete the file");
}
if (! Arrays.equals(model_predictions.names(), genmodel.getOutputNames())) {
if (_parms._distribution == DistributionFamily.quasibinomial) {
Log.warn("Quasibinomial doesn't correctly return output names in MOJO");
} else if (genmodel.getModelCategory() == ModelCategory.Clustering && Arrays.equals(genmodel.getOutputNames(), new String[]{"cluster"})) {
Log.warn("Known inconsistency between MOJO output naming and H2O predict - cluster vs predict");
} else if (genmodel instanceof GlrmMojoModel) {
Log.trace("GLRM is being tested for 'reconstruct', not the default score0 - dim reduction, unable to compare output names");
} else if (false) // intentionally disabled: remaining naming mismatches are tolerated here
throw new IllegalStateException("GenModel output naming doesn't match provided scored frame. " +
"Expected: " + Arrays.toString(model_predictions.names()) +
", Actual: " + Arrays.toString(genmodel.getOutputNames()));
}
}
if (genmodel instanceof GlrmMojoModel) {
try {
options._config.setModel(genmodel).setEnableGLRMReconstrut(true);
} catch (IOException e) {
e.printStackTrace();
}
}
SharedTreeGraph[] trees = null;
if (genmodel instanceof SharedTreeMojoModel) {
SharedTreeMojoModel treemodel = (SharedTreeMojoModel) genmodel;
final int ntrees = treemodel.getNTreeGroups();
trees = new SharedTreeGraph[ntrees];
for (int t = 0; t < ntrees; t++)
trees[t] = treemodel.computeGraph(t);
}
EasyPredictModelWrapper epmw;
try {
options._config.setModel(genmodel)
.setConvertUnknownCategoricalLevelsToNa(true)
.setEnableLeafAssignment(genmodel instanceof SharedTreeMojoModel)
.setEnableStagedProbabilities(genmodel instanceof SharedTreeMojoModel)
.setUseExternalEncoding(true); // input Frame is already adapted!
epmw = new EasyPredictModelWrapper(options._config);
} catch (IOException e) {
throw new RuntimeException(e);
}
RowData rowData = new RowData();
BufferedString bStr = new BufferedString();
final int compVecLen = i == 0 && _output.isBinomialClassifier() ? 3 : pvecs.length; // POJO doesn't have calibrated probs
for (int row = 0; row < fr.numRows(); row++) { // For all rows, single-threaded
if (rnd.nextDouble() >= options._fraction) continue;
if (genmodel instanceof GlrmMojoModel) // enable random seed setting to ensure reproducibility
((GlrmMojoModel) genmodel)._rcnt = row;
// Generate input row
for (int col = 0; col < features.length; col++) {
if (dvecs[col].isString()) {
rowData.put(genmodel._names[col], dvecs[col].atStr(bStr, row).toString());
} else {
double val = dvecs[col].at(row);
rowData.put(
genmodel._names[col],
genmodel._domains[col] == null ? (Double) val
: Double.isNaN(val) ? val // missing categorical values are kept as NaN, the score0 logic passes it on to bitSetContains()
: (int) val < genmodel._domains[col].length ? genmodel._domains[col][(int) val] : "UnknownLevel"); //unseen levels are treated as such
}
}
// Make a prediction
AbstractPrediction p;
try {
if (genmodel instanceof GlrmMojoModel) // enable random seed setting to ensure reproducibility
((GlrmMojoModel) genmodel)._rcnt = row;
if (genmodel._offsetColumn != null) {
double offset = fr.vec(genmodel._offsetColumn).at(row);
// TODO: MOJO API is cumbersome in this case - will be fixed in https://github.com/h2oai/h2o-3/issues/8560
switch (genmodel.getModelCategory()) {
case Regression:
p = epmw.predictRegression(rowData, offset);
break;
case Binomial:
p = epmw.predictBinomial(rowData, offset);
break;
case Multinomial:
p = epmw.predictMultinomial(rowData, offset);
break;
case Ordinal:
p = epmw.predictOrdinal(rowData, offset);
break;
case KLime:
p = epmw.predictKLime(rowData);
break;
case CoxPH:
p = epmw.predictCoxPH(rowData, offset);
break;
default:
throw new UnsupportedOperationException("Predicting with offset current not supported for " + genmodel.getModelCategory());
}
} else {
p = epmw.predict(rowData);
}
} catch (PredictException e) {
num_errors++;
if (num_errors < 20) {
System.err.println("EasyPredict threw an exception when predicting row " + rowData);
e.printStackTrace();
}
continue;
}
// Convert model predictions and "internal" predictions into the same shape
double[] expected_preds = new double[pvecs.length];
double[] actual_preds = new double[pvecs.length];
String[] decisionPath = null;
int[] nodeIds = null;
for (int col = 0; col < compVecLen; col++) { // Compare predictions
double d = pvecs[col].at(row); // Load internal scoring predictions
if (col == 0 && omap != null) d = omap[(int) d]; // map categorical response to scoring domain
double d2 = Double.NaN;
switch (genmodel.getModelCategory()) {
case AutoEncoder:
d2 = ((AutoEncoderModelPrediction) p).reconstructed[col];
break;
case Clustering:
d2 = ((ClusteringModelPrediction) p).cluster;
break;
case Regression:
RegressionModelPrediction rmp = (RegressionModelPrediction) p;
d2 = rmp.value;
decisionPath = rmp.leafNodeAssignments;
nodeIds = rmp.leafNodeAssignmentIds;
break;
case Binomial:
BinomialModelPrediction bmp = (BinomialModelPrediction) p;
d2 = (col == 0) ?
bmp.labelIndex
:
col > bmp.classProbabilities.length && bmp.calibratedClassProbabilities != null ?
bmp.calibratedClassProbabilities[col - bmp.classProbabilities.length - 1]
:
bmp.classProbabilities[col - 1];
decisionPath = bmp.leafNodeAssignments;
nodeIds = bmp.leafNodeAssignmentIds;
break;
case Ordinal:
OrdinalModelPrediction orp = (OrdinalModelPrediction) p;
d2 = (col == 0) ? orp.labelIndex : orp.classProbabilities[col - 1];
break;
case Multinomial:
MultinomialModelPrediction mmp = (MultinomialModelPrediction) p;
d2 = (col == 0) ? mmp.labelIndex : mmp.classProbabilities[col - 1];
decisionPath = mmp.leafNodeAssignments;
nodeIds = mmp.leafNodeAssignmentIds;
break;
case AnomalyDetection:
AnomalyDetectionPrediction adp = (AnomalyDetectionPrediction) p;
d2 = adp.toPreds()[col];
decisionPath = adp.leafNodeAssignments;
nodeIds = adp.leafNodeAssignmentIds;
break;
case BinomialUplift:
UpliftBinomialModelPrediction bup = (UpliftBinomialModelPrediction) p;
d2 = bup.predictions[col];
break;
case DimReduction:
d2 = (genmodel instanceof GlrmMojoModel)?((DimReductionModelPrediction) p).reconstructed[col]:
((DimReductionModelPrediction) p).dimensions[col]; // look at the reconstructed matrix
break;
case CoxPH:
d2 = ((CoxPHModelPrediction) p).value;
break;
}
expected_preds[col] = d;
actual_preds[col] = d2;
}
if (trees != null && (genmodel.getModelCategory() != ModelCategory.BinomialUplift) /* UpliftModel doesn't support decisionPath yet */) {
for (int t = 0; t < trees.length; t++) {
SharedTreeGraph tree = trees[t];
SharedTreeNode node = tree.walkNodes(0, decisionPath[t]);
if (node == null || node.getNodeNumber() != nodeIds[t]) {
throw new IllegalStateException("Path to leaf node is inconsistent with predicted node id: path=" + decisionPath[t] + ", nodeId=" + nodeIds[t]);
}
}
}
// Verify the correctness of the prediction
num_total++;
for (int col = genmodel.isClassifier() ? 1 : 0; col < compVecLen; col++) {
if (!MathUtils.compare(actual_preds[col], expected_preds[col], options._abs_epsilon, rel_epsilon)) {
num_errors++;
if (num_errors < 20) {
System.err.println( (i == 0 ? "POJO" : "MOJO") + " EasyPredict Predictions mismatch for row " + row + ":" + rowData);
System.err.println(" Expected predictions: " + Arrays.toString(expected_preds));
System.err.println(" Actual predictions: " + Arrays.toString(actual_preds));
System.err.println("Difference: " + Math.abs(expected_preds[expected_preds.length-1]-actual_preds[actual_preds.length-1]));
}
break;
}
}
}
}
if (num_errors != 0)
System.err.println("Number of errors: " + num_errors + (num_errors > 20 ? " (only first 20 are shown)": "") +
" out of " + num_total + " rows tested.");
return num_errors == 0;
} finally {
Frame.deleteTempFrameAndItsNonSharedVecs(fr, data); // Remove temp keys.
}
}
protected String[] adaptTestForJavaScoring(Frame test, boolean computeMetrics) {
return adaptTestForTrain(test, true, computeMetrics);
}
private static void checkSerializable(MojoModel mojoModel) {
try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
ObjectOutput out = new ObjectOutputStream(bos)) {
out.writeObject(mojoModel);
out.flush();
} catch (IOException e) {
throw new RuntimeException("MOJO cannot be serialized", e);
}
}
static <T extends Lockable<T>> int deleteAll(Key<T>[] keys) {
int c = 0;
for (Key k : keys) {
if (Keyed.remove(k)) c++;
}
return c;
}
/**
* Delete all CV models associated with this model's output from the DKV.
*/
public void deleteCrossValidationModels() {
if (_output._cross_validation_models != null) {
Log.info("Cleaning up CV Models for " + _key);
int count = deleteAll(_output._cross_validation_models);
Log.info(count+" CV models were removed");
}
}
/**
* Delete all CV predictions associated with this model's output from the DKV.
*/
public void deleteCrossValidationPreds() {
if (_output._cross_validation_predictions != null) {
Log.info("Cleaning up CV Predictions for " + _key);
int count = deleteAll(_output._cross_validation_predictions);
Log.info(count+" CV predictions were removed");
}
Keyed.remove(_output._cross_validation_holdout_predictions_frame_id);
}
public void deleteCrossValidationFoldAssignment() {
Keyed.remove(_output._cross_validation_fold_assignment_frame_id);
}
@Override public String toString() {
return _output.toString();
}
/** Model stream writer - output Java code representation of model. */
public class JavaModelStreamWriter implements StreamWriter {
/** Show only preview */
private final boolean preview;
public JavaModelStreamWriter(boolean preview) {
this.preview = preview;
}
@Override
public void writeTo(OutputStream os, StreamWriteOption... options) {
toJava(os, preview, true);
}
}
@Override public Class<KeyV3.ModelKeyV3> makeSchema() { return KeyV3.ModelKeyV3.class; }
public static Frame makeInteractions(Frame fr, boolean valid, InteractionPair[] interactions,
final boolean useAllFactorLevels, final boolean skipMissing, final boolean standardize) {
Vec anyTrainVec = fr.anyVec();
Vec[] interactionVecs = new Vec[interactions.length];
String[] interactionNames = new String[interactions.length];
int idx = 0;
for (InteractionPair ip : interactions) {
interactionNames[idx] = fr.name(ip._v1) + "_" + fr.name(ip._v2);
boolean allFactLevels = useAllFactorLevels || ip.needsAllFactorLevels();
InteractionWrappedVec iwv =new InteractionWrappedVec(anyTrainVec.group().addVec(), anyTrainVec._rowLayout, ip._v1Enums, ip._v2Enums, allFactLevels, skipMissing, standardize, fr.vec(ip._v1)._key, fr.vec(ip._v2)._key);
interactionVecs[idx++] = iwv;
}
return new Frame(interactionNames, interactionVecs);
}
public static InteractionWrappedVec[] makeInteractions(Frame fr, InteractionPair[] interactions, boolean useAllFactorLevels, boolean skipMissing, boolean standardize) {
Vec anyTrainVec = fr.anyVec();
InteractionWrappedVec[] interactionVecs = new InteractionWrappedVec[interactions.length];
int idx = 0;
for (InteractionPair ip : interactions)
interactionVecs[idx++] = new InteractionWrappedVec(anyTrainVec.group().addVec(), anyTrainVec._rowLayout, ip._v1Enums, ip._v2Enums, useAllFactorLevels, skipMissing, standardize, fr.vec(ip._v1)._key, fr.vec(ip._v2)._key);
return interactionVecs;
}
public static InteractionWrappedVec makeInteraction(Frame fr, InteractionPair ip, boolean useAllFactorLevels, boolean skipMissing, boolean standardize) {
Vec anyVec = fr.anyVec();
return new InteractionWrappedVec(anyVec.group().addVec(), anyVec._rowLayout, ip._v1Enums, ip._v2Enums, useAllFactorLevels, skipMissing, standardize, fr.vec(ip._v1)._key, fr.vec(ip._v2)._key);
}
/**
* This class represents a pair of interacting columns plus some additional data
* about specific enums to be interacted when the vecs are categorical. The question
* naturally arises why not just use something like an ArrayList of int[2] (as is done,
* for example, in the Interaction/CreateInteraction classes) and the answer essentially
* boils down to a desire to specify these specific levels.
*
* Another difference with the CreateInteractions class:
* 1. do not interact on NA (someLvl_NA and NA_someLvl are actual NAs);
*    this logic does not live here, but in the InteractionWrappedVec class
* TODO: refactor the CreateInteractions to be useful here and in InteractionWrappedVec
*/
public static class InteractionPair extends Iced<InteractionPair> {
public final String _name1, _name2;
private int _v1,_v2;
private String[] _v1Enums;
private String[] _v2Enums;
private int _hash;
private boolean _needsAllFactorLevels;
private InteractionPair(Frame f, int v1, int v2, String[] v1Enums, String[] v2Enums) {
_name1 = f.name(v1);
_name2 = f.name(v2);
_v1=v1;_v2=v2;_v1Enums=v1Enums;_v2Enums=v2Enums;
// hash is column ints; Item 9 p.47 of Effective Java
_hash=17;
_hash = 31*_hash + _v1;
_hash = 31*_hash + _v2;
if( _v1Enums==null ) _hash = 31*_hash;
else
for( String s:_v1Enums ) _hash = 31*_hash + s.hashCode();
if( _v2Enums==null ) _hash = 31*_hash;
else
for( String s:_v2Enums ) _hash = 31*_hash + s.hashCode();
}
/**
* Indicates that Interaction should be created from all factor levels
* (regardless of the global setting useAllFactorLevels).
* @return do we need to make all factor levels?
*/
public boolean needsAllFactorLevels() { return _needsAllFactorLevels; }
public void setNeedsAllFactorLevels(boolean needsAllFactorLevels) { _needsAllFactorLevels = needsAllFactorLevels; }
/**
* Generate all pairwise combinations of the arguments.
* @param indexes An array of column indices.
* @return An array of interaction pairs
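* <p>Example (illustrative): indexes {0,1,2} yield the pairs (0,1), (0,2), (1,2),
* i.e. n*(n-1)/2 = 3 pairs for n = 3 columns.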
*/
public static InteractionPair[] generatePairwiseInteractionsFromList(Frame f, int... indexes) {
if( null==indexes ) return null;
if( indexes.length < 2 ) {
if( indexes.length==1 && indexes[0]==-1 ) return null;
throw new IllegalArgumentException("Must supply 2 or more columns.");
}
InteractionPair[] res = new InteractionPair[ (indexes.length-1)*(indexes.length)>>1]; // n*(n-1)/2
int idx=0;
for(int i=0;i<indexes.length;++i)
for(int j=i+1;j<indexes.length;++j)
res[idx++] = new InteractionPair(f, indexes[i],indexes[j],f.vec(indexes[i]).domain(),f.vec(indexes[j]).domain());
return res;
}
@Override public int hashCode() { return _hash; }
@Override public String toString() { return _v1+(_v1Enums==null?"":Arrays.toString(_v1Enums))+":"+_v2+(_v2Enums==null?"":Arrays.toString(_v2Enums)); }
@Override public boolean equals( Object o ) {
boolean res = o instanceof InteractionPair;
if (res) {
InteractionPair ip = (InteractionPair) o;
return (_v1 == ip._v1) && (_v2 == ip._v2) && Arrays.equals(_v1Enums, ip._v1Enums) && Arrays.equals(_v2Enums, ip._v2Enums);
}
return false;
}
public int getV1() { return _v1; }
public int getV2() { return _v2; }
public boolean isNumeric() {
return _v1Enums == null && _v2Enums == null;
}
}
/**
* Imports a binary model from a given location.
* Note: the binary model has to be created by the same version of H2O; importing a model from a different version will fail
* @param location path to the binary representation of the model on a local filesystem, HDFS, S3...
* @return instance of an H2O Model
* @throws IOException when reading fails
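* <p>Example (hypothetical path): {@code Model<?, ?, ?> m = Model.importBinaryModel("/tmp/my_model.bin");}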
*/
public static <M extends Model<?, ?, ?>> M importBinaryModel(String location) throws IOException {
InputStream is = null;
try {
URI targetUri = FileUtils.getURI(location);
Persist p = H2O.getPM().getPersistForURI(targetUri);
is = p.open(targetUri.toString());
final AutoBuffer ab = new AutoBuffer(is);
ab.sourceName = targetUri.toString();
@SuppressWarnings("unchecked")
M model = (M) Keyed.readAll(ab);
Keyed.readAll(ab); // CV holdouts frame
ab.close();
is.close();
return model;
} finally {
FileUtils.closeSilently(is);
}
}
/**
* Uploads a binary model from a given frame.
* Note: the binary model has to be created by the same version of H2O; importing a model from a different version will fail
* @param destinationFrame key of the frame whose first column holds the binary representation of the model
* @return instance of an H2O Model
* @throws IOException when reading fails
*/
public static <M extends Model<?, ?, ?>> M uploadBinaryModel(String destinationFrame) throws IOException {
Frame fr = DKV.getGet(destinationFrame);
ByteVec vec = (ByteVec) fr.vec(0);
try (InputStream inputStream = vec.openStream(null)) {
final AutoBuffer ab = new AutoBuffer(inputStream);
@SuppressWarnings("unchecked")
M model = (M) Keyed.readAll(ab);
Keyed.readAll(ab); // CV holdouts frame
ab.close();
return model;
}
}
/**
* Exports a binary model to a given location.
* @param location target path, it can be on local filesystem, HDFS, S3...
* @param force If true, overwrite already existing file
* @return URI representation of the target location
* @throws water.api.FSIOException when writing fails
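* <p>Example (hypothetical path): {@code URI uri = model.exportBinaryModel("/tmp/my_model.bin", true);}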
*/
public final URI exportBinaryModel(String location, boolean force, ModelExportOption... options) throws IOException {
OutputStream os = null;
try {
URI targetUri = FileUtils.getURI(location);
Persist p = H2O.getPM().getPersistForURI(targetUri);
os = p.create(targetUri.toString(), force);
writeTo(os, options);
os.close();
return targetUri;
} finally {
FileUtils.closeSilently(os);
}
}
@Override
public final void writeTo(OutputStream os, StreamWriteOption... options) {
try (AutoBuffer ab = new AutoBuffer(os, true)) {
writeAll(ab);
Frame holdoutFrame = null;
if (ArrayUtils.contains(options, ModelExportOption.INCLUDE_CV_PREDICTIONS)
&& _output._cross_validation_holdout_predictions_frame_id != null) {
holdoutFrame = DKV.getGet(_output._cross_validation_holdout_predictions_frame_id);
if (holdoutFrame == null)
Log.warn("CV holdout predictions frame is no longer available and won't be exported in the binary model file.");
}
if (holdoutFrame != null) {
holdoutFrame.writeAll(ab);
} else {
ab.put(null); // mark no holdout preds
}
}
}
/**
* Exports a MOJO representation of a model to a given location.
* @param location target path, it can be on local filesystem, HDFS, S3...
* @param force If true, overwrite already existing file
* @return URI representation of the target location
* @throws IOException when writing fails
*/
public URI exportMojo(String location, boolean force) throws IOException {
if (! haveMojo())
throw new IllegalStateException("Model doesn't support MOJOs.");
OutputStream os = null;
try {
URI targetUri = FileUtils.getURI(location);
Persist p = H2O.getPM().getPersistForURI(targetUri);
os = p.create(targetUri.toString(), force);
ModelMojoWriter mojo = getMojo();
mojo.writeTo(os);
os.close();
return targetUri;
} finally {
FileUtils.closeSilently(os);
}
}
/**
* Convenience method to convert Model to a MOJO representation. Please be aware that converting models
* to MOJOs using this function will require sufficient memory (to hold the mojo representation and interim
* serialized representation as well).
*
* @return instance of MojoModel
* @throws IOException when writing MOJO fails
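* <p>Example (sketch): {@code MojoModel mojo = model.toMojo();}; the in-memory MOJO can then be
* wrapped in an {@code EasyPredictModelWrapper} for row-by-row scoring.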
*/
public MojoModel toMojo() throws IOException {
MojoReaderBackend mojoReaderBackend = convertToInMemoryMojoReader();
return MojoModel.load(mojoReaderBackend);
}
/**
* Convenience method to convert Model to a MOJO representation. Please be aware that converting models
* to MOJOs using this function will require sufficient memory (to hold the mojo representation and interim
* serialized representation as well).
*
* @param readMetadata If true, parses also model metadata (model performance metrics... {@link ModelAttributes})
* Model metadata is not required for scoring; it is advised to leave this option disabled
* if you want to use MOJO for inference only.
* @return instance of MojoModel
* @throws IOException when writing MOJO fails
*/
public MojoModel toMojo(boolean readMetadata) throws IOException {
MojoReaderBackend mojoReaderBackend = convertToInMemoryMojoReader();
return ModelMojoReader.readFrom(mojoReaderBackend, readMetadata);
}
MojoReaderBackend convertToInMemoryMojoReader() throws IOException {
if (! haveMojo())
throw new IllegalStateException("Model doesn't support MOJOs.");
try (ByteArrayOutputStream os = new ByteArrayOutputStream()) {
this.getMojo().writeTo(os);
return MojoReaderBackendFactory.createReaderBackend(
new ByteArrayInputStream(os.toByteArray()), MojoReaderBackendFactory.CachingStrategy.MEMORY);
}
}
public ModelDescriptor modelDescriptor() {
return new H2OModelDescriptor();
}
protected class H2OModelDescriptor implements ModelDescriptor {
@Override
public String[][] scoringDomains() { return Model.this.scoringDomains(); }
@Override
public String projectVersion() { return H2O.ABV.projectVersion(); }
@Override
public String algoName() { return _parms.algoName(); }
@Override
public String algoFullName() { return _parms.fullName(); }
@Override
public String offsetColumn() { return _output.offsetName(); }
@Override
public String weightsColumn() { return _output.weightsName(); }
@Override
public String treatmentColumn() { return _output.treatmentName(); }
@Override
public String foldColumn() { return _output.foldName(); }
@Override
public ModelCategory getModelCategory() { return _output.getModelCategory(); }
@Override
public boolean isSupervised() { return _output.isSupervised(); }
@Override
public int nfeatures() { return _output.nfeatures(); }
@Override
public String[] features() { return _output.features(); }
@Override
public int nclasses() { return _output.nclasses(); }
@Override
public String[] columnNames() { return _output._names; }
@Override
public boolean balanceClasses() { return _parms._balance_classes; }
@Override
public double defaultThreshold() { return Model.this.defaultThreshold(); }
@Override
public double[] priorClassDist() { return _output._priorClassDist; }
@Override
public double[] modelClassDist() { return _output._modelClassDist; }
@Override
public String uuid() { return String.valueOf(Model.this.checksum()); }
@Override
public String timestamp() { return new DateTime().toString(); }
@Override
public String[] getOrigNames() { return _output._origNames; }
@Override
public String[][] getOrigDomains() { return _output._origDomains; }
}
/**
* Convenience method to find out if featureName is used for prediction. For example, a feature
* with beta == 0 in GLM is not considered to be used.
* This is mainly intended for optimizing prediction speed in StackedEnsemble.
* @param featureName name of the feature to check
*/
public boolean isFeatureUsedInPredict(String featureName) {
if (featureName.equals(_parms._response_column)) return false;
int featureIdx = ArrayUtils.find(_output._names, featureName);
if (featureIdx == -1) {
return false;
}
return isFeatureUsedInPredict(featureIdx);
}
protected boolean isFeatureUsedInPredict(int featureIdx) {
return true;
}
public boolean isDistributionHuber() {
return _parms._distribution == DistributionFamily.huber;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelBuilder.java
|
package hex;
import hex.genmodel.MojoModel;
import hex.genmodel.utils.DistributionFamily;
import jsr166y.CountedCompleter;
import water.*;
import water.api.FSIOException;
import water.api.HDFSIOException;
import water.exceptions.H2OIllegalArgumentException;
import water.exceptions.H2OModelBuilderIllegalArgumentException;
import water.fvec.*;
import water.rapids.ast.prims.advmath.AstKFold;
import water.udf.CFuncRef;
import water.util.*;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.*;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
/**
* Model builder parent class. Contains the common interfaces and fields across all model builders.
*/
abstract public class ModelBuilder<M extends Model<M,P,O>, P extends Model.Parameters, O extends Model.Output> extends Iced {
public ToEigenVec getToEigenVec() { return null; }
public boolean shouldReorder(Vec v) { return _parms._categorical_encoding.needsResponse() && isSupervised(); }
// initialized to be non-null to provide nicer exceptions when used incorrectly (instead of NPE)
private transient Workspace _workspace = new Workspace(false);
public Job<M> _job; // Job controlling this build
/** Block till completion, and return the built model from the DKV. Note the
* funny assert: the Job does NOT have to be controlling this model build,
* but might, e.g. be controlling a Grid search for which this is just one
* of many results. Calling 'get' means that we are blocking on the Job
* which is controlling ONLY this ModelBuilder, and when the Job completes
* we can return built Model. */
public final M get() { assert _job._result == _result; return _job.get(); }
public final boolean isStopped() { return _job.isStopped(); }
// Key of the model being built; note that this is DIFFERENT from
// _job._result if the Job is being shared by many sub-models
// e.g. cross-validation.
protected Key<M> _result; // Built Model key
public final Key<M> dest() { return _result; }
public String _desc = "Main model";
private Countdown _build_model_countdown;
private Countdown _build_step_countdown;
final void startClock() {
_build_model_countdown = Countdown.fromSeconds(_parms._max_runtime_secs);
_build_model_countdown.start();
}
protected boolean timeout() {
return _build_step_countdown != null ? _build_step_countdown.timedOut() : _build_model_countdown.timedOut();
}
protected boolean stop_requested() {
return _job.stop_requested() || timeout();
}
protected long remainingTimeSecs() {
return (long) Math.ceil(_build_model_countdown.remainingTime() / 1000.0);
}
/** Default model-builder key */
public static <S extends Model> Key<S> defaultKey(String algoName) {
return Key.make(H2O.calcNextUniqueModelId(algoName));
}
/** Default easy constructor: Unique new job and unique new result key */
protected ModelBuilder(P parms) {
this(parms, ModelBuilder.<M>defaultKey(parms.algoName()));
}
/** Unique new job and named result key */
protected ModelBuilder(P parms, Key<M> key) {
_job = new Job<>(_result = key, parms.javaName(), parms.algoName());
_parms = parms;
_input_parms = (P) parms.clone();
}
/** Shared pre-existing Job and unique new result key */
protected ModelBuilder(P parms, Job<M> job) {
_job = job;
_result = defaultKey(parms.algoName());
_parms = parms;
_input_parms = (P) parms.clone();
}
/** List of known ModelBuilders with all default args; endlessly cloned by
* the GUI for new private instances, then the GUI overrides some of the
* defaults with user args. */
private static String[] ALGOBASES = new String[0];
public static String[] algos() { return ALGOBASES; }
private static String[] SCHEMAS = new String[0];
private static ModelBuilder[] BUILDERS = new ModelBuilder[0];
protected boolean _startUpOnceModelBuilder = false;
/** One-time start-up only ModelBuilder, endlessly cloned by the GUI for the
* default settings. */
protected ModelBuilder(P parms, boolean startup_once) { this(parms,startup_once,"hex.schemas."); }
protected ModelBuilder(P parms, boolean startup_once, String externalSchemaDirectory ) {
String base = getName();
if (!startup_once)
throw H2O.fail("Algorithm " + base + " registration issue. It can only be called at startup.");
_startUpOnceModelBuilder = true;
_job = null;
_result = null;
_parms = parms;
init(false); // Default cheap init
if( ArrayUtils.find(ALGOBASES,base) != -1 )
throw H2O.fail("Only called once at startup per ModelBuilder, and "+base+" has already been called");
// FIXME: this is not thread safe!
// michalk: this note ^^ is generally true (considering 3rd parties), however, in h2o-3 code base we have a sequential ModelBuilder initialization
ALGOBASES = Arrays.copyOf(ALGOBASES,ALGOBASES.length+1);
BUILDERS = Arrays.copyOf(BUILDERS ,BUILDERS .length+1);
SCHEMAS = Arrays.copyOf(SCHEMAS ,SCHEMAS .length+1);
ALGOBASES[ALGOBASES.length-1] = base;
BUILDERS [BUILDERS .length-1] = this;
SCHEMAS [SCHEMAS .length-1] = externalSchemaDirectory;
}
/** gbm -> GBM, deeplearning -> DeepLearning */
public static String algoName(String urlName) { return BUILDERS[ArrayUtils.find(ALGOBASES,urlName)]._parms.algoName(); }
/** gbm -> hex.tree.gbm.GBM, deeplearning -> hex.deeplearning.DeepLearning */
public static String javaName(String urlName) { return BUILDERS[ArrayUtils.find(ALGOBASES,urlName)]._parms.javaName(); }
/** gbm -> GBMParameters */
public static String paramName(String urlName) { return algoName(urlName)+"Parameters"; }
/** gbm -> "hex.schemas." ; custAlgo -> "org.myOrg.schemas." */
public static String schemaDirectory(String urlName) { return SCHEMAS[ArrayUtils.find(ALGOBASES,urlName)]; }
@SuppressWarnings("unchecked")
static <B extends ModelBuilder> Optional<B> getRegisteredBuilder(String urlName) {
final String formattedName = urlName.toLowerCase();
int idx = ArrayUtils.find(ALGOBASES, formattedName);
if (idx < 0)
return Optional.empty();
return Optional.of((B) BUILDERS[idx]);
}
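// Illustrative usage sketch (within this package; the lookup is case-insensitive):
//
//   Optional<ModelBuilder> proto = ModelBuilder.getRegisteredBuilder("GBM");
//   // proto.isPresent() only if the "gbm" algo was registered at startup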
@SuppressWarnings("unchecked")
public static <P extends Model.Parameters> P makeParameters(String algo) {
return (P) make(algo, null, null)._parms;
}
/** Factory method to create a ModelBuilder instance for the given algo name.
 * Makes a shallow clone of both the default ModelBuilder instance and its Parameters. */
public static <B extends ModelBuilder> B make(String algo, Job job, Key<Model> result) {
return getRegisteredBuilder(algo)
.map(prototype -> {
@SuppressWarnings("unchecked")
B mb = (B) prototype.clone();
mb._job = job;
mb._result = result;
mb._parms = prototype._parms.clone();
mb._input_parms = prototype._parms.clone();
return mb;
})
.orElseThrow(() -> {
StringBuilder sb = new StringBuilder();
sb.append("Unknown algo: '").append(algo).append("'; Extension report: ");
Log.err(ExtensionManager.getInstance().makeExtensionReport(sb));
return new IllegalStateException("Algorithm '" + algo + "' is not registered. " +
"Available algos: [" + StringUtils.join(",", ALGOBASES) + "]");
});
}
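// Illustrative usage sketch (hypothetical key/job; assumes the "gbm" algo was registered at startup):
//
//   Key<Model> key = ModelBuilder.defaultKey("gbm");
//   Job<Model> job = new Job<>(key, ModelBuilder.javaName("gbm"), ModelBuilder.algoName("gbm"));
//   ModelBuilder mb = ModelBuilder.make("gbm", job, key);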
/**
* Factory method to create a ModelBuilder instance from a clone of a given {@code parms} instance of Model.Parameters.
*/
public static <B extends ModelBuilder, MP extends Model.Parameters> B make(MP parms) {
Key<Model> mKey = ModelBuilder.defaultKey(parms.algoName());
return make(parms, mKey);
}
public static <B extends ModelBuilder, MP extends Model.Parameters> B make(MP parms, Key<Model> mKey) {
Job<Model> mJob = new Job<>(mKey, parms.javaName(), parms.algoName());
B newMB = ModelBuilder.make(parms.algoName(), mJob, mKey);
newMB._parms = parms.clone();
newMB._input_parms = parms.clone();
return newMB;
}
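// Illustrative end-to-end sketch (hypothetical: "GBMParameters" stands for any registered
// Model.Parameters subclass, "trainFrame" for a Frame already in the DKV):
//
//   GBMParameters parms = new GBMParameters();
//   parms._train = trainFrame._key;
//   parms._response_column = "label";
//   ModelBuilder mb = ModelBuilder.make(parms); // fresh Job + result Key
//   Model model = mb.trainModel().get();        // get() blocks until training finishes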
/** All the parameters required to build the model.
* The values of this property will be used as actual parameters of the model. */
public P _parms; // Not final, so CV can set-after-clone
/** All the parameters required to build the model, preserved in their input form, with AUTO values not yet evaluated. */
public P _input_parms;
/** Training frame: derived from the parameter's training frame, excluding
 * all ignored columns, all constant and bad columns, perhaps flipping the
 * response column to a Categorical, etc. */
public final Frame train() { return _train; }
protected transient Frame _train;
protected transient Frame _origTrain;
public void setTrain(Frame train) {
_train = train;
}
public void setValid(Frame valid) {
_valid = valid;
}
/** Validation frame: derived from the parameter's validation frame, excluding
* all ignored columns, all constant and bad columns, perhaps flipping the
* response column to a Categorical, etc. Is null if no validation key is set. */
public final Frame valid() { return _valid; }
protected transient Frame _valid;
// TODO: tighten up the type
// Map the algo name (e.g., "deeplearning") to the builder class (e.g., DeepLearning.class) :
private static final Map<String, Class<? extends ModelBuilder>> _builders = new HashMap<>();
// Map the Model class (e.g., DeepLearningModel.class) to the algo name (e.g., "deeplearning"):
private static final Map<Class<? extends Model>, String> _model_class_to_algo = new HashMap<>();
// Map the simple algo name (e.g., deeplearning) to the full algo name (e.g., "Deep Learning"):
private static final Map<String, String> _algo_to_algo_full_name = new HashMap<>();
// Map the algo name (e.g., "deeplearning") to the Model class (e.g., DeepLearningModel.class):
private static final Map<String, Class<? extends Model>> _algo_to_model_class = new HashMap<>();
/** Train response vector. */
public Vec response(){return _response;}
/** Validation response vector. */
public Vec vresponse(){return _vresponse == null ? _response : _vresponse;}
abstract protected class Driver extends H2O.H2OCountedCompleter<Driver> {
protected Driver(){ super(); }
private ModelBuilderListener _callback;
public void setCallback(ModelBuilderListener callback) {
this._callback = callback;
}
// Pull the boilerplate out of the computeImpl(), so the algo writer doesn't need to worry about the following:
// 1) Scope (unless they want to keep data, then they must call Scope.untrack(Key<Vec>[]))
// 2) Train/Valid frame locking and unlocking
// 3) calling tryComplete()
public void compute2() {
try {
Scope.enter();
_parms.read_lock_frames(_job); // Fetch & read-lock input frames
computeImpl();
computeParameters();
saveModelCheckpointIfConfigured();
notifyModelListeners();
} finally {
_parms.read_unlock_frames(_job);
if (_parms._is_cv_model) {
// CV models get completely cleaned up when the main model is fully trained.
Key[] keep = _workspace == null ? new Key[0] : _workspace.getToDelete(true).keySet().toArray(new Key[0]);
Scope.exit(keep);
} else {
cleanUp();
Scope.exit();
}
}
tryComplete();
}
@Override
public void onCompletion(CountedCompleter caller) {
setFinalState();
if (_callback != null) {
_callback.onModelSuccess(_result.get());
}
}
@Override
public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) {
setFinalState();
if (_callback != null) {
_callback.onModelFailure(ex, _parms);
}
return true;
}
public abstract void computeImpl();
public final void computeParameters() {
M model = _result.get();
if (model != null) {
model.write_lock(_job);
model.setInputParms(_input_parms);
model.update(_job);
model.unlock(_job);
}
}
}
private void setFinalState() {
Key<M> reskey = dest();
if (reskey == null) return;
M res = reskey.get();
if (res != null && res._output != null) {
res._output._job = _job;
res._output.stopClock();
res.write_lock(_job);
res.update(_job);
res.unlock(_job);
}
Log.info("Completing model "+ reskey);
}
private void saveModelCheckpointIfConfigured() {
Model model = _result.get();
if (model != null && !StringUtils.isNullOrEmpty(model._parms._export_checkpoints_dir)) {
try {
model.exportBinaryModel(model._parms._export_checkpoints_dir + "/" + model._key.toString(), true);
} catch (FSIOException | HDFSIOException | IOException e) {
throw new H2OIllegalArgumentException("export_checkpoints_dir", "saveModelIfConfigured", e);
}
}
}
private void notifyModelListeners() {
Model<?, ?, ?> model = _result.get();
ListenerService.getInstance().report("model_completed", model, _parms);
}
/**
* Start model training using this ModelBuilder as a template. The MB is used directly
* if the method was invoked on a regular H2O node. If the method was called on a client node, the model builder
* is used as a template only and the actual instance used for training is re-created on a remote H2O node.
*
* Warning: the nature of this method prohibits further use of this instance of the model builder after the method
* is called.
*
* This is intended to reduce training time in client-mode setups: it pushes all computation to a regular H2O node
* and avoids exchanging data between the client and the H2O cluster. This also lowers requirements on the H2O client node.
*
* @return model job
*/
public Job<M> trainModelOnH2ONode() {
if (error_count() > 0)
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(this);
this._input_parms = (P) this._parms.clone();
TrainModelRunnable trainModel = new TrainModelRunnable(this);
H2O.runOnH2ONode(trainModel);
return _job;
}
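// Illustrative usage sketch (hypothetical "builder" variable): on a client node,
// push the whole training to a worker node and only block on the returned Job.
// Per the warning above, the builder instance must not be reused afterwards.
//
//   Job<M> job = builder.trainModelOnH2ONode();
//   M model = job.get();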
private static class TrainModelRunnable extends H2O.RemoteRunnable<TrainModelRunnable> {
private transient ModelBuilder _mb;
private Job<Model> _job;
private Key<Model> _key;
private Model.Parameters _parms;
private Model.Parameters _input_parms;
@SuppressWarnings("unchecked")
private TrainModelRunnable(ModelBuilder mb) {
_mb = mb;
_job = (Job<Model>) _mb._job;
_key = _job._result;
_parms = _mb._parms;
_input_parms = _mb._input_parms;
}
@Override
public void setupOnRemote() {
_mb = ModelBuilder.make(_parms.algoName(), _job, _key);
_mb._parms = _parms;
_mb._input_parms = _input_parms;
_mb.init(false); // validate parameters
}
@Override
public void run() {
_mb.trainModel();
}
}
/** Method to launch training of a Model, based on its parameters. */
final public Job<M> trainModel() {
return trainModel(null);
}
final public Job<M> trainModel(final ModelBuilderListener callback) {
if (error_count() > 0)
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(this);
startClock();
if (!nFoldCV()) {
Driver driver = trainModelImpl();
driver.setCallback(callback);
return _job.start(driver, _parms.progressUnits(), _parms._max_runtime_secs);
} else {
// cross-validation needs to be forked off to allow continuous (non-blocking) progress bar
return _job.start(new H2O.H2OCountedCompleter() {
@Override
public void compute2() {
computeCrossValidation();
tryComplete();
}
@Override
public void onCompletion(CountedCompleter caller) {
if (callback != null) callback.onModelSuccess(_result.get());
}
@Override
public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) {
Log.warn("Model training job " + _job._description + " completed with exception: " + ex);
if (callback != null) callback.onModelFailure(ex, _parms);
try {
Keyed.remove(_job._result); // ensure there's no incomplete model left for manipulation after crash or cancellation
} catch (Exception logged) {
Log.warn("Exception thrown when removing result from job " + _job._description, logged);
}
return true;
}
},
(nFoldWork() + 1/*main model*/) * _parms.progressUnits(), _parms._max_runtime_secs);
}
}
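// Illustrative usage sketch (hypothetical listener; it implements the two methods
// the Driver invokes above):
//
//   builder.trainModel(new ModelBuilderListener() {
//     @Override public void onModelSuccess(Model model) { Log.info("Built " + model._key); }
//     @Override public void onModelFailure(Throwable cause, Model.Parameters parms) { Log.err(cause); }
//   });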
/**
* Train a model as part of a larger Job.
*
* @param fr Input frame override, ignored if null.
*           In some cases, algos do not work directly with the original frame in the K/V store.
*           Instead they run on a private anonymous copy (e.g. a rebalanced dataset).
*           Use this argument if you want the nested job to work on the actual working copy rather than the original Frame in the K/V store.
*           Example: Outer job rebalances the dataset and then calls the nested job. To avoid a needless second rebalance, pass in the (already rebalanced) working copy.
*/
final public M trainModelNested(Frame fr) {
if(fr != null) // Use the working copy (e.g. rebalanced) instead of the original K/V store version
setTrain(fr);
if (error_count() > 0)
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(this);
startClock();
if( !nFoldCV() ) submitTrainModelTask().join();
else computeCrossValidation();
return _result.get();
}
/**
* Train a model as part of a larger job. The model will be built on a non-client node.
*
* @param job containing job
* @param result key of the resulting model
* @param params model parameters
* @param fr input frame, ignored if null
* @param <MP> Model.Parameters
* @return instance of a Model
*/
public static <MP extends Model.Parameters> Model trainModelNested(Job<?> job, Key<Model> result, MP params, Frame fr) {
H2O.runOnH2ONode(new TrainModelNestedRunnable(job, result, params, fr));
return result.get();
}
private static class TrainModelNestedRunnable extends H2O.RemoteRunnable<TrainModelNestedRunnable> {
private Job<?> _job;
private Key<Model> _key;
private Model.Parameters _parms;
private Frame _fr;
private TrainModelNestedRunnable(Job<?> job, Key<Model> key, Model.Parameters parms, Frame fr) {
_job = job;
_key = key;
_parms = parms;
_fr = fr;
}
@Override
public void run() {
ModelBuilder mb = ModelBuilder.make(_parms.algoName(), _job, _key);
mb._parms = _parms;
mb._input_parms = _parms.clone();
mb.trainModelNested(_fr);
}
}
/** Model-specific implementation of model training
* @return A F/J Job, which, when executed, does the build. F/J is NOT started. */
abstract protected Driver trainModelImpl();
private static class Barrier extends CountedCompleter {
@Override public void compute() { }
}
/**
* Simple wrapper around the model task Driver; its main purpose is to make
* sure onExceptionalCompletion is not called after the join method finishes (similarly to how Job behaves).
*/
class TrainModelTaskController {
private final Driver _driver;
private final Barrier _barrier;
TrainModelTaskController(Driver driver, Barrier barrier) {
_driver = driver;
_barrier = barrier;
}
/**
* Block for Driver to finish
*/
void join() {
_barrier.join();
}
void cancel(boolean mayInterruptIfRunning) {
_driver.cancel(mayInterruptIfRunning);
}
}
/**
* Submits the model Driver task for execution, blocking on a barrier
* that is only completed after the Driver is fully finished (including
* possible calls to onExceptionalCompletion).
*
* @return controller object that can be used to wait for completion or
* to cancel the execution.
*/
TrainModelTaskController submitTrainModelTask() {
Driver d = trainModelImpl();
Barrier b = new Barrier();
d.setCompleter(b);
H2O.submitTask(d);
return new TrainModelTaskController(d, b);
}
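// Illustrative usage sketch: block until the Driver has fully finished
// (including any onExceptionalCompletion handling), or cancel it early:
//
//   TrainModelTaskController ctl = submitTrainModelTask();
//   ctl.join();              // wait for full completion
//   // or: ctl.cancel(true); // interrupt a running Driver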
@Deprecated protected int nModelsInParallel() { return 0; }
/**
* How many models should be trained in parallel during N-fold cross-validation?
* Train all CV models in parallel when parallelism is enabled, otherwise train one at a time.
* Each model can override this logic, based on parameters, dataset size, etc.
* @return How many models to train in parallel during cross-validation
*/
protected int nModelsInParallel(int folds) {
int n = nModelsInParallel();
if (n > 0) return n;
return nModelsInParallel(folds, 1);
}
protected int nModelsInParallel(int folds, int defaultParallelization) {
if (!_parms._parallelize_cross_validation) return 1; //user demands serial building (or we need to honor the time constraints for all CV models equally)
int parallelization = defaultParallelization;
if (_train.byteSize() < smallDataSize())
parallelization = folds; //for small data, parallelize over CV models
return Math.min(parallelization, H2O.ARGS.nthreads);
}
protected long smallDataSize() {
return (long) 1e6;
}
private double maxRuntimeSecsPerModel(int cvModelsCount, int parallelization) {
return cvModelsCount > 0
? _parms._max_runtime_secs / Math.ceil((double)cvModelsCount / parallelization + 1)
// ? _parms._max_runtime_secs * cvModelsCount / (cvModelsCount + 1) / Math.ceil((double)cvModelsCount / parallelization)
: _parms._max_runtime_secs;
}
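// Worked example (illustrative): with _max_runtime_secs=100, 5 CV models and
// parallelization=1, each CV model is budgeted 100 / ceil(5/1 + 1) = 100/6 ~ 16.7s,
// implicitly reserving one extra "slot" of budget for the main model.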
// Work for each requested fold
protected int nFoldWork() {
if( _parms._fold_column == null )
return _parms._nfolds;
Vec fold = _parms._train.get().vec(_parms._fold_column);
return FoldAssignment.nFoldWork(fold);
}
protected transient ModelTrainingEventsPublisher _eventPublisher;
protected transient ModelTrainingCoordinator _coordinator;
public class ModelTrainingCoordinator {
private final BlockingQueue<ModelTrainingEventsPublisher.Event> _events;
private final ModelBuilder<M, P, O>[] _cvModelBuilders;
private int _inProgress;
public ModelTrainingCoordinator(BlockingQueue<ModelTrainingEventsPublisher.Event> events,
ModelBuilder<M, P, O>[] cvModelBuilders) {
_events = events;
_cvModelBuilders = cvModelBuilders;
_inProgress = _cvModelBuilders.length;
}
public void initStoppingParameters() {
cv_initStoppingParameters();
}
public void updateParameters() {
try {
while (_inProgress > 0) {
ModelTrainingEventsPublisher.Event e = _events.take();
switch (e) {
case ALL_DONE:
_inProgress--;
break;
case ONE_DONE:
if (cv_updateOptimalParameters(_cvModelBuilders))
return;
break;
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Failed to update model parameters based on result of CV model training", e);
}
cv_updateOptimalParameters(_cvModelBuilders);
}
}
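// Sketch of the event flow above: each CV builder publishes events through its
// ModelTrainingEventsPublisher. ALL_DONE marks one builder as finished and
// decrements the in-progress count; ONE_DONE triggers cv_updateOptimalParameters(),
// whose 'true' return value ends the update loop early.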
/**
* Default naive (serial) implementation of N-fold cross-validation
* (builds N+1 models, all have train+validation metrics, the main model has N-fold cross-validated validation metrics)
*/
public void computeCrossValidation() {
assert _job.isRunning(); // main Job is still running
_job.setReadyForView(false); //wait until the main job starts to let the user inspect the main job
final int N = nFoldWork();
ModelBuilder<M, P, O>[] cvModelBuilders = null;
try {
Scope.enter();
init(false);
// Step 1: Assign each row to a fold
final FoldAssignment foldAssignment = cv_AssignFold(N);
// Step 2: Make 2*N binary weight vectors
final Vec[] weights = cv_makeWeights(N, foldAssignment);
// Step 3: Build N train & validation frames; build N ModelBuilders; error check them all
cvModelBuilders = cv_makeFramesAndBuilders(N, weights);
// Step 4: Run all the CV models (and optionally train the main model in parallel to the CV training)
final boolean buildMainModel;
if (useParallelMainModelBuilding(N)) {
int parallelization = nModelsInParallel(N);
Log.info(_desc + " will be trained in parallel to the Cross-Validation models " +
"(up to " + parallelization + " models running at the same time).");
BlockingQueue<ModelTrainingEventsPublisher.Event> events = new LinkedBlockingQueue<>();
for (ModelBuilder<M, P, O> mb : cvModelBuilders) {
mb._eventPublisher = new ModelTrainingEventsPublisher(events);
}
_coordinator = new ModelTrainingCoordinator(events, cvModelBuilders);
final ModelBuilder<M, P, O>[] builders = Arrays.copyOf(cvModelBuilders, cvModelBuilders.length + 1);
builders[builders.length - 1] = this;
new SubModelBuilder(_job, builders, parallelization).bulkBuildModels();
buildMainModel = false;
} else {
cv_buildModels(N, cvModelBuilders);
buildMainModel = true;
}
// Step 5: Score the CV models
ModelMetrics.MetricBuilder mbs[] = cv_scoreCVModels(N, weights, cvModelBuilders);
if (buildMainModel) {
// Step 6: Build the main model
long time_allocated_to_main_model = (long) (maxRuntimeSecsPerModel(N, nModelsInParallel(N)) * 1e3);
buildMainModel(time_allocated_to_main_model);
}
// Step 7: Combine cross-validation scores; compute main model x-val
// scores; compute gains/lifts
if (!cvModelBuilders[0].getName().equals("infogram")) // infogram does not support scoring
cv_mainModelScores(N, mbs, cvModelBuilders);
_job.setReadyForView(true);
DKV.put(_job);
} catch (Exception e) {
if (cvModelBuilders != null) {
Futures fs = new Futures();
// removing keys added during cv_makeFramesAndBuilders and during CV model training
// need a better solution: part of this is done in cv_makeFramesAndBuilders, but only partially and only for its method scope
// also removing the completed CV models as the main model is incomplete anyway
for (ModelBuilder mb : cvModelBuilders) {
DKV.remove(mb._parms._train, fs);
DKV.remove(mb._parms._valid, fs);
DKV.remove(Key.make(mb.getPredictionKey()), fs);
Keyed.remove(mb._result, fs, true);
}
fs.blockForPending();
}
throw e;
} finally {
if (cvModelBuilders != null) {
for (ModelBuilder mb : cvModelBuilders) {
mb.cleanUp();
}
}
cleanUp();
Scope.exit();
}
}
// Step 1: Assign each row to a fold
FoldAssignment cv_AssignFold(int N) {
assert(N>=2);
Vec fold = train().vec(_parms._fold_column);
if (fold != null) {
return FoldAssignment.fromUserFoldSpecification(N, fold);
} else {
final long seed = _parms.getOrMakeRealSeed();
Log.info("Creating " + N + " cross-validation splits with random number seed: " + seed);
switch (_parms._fold_assignment) {
case AUTO:
case Random:
fold = AstKFold.kfoldColumn(train().anyVec().makeZero(), N, seed);
break;
case Modulo:
fold = AstKFold.moduloKfoldColumn(train().anyVec().makeZero(), N);
break;
case Stratified:
fold = AstKFold.stratifiedKFoldColumn(response(), N, seed);
break;
default:
throw H2O.unimpl();
}
return FoldAssignment.fromInternalFold(N, fold);
}
}
// Step 2: Make 2*N binary weight vectors
Vec[] cv_makeWeights(final int N, FoldAssignment foldAssignment) {
String origWeightsName = _parms._weights_column;
Vec origWeight = origWeightsName != null ? train().vec(origWeightsName) : train().anyVec().makeCon(1.0);
Frame folds_and_weights = new Frame(foldAssignment.getAdaptedFold(), origWeight);
Vec[] weights = new MRTask() {
@Override public void map(Chunk chks[], NewChunk nchks[]) {
Chunk fold = chks[0], orig = chks[1];
for( int row=0; row< orig._len; row++ ) {
int foldIdx = (int) fold.atd(row);
double w = orig.atd(row);
for( int f = 0; f < N; f++ ) {
boolean holdout = foldIdx == f;
nchks[2 * f].addNum(holdout ? 0 : w);
nchks[2*f+1].addNum(holdout ? w : 0);
}
}
}
}.doAll(2*N,Vec.T_NUM,folds_and_weights).outputFrame().vecs();
if (origWeightsName == null)
origWeight.remove(); // Cleanup temp
if (_parms._keep_cross_validation_fold_assignment)
DKV.put(foldAssignment.toFrame(Key.make("cv_fold_assignment_" + _result.toString())));
foldAssignment.remove(_parms._keep_cross_validation_fold_assignment);
for( Vec weight : weights )
if( weight.isConst() )
throw new H2OIllegalArgumentException("Not enough data to create " + N + " random cross-validation splits. Either reduce nfolds, specify a larger dataset (or specify another random number seed, if applicable).");
return weights;
}
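// Worked example (illustrative): for N=3, a row with weight w assigned to fold 1
// fills the 2*N weight vecs [train_0, holdout_0, train_1, holdout_1, train_2, holdout_2]
// as [w, 0, 0, w, w, 0]: the row is held out exactly once and trains in the other folds.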
// Step 3: Build N train & validation frames; build N ModelBuilders; error check them all
private ModelBuilder<M, P, O>[] cv_makeFramesAndBuilders( int N, Vec[] weights ) {
final long old_cs = _parms.checksum();
final String origDest = _result.toString();
final String weightName = "__internal_cv_weights__";
if (train().find(weightName) != -1) throw new H2OIllegalArgumentException("Frame cannot contain a Vec called '" + weightName + "'.");
Frame cv_fr = new Frame(train().names(),train().vecs());
if( _parms._weights_column!=null ) cv_fr.remove( _parms._weights_column ); // The CV frames will have their own private weight column
ModelBuilder<M, P, O>[] cvModelBuilders = new ModelBuilder[N];
List<Frame> cvFramesForFailedModels = new ArrayList<>();
double cv_max_runtime_secs = maxRuntimeSecsPerModel(N, nModelsInParallel(N));
for( int i=0; i<N; i++ ) {
String identifier = origDest + "_cv_" + (i+1);
// Training/Validation share the same data, but will have exclusive weights
Frame cvTrain = new Frame(Key.make(identifier + "_train"), cv_fr.names(), cv_fr.vecs());
cvTrain.write_lock(_job);
cvTrain.add(weightName, weights[2*i]);
cvTrain.update(_job);
Frame cvValid = new Frame(Key.make(identifier + "_valid"), cv_fr.names(), cv_fr.vecs());
cvValid.write_lock(_job);
cvValid.add(weightName, weights[2*i+1]);
cvValid.update(_job);
// Shallow clone - not everything is a private copy!!!
ModelBuilder<M, P, O> cv_mb = (ModelBuilder)this.clone();
cv_mb.setTrain(cvTrain);
cv_mb._result = Key.make(identifier); // Each submodel gets its own key
cv_mb._parms = (P) _parms.clone();
// Fix up some parameters of the clone
cv_mb._parms._is_cv_model = true;
cv_mb._parms._cv_fold = i;
cv_mb._parms._weights_column = weightName;// All submodels have a weight column, which the main model does not
cv_mb._parms.setTrain(cvTrain._key);      // Each submodel trains on its own private CV frame
cv_mb._parms._valid = cvValid._key;
cv_mb._parms._fold_assignment = Model.Parameters.FoldAssignmentScheme.AUTO;
cv_mb._parms._nfolds = 0; // Each submodel is not itself folded
cv_mb._parms._max_runtime_secs = cv_max_runtime_secs;
cv_mb.clearValidationErrors(); // each submodel gets its own validation messages and error_count()
cv_mb._input_parms = (P) _parms.clone();
cv_mb._desc = "Cross-Validation model " + (i + 1) + " / " + N;
// Error-check all the cross-validation Builders before launching any
cv_mb.init(false);
if( cv_mb.error_count() > 0 ) { // Gather all submodel error messages
Log.info("Marking frame for failed cv model for removal: " + cvTrain._key);
cvFramesForFailedModels.add(cvTrain);
Log.info("Marking frame for failed cv model for removal: " + cvValid._key);
cvFramesForFailedModels.add(cvValid);
for (ValidationMessage vm : cv_mb._messages)
message(vm._log_level, vm._field_name, vm._message);
}
cvModelBuilders[i] = cv_mb;
}
if( error_count() > 0 ) { // Found an error in one or more submodels
Futures fs = new Futures();
for (Frame cvf : cvFramesForFailedModels) {
cvf.vec(weightName).remove(fs); // delete the Vec's chunks
DKV.remove(cvf._key, fs); // delete the Frame from the DKV, leaving its vecs
Log.info("Removing frame for failed cv model: " + cvf._key);
}
fs.blockForPending();
throw H2OModelBuilderIllegalArgumentException.makeFromBuilder(this);
}
// check that this Job's original _params haven't changed
assert old_cs == _parms.checksum();
return cvModelBuilders;
}
// Step 4: Run all the CV models and launch the main model
public void cv_buildModels(int N, ModelBuilder<M, P, O>[] cvModelBuilders ) {
makeCVModelBuilder(cvModelBuilders, nModelsInParallel(N)).bulkBuildModels();
cv_computeAndSetOptimalParameters(cvModelBuilders);
}
protected CVModelBuilder makeCVModelBuilder(ModelBuilder<?, ?, ?>[] modelBuilders, int parallelization) {
return new CVModelBuilder(_job, modelBuilders, parallelization);
}
// Step 5: Score the CV models
public ModelMetrics.MetricBuilder[] cv_scoreCVModels(int N, Vec[] weights, ModelBuilder<M, P, O>[] cvModelBuilders) {
if (_job.stop_requested()) {
Log.info("Skipping scoring of CV models");
throw new Job.JobCancelledException(_job);
}
assert weights.length == 2*N;
assert cvModelBuilders.length == N;
Log.info("Scoring the "+N+" CV models");
ModelMetrics.MetricBuilder[] mbs = new ModelMetrics.MetricBuilder[N];
Futures fs = new Futures();
for (int i=0; i<N; ++i) {
if (_job.stop_requested()) {
Log.info("Skipping scoring for last "+(N-i)+" out of "+N+" CV models");
throw new Job.JobCancelledException(_job);
}
Frame cvValid = cvModelBuilders[i].valid();
Frame preds = null;
try (Scope.Safe s = Scope.safe(cvValid)) {
Frame adaptFr = new Frame(cvValid);
if (makeCVMetrics(cvModelBuilders[i])) {
M cvModel = cvModelBuilders[i].dest().get();
cvModel.adaptTestForTrain(adaptFr, true, !isSupervised());
if (nclasses() == 2 /* need holdout predictions for gains/lift table */
|| _parms._keep_cross_validation_predictions
|| (cvModel.isDistributionHuber() /*need to compute quantiles on abs error of holdout predictions*/)) {
String predName = cvModelBuilders[i].getPredictionKey();
Model.PredictScoreResult result = cvModel.predictScoreImpl(cvValid, adaptFr, predName, _job, true, CFuncRef.from(_parms._custom_metric_func));
preds = result.getPredictions();
Scope.untrack(preds);
result.makeModelMetrics(cvValid, adaptFr);
mbs[i] = result.getMetricBuilder();
DKV.put(cvModel);
} else {
mbs[i] = cvModel.scoreMetrics(adaptFr);
}
}
} finally {
Scope.track(preds);
}
DKV.remove(cvModelBuilders[i]._parms._train,fs);
DKV.remove(cvModelBuilders[i]._parms._valid,fs);
weights[2*i ].remove(fs);
weights[2*i+1].remove(fs);
}
fs.blockForPending();
return mbs;
}
protected boolean makeCVMetrics(ModelBuilder<?, ?, ?> cvModelBuilder) {
return !cvModelBuilder.getName().equals("infogram");
}
private boolean useParallelMainModelBuilding(int nFolds) {
int parallelizationLevel = nModelsInParallel(nFolds);
return parallelizationLevel > 1 && _parms._parallelize_cross_validation && cv_canBuildMainModelInParallel();
}
protected boolean cv_canBuildMainModelInParallel() {
return false;
}
protected boolean cv_updateOptimalParameters(ModelBuilder<M, P, O>[] cvModelBuilders) {
throw new UnsupportedOperationException();
}
protected boolean cv_initStoppingParameters() {
throw new UnsupportedOperationException();
}
// Step 6: build the main model
private void buildMainModel(long max_runtime_millis) {
if (_job.stop_requested()) {
Log.info("Skipping main model");
throw new Job.JobCancelledException(_job);
}
assert _job.isRunning();
Log.info("Building main model.");
Log.info("Remaining time for main model (ms): " + max_runtime_millis);
_build_step_countdown = new Countdown(max_runtime_millis, true);
submitTrainModelTask().join();
_build_step_countdown = null;
}
// Step 7: Combine cross-validation scores; compute main model x-val scores; compute gains/lifts
public void cv_mainModelScores(int N, ModelMetrics.MetricBuilder mbs[], ModelBuilder<M, P, O> cvModelBuilders[]) {
//never skipping CV main scores: we managed to reach the last step, and it should not be an expensive one, so let's offer this model
M mainModel = _result.get();
// Compute and put the cross-validation metrics into the main model
Log.info("Computing "+N+"-fold cross-validation metrics.");
Key<M>[] cvModKeys = new Key[N];
mainModel._output._cross_validation_models = _parms._keep_cross_validation_models ? cvModKeys : null;
Key<Frame>[] predKeys = new Key[N];
mainModel._output._cross_validation_predictions = _parms._keep_cross_validation_predictions ? predKeys : null;
for (int i = 0; i < N; ++i) {
cvModKeys[i] = cvModelBuilders[i]._result;
predKeys[i] = Key.make(cvModelBuilders[i].getPredictionKey());
}
cv_makeAggregateModelMetrics(mbs);
Frame holdoutPreds = null;
if (_parms._keep_cross_validation_predictions || (nclasses()==2 /*GainsLift needs this*/ || mainModel.isDistributionHuber())) {
Key<Frame> cvhp = Key.make("cv_holdout_prediction_" + mainModel._key.toString());
if (_parms._keep_cross_validation_predictions) //only show the user if they asked for it
mainModel._output._cross_validation_holdout_predictions_frame_id = cvhp;
holdoutPreds = combineHoldoutPredictions(predKeys, cvhp);
}
if (_parms._keep_cross_validation_fold_assignment) {
mainModel._output._cross_validation_fold_assignment_frame_id = Key.make("cv_fold_assignment_" + _result.toString());
Frame xvalidation_fold_assignment_frame = mainModel._output._cross_validation_fold_assignment_frame_id.get();
if (xvalidation_fold_assignment_frame != null)
Scope.untrack(xvalidation_fold_assignment_frame.keysList());
}
// Keep or toss predictions
if (_parms._keep_cross_validation_predictions) {
for (Key<Frame> k : predKeys) {
Frame fr = DKV.getGet(k);
if (fr != null) Scope.untrack(fr);
}
} else {
int count = Model.deleteAll(predKeys);
Log.info(count+" CV predictions were removed");
}
mainModel._output._cross_validation_metrics = mbs[0].makeModelMetrics(mainModel, _parms.train(), null, holdoutPreds);
if (holdoutPreds != null) {
if (_parms._keep_cross_validation_predictions) Scope.untrack(holdoutPreds);
else holdoutPreds.remove();
}
mainModel._output._cross_validation_metrics._description = N + "-fold cross-validation on training data (Metrics computed for combined holdout predictions)";
Log.info(mainModel._output._cross_validation_metrics.toString());
mainModel._output._cross_validation_metrics_summary = makeCrossValidationSummaryTable(cvModKeys);
// Put cross-validation scoring history to the main model
if (mainModel._output._scoring_history != null) { // check if scoring history is supported (e.g., NaiveBayes doesn't)
mainModel._output._cv_scoring_history = new TwoDimTable[cvModKeys.length];
for (int i = 0; i < cvModKeys.length; i++) {
TwoDimTable sh = cvModKeys[i].get()._output._scoring_history;
String[] rowHeaders = sh.getRowHeaders();
String[] colTypes = sh.getColTypes();
int tableSize = rowHeaders.length;
int colSize = colTypes.length;
TwoDimTable copiedScoringHistory = new TwoDimTable(
sh.getTableHeader(),
sh.getTableDescription(),
sh.getRowHeaders(),
sh.getColHeaders(),
sh.getColTypes(),
sh.getColFormats(),
sh.getColHeaderForRowHeaders());
for (int rowIndex = 0; rowIndex < tableSize; rowIndex++) {
for (int colIndex = 0; colIndex < colSize; colIndex++) {
copiedScoringHistory.set(rowIndex, colIndex,sh.get(rowIndex, colIndex));
}
}
mainModel._output._cv_scoring_history[i] = copiedScoringHistory;
}
}
if (!_parms._keep_cross_validation_models) {
int count = Model.deleteAll(cvModKeys);
Log.info(count+" CV models were removed");
}
mainModel._output._total_run_time = _build_model_countdown.elapsedTime();
// Now, the main model is complete (has cv metrics)
DKV.put(mainModel);
}
public void cv_makeAggregateModelMetrics(ModelMetrics.MetricBuilder[] mbs){
for (int i = 1; i < mbs.length; ++i) {
mbs[0].reduceForCV(mbs[i]);
}
}
private String getPredictionKey() {
return "prediction_"+_result.toString();
}
/** Set max_runtime_secs for the main model.
 * Uses _main_model_time_budget_factor to determine if and how the time for the main model should be restricted.
 * In general, use 0 or a value > 1 to be reasonably certain that the main model will have time to converge.
 * If _main_model_time_budget_factor < 0, its absolute value is applied to the remaining time to get max_runtime_secs.
 * If _main_model_time_budget_factor == 0, do not restrict the time for the main model.
 * If _main_model_time_budget_factor > 0, use a max_runtime_secs estimate based on nfolds (it doesn't depend on the remaining time).
 */
protected void setMaxRuntimeSecsForMainModel() {
if (_parms._max_runtime_secs == 0) return;
if (_parms._main_model_time_budget_factor < 0) {
// strict version that uses the actual remaining time or 1 sec in case we ran out of time
_parms._max_runtime_secs = Math.max(1, -_parms._main_model_time_budget_factor * remainingTimeSecs());
} else {
int nFolds = nFoldWork();
// looser version that uses max of remaining time and estimated remaining time based on number of folds
_parms._max_runtime_secs = Math.max(remainingTimeSecs(),
_parms._main_model_time_budget_factor * maxRuntimeSecsPerModel(nFolds, nModelsInParallel(nFolds)) * nFolds /((double) nFolds - 1));
}
}
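// Worked example (illustrative, assuming serial CV so nModelsInParallel(4) == 1):
// with _max_runtime_secs=100, 4 folds and _main_model_time_budget_factor=2, the
// per-CV-model budget is 100 / ceil(4/1 + 1) = 20s, so the main model is granted
// max(remainingTimeSecs(), 2 * 20 * 4/3 ~ 53.3) seconds.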
/** Override for model-specific checks / modifications to _parms for the main model during N-fold cross-validation.
* Also allow the cv models to be modified after all of them have been built.
* For example, the model might need to be told to not do early stopping. CV models might have their lambda value modified, etc.
*/
public void cv_computeAndSetOptimalParameters(ModelBuilder<M, P, O>[] cvModelBuilders) { }
/** @return Whether n-fold cross-validation is done */
public boolean nFoldCV() {
return _parms._fold_column != null || _parms._nfolds != 0;
}
/** List containing the categories of models that this builder can
* build. Each ModelBuilder must have one of these. */
abstract public ModelCategory[] can_build();
/** Visibility for this algo: is it always visible, is it beta (always
* visible but with a note in the UI) or is it experimental (hidden by
* default, visible in the UI if the user gives an "experimental" flag at
* startup); test-only builders are "experimental" */
public enum BuilderVisibility {
Experimental, Beta, Stable;
/**
* @param value A value to search for among {@link BuilderVisibility}'s values
* @return A member of {@link BuilderVisibility}, if found.
* @throws IllegalArgumentException If given value is not found among members of {@link BuilderVisibility} enum.
*/
public static BuilderVisibility valueOfIgnoreCase(final String value) throws IllegalArgumentException {
final BuilderVisibility[] values = values();
for (int i = 0; i < values.length; i++) {
if (values[i].name().equalsIgnoreCase(value)) return values[i];
}
throw new IllegalArgumentException(String.format("Algorithm availability level of '%s' is not known. Available levels: %s",
value, Arrays.toString(values)));
}
}
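// Illustrative usage: BuilderVisibility.valueOfIgnoreCase("beta") returns Beta;
// an unknown level such as "alpha" throws IllegalArgumentException listing the
// available levels.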
public BuilderVisibility builderVisibility() { return BuilderVisibility.Stable; }
/** Clear whatever was done by init() so it can be run again. */
public void clearInitState() {
clearValidationErrors();
}
protected boolean logMe() { return true; }
abstract public boolean isSupervised();
public boolean isResponseOptional() {
return false;
}
protected transient Vec _response; // Handy response column
protected transient Vec _vresponse; // Handy response column
protected transient Vec _offset; // Handy offset column
protected transient Vec _weights; // observation weight column
protected transient Vec _fold; // fold id column
protected transient Vec _treatment;
protected transient String[] _origNames; // only set if ModelBuilder.encodeFrameCategoricals() changes the training frame
protected transient String[][] _origDomains; // only set if ModelBuilder.encodeFrameCategoricals() changes the training frame
protected transient double[] _orig_projection_array; // only set if ModelBuilder.encodeFrameCategoricals() changes the training frame
public boolean hasOffsetCol(){ return _parms._offset_column != null;} // don't look at transient Vec
public boolean hasWeightCol(){ return _parms._weights_column != null;} // don't look at transient Vec
public boolean hasFoldCol() { return _parms._fold_column != null;} // don't look at transient Vec
public boolean hasTreatmentCol() { return _parms._treatment_column != null;}
public int numSpecialCols() { return (hasOffsetCol() ? 1 : 0) + (hasWeightCol() ? 1 : 0) + (hasFoldCol() ? 1 : 0) + (hasTreatmentCol() ? 1 : 0); }
public boolean havePojo() { return false; }
public boolean haveMojo() { return false; }
protected int _nclass; // Number of classes; 1 for regression; 2+ for classification
public int nclasses(){return _nclass;}
public final boolean isClassifier() { return nclasses() > 1; }
protected boolean validateStoppingMetric() {
return true;
}
protected boolean validateBinaryResponse() {
return true;
}
protected void checkEarlyStoppingReproducibility() {
// nothing by default -> meant to be overridden
}
/**
* Find and set response/weights/offset/fold and put them all in the end,
* @return number of non-feature vecs
*/
public int separateFeatureVecs() {
int res = 0;
if(_parms._weights_column != null) {
Vec w = _train.remove(_parms._weights_column);
if(w == null)
error("_weights_column","Weights column '" + _parms._weights_column + "' not found in the training frame");
else {
if(!w.isNumeric())
error("_weights_column","Invalid weights column '" + _parms._weights_column + "', weights must be numeric");
_weights = w;
if(w.naCnt() > 0)
error("_weights_columns","Weights cannot have missing values.");
if(w.min() < 0)
error("_weights_columns","Weights must be >= 0");
if(w.max() == 0)
error("_weights_columns","Max. weight must be > 0");
_train.add(_parms._weights_column, w);
++res;
}
} else {
_weights = null;
assert(!hasWeightCol());
}
if(_parms._offset_column != null) {
Vec o = _train.remove(_parms._offset_column);
if(o == null)
error("_offset_column","Offset column '" + _parms._offset_column + "' not found in the training frame");
else {
if(!o.isNumeric())
error("_offset_column","Invalid offset column '" + _parms._offset_column + "', offset must be numeric");
_offset = o;
if(o.naCnt() > 0)
error("_offset_column","Offset cannot have missing values.");
if(_weights == _offset)
error("_offset_column", "Offset must be different from weights");
_train.add(_parms._offset_column, o);
++res;
}
} else {
_offset = null;
assert(!hasOffsetCol());
}
if(_parms._fold_column != null) {
Vec f = _train.remove(_parms._fold_column);
if(f == null)
error("_fold_column","Fold column '" + _parms._fold_column + "' not found in the training frame");
else {
if(!f.isInt() && !f.isCategorical())
error("_fold_column","Invalid fold column '" + _parms._fold_column + "', fold must be integer or categorical");
if(f.min() < 0)
error("_fold_column","Invalid fold column '" + _parms._fold_column + "', fold must be non-negative");
if(f.isConst())
error("_fold_column","Invalid fold column '" + _parms._fold_column + "', fold cannot be constant");
_fold = f;
if(f.naCnt() > 0)
error("_fold_column","Fold cannot have missing values.");
if(_fold == _weights)
error("_fold_column", "Fold must be different from weights");
if(_fold == _offset)
error("_fold_column", "Fold must be different from offset");
_train.add(_parms._fold_column, f);
++res;
}
} else {
_fold = null;
assert(!hasFoldCol());
}
if(_parms._treatment_column != null) {
Vec u = _train.remove(_parms._treatment_column);
if(u == null)
error("_treatment_column","Treatment column '" + _parms._treatment_column + "' not found in the training frame");
else {
_treatment = u;
if(!u.isCategorical())
error("_treatment_column","Invalid treatment column '" + _parms._treatment_column + "', treatment column must be categorical");
_weights = u;
if(u.naCnt() > 0)
error("_treatment_column","Treatment column cannot have missing values.");
if(u.isCategorical() && u.domain().length != 2)
error("_treatment_column","Treatment column must contains only 0 or 1");
if(u.min() != 0)
error("_treatment_column","Min. treatment column value must be 0");
if(u.max() != 1)
error("_treatment_column","Max. treatment column value must be 1");
_train.add(_parms._treatment_column, u);
++res;
}
} else {
_treatment = null;
assert(!hasTreatmentCol());
}
if(isSupervised() && _parms._response_column != null) {
_response = _train.remove(_parms._response_column);
if (_response == null) {
if (isSupervised())
error("_response_column", "Response column '" + _parms._response_column + "' not found in the training frame");
} else {
if(_response == _offset)
error("_response_column", "Response column must be different from offset_column");
if(_response == _weights)
error("_response_column", "Response column must be different from weights_column");
if(_response == _fold)
error("_response_column", "Response column must be different from fold_column");
if(_response == _treatment)
error("_response_column", "Response column must be different from treatment_column");
_train.add(_parms._response_column, _response);
++res;
}
} else {
_response = null;
}
return res;
}
protected boolean ignoreStringColumns() {
return true;
}
protected boolean ignoreConstColumns() {
return _parms._ignore_const_cols;
}
protected boolean ignoreUuidColumns() {
return true;
}
/**
* Ignore constant columns, columns with all NAs, and string columns.
* @param npredictors number of special (non-feature) vecs, as returned by {@link #separateFeatureVecs()}
* @param expensive whether to run the expensive checks
*/
protected void ignoreBadColumns(int npredictors, boolean expensive){
// Drop all-constant and all-bad columns.
if(_parms._ignore_const_cols)
new FilterCols(npredictors) {
@Override protected boolean filter(Vec v, String name) {
boolean isBad = v.isBad();
boolean skipConst = ignoreConstColumns() && v.isConst(canLearnFromNAs()); // NAs can have information
boolean skipString = ignoreStringColumns() && v.isString();
boolean skipUuid = ignoreUuidColumns() && v.isUUID();
boolean skip = isBad || skipConst || skipString || skipUuid;
return skip;
}
}.doIt(_train,"Dropping bad and constant columns: ",expensive);
}
/**
* Indicates that the algorithm is able to natively learn from NA values, there is no need
* to eg. impute missing values or skip rows that have missing values.
* @return whether model builder natively supports NAs
*/
protected boolean canLearnFromNAs() {
return false;
}
/**
* Checks response variable attributes and adds errors if response variable is unusable.
*/
protected void checkResponseVariable() {
if (_response != null && (!_response.isNumeric() && !_response.isCategorical() && !_response.isTime())) {
error("_response_column", "Use numerical, categorical or time variable. Currently used " + _response.get_type_str());
}
}
/**
* Ignore invalid columns (columns that have a very high max value, which can cause issues in DHistogram).
* @param npredictors number of special (non-feature) vecs, as returned by {@link #separateFeatureVecs()}
* @param expensive whether to run the expensive checks
*/
protected void ignoreInvalidColumns(int npredictors, boolean expensive){}
/**
* Makes sure the final model will fit in memory.
*
* Note: This method should not be overridden (override checkMemoryFootPrint_impl instead). It is
* not declared 'final' so as not to break 3rd party implementations. It might be declared final in the future
* if necessary.
*/
protected void checkMemoryFootPrint() {
if (Boolean.getBoolean(H2O.OptArgs.SYSTEM_PROP_PREFIX + "debug.noMemoryCheck")) return; // skip check if disabled
checkMemoryFootPrint_impl();
}
/**
* Override this method to call error() if the model is expected to not fit in memory, and say why
*/
protected void checkMemoryFootPrint_impl() {}
transient double [] _distribution;
transient protected double [] _priorClassDist;
protected boolean computePriorClassDistribution(){
return isClassifier();
}
/** A list of field validation issues. */
public ValidationMessage[] _messages = new ValidationMessage[0];
private int _error_count = -1; // -1 ==> init not run yet, for those Jobs that have an init, like ModelBuilder. Note: this counts ONLY errors, not WARNs etc.
public int error_count() { assert _error_count >= 0 : "init() not run yet"; return _error_count; }
public void hide (String field_name, String message) { message(Log.TRACE, field_name, message); }
public void info (String field_name, String message) { message(Log.INFO , field_name, message); }
public void warn (String field_name, String message) { message(Log.WARN , field_name, message); }
public void error(String field_name, String message) { message(Log.ERRR , field_name, message); } // message() already increments _error_count for ERRR
public void clearValidationErrors() {
_messages = new ValidationMessage[0];
_error_count = 0;
}
public void message(byte log_level, String field_name, String message) {
_messages = Arrays.copyOf(_messages, _messages.length + 1);
_messages[_messages.length - 1] = new ValidationMessage(log_level, field_name, message);
if (log_level == Log.ERRR) _error_count++;
}
public ValidationMessage[] getMessagesByFieldAndSeverity(String fieldName, byte logLevel) {
return Arrays.stream(_messages)
.filter((msg) -> msg._field_name.equals(fieldName) && msg._log_level == logLevel)
.toArray(ValidationMessage[]::new);
}
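// Illustrative usage sketch (hypothetical field name): fetch all ERROR-level
// messages reported against the training frame parameter after init():
//
//   ValidationMessage[] errs = mb.getMessagesByFieldAndSeverity("_train", Log.ERRR);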
/** Get a string representation of only the ERROR ValidationMessages (e.g., to use in an exception throw). */
public String validationErrors() {
return validationMessage(Log.ERRR);
}
public String validationWarnings() {
return validationMessage(Log.WARN);
}
private String validationMessage(int level) {
StringBuilder sb = new StringBuilder();
for( ValidationMessage vm : _messages )
if( vm._log_level == level )
sb.append(vm.toString()).append("\n");
return sb.toString();
}
/** Can be an ERROR, meaning the parameters can't be used as-is,
* a TRACE, which means the specified field should be hidden given
* the values of other fields, or a WARN or INFO for informative
* messages to the user. */
public static final class ValidationMessage extends Iced {
final byte _log_level; // See util/Log.java for levels
final String _field_name;
final String _message;
public ValidationMessage(byte log_level, String field_name, String message) {
_log_level = log_level;
_field_name = field_name;
_message = message;
Log.log(log_level,field_name + ": " + message);
}
public int log_level() { return _log_level; }
public String field() { return _field_name; }
public String message() { return _message; }
@Override public String toString() { return Log.LVLS[_log_level] + " on field: " + _field_name + ": " + _message; }
}
// ==========================================================================
/** Initialize the ModelBuilder, validating all arguments and preparing the
* training frame. This call is expected to be overridden in the subclasses
* and each subclass will start with "super.init();". This call is made by
* the front-end whenever the GUI is clicked, and needs to be fast whenever
* {@code expensive} is false; it will be called once again at the start of
* model building {@link #trainModel()} with expensive set to true.
*<p>
* The incoming training frame (and validation frame) will have ignored
* columns dropped out, plus whatever work the parent init did.
*<p>
* NOTE: The front end initially calls this through the parameters validation
* endpoint with no training_frame, so each subclass's {@code init()} method
* has to work correctly with the training_frame missing.
*<p>
*/
public void init(boolean expensive) {
// Log parameters
if( expensive && logMe() ) {
Log.info("Building H2O " + this.getClass().getSimpleName() + " model with these parameters:");
Log.info(new String(_parms.writeJSON(new AutoBuffer()).buf()));
}
// NOTE: allow re-init:
clearInitState();
initWorkspace(expensive);
assert _parms != null; // Parms must already be set in
if( _parms._train == null ) {
if (expensive)
error("_train", "Missing training frame");
return;
} else {
// Catch the #1 reason why a junit test fails non-deterministically on a missing Vec: a forgotten DKV update after a Frame is modified locally
new ObjectConsistencyChecker(_parms._train).doAllNodes();
}
Frame tr = _train != null ? _train : _parms.train();
if (tr == null) {
error("_train", "Missing training frame: "+_parms._train);
return;
}
if (expensive) Scope.protect(_parms.train(), _parms.valid());
setTrain(new Frame(null /* not putting this into KV */, tr._names.clone(), tr.vecs().clone()));
if (expensive) {
_parms.getOrMakeRealSeed();
}
if (_parms._categorical_encoding.needsResponse() && !isSupervised()) {
error("_categorical_encoding", "Categorical encoding scheme cannot be "
+ _parms._categorical_encoding.toString() + " - no response column available.");
}
if (_parms._nfolds < 0 || _parms._nfolds == 1) {
error("_nfolds", "nfolds must be either 0 or >1.");
}
if (_parms._nfolds > 1 && _parms._nfolds > train().numRows()) {
error("_nfolds", "nfolds cannot be larger than the number of rows (" + train().numRows() + ").");
}
if (_parms._fold_column != null) {
hide("_fold_assignment", "Fold assignment is ignored when a fold column is specified.");
if (_parms._nfolds > 1) {
error("_nfolds", "nfolds cannot be specified at the same time as a fold column.");
} else {
hide("_nfolds", "nfolds is ignored when a fold column is specified.");
}
if (_parms._fold_assignment != Model.Parameters.FoldAssignmentScheme.AUTO && _parms._fold_assignment != null) {
error("_fold_assignment", "Fold assignment is not allowed in conjunction with a fold column.");
}
}
if (_parms._nfolds > 1) {
hide("_fold_column", "Fold column is ignored when nfolds > 1.");
}
// hide cross-validation parameters unless cross-val is enabled
if (!nFoldCV()) {
hide("_keep_cross_validation_models", "Only for cross-validation.");
hide("_keep_cross_validation_predictions", "Only for cross-validation.");
hide("_keep_cross_validation_fold_assignment", "Only for cross-validation.");
hide("_fold_assignment", "Only for cross-validation.");
if (_parms._fold_assignment != Model.Parameters.FoldAssignmentScheme.AUTO && _parms._fold_assignment != null) {
error("_fold_assignment", "Fold assignment is only allowed for cross-validation.");
}
}
if (_parms._distribution == DistributionFamily.modified_huber) {
error("_distribution", "Modified Huber distribution is not supported yet.");
}
if (_parms._distribution != DistributionFamily.tweedie) {
hide("_tweedie_power", "Only for Tweedie Distribution.");
}
if (_parms._tweedie_power <= 1 || _parms._tweedie_power >= 2) {
error("_tweedie_power", "Tweedie power must be between 1 and 2 (exclusive). " +
"For tweedie power = 1, use Poisson distribution. For tweedie power = 2, use Gamma distribution.");
}
// Drop explicitly dropped columns
if( _parms._ignored_columns != null ) {
Set<String> ignoreColumnSet = new HashSet<>(Arrays.asList(_parms._ignored_columns));
Collection<String> usedColumns = _parms.getUsedColumns(tr._names);
ignoreColumnSet.removeAll(usedColumns);
String[] actualIgnoredColumns = ignoreColumnSet.toArray(new String[0]);
_train.remove(actualIgnoredColumns);
if (expensive) Log.info("Dropping ignored columns: " + Arrays.toString(actualIgnoredColumns));
}
if(_parms._checkpoint != null) {
if(DKV.get(_parms._checkpoint) == null){
error("_checkpoint", "Checkpoint has to point to existing model!");
}
// Do not ignore bad columns, as only a portion of the training data might be supplied (e.g. continuing from a checkpoint)
final Model checkpointedModel = _parms._checkpoint.get();
final String[] warnings = checkpointedModel.adaptTestForTrain(_train, expensive, false);
for (final String warning : warnings){
warn("_checkpoint", warning);
}
separateFeatureVecs(); // set MB's fields (like response)
} else {
// Drop all non-numeric columns (e.g., String and UUID). No current algo
// can use them, and otherwise all algos will then be forced to remove
// them. Text algos (grep, word2vec) take raw text columns - which are
// numeric (arrays of bytes).
ignoreBadColumns(separateFeatureVecs(), expensive);
ignoreInvalidColumns(separateFeatureVecs(), expensive);
checkResponseVariable();
}
// Rebalance train and valid datasets (after invalid/bad columns are dropped)
if (expensive && error_count() == 0 && _parms._auto_rebalance) {
setTrain(rebalance(_train, false, _result + ".temporary.train"));
separateFeatureVecs(); // need to reset MB's fields (like response) after rebalancing
_valid = rebalance(_valid, false, _result + ".temporary.valid");
}
// Check that at least some columns are not-constant and not-all-NAs
if (_train.numCols() == 0)
error("_train", "There are no usable columns to generate model");
if(isSupervised()) {
if(_response != null) {
if (_parms._distribution != DistributionFamily.tweedie) {
hide("_tweedie_power", "Tweedie power is only used for Tweedie distribution.");
}
if (_parms._distribution != DistributionFamily.quantile) {
hide("_quantile_alpha", "Quantile (alpha) is only used for Quantile regression.");
}
if (expensive) checkDistributions();
_nclass = init_getNClass();
if (_parms._check_constant_response && _response.isConst()) {
error("_response", "Response cannot be constant.");
}
if (validateBinaryResponse() && _nclass == 1 && _response.isBinary(true)) {
warn("_response",
"We have detected that your response column has only 2 unique values (0/1). " +
"If you wish to train a binary model instead of a regression model, " +
"convert your target column to categorical before training."
);
}
}
if (! _parms._balance_classes)
hide("_max_after_balance_size", "Balance classes is false, hide max_after_balance_size");
else if (_parms._weights_column != null && _weights != null && !_weights.isBinary())
error("_balance_classes", "Balance classes and observation weights are not currently supported together.");
if( _parms._max_after_balance_size <= 0.0 )
error("_max_after_balance_size","Max size after balancing needs to be positive, suggest 1.0f");
if( _train != null ) {
if (_train.numCols() <= 1 && !getClass().toString().equals("class hex.gam.GAM")) // gam can have zero predictors
error("_train", "Training data must have at least 2 features (incl. response).");
if( null == _parms._response_column) {
error("_response_column", "Response column parameter not set.");
return;
}
if(_response != null && computePriorClassDistribution()) {
if (isClassifier() && isSupervised()) {
if(_parms.getDistributionFamily() == DistributionFamily.quasibinomial){
String[] quasiDomains = new VecUtils.CollectDoubleDomain(null,2).doAll(_response).stringDomain(_response.isInt());
MRUtils.ClassDistQuasibinomial cdmt =
_weights != null ? new MRUtils.ClassDistQuasibinomial(quasiDomains).doAll(_response, _weights) : new MRUtils.ClassDistQuasibinomial(quasiDomains).doAll(_response);
_distribution = cdmt.dist();
_priorClassDist = cdmt.relDist();
} else {
MRUtils.ClassDist cdmt =
_weights != null ? new MRUtils.ClassDist(nclasses()).doAll(_response, _weights) : new MRUtils.ClassDist(nclasses()).doAll(_response);
_distribution = cdmt.dist();
_priorClassDist = cdmt.relDist();
}
} else { // Regression; only 1 "class"
_distribution = new double[]{ (_weights != null ? _weights.mean() : 1.0) * train().numRows() };
_priorClassDist = new double[]{1.0f};
}
}
}
if( !isClassifier() ) {
hide("_balance_classes", "Balance classes is only applicable to classification problems.");
hide("_class_sampling_factors", "Class sampling factors is only applicable to classification problems.");
hide("_max_after_balance_size", "Max after balance size is only applicable to classification problems.");
hide("_max_confusion_matrix_size", "Max confusion matrix size is only applicable to classification problems.");
}
if (_nclass <= 2) {
hide("_max_confusion_matrix_size", "Only for multi-class classification problems.");
}
if( !_parms._balance_classes ) {
hide("_max_after_balance_size", "Only used with balanced classes");
hide("_class_sampling_factors", "Class sampling factors is only applicable if balancing classes.");
}
}
else {
if (!isResponseOptional()) {
hide("_response_column", "Ignored for unsupervised methods.");
_vresponse = null;
}
hide("_balance_classes", "Ignored for unsupervised methods.");
hide("_class_sampling_factors", "Ignored for unsupervised methods.");
hide("_max_after_balance_size", "Ignored for unsupervised methods.");
hide("_max_confusion_matrix_size", "Ignored for unsupervised methods.");
_response = null;
_nclass = 1;
}
if( _nclass > Model.Parameters.MAX_SUPPORTED_LEVELS ) {
error("_nclass", "Too many levels in response column: " + _nclass + ", maximum supported number of classes is " + Model.Parameters.MAX_SUPPORTED_LEVELS + ".");
}
// Build the validation set to be compatible with the training set.
// Toss out extra columns, complain about missing ones, remap categoricals
Frame va = _parms.valid(); // User-given validation set
if (va != null) {
if (isResponseOptional() && _parms._response_column != null && _response == null) {
_vresponse = va.vec(_parms._response_column);
}
_valid = adaptFrameToTrain(va, "Validation Frame", "_validation_frame", expensive, false); // see PUBDEV-7785
if (!isResponseOptional() || (_parms._response_column != null && _valid.find(_parms._response_column) >= 0)) {
_vresponse = _valid.vec(_parms._response_column);
}
} else {
_valid = null;
_vresponse = null;
}
if (expensive) {
boolean scopeTrack = !_parms._is_cv_model;
Frame newtrain = applyPreprocessors(_train, true, scopeTrack);
newtrain = encodeFrameCategoricals(newtrain, scopeTrack); //we could turn this into a preprocessor later
if (newtrain != _train) {
_origTrain = _train;
_origNames = _train.names();
_origDomains = _train.domains();
setTrain(newtrain);
separateFeatureVecs(); //fix up the pointers to the special vecs
} else {
_origTrain = null;
}
if (_valid != null) {
Frame newvalid = applyPreprocessors(_valid, false, scopeTrack);
newvalid = encodeFrameCategoricals(newvalid, scopeTrack /* for CV, need to score one more time in outer loop */);
setValid(newvalid);
}
boolean restructured = false;
Vec[] vecs = _train.vecs();
for (int j = 0; j < vecs.length; ++j) {
Vec v = vecs[j];
if (v == _response || v == _fold) continue;
if (v.isCategorical() && shouldReorder(v)) {
final int len = v.domain().length;
Log.info("Reordering categorical column " + _train.name(j) + " (" + len + " levels) based on the mean (weighted) response per level.");
VecUtils.MeanResponsePerLevelTask mrplt = new VecUtils.MeanResponsePerLevelTask(len).doAll(v,
_parms._weights_column != null ? _train.vec(_parms._weights_column) : v.makeCon(1.0),
_train.vec(_parms._response_column));
double[] meanWeightedResponse = mrplt.meanWeightedResponse;
// Option 1: Order the categorical column by response to make better splits
int[] idx=new int[len];
for (int i=0;i<len;++i) idx[i] = i;
ArrayUtils.sort(idx, meanWeightedResponse);
int[] invIdx=new int[len];
for (int i=0;i<len;++i) invIdx[idx[i]] = i;
Vec vNew = new VecUtils.ReorderTask(invIdx).doAll(1, Vec.T_NUM, new Frame(v)).outputFrame().anyVec();
String[] newDomain = new String[len];
for (int i = 0; i < len; ++i) newDomain[i] = v.domain()[idx[i]];
vNew.setDomain(newDomain);
vecs[j] = vNew;
restructured = true;
}
}
if (restructured)
_train.restructure(_train.names(), vecs);
}
boolean names_may_differ = _parms._categorical_encoding == Model.Parameters.CategoricalEncodingScheme.Binary;
boolean names_differ = _valid != null && ArrayUtils.difference(_train._names, _valid._names).length != 0;
assert (!expensive || names_may_differ || !names_differ);
if (names_differ && names_may_differ) {
for (String name : _train._names)
assert(ArrayUtils.contains(_valid._names, name)) : "Internal error during categorical encoding: training column " + name + " not in validation frame with columns " + Arrays.toString(_valid._names);
}
if (_parms._stopping_tolerance < 0) {
error("_stopping_tolerance", "Stopping tolerance must be >= 0.");
}
if (_parms._stopping_tolerance >= 1) {
error("_stopping_tolerance", "Stopping tolerance must be < 1.");
}
if (_parms._stopping_rounds == 0) {
if (_parms._stopping_metric != ScoreKeeper.StoppingMetric.AUTO)
warn("_stopping_metric", "Stopping metric is ignored for _stopping_rounds=0.");
if (_parms._stopping_tolerance != _parms.defaultStoppingTolerance())
warn("_stopping_tolerance", "Stopping tolerance is ignored for _stopping_rounds=0.");
} else if (_parms._stopping_rounds < 0) {
error("_stopping_rounds", "Stopping rounds must be >= 0.");
}
else { // early stopping is enabled
checkEarlyStoppingReproducibility();
if (validateStoppingMetric()) {
if (isClassifier()) {
if (_parms._stopping_metric == ScoreKeeper.StoppingMetric.deviance && !getClass().getSimpleName().contains("GLM")) {
error("_stopping_metric", "Stopping metric cannot be deviance for classification.");
}
} else {
if (_parms._stopping_metric.isClassificationOnly()) {
error("_stopping_metric", "Stopping metric cannot be " + _parms._stopping_metric + " for regression.");
}
}
}
}
if (_parms._stopping_metric == ScoreKeeper.StoppingMetric.custom || _parms._stopping_metric == ScoreKeeper.StoppingMetric.custom_increasing) {
checkCustomMetricForEarlyStopping();
}
if (_parms._max_runtime_secs < 0) {
error("_max_runtime_secs", "Max runtime (in seconds) must be greater than 0 (or 0 for unlimited).");
}
if (!StringUtils.isNullOrEmpty(_parms._export_checkpoints_dir)) {
if (!_parms._is_cv_model) {
// we do not need to check whether the checkpoint directory is writable for CV models; it was already checked on the main model
if (!H2O.getPM().isWritableDirectory(_parms._export_checkpoints_dir)) {
error("_export_checkpoints_dir", "Checkpoints directory path must point to a writable path.");
}
}
}
}
protected void checkCustomMetricForEarlyStopping() {
if (_parms._custom_metric_func == null) {
error("_custom_metric_func", "Custom metric function needs to be defined in order to use it for early stopping.");
}
}
/**
* Adapts a given frame to the same schema as the training frame.
* This includes encoding of categorical variables (if expensive is enabled).
*
* Note: This method should only be used during ModelBuilder initialization - it should be called in init(..) method.
*
* @param fr input frame
* @param frDesc frame description, eg. "Validation Frame" - will be shown in validation error messages
* @param field name of a field for validation errors
* @param expensive indicates full ("expensive") processing
* @return adapted frame
*/
public Frame init_adaptFrameToTrain(Frame fr, String frDesc, String field, boolean expensive) {
Frame adapted = adaptFrameToTrain(fr, frDesc, field, expensive, false);
if (expensive)
adapted = encodeFrameCategoricals(adapted, true);
return adapted;
}
private Frame adaptFrameToTrain(Frame fr, String frDesc, String field, boolean expensive, boolean catEncoded) {
if (fr.numRows()==0) error(field, frDesc + " must have > 0 rows.");
Frame adapted = new Frame(null /* not putting this into KV */, fr._names.clone(), fr.vecs().clone());
try {
String[] msgs = Model.adaptTestForTrain(
adapted,
null,
null,
_train._names,
_train.domains(),
_parms,
expensive,
true,
null,
getToEigenVec(),
_workspace.getToDelete(expensive),
catEncoded
);
Vec response = adapted.vec(_parms._response_column);
if (response == null && _parms._response_column != null && !isResponseOptional())
error(field, frDesc + " must have a response column '" + _parms._response_column + "'.");
if (expensive) {
for (String s : msgs) {
Log.info(s);
warn(field, s);
}
}
} catch (IllegalArgumentException iae) {
error(field, iae.getMessage());
}
return adapted;
}
private Frame applyPreprocessors(Frame fr, boolean isTraining, boolean scopeTrack) {
if (_parms._preprocessors == null) return fr;
for (Key<ModelPreprocessor> key : _parms._preprocessors) {
DKV.prefetch(key);
}
Frame result = fr;
Frame encoded;
for (Key<ModelPreprocessor> key : _parms._preprocessors) {
ModelPreprocessor preprocessor = key.get();
encoded = isTraining ? preprocessor.processTrain(result, _parms) : preprocessor.processValid(result, _parms);
if (encoded != result) trackEncoded(encoded, scopeTrack);
result = encoded;
}
if (!scopeTrack) Scope.untrack(result); // otherwise encoded frame is fully removed on CV model completion, raising exception when computing CV scores.
return result;
}
private Frame encodeFrameCategoricals(Frame fr, boolean scopeTrack) {
Frame encoded = FrameUtils.categoricalEncoder(
fr,
_parms.getNonPredictors(),
_parms._categorical_encoding,
getToEigenVec(),
_parms._max_categorical_levels
);
if (encoded != fr) trackEncoded(encoded, scopeTrack);
return encoded;
}
private void trackEncoded(Frame fr, boolean scopeTrack) {
assert fr._key != null;
if (scopeTrack)
Scope.track(fr);
else
_workspace.getToDelete(true).put(fr._key, Arrays.toString(Thread.currentThread().getStackTrace()));
}
/**
* Rebalance a frame for load balancing
* @param original_fr Input frame
* @param local Whether to only create enough chunks to max out all cores on one node only
WARNING: This behavior is not actually implemented by the methods defined in this class;
the default logic doesn't take this parameter into consideration.
* @param name Name of rebalanced frame
* @return Frame that has potentially more chunks
*/
protected Frame rebalance(final Frame original_fr, boolean local, final String name) {
if (original_fr == null) return null;
int chunks = desiredChunks(original_fr, local);
String dataset = name.substring(name.length()-5);
double rebalanceRatio = rebalanceRatio();
int nonEmptyChunks = original_fr.anyVec().nonEmptyChunks();
if (nonEmptyChunks >= chunks * rebalanceRatio) {
if (chunks>1)
Log.info(dataset + " dataset already contains " + nonEmptyChunks + " (non-empty)" +
" chunks. No need to rebalance. [desiredChunks=" + chunks + ", rebalanceRatio=" + rebalanceRatio + "]");
return original_fr;
}
raiseReproducibilityWarning(dataset, chunks);
Log.info("Rebalancing " + dataset + " dataset into " + chunks + " chunks.");
Key newKey = Key.makeUserHidden(name + ".chunks" + chunks);
RebalanceDataSet rb = new RebalanceDataSet(original_fr, newKey, chunks);
H2O.submitTask(rb).join();
Frame rebalanced_fr = DKV.get(newKey).get();
Scope.track(rebalanced_fr);
return rebalanced_fr;
}
protected void raiseReproducibilityWarning(String datasetName, int chunks) {
// for children
}
private double rebalanceRatio() {
String mode = H2O.getCloudSize() == 1 ? "single" : "multi";
String ratioStr = getSysProperty("rebalance.ratio." + mode, "1.0");
return Double.parseDouble(ratioStr);
}
/**
* Find the desired number of chunks. If the frame has fewer, it will be rebalanced.
* @return Lower bound on number of chunks after rebalancing.
*/
protected int desiredChunks(final Frame original_fr, boolean local) {
if (H2O.getCloudSize() > 1 && Boolean.parseBoolean(getSysProperty("rebalance.enableMulti", "false")))
return desiredChunkMulti(original_fr);
else
return desiredChunkSingle(original_fr);
}
// single-node version (original version)
private int desiredChunkSingle(final Frame originalFr) {
return Math.min((int) Math.ceil(originalFr.numRows() / 1e3), H2O.NUMCPUS);
}
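// Illustrative arithmetic (example numbers, not from the source): with 50,000 rows
// and H2O.NUMCPUS == 8 this returns min(ceil(50000 / 1e3), 8) = min(50, 8) = 8,
// i.e. roughly one chunk per core, while keeping at least ~1,000 rows per chunk.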
// multi-node version (experimental version)
private int desiredChunkMulti(final Frame fr) {
for (int type : fr.types()) {
if (type != Vec.T_NUM && type != Vec.T_CAT) {
Log.warn("Training frame contains columns non-numeric/categorical columns. Using old rebalance logic.");
return desiredChunkSingle(fr);
}
}
// estimate size of the Frame on disk as if it was represented in a binary _uncompressed_ format with no overhead
long itemCnt = 0;
for (Vec v : fr.vecs())
itemCnt += v.length() - v.naCnt();
final int itemSize = 4; // magic constant size of both Numbers and Categoricals
final long size = Math.max(itemCnt * itemSize, fr.byteSize());
final int desiredChunkSize = FileVec.calcOptimalChunkSize(size, fr.numCols(),
fr.numCols() * itemSize, H2O.NUMCPUS, H2O.getCloudSize(), false, true);
final int desiredChunks = (int) ((size / desiredChunkSize) + (size % desiredChunkSize > 0 ? 1 : 0));
Log.info("Calculated optimal number of chunks = " + desiredChunks);
return desiredChunks;
}
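// Illustrative arithmetic (hypothetical sizes): for an estimated uncompressed size of
// 1 GiB and a calculated optimal chunk size of 4 MiB, the result is
// 1073741824 / 4194304 = 256 chunks (one more if the division left a remainder).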
protected String getSysProperty(String name, String def) {
return System.getProperty(H2O.OptArgs.SYSTEM_PROP_PREFIX + name, def);
}
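// A minimal sketch of overriding the rebalance knobs above on the JVM command line,
// assuming H2O.OptArgs.SYSTEM_PROP_PREFIX resolves to "sys.ai.h2o." (an assumption here):
//   java -Dsys.ai.h2o.rebalance.enableMulti=true -Dsys.ai.h2o.rebalance.ratio.multi=1.5 ...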
protected int init_getNClass() {
int nclass = _response.isCategorical() ? _response.cardinality() : 1;
if (_parms._distribution == DistributionFamily.quasibinomial) {
nclass = 2;
}
return nclass;
}
public void checkDistributions() {
if (_parms._distribution == DistributionFamily.poisson) {
if (_response.min() < 0)
error("_response", "Response must be non-negative for Poisson distribution.");
} else if (_parms._distribution == DistributionFamily.gamma) {
if (_response.min() < 0)
error("_response", "Response must be non-negative for Gamma distribution.");
} else if (_parms._distribution == DistributionFamily.tweedie) {
if (_parms._tweedie_power >= 2 || _parms._tweedie_power <= 1)
error("_tweedie_power", "Tweedie power must be between 1 and 2.");
if (_response.min() < 0)
error("_response", "Response must be non-negative for Tweedie distribution.");
} else if (_parms._distribution == DistributionFamily.quantile) {
if (_parms._quantile_alpha > 1 || _parms._quantile_alpha < 0)
error("_quantile_alpha", "Quantile alpha must be between 0 and 1.");
} else if (_parms._distribution == DistributionFamily.huber) {
if (_parms._huber_alpha <0 || _parms._huber_alpha>1)
error("_huber_alpha", "Huber alpha must be between 0 and 1.");
}
}
transient public HashSet<String> _removedCols = new HashSet<>();
public abstract class FilterCols {
final int _specialVecs; // special vecs to skip at the end
public FilterCols(int n) {_specialVecs = n;}
abstract protected boolean filter(Vec v, String name);
public void doIt( Frame f, String msg, boolean expensive ) {
List<Integer> rmcolsList = new ArrayList<>();
for( int i = 0; i < f.vecs().length - _specialVecs; i++ )
if( filter(f.vec(i), f._names[i])) rmcolsList.add(i);
if( !rmcolsList.isEmpty() ) {
_removedCols = new HashSet<>(rmcolsList.size());
int[] rmcols = new int[rmcolsList.size()];
for (int i=0;i<rmcols.length;++i) {
rmcols[i]=rmcolsList.get(i);
_removedCols.add(f._names[rmcols[i]]);
}
f.remove(rmcols); //bulk-remove
msg += _removedCols.toString();
warn("_train", msg);
if (expensive) Log.info(msg);
}
}
}
//stitch together holdout predictions into one large Frame
Frame combineHoldoutPredictions(Key<Frame>[] predKeys, Key<Frame> key) {
int precision = _parms._keep_cross_validation_predictions_precision;
if (precision < 0) {
precision = isClassifier() ? 8 : 0;
}
return combineHoldoutPredictions(predKeys, key, precision);
}
static Frame combineHoldoutPredictions(Key<Frame>[] predKeys, Key<Frame> key, int precision) {
int N = predKeys.length;
Frame template = predKeys[0].get();
Vec[] vecs = new Vec[N*template.numCols()];
int idx=0;
for (Key<Frame> predKey : predKeys)
for (int j = 0; j < predKey.get().numCols(); ++j)
vecs[idx++] = predKey.get().vec(j);
HoldoutPredictionCombiner combiner = makeHoldoutPredictionCombiner(N, template.numCols(), precision);
return combiner.doAll(template.types(),new Frame(vecs))
.outputFrame(key, template.names(),template.domains());
}
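// Layout note (derived from the loop above): the folds' prediction columns are laid out
// back to back, e.g. N=2 folds with 3 columns each gives the Vec order
// [f0c0, f0c1, f0c2, f1c0, f1c1, f1c2], which is why the combiner's map() reads cs[f * _cols + c].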
static HoldoutPredictionCombiner makeHoldoutPredictionCombiner(int folds, int cols, int precision) {
if (precision < 0) {
throw new IllegalArgumentException("Precision cannot be negative, got precision = " + precision);
} else if (precision == 0) {
return new HoldoutPredictionCombiner(folds, cols);
} else {
return new ApproximatingHoldoutPredictionCombiner(folds, cols, precision);
}
}
// helper to combine multiple holdout prediction Vecs (each only has 1/N-th filled with non-zeros) into 1 Vec
static class HoldoutPredictionCombiner extends MRTask<HoldoutPredictionCombiner> {
int _folds, _cols;
public HoldoutPredictionCombiner(int folds, int cols) { _folds=folds; _cols=cols; }
@Override public final void map(Chunk[] cs, NewChunk[] nc) {
for (int c = 0; c < _cols; c++) {
double[] vals = new double[cs[0].len()];
ChunkVisitor.CombiningDoubleAryVisitor visitor = new ChunkVisitor.CombiningDoubleAryVisitor(vals);
for (int f = 0; f < _folds; f++) {
cs[f * _cols + c].processRows(visitor, 0, vals.length);
visitor.reset();
}
populateChunk(nc[c], vals);
}
}
protected void populateChunk(NewChunk nc, double[] vals) {
nc.setDoubles(vals);
}
}
static class ApproximatingHoldoutPredictionCombiner extends HoldoutPredictionCombiner {
private final int _precision;
public ApproximatingHoldoutPredictionCombiner(int folds, int cols, int precision) {
super(folds, cols);
_precision = precision;
}
@Override
protected void populateChunk(NewChunk nc, double[] vals) {
final long scale = PrettyPrint.pow10i(_precision);
for (double val : vals) {
if (Double.isNaN(val))
nc.addNA();
else {
long approx = Math.round(val * scale);
nc.addNum(approx, -_precision);
}
}
}
}
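// Worked example of the approximation above (illustrative values): with _precision = 3,
// scale = 10^3, so a prediction 0.1234567 becomes Math.round(0.1234567 * 1000) = 123,
// stored via addNum(123, -3) as 0.123, i.e. a more compactly compressible representation.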
private TwoDimTable makeCrossValidationSummaryTable(Key[] cvmodels) {
if (cvmodels == null || cvmodels.length == 0) return null;
int N = cvmodels.length;
int extra_length=2; //mean/sigma/cv1/cv2/.../cvN
String[] colTypes = new String[N+extra_length];
Arrays.fill(colTypes, "float");
String[] colFormats = new String[N+extra_length];
Arrays.fill(colFormats, "%f");
String[] colNames = new String[N+extra_length];
colNames[0] = "mean";
colNames[1] = "sd";
for (int i=0;i<N;++i)
colNames[i+extra_length] = "cv_" + (i+1) + "_valid";
Set<String> excluded = new HashSet<>();
excluded.add("total_rows");
excluded.add("makeSchema");
excluded.add("hr");
excluded.add("frame");
excluded.add("model");
excluded.add("remove");
excluded.add("cm");
excluded.add("auc_obj");
excluded.add("aucpr");
if (null == _parms._custom_metric_func) { // hide custom metrics when not available
excluded.add("custom");
excluded.add("custom_increasing");
}
List<Method> methods = new ArrayList<>();
{
Model m = DKV.getGet(cvmodels[0]);
ModelMetrics mm = m._output._validation_metrics;
if (mm != null) {
for (Method meth : mm.getClass().getMethods()) {
if (excluded.contains(meth.getName())) continue;
try {
double c = (double) meth.invoke(mm);
methods.add(meth);
} catch (Exception ignored) {}
}
ConfusionMatrix cm = mm.cm();
if (cm != null) {
for (Method meth : cm.getClass().getMethods()) {
if (excluded.contains(meth.getName())) continue;
try {
double c = (double) meth.invoke(cm);
methods.add(meth);
} catch (Exception ignored) {}
}
}
}
}
// make unique, and sort alphabetically
Set<String> rowNames=new TreeSet<>();
for (Method m : methods) rowNames.add(m.getName());
List<Method> meths = new ArrayList<>();
OUTER:
for (String n : rowNames)
for (Method m : methods)
if (m.getName().equals(n)) { //find the first method that has that name
meths.add(m);
continue OUTER;
}
int numMetrics = rowNames.size();
TwoDimTable table = new TwoDimTable("Cross-Validation Metrics Summary",
null,
rowNames.toArray(new String[0]), colNames, colTypes, colFormats, "");
MathUtils.BasicStats stats = new MathUtils.BasicStats(numMetrics);
double[][] vals = new double[N][numMetrics];
int i = 0;
for (Key<Model> km : cvmodels) {
Model m = DKV.getGet(km);
if (m==null) continue;
ModelMetrics mm = m._output._validation_metrics;
int j=0;
for (Method meth : meths) {
if (excluded.contains(meth.getName())) continue;
try {
double val = (double) meth.invoke(mm);
vals[i][j] = val;
table.set(j++, i+extra_length, (float)val);
} catch (Throwable e) { }
if (mm.cm()==null) continue;
try {
double val = (double) meth.invoke(mm.cm());
vals[i][j] = val;
table.set(j++, i+extra_length, (float)val);
} catch (Throwable e) { }
}
i++;
}
MathUtils.SimpleStats simpleStats = new MathUtils.SimpleStats(numMetrics);
for (i=0;i<N;++i)
simpleStats.add(vals[i],1);
for (i=0;i<numMetrics;++i) {
table.set(i, 0, (float)simpleStats.mean()[i]);
table.set(i, 1, (float)simpleStats.sigma()[i]);
}
Log.info(table);
return table;
}
/**
* Overridable Model Builder name used in generated code, in case the name of the ModelBuilder class is not suitable.
*
* @return Name of the builder to be used in generated code
*/
public String getName() {
return getClass().getSimpleName().toLowerCase();
}
private void cleanUp() {
_workspace.cleanUp();
}
@SuppressWarnings("WeakerAccess") // optionally allow users create workspace directly (instead of relying on init)
protected final void initWorkspace(boolean expensive) {
if (expensive)
_workspace = new Workspace(true);
}
static class Workspace {
private final IcedHashMap<Key,String> _toDelete;
private Workspace(boolean expensive) {
_toDelete = expensive ? new IcedHashMap<>() : null;
}
IcedHashMap<Key, String> getToDelete(boolean expensive) {
if (! expensive)
return null; // incorrect usages during "inexpensive" initialization will fail
if (_toDelete == null) {
throw new IllegalStateException("ModelBuilder was not correctly initialized. " +
"Expensive phase requires field `_toDelete` to be non-null. " +
"Does your implementation of init method call super.init(true) or alternatively initWorkspace(true)?");
}
return _toDelete;
}
/** must be called before Scope.exit() */
void cleanUp() {
if (_toDelete == null) return;
// converting Workspace-tracked keys to Scope-tracked keys
// much safer than strictly removing everything, as frames like the training/validation frames are protected in Scope.
Key[] tracked = _toDelete.keySet().toArray(new Key[0]);
for (Key k: tracked) {
Value v = DKV.get(k);
if (v==null) continue;
if (v.isFrame()) Scope.track(v.get(Frame.class));
else if (v.isVec()) Scope.track(v.get(Vec.class));
else Scope.track_generic(v.get(Keyed.class));
}
}
}
public PojoWriter makePojoWriter(Model<?, ?, ?> genericModel, MojoModel mojoModel) {
throw new UnsupportedOperationException("MOJO Model for algorithm '" + mojoModel._algoName +
"' doesn't support conversion to POJO.");
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelBuilderHelper.java
|
package hex;
import water.H2O;
import water.ParallelizationTask;
public class ModelBuilderHelper {
/**
* A helper method facilitating parallel training of a collection of models.
*
* @param mbs an array of ModelBuilders ready to be trained (ideally with parameters already validated)
* @param parallelization level of parallelization - we will run up to #parallelization models concurrently
* @param <E> type of ModelBuilder
* @return finished ModelBuilders
*/
public static <E extends ModelBuilder<?, ?, ?>> E[] trainModelsParallel(E[] mbs, int parallelization) {
TrainModelTask[] tasks = new TrainModelTask[mbs.length];
for (int i = 0; i < mbs.length; i++) {
tasks[i] = new TrainModelTask(mbs[i]);
}
H2O.submitTask(new ParallelizationTask<>(tasks, parallelization, null)).join();
return mbs;
}
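// A minimal usage sketch (hypothetical builders, assuming their parameters were
// already validated):
//   ModelBuilder<?, ?, ?>[] builders = ...; // e.g. several GBM/GLM builders
//   ModelBuilderHelper.trainModelsParallel(builders, 2); // at most 2 train concurrently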
/**
* Simple wrapper around ModelBuilder#trainModel; we could alternatively get the H2OCompleter used
* by the model builder but then we would need to deal with managing Job's lifecycle.
*/
private static class TrainModelTask extends H2O.H2OCountedCompleter<TrainModelTask> {
private final ModelBuilder<?, ?, ?> _mb;
TrainModelTask(ModelBuilder<?, ?, ?> mb) {
_mb = mb;
}
@Override
public void compute2() {
_mb.trainModel().get();
tryComplete();
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelBuilderListener.java
|
package hex;
import water.Iced;
public abstract class ModelBuilderListener<D extends Iced> extends Iced<D> {
/**
* Callback for successfully finished model builds
*
* @param model Model built
*/
abstract void onModelSuccess(Model model);
/**
* Callback for failed model builds
*
* @param cause An instance of {@link Throwable} - cause of failure
* @param parameters An instance of Model.Parameters used in the attempt to build the model
*/
abstract void onModelFailure(Throwable cause, Model.Parameters parameters);
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelContainer.java
|
package hex;
import water.Key;
public interface ModelContainer<M extends Model> {
Key<M>[] getModelKeys();
M[] getModels();
int getModelCount();
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelExportOption.java
|
package hex;
import water.api.StreamWriteOption;
public enum ModelExportOption implements StreamWriteOption {
INCLUDE_CV_PREDICTIONS;
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetrics.java
|
package hex;
import water.*;
import water.exceptions.H2OIllegalArgumentException;
import water.exceptions.H2OKeyNotFoundArgumentException;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.*;
import java.lang.reflect.Method;
import java.util.*;
/** Container to hold the metric for a model as scored on a specific frame.
*
* The MetricBuilder class is used in a hot inner-loop of a Big Data pass, and
* when given a class-distribution, can be used to compute CM's, and AUC's "on
* the fly" during ModelBuilding - or after-the-fact with a Model and a new
* Frame to be scored.
*/
public class ModelMetrics extends Keyed<ModelMetrics> {
public String _description;
// Model specific information
private Key _modelKey;
private ModelCategory _model_category;
private long _model_checksum;
// Frame specific information
private Key _frameKey;
private long _frame_checksum; // when constant column is dropped, frame checksum changed. Need re-assign for GLRM.
public final long _scoring_time;
public final CustomMetric _custom_metric;
// Cached fields - cache them when needed
private transient Model _model;
private transient Frame _frame;
public final double _MSE; // Mean Squared Error (Every model is assumed to have this, otherwise leave at NaN)
public final long _nobs;
public ModelMetrics(Model model, Frame frame, long nobs, double MSE, String desc, CustomMetric customMetric) {
super(buildKey(model, frame));
withModelAndFrame(model, frame);
_description = desc;
_MSE = MSE;
_nobs = nobs;
_scoring_time = System.currentTimeMillis();
_custom_metric = customMetric;
}
private void setModelAndFrameFields(Model model, Frame frame) {
PojoUtils.setField(this, "_modelKey", model == null ? null : model._key);
PojoUtils.setField(this, "_frameKey", frame == null ? null : frame._key);
PojoUtils.setField(this, "_model_category", model == null ? null : model._output.getModelCategory());
PojoUtils.setField(this, "_model_checksum", model == null ? 0 : model.checksum());
try {
PojoUtils.setField(this, "_frame_checksum", frame.checksum());
}
catch (Throwable t) { }
}
public final void setModelKey(Key modelKey) {
_modelKey = modelKey;
}
public final ModelMetrics withModelAndFrame(Model model, Frame frame) {
_modelKey = model == null ? null : model._key;
_model_category = model == null ? null : model._output.getModelCategory();
_model_checksum = model == null ? 0 : model.checksum();
_frameKey = frame == null ? null : frame._key;
try { _frame_checksum = frame == null ? 0 : frame.checksum(); } catch (Throwable t) { }
_key = buildKey(model, frame);
return this;
}
public ModelMetrics withDescription(String desc) {
_description = desc;
return this;
}
/**
* Utility used by code which creates metrics on a different frame and model than
* the ones that we want the metrics object to be accessible for. An example is
* StackedEnsembleModel, which computes the metrics with a metalearner model.
* @param model
* @param frame
* @return
*/
public ModelMetrics deepCloneWithDifferentModelAndFrame(Model model, Frame frame) {
ModelMetrics m = this.clone();
m._key = buildKey(model, frame);
m.setModelAndFrameFields(model, frame);
return m;
}
public long residual_degrees_of_freedom(){throw new UnsupportedOperationException("residual degrees of freedom is not supported for this metric class");}
@Override public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("Model Metrics Type: " + this.getClass().getSimpleName().substring(12) + "\n");
sb.append(" Description: " + (_description == null ? "N/A" : _description) + "\n");
sb.append(" model id: " + _modelKey + "\n");
sb.append(" frame id: " + _frameKey + "\n");
return appendToStringMetrics(sb).toString();
}
protected StringBuilder appendToStringMetrics(StringBuilder sb) {
sb.append(" MSE: ").append((float)_MSE).append("\n");
sb.append(" RMSE: ").append((float)rmse()).append("\n");
return sb;
}
public final Model model() { return _model==null ? (_model=DKV.getGet(_modelKey)) : _model; }
public final Frame frame() { return _frame==null ? (_frame=DKV.getGet(_frameKey)) : _frame; }
public double custom() { return _custom_metric == null ? Double.NaN : _custom_metric.value; }
public double custom_increasing() { return _custom_metric == null ? Double.NaN : _custom_metric.value; } // same as custom but informs stopping criteria that higher is better
public double mse() { return _MSE; }
public double rmse() { return Math.sqrt(_MSE);}
public ConfusionMatrix cm() { return null; }
public float[] hr() { return null; }
public AUC2 auc_obj() { return null; }
public static ModelMetrics defaultModelMetrics(Model model) {
return model._output._cross_validation_metrics != null ? model._output._cross_validation_metrics
: model._output._validation_metrics != null ? model._output._validation_metrics
: model._output._training_metrics;
}
public static double getMetricFromModel(Key<Model> key, String criterion) {
Model model = DKV.getGet(key);
if (null == model) throw new H2OIllegalArgumentException("Cannot find model " + key);
return getMetricFromModelMetric(defaultModelMetrics(model), criterion);
}
public static double getMetricFromModelMetric(ModelMetrics mm, String criterion) {
if (null == criterion || criterion.equals("")) {
throw new H2OIllegalArgumentException("Need a valid criterion, but got '" + criterion + "'.");
}
Method method = null;
Object obj = null;
criterion = criterion.toLowerCase();
if ("custom".equals(criterion)){
return mm.custom();
}
// Constructing confusion matrix based on criterion
ConfusionMatrix cm;
if(mm instanceof ModelMetricsBinomial) {
AUC2.ThresholdCriterion criterionAsEnum = AUC2.ThresholdCriterion.fromString(criterion);
if(criterionAsEnum != null) {
ModelMetricsBinomial mmb = (ModelMetricsBinomial) mm;
cm = mmb.cm(criterionAsEnum);
}
else
cm = mm.cm();
}
else
cm = mm.cm();
// Getting (by reflection) method that corresponds to a given criterion
try {
method = mm.getClass().getMethod(criterion);
obj = mm;
}
catch (Exception e) {
// fall through
}
if (null == method && null != cm) {
try {
method = cm.getClass().getMethod(criterion);
obj = cm;
}
catch (Exception e) {
// fall through
}
}
if (null == method)
throw new H2OIllegalArgumentException("Failed to find ModelMetrics for criterion: " + criterion + " for model_id: " + mm._modelKey);
try {
return (double) method.invoke(obj);
} catch (Exception e) {
Log.err(e);
throw new H2OIllegalArgumentException(
"Failed to get metric: " + criterion + " from ModelMetrics object: " + mm,
"Failed to get metric: " + criterion + " from ModelMetrics object: " + mm + ", criterion: " + method + ", exception: " + e.getMessage()
);
}
}
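// A minimal usage sketch (hypothetical key): "auc" is resolved by reflection against the
// model's default metrics first, then against its confusion matrix:
//   Key<Model> key = ...;
//   double auc = ModelMetrics.getMetricFromModel(key, "auc");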
private static class MetricsComparator implements Comparator<Key<Model>> {
String _sort_by = null;
boolean decreasing = false;
public MetricsComparator(String sort_by, boolean decreasing) {
this._sort_by = sort_by;
this.decreasing = decreasing;
}
public int compare(Key<Model> key1, Key<Model> key2) {
double c1 = getMetricFromModel(key1, _sort_by);
double c2 = getMetricFromModel(key2, _sort_by);
return decreasing ? Double.compare(c2, c1) : Double.compare(c1, c2);
}
}
private static class MetricsComparatorForFrame implements Comparator<Key<Model>> {
String _sort_by = null;
boolean decreasing = false;
Frame frame = null;
IcedHashMap<Key<Model>, ModelMetrics> cachedMetrics = new IcedHashMap<>();
public MetricsComparatorForFrame(Frame frame, String sort_by, boolean decreasing) {
this._sort_by = sort_by;
this.decreasing = decreasing;
this.frame = frame;
}
private final ModelMetrics findMetricsForModel(Key<Model> modelKey) {
ModelMetrics mm = cachedMetrics.get(modelKey);
if (null != mm) {
return mm;
}
Model m = modelKey.get();
if (null == m) {
Log.warn("Tried to compare metrics for a model which was not found in the DKV: " + modelKey);
throw new H2OKeyNotFoundArgumentException(modelKey.toString());
}
Model model = modelKey.get();
mm = ModelMetrics.getFromDKV(model, this.frame);
if (null == mm) {
// call score() and immediately delete the resulting frame to avoid leaks
model.score(this.frame).delete();
mm = ModelMetrics.getFromDKV(model, this.frame);
if (null == mm) {
Log.warn("Tried to compare metrics for a model/frame combination which was not found in the DKV: (" + modelKey + ", " + frame._key.toString() + ")");
throw new H2OKeyNotFoundArgumentException(modelKey.toString());
}
}
cachedMetrics.put(modelKey, mm);
return mm;
}
public int compare(Key<Model> key1, Key<Model> key2) {
ModelMetrics mm1 = findMetricsForModel(key1);
ModelMetrics mm2 = findMetricsForModel(key2);
double c1 = getMetricFromModelMetric(mm1, _sort_by);
double c2 = getMetricFromModelMetric(mm2, _sort_by);
return decreasing ? Double.compare(c2, c1) : Double.compare(c1, c2);
}
}
//
public static Set<String> getAllowedMetrics(Key<Model> key) {
Set<String> res = new HashSet<>();
Model model = DKV.getGet(key);
if (null == model) throw new H2OIllegalArgumentException("Cannot find model " + key);
ModelMetrics m = defaultModelMetrics(model);
ConfusionMatrix cm = m == null ? null : m.cm(); // defaultModelMetrics may return null for a model without metrics
Set<String> excluded = new HashSet<>();
excluded.add("makeSchema");
excluded.add("hr");
excluded.add("cm");
excluded.add("auc_obj");
excluded.add("remove");
excluded.add("nobs");
if (m!=null) {
if (null == m._custom_metric) { // hide custom metrics when not available
excluded.add("custom");
excluded.add("custom_increasing");
}
for (Method meth : m.getClass().getMethods()) {
if (excluded.contains(meth.getName())) continue;
try {
double c = (double) meth.invoke(m);
res.add(meth.getName().toLowerCase());
} catch (Exception e) {
// fall through
}
}
}
if (cm!=null) {
for (Method meth : cm.getClass().getMethods()) {
if (excluded.contains(meth.getName())) continue;
try {
double c = (double) meth.invoke(cm);
res.add(meth.getName().toLowerCase());
} catch (Exception e) {
// fall through
}
}
}
return res;
}
/**
* Return a new list of models sorted on their xval, validation or training metrics, by the named criterion.
* The criterion (metric) can be such things as "auc", "mse", "hr", "err", "err_count",
* "accuracy", "specificity", "recall", "precision", "mcc", "max_per_class_error", "f1", "f2", "f0point5", ...
* @param sort_by criterion by which we should sort
* @param decreasing sort by decreasing metrics or not
* @param modelKeys keys of models to sort
* @return keys of the models, sorted by the criterion
*/
public static List<Key<Model>> sortModelsByMetric(String sort_by, boolean decreasing, List<Key<Model>>modelKeys) {
List<Key<Model>> sorted = new ArrayList<>();
sorted.addAll(modelKeys);
Comparator<Key<Model>> c = new MetricsComparator(sort_by, decreasing);
Collections.sort(sorted, c);
return sorted;
}
/**
* Return a new list of models sorted on metrics computed on the given frame, by the named criterion.
* The criterion (metric) can be such things as "auc", "mse", "hr", "err", "err_count",
* "accuracy", "specificity", "recall", "precision", "mcc", "max_per_class_error", "f1", "f2", "f0point5", ...
* @param frame frame on which to compute the metrics; looked up in the DKV first to see if it was previously computed
* @param sort_by criterion by which we should sort
* @param decreasing sort by decreasing metrics or not
* @param modelKeys keys of models to sort
* @return keys of the models, sorted by the criterion
*/
public static List<Key<Model>> sortModelsByMetric(Frame frame, String sort_by, boolean decreasing, List<Key<Model>>modelKeys) {
List<Key<Model>> sorted = new ArrayList<>();
sorted.addAll(modelKeys);
Comparator<Key<Model>> c = new MetricsComparatorForFrame(frame, sort_by, decreasing);
Collections.sort(sorted, c);
return sorted;
}
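// A minimal usage sketch (hypothetical inputs): sort by decreasing AUC on the models'
// default metrics, then by increasing MSE computed on a specific frame:
//   List<Key<Model>> byAuc = ModelMetrics.sortModelsByMetric("auc", true, modelKeys);
//   List<Key<Model>> byMse = ModelMetrics.sortModelsByMetric(frame, "mse", false, modelKeys);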
public static TwoDimTable calcVarImp(VarImp vi) {
if (vi == null) return null;
return calcVarImp(vi._varimp, vi._names);
}
public static TwoDimTable calcVarImp(final float[] rel_imp, String[] coef_names) {
double[] dbl_rel_imp = ArrayUtils.toDouble(rel_imp);
return calcVarImp(dbl_rel_imp, coef_names);
}
public static TwoDimTable calcVarImp(final double[] rel_imp, String[] coef_names) {
return calcVarImp(rel_imp, coef_names, "Variable Importances", new String[]{"Relative Importance", "Scaled Importance", "Percentage"});
}
public static TwoDimTable calcVarImp(final double[] rel_imp, String[] coef_names, String table_header, String[] col_headers) {
if(rel_imp == null) return null;
if(coef_names == null) {
coef_names = new String[rel_imp.length];
for(int i = 0; i < coef_names.length; i++)
coef_names[i] = "C" + String.valueOf(i+1);
}
// Sort in descending order by relative importance
Integer[] sorted_idx = new Integer[rel_imp.length];
for(int i = 0; i < sorted_idx.length; i++) sorted_idx[i] = i;
Arrays.sort(sorted_idx, new Comparator<Integer>() {
public int compare(Integer idx1, Integer idx2) {
return Double.compare(-rel_imp[idx1], -rel_imp[idx2]);
}
});
double total = 0;
double max = rel_imp[sorted_idx[0]];
String[] sorted_names = new String[rel_imp.length];
double[][] sorted_imp = new double[rel_imp.length][3];
// First pass to sum up relative importance measures
int j = 0;
for(int i : sorted_idx) {
total += rel_imp[i];
sorted_names[j] = coef_names[i];
sorted_imp[j][0] = rel_imp[i]; // Relative importance
sorted_imp[j++][1] = rel_imp[i] / max; // Scaled importance
}
// Second pass to calculate percentages
j = 0;
for(int i : sorted_idx)
sorted_imp[j++][2] = rel_imp[i] / total; // Percentage
String [] col_types = new String[3];
String [] col_formats = new String[3];
Arrays.fill(col_types, "double");
Arrays.fill(col_formats, "%5f");
return new TwoDimTable(table_header, null, sorted_names, col_headers, col_types, col_formats, "Variable",
new String[rel_imp.length][], sorted_imp);
}
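// Worked example (illustrative numbers): rel_imp = {2, 1, 1} gives max = 2 and total = 4,
// so the table rows become (2, 1.0, 0.50), (1, 0.5, 0.25), (1, 0.5, 0.25) for the
// relative, scaled, and percentage importance columns respectively.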
public static Key<ModelMetrics> buildKey(Key model_key, long model_checksum, Key frame_key, long frame_checksum) {
return Key.make("modelmetrics_" + model_key + "@" + model_checksum + "_on_" + frame_key + "@" + frame_checksum);
}
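// Example of a resulting key string (hypothetical keys and checksums):
//   "modelmetrics_gbm_model_1@42_on_train.hex@1337"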
public static Key<ModelMetrics> buildKey(Model model, Frame frame) {
return frame==null || model == null ? null : buildKey(model._key, model.checksum(), frame._key, frame.checksum());
}
public boolean isForModel(Model m) { return _model_checksum == m.checksum(); }
public boolean isForFrame(Frame f) { return _frame_checksum == f.checksum(); }
public static ModelMetrics getFromDKV(Model model, Frame frame) {
return DKV.getGet(buildKey(model, frame));
}
@Override protected long checksum_impl() { return _frame_checksum * 13 + _model_checksum * 17; }
/** Class used to compute AUCs, CMs & HRs "on the fly" during other passes
* over Big Data. This class is intended to be embedded in other MRTask
* objects. The {@code perRow} method is called once-per-scored-row, and
* the {@code reduce} method called once per MRTask.reduce, and the {@code
* <init>} called once per MRTask.map.
*/
public static abstract class MetricBuilder<T extends MetricBuilder<T>> extends Iced<T> {
transient public double[] _work;
public double _sumsqe; // Sum-squared-error
public long _count;
public double _wcount;
public double _wY; // (Weighted) sum of the response
public double _wYY; // (Weighted) sum of the squared response
// Custom metric holder
public CustomMetric _customMetric = null;
public CMetricScoringTask _CMetricScoringTask = null;
public double weightedSigma() {
// double sampleCorrection = _count/(_count-1); //sample variance -> depends on the number of ACTUAL ROWS (not the weighted count)
double sampleCorrection = 1; //this will make the result (and R^2) invariant to globally scaling the weights
return _count <= 1 ? 0 : Math.sqrt(sampleCorrection*(_wYY/_wcount - (_wY*_wY)/(_wcount*_wcount)));
}
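// Worked example (unit weights, illustrative): responses {0, 1, 1} give _wY = 2,
// _wYY = 2, _wcount = 3, so weightedSigma() = sqrt(2/3 - 4/9) = sqrt(2/9) ~= 0.471.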
abstract public double[] perRow(double ds[], float yact[], Model m);
public double[] perRow(double ds[], float yact[], double weight, double offset, Model m) {
assert(weight == 1 && offset == 0);
return perRow(ds, yact, m);
}
public void reduce( T mb ) {
_sumsqe += mb._sumsqe;
_count += mb._count;
_wcount += mb._wcount;
_wY += mb._wY;
_wYY += mb._wYY;
}
// For Sparkling Water - Scala has trouble with Java generics here.
public void reduce(Object mb) {
reduce((T)mb);
}
public void reduceForCV(T mb){
if (null != _CMetricScoringTask) {
_CMetricScoringTask.reduceCustomMetric(mb._CMetricScoringTask);
_customMetric = _CMetricScoringTask.computeCustomMetric();
}
this.reduce(mb);
}
public void postGlobal() {
postGlobal(null);
}
public void postGlobal(CustomMetric customMetric) {
this._customMetric = customMetric;
}
/**
* Having computed a MetricBuilder, this method fills in a ModelMetrics
* @param m Model
* @param f Scored Frame
* @param adaptedFrame Adapted Frame
* @param preds Predictions of m on f (optional)
* @return Filled ModelMetrics object
*/
public abstract ModelMetrics makeModelMetrics(Model m, Frame f, Frame adaptedFrame, Frame preds);
/**
* Set value of custom metric.
* @param customMetric computed custom metric outside of this default builder
*/
public void setCustomMetric(CustomMetric customMetric) {
_customMetric = customMetric;
}
public Frame makePredictionCache(Model m, Vec response) {
return null;
}
public void cachePrediction(double[] cdist, Chunk[] chks, int row, int cacheChunkIdx, Model m) {
throw new UnsupportedOperationException("Should be overridden in implementation (together with makePredictionCache(..)).");
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsAutoEncoder.java
|
package hex;
import water.H2O;
import water.fvec.Frame;
public class ModelMetricsAutoEncoder extends ModelMetricsUnsupervised {
public ModelMetricsAutoEncoder(Model model, Frame frame, CustomMetric customMetric) {
super(model, frame, 0, Double.NaN, customMetric);
}
public ModelMetricsAutoEncoder(Model model, Frame frame, long nobs, double mse, CustomMetric customMetric) {
super(model, frame, nobs, mse, customMetric);
}
public static class MetricBuilderAutoEncoder extends MetricBuilderUnsupervised<MetricBuilderAutoEncoder> {
public MetricBuilderAutoEncoder(int dims) {
_work = new double[dims];
}
@Override public double[] perRow(double ds[], float yact[], Model m) {
throw H2O.unimpl();
}
// Having computed a MetricBuilder, this method fills in a ModelMetrics
public ModelMetrics makeModelMetrics(Model m, Frame f) {
return m.addModelMetrics(new ModelMetricsAutoEncoder(m, f, _customMetric));
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsBinomial.java
|
package hex;
import hex.genmodel.GenModel;
import hex.genmodel.utils.DistributionFamily;
import water.MRTask;
import water.Scope;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.C8DVolatileChunk;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.MathUtils;
import java.util.Arrays;
import java.util.Optional;
public class ModelMetricsBinomial extends ModelMetricsSupervised {
public final AUC2 _auc;
public final double _logloss;
public final double _loglikelihood;
public final double _aic;
public double _mean_per_class_error;
public final GainsLift _gainsLift;
public ModelMetricsBinomial(Model model, Frame frame, long nobs, double mse, String[] domain,
double sigma, AUC2 auc, double logloss, double loglikelihood, double aic, GainsLift gainsLift,
CustomMetric customMetric) {
super(model, frame, nobs, mse, domain, sigma, customMetric);
_auc = auc;
_logloss = logloss;
_loglikelihood = loglikelihood;
_aic = aic;
_gainsLift = gainsLift;
_mean_per_class_error = cm() == null ? Double.NaN : cm().mean_per_class_error();
}
public ModelMetricsBinomial(Model model, Frame frame, long nobs, double mse, String[] domain,
double sigma, AUC2 auc, double logloss, GainsLift gainsLift,
CustomMetric customMetric) {
this(model, frame, nobs, mse, domain, sigma, auc, logloss, Double.NaN, Double.NaN,
gainsLift, customMetric);
}
public static ModelMetricsBinomial getFromDKV(Model model, Frame frame) {
ModelMetrics mm = ModelMetrics.getFromDKV(model, frame);
if( !(mm instanceof ModelMetricsBinomial) )
throw new H2OIllegalArgumentException("Expected to find a Binomial ModelMetrics for model: " + model._key.toString() + " and frame: " + frame._key.toString(),
"Expected to find a ModelMetricsBinomial for model: " + model._key.toString() + " and frame: " + frame._key.toString() + " but found a: " + (mm == null ? null : mm.getClass()));
return (ModelMetricsBinomial) mm;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(super.toString());
if (_auc != null) {
sb.append(" AUC: " + (float)_auc._auc + "\n");
sb.append(" pr_auc: " + (float)_auc.pr_auc() + "\n");
}
sb.append(" logloss: " + (float)_logloss + "\n");
sb.append(" loglikelihood: " + (float)_loglikelihood + "\n");
sb.append(" AIC: " + (float)_aic + "\n");
sb.append(" mean_per_class_error: " + (float)_mean_per_class_error + "\n");
sb.append(" default threshold: " + (_auc == null ? 0.5 : (float)_auc.defaultThreshold()) + "\n");
if (cm() != null) sb.append(" CM: " + cm().toASCII());
if (_gainsLift != null) sb.append(_gainsLift);
return sb.toString();
}
public double logloss() { return _logloss; }
public double loglikelihood() { return _loglikelihood; }
public double aic() { return _aic; }
public double mean_per_class_error() { return _mean_per_class_error; }
@Override public AUC2 auc_obj() { return _auc; }
@Override public ConfusionMatrix cm() {
if( _auc == null ) return null;
double[][] cm = _auc.defaultCM();
return cm == null ? null : new ConfusionMatrix(cm, _domain);
}
public ConfusionMatrix cm(AUC2.ThresholdCriterion criterion) {
if( _auc == null ) return null;
double[][] cm = _auc.cmByCriterion(criterion);
return cm == null ? null : new ConfusionMatrix(cm, _domain);
}
public GainsLift gainsLift() { return _gainsLift; }
// expose simple metrics criteria for sorting
public double auc() { return auc_obj()._auc; }
public double pr_auc() { return auc_obj()._pr_auc; }
public double aucpr() { return auc_obj()._pr_auc; } // for compatibility with naming in ScoreKeeper.StoppingMetric annotation
public double lift_top_group() { return gainsLift().response_rates[0] / gainsLift().avg_response_rate; }
/**
* Build a Binomial ModelMetrics object from target-class probabilities, from actual labels, and a given domain for both labels (and domain[1] is the target class)
* @param targetClassProbs A Vec containing target class probabilities
* @param actualLabels A Vec containing the actual labels (can be for fewer labels than what's in domain, since the predictions can be for a small subset of the data)
* @return ModelMetrics object
*/
static public ModelMetricsBinomial make(Vec targetClassProbs, Vec actualLabels) {
return make(targetClassProbs,actualLabels,actualLabels.domain());
}
static public ModelMetricsBinomial make(Vec targetClassProbs, Vec actualLabels, String[] domain) {
return make(targetClassProbs, actualLabels, null, domain);
}
/**
* Build a Binomial ModelMetrics object from target-class probabilities, from actual labels, and a given domain for both labels (and domain[1] is the target class)
* @param targetClassProbs A Vec containing target class probabilities
* @param actualLabels A Vec containing the actual labels (can be for fewer labels than what's in domain, since the predictions can be for a small subset of the data)
* @param weights A Vec containing the observation weights.
* @param domain The two class labels (domain[0] is the non-target class, domain[1] is the target class, for which probabilities are given)
* @return ModelMetrics object
*/
static public ModelMetricsBinomial make(Vec targetClassProbs, Vec actualLabels, Vec weights, String[] domain) {
Scope.enter();
try {
Vec labels = actualLabels.toCategoricalVec();
if (labels == null || targetClassProbs == null)
throw new IllegalArgumentException("Missing actualLabels or predictedProbs for binomial metrics!");
if (domain == null) domain = labels.domain();
if (!targetClassProbs.isNumeric())
throw new IllegalArgumentException("Predicted probabilities must be numeric per-class probabilities for binomial metrics.");
if (targetClassProbs.min() < 0 || targetClassProbs.max() > 1)
throw new IllegalArgumentException("Predicted probabilities must be between 0 and 1 for binomial metrics.");
if (domain.length != 2)
throw new IllegalArgumentException("Domain must have 2 class labels, but is " + Arrays.toString(domain) + " for binomial metrics.");
labels = labels.adaptTo(domain);
if (labels.cardinality() != 2)
throw new IllegalArgumentException("Adapted domain must have 2 class labels, but is " + Arrays.toString(labels.domain()) + " for binomial metrics.");
Frame fr = new Frame(targetClassProbs);
fr.add("labels", labels);
if (weights != null) {
fr.add("weights", weights);
}
MetricBuilderBinomial mb = new BinomialMetrics(labels.domain()).doAll(fr)._mb;
labels.remove();
Frame preds = new Frame(targetClassProbs);
ModelMetricsBinomial mm = (ModelMetricsBinomial) mb.makeModelMetrics(null, fr, preds,
fr.vec("labels"), fr.vec("weights")); // use the Vecs from the frame (to make sure the ESPC is identical)
mm._description = "Computed on user-given predictions and labels, using F1-optimal threshold: " + mm.auc_obj().defaultThreshold() + ".";
return mm;
} finally {
Scope.exit();
}
}
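// A minimal usage sketch (hypothetical Vecs): compute binomial metrics directly from
// per-row target-class probabilities and actual labels, without a model:
//   Vec p1 = ...;     // P(class == domain[1]) per row, in [0, 1]
//   Vec actual = ...; // actual labels
//   ModelMetricsBinomial mm = ModelMetricsBinomial.make(p1, actual, new String[]{"no", "yes"});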
// helper to build a ModelMetricsBinomial for a N-class problem from a Frame that contains N per-class probability columns, and the actual label as the (N+1)-th column
private static class BinomialMetrics extends MRTask<BinomialMetrics> {
public BinomialMetrics(String[] domain) { this.domain = domain; }
String[] domain;
public MetricBuilderBinomial _mb;
@Override public void map(Chunk[] chks) {
_mb = new MetricBuilderBinomial(domain);
Chunk actuals = chks[1];
Chunk weights = chks.length == 3 ? chks[2] : null;
double[] ds = new double[3];
float[] acts = new float[1];
for (int i=0;i<chks[0]._len;++i) {
ds[2] = chks[0].atd(i); //class 1 probs (user-given)
ds[1] = 1-ds[2]; //class 0 probs
ds[0] = GenModel.getPrediction(ds, null, ds, Double.NaN/*ignored - uses AUC's default threshold*/); //label
acts[0] = (float) actuals.atd(i);
double weight = weights != null ? weights.atd(i) : 1;
_mb.perRow(ds, acts, weight, 0,null);
}
}
@Override public void reduce(BinomialMetrics mrt) { _mb.reduce(mrt._mb); }
}
public static class MetricBuilderBinomial<T extends MetricBuilderBinomial<T>> extends MetricBuilderSupervised<T> {
protected double _logloss;
protected double _loglikelihood;
protected AUC2.AUCBuilder _auc;
public MetricBuilderBinomial( String[] domain ) { super(2,domain); _auc = new AUC2.AUCBuilder(AUC2.NBINS); }
public double auc() {return new AUC2(_auc)._auc;}
public double pr_auc() { return new AUC2(_auc)._pr_auc;}
// Passed a double[] sized nclasses+1; ds[0] must be a prediction, ds[1...nclasses] must be a class distribution.
@Override public double[] perRow(double ds[], float[] yact, Model m) {return perRow(ds, yact, 1, 0, m);}
@Override public double[] perRow(double ds[], float[] yact, double w, double o, Model m) {
if( Float.isNaN(yact[0]) ) return ds; // No errors if actual is missing
if(ArrayUtils.hasNaNs(ds)) return ds; // No errors if prediction has missing values (can happen for GLM)
if(w == 0 || Double.isNaN(w)) return ds;
int iact = (int)yact[0];
boolean quasibinomial = (m!=null && m._parms._distribution == DistributionFamily.quasibinomial);
if (quasibinomial) {
if (yact[0] != 0)
iact = _domain[0].equals(String.valueOf((int) yact[0])) ? 0 : 1; // actual response index needed for confusion matrix, AUC, etc.
_wY += w * yact[0];
_wYY += w * yact[0] * yact[0];
// Compute error
double err = yact[0] - ds[iact + 1];
_sumsqe += w * err * err; // Squared error
// Compute negative loglikelihood loss, according to https://h2o-3-jira-github-migration.s3.amazonaws.com/TMLErare.pdf Appendix C
_logloss += - w * (yact[0] * Math.log(Math.max(1e-15, ds[2])) + (1-yact[0]) * Math.log(Math.max(1e-15, ds[1])));
} else {
if (iact != 0 && iact != 1) return ds; // The actual is effectively a NaN
_wY += w * iact;
_wYY += w * iact * iact;
// Compute error
double err = 1 - ds[iact + 1]; // Error: distance from predicting ycls as 1.0
_sumsqe += w * err * err; // Squared error
// Compute log loss
_logloss += w * MathUtils.logloss(err);
}
if(m != null && m.isGeneric()) { // only perform for generic model, will increase run time for training if performs
_loglikelihood += m.likelihood(w, yact[0], ds);
}
_count++;
_wcount += w;
assert !Double.isNaN(_sumsqe);
_auc.perRow(ds[2], iact, w);
return ds; // Flow coding
}
@Override public void reduce( T mb ) {
super.reduce(mb); // sumseq, count
_logloss += mb._logloss;
_loglikelihood += mb._loglikelihood;
_auc.reduce(mb._auc);
}
/**
* Create a ModelMetrics for a given model and frame
* @param m Model
* @param f Frame
* @param frameWithWeights Frame that contains extra columns such as weights
* @param preds Optional predictions (can be null), only used to compute the Gains/Lift table for binomial problems
* @return ModelMetricsBinomial
*/
@Override public ModelMetrics makeModelMetrics(final Model m, final Frame f,
Frame frameWithWeights, final Frame preds) {
Vec resp = null;
Vec weight = null;
if (_wcount > 0) {
if (preds!=null) {
if (frameWithWeights == null)
frameWithWeights = f;
resp = m==null && frameWithWeights.vec(f.numCols()-1).isCategorical() ?
frameWithWeights.vec(f.numCols()-1) //work-around for the case where we don't have a model, assume that the last column is the actual response
:
frameWithWeights.vec(m._parms._response_column);
if (resp != null) {
weight = m==null?null : frameWithWeights.vec(m._parms._weights_column);
}
}
}
return makeModelMetrics(m, f, preds, resp, weight);
}
private ModelMetrics makeModelMetrics(final Model m, final Frame f, final Frame preds,
final Vec resp, final Vec weight) {
GainsLift gl = null;
if (_wcount > 0) {
if (preds != null) {
if (resp != null) {
final Optional<GainsLift> optionalGainsLift = calculateGainsLift(m, preds, resp, weight);
if(optionalGainsLift.isPresent()){
gl = optionalGainsLift.get();
}
}
}
}
return makeModelMetrics(m, f, gl);
}
private ModelMetrics makeModelMetrics(Model m, Frame f, GainsLift gl) {
double mse = Double.NaN;
double loglikelihood = Double.NaN;
double aic = Double.NaN;
double logloss = Double.NaN;
double sigma = Double.NaN;
final AUC2 auc;
if (_wcount > 0) {
sigma = weightedSigma();
mse = _sumsqe / _wcount;
logloss = _logloss / _wcount;
if(m != null && m.getClass().toString().contains("Generic")) {
loglikelihood = -1 * _loglikelihood; // get likelihood from negative loglikelihood
aic = m.aic(loglikelihood);
}
auc = new AUC2(_auc);
} else {
auc = new AUC2();
}
ModelMetricsBinomial mm = new ModelMetricsBinomial(m, f, _count, mse, _domain, sigma, auc, logloss, loglikelihood, aic, gl, _customMetric);
if (m!=null) m.addModelMetrics(mm);
return mm;
}
/**
* @param m Model to calculate GL for
* @param preds Predictions
* @param resp Actual label
* @param weights Weights
* @return An Optional with GainsLift instance if GainsLift is not disabled (gainslift_bins = 0). Otherwise an
* empty Optional.
*/
private Optional<GainsLift> calculateGainsLift(Model m, Frame preds, Vec resp, Vec weights) {
final GainsLift gl = new GainsLift(preds.lastVec(), resp, weights);
if (m != null && m._parms._gainslift_bins < -1) {
throw new IllegalArgumentException("Number of G/L bins must be greater or equal than -1.");
} else if (m != null && (m._parms._gainslift_bins > 0 || m._parms._gainslift_bins == -1)) {
gl._groups = m._parms._gainslift_bins;
} else if (m != null && m._parms._gainslift_bins == 0){
return Optional.empty();
}
gl.exec(m != null ? m._output._job : null);
return Optional.of(gl);
}
@Override
public Frame makePredictionCache(Model m, Vec response) {
return new Frame(response.makeVolatileDoubles(1));
}
@Override
public void cachePrediction(double[] cdist, Chunk[] chks, int row, int cacheChunkIdx, Model m) {
assert cdist.length == 3;
((C8DVolatileChunk) chks[cacheChunkIdx]).getValues()[row] = cdist[cdist.length - 1];
}
public String toString(){
if(_wcount == 0) return "empty, no rows";
return "auc = " + MathUtils.roundToNDigits(auc(),3) + ", logloss = " + _logloss / _wcount;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsBinomialGLM.java
|
package hex;
import water.fvec.Frame;
public class ModelMetricsBinomialGLM extends ModelMetricsBinomial implements GLMMetrics {
public final long _nullDegreesOfFreedom;
public final long _residualDegreesOfFreedom;
public final double _resDev;
public final double _nullDev;
public final double _AIC;
public final double _loglikelihood;
public ModelMetricsBinomialGLM(Model model, Frame frame, long nobs, double mse, String[] domain,
double sigma, AUC2 auc, double logloss, double resDev, double nullDev,
double aic, long nDof, long rDof, GainsLift gainsLift,
CustomMetric customMetric, double loglikelihood) {
super(model, frame, nobs, mse, domain, sigma, auc, logloss, loglikelihood, aic, gainsLift, customMetric);
_resDev = resDev;
_nullDev = nullDev;
_AIC = aic;
_nullDegreesOfFreedom = nDof;
_residualDegreesOfFreedom = rDof;
_loglikelihood = loglikelihood;
}
@Override
public double residual_deviance() {return _resDev;}
@Override
public double null_deviance() {return _nullDev;}
@Override
public long residual_degrees_of_freedom(){
return _residualDegreesOfFreedom;
}
@Override
public long null_degrees_of_freedom() {return _nullDegreesOfFreedom;}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(super.toString());
sb.append(" null DOF: " + (float) _nullDegreesOfFreedom+ "\n");
sb.append(" residual DOF: " + (float) _residualDegreesOfFreedom+ "\n");
sb.append(" null deviance: " + (float)_nullDev + "\n");
sb.append(" residual deviance: " + (float)_resDev + "\n");
return sb.toString();
}
@Override public boolean equals(Object o) {
if(!(o instanceof ModelMetricsBinomialGLM))
return false;
ModelMetricsBinomialGLM mm = (ModelMetricsBinomialGLM)o;
return
_residualDegreesOfFreedom == mm._residualDegreesOfFreedom &&
_nullDegreesOfFreedom == mm._nullDegreesOfFreedom &&
Math.abs(_resDev - mm._resDev) < 1e-8;
}
public static class ModelMetricsMultinomialGLM extends ModelMetricsMultinomial implements GLMMetrics {
public final long _nullDegreesOfFreedom;
public final long _residualDegreesOfFreedom;
public final double _resDev;
public final double _nullDev;
public final double _AIC;
public final double _loglikelihood;
public ModelMetricsMultinomialGLM(Model model, Frame frame, long nobs, double mse, String[] domain,
double sigma, ConfusionMatrix cm, float [] hr, double logloss,
double resDev, double nullDev, double aic, long nDof, long rDof,
MultinomialAUC auc, CustomMetric customMetric, double loglikelihood) {
super(model, frame, nobs, mse, domain, sigma, cm, hr, logloss, loglikelihood, aic, auc, customMetric);
_resDev = resDev;
_nullDev = nullDev;
_AIC = aic;
_nullDegreesOfFreedom = nDof;
_residualDegreesOfFreedom = rDof;
_loglikelihood = loglikelihood;
}
@Override
public double residual_deviance() {return _resDev;}
@Override
public double null_deviance() {return _nullDev;}
@Override
public long residual_degrees_of_freedom(){
return _residualDegreesOfFreedom;
}
@Override
public long null_degrees_of_freedom() {return _nullDegreesOfFreedom;}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(super.toString());
sb.append(" null DOF: " + (float) _nullDegreesOfFreedom+ "\n");
sb.append(" residual DOF: " + (float) _residualDegreesOfFreedom+ "\n");
sb.append(" null deviance: " + (float)_nullDev + "\n");
sb.append(" residual deviance: " + (float)_resDev + "\n");
return sb.toString();
}
@Override public boolean equals(Object o) {
if(!(o instanceof ModelMetricsMultinomialGLM))
return false;
ModelMetricsMultinomialGLM mm = (ModelMetricsMultinomialGLM)o;
return
_residualDegreesOfFreedom == mm._residualDegreesOfFreedom &&
_nullDegreesOfFreedom == mm._nullDegreesOfFreedom &&
Math.abs(_resDev - mm._resDev) < 1e-8;
}
}
public static class ModelMetricsOrdinalGLM extends ModelMetricsOrdinal implements GLMMetrics {
public final long _nullDegreesOfFreedom;
public final long _residualDegreesOfFreedom;
public final double _resDev;
public final double _nullDev;
public final double _AIC;
public final double _loglikelihood;
public ModelMetricsOrdinalGLM(Model model, Frame frame, long nobs, double mse, String[] domain,
double sigma, ConfusionMatrix cm, float [] hr, double logloss,
double resDev, double nullDev, double aic, long nDof, long rDof,
CustomMetric customMetric, double loglikelihood) {
super(model, frame, nobs, mse, domain, sigma, cm, hr, logloss, customMetric);
_resDev = resDev;
_nullDev = nullDev;
_AIC = aic;
_nullDegreesOfFreedom = nDof;
_residualDegreesOfFreedom = rDof;
_loglikelihood = loglikelihood;
}
@Override
public double residual_deviance() {return _resDev;}
@Override
public double null_deviance() {return _nullDev;}
@Override
public long residual_degrees_of_freedom(){
return _residualDegreesOfFreedom;
}
@Override
public long null_degrees_of_freedom() {return _nullDegreesOfFreedom;}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(super.toString());
sb.append(" null DOF: " + (float) _nullDegreesOfFreedom+ "\n");
sb.append(" residual DOF: " + (float) _residualDegreesOfFreedom+ "\n");
sb.append(" null deviance: " + (float)_nullDev + "\n");
sb.append(" residual deviance: " + (float)_resDev + "\n");
return sb.toString();
}
@Override public boolean equals(Object o) {
if(!(o instanceof ModelMetricsOrdinalGLM))
return false;
ModelMetricsOrdinalGLM mm = (ModelMetricsOrdinalGLM)o;
return
_residualDegreesOfFreedom == mm._residualDegreesOfFreedom &&
_nullDegreesOfFreedom == mm._nullDegreesOfFreedom &&
Math.abs(_resDev - mm._resDev) < 1e-8;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsBinomialGLMGeneric.java
|
package hex;
import water.fvec.Frame;
import water.util.TwoDimTable;
public class ModelMetricsBinomialGLMGeneric extends ModelMetricsBinomialGeneric {
public final long _nullDegreesOfFreedom;
public final long _residualDegreesOfFreedom;
public final double _resDev;
public final double _nullDev;
public final double _AIC;
public final double _loglikelihood;
public final TwoDimTable _coefficients_table;
public ModelMetricsBinomialGLMGeneric(Model model, Frame frame, long nobs, double mse, String[] domain,
double sigma, AUC2 auc, double logloss, TwoDimTable gainsLiftTable,
CustomMetric customMetric, double mean_per_class_error, TwoDimTable thresholds_and_metric_scores,
TwoDimTable max_criteria_and_metric_scores, TwoDimTable confusion_matrix,
long nullDegreesOfFreedom, long residualDegreesOfFreedom, double resDev, double nullDev,
double aic, TwoDimTable coefficients_table, double r2, String description, double loglikelihood) {
super(model, frame, nobs, mse, domain, sigma, auc, logloss, gainsLiftTable, customMetric, mean_per_class_error,
thresholds_and_metric_scores, max_criteria_and_metric_scores, confusion_matrix, r2, description);
_nullDegreesOfFreedom = nullDegreesOfFreedom;
_residualDegreesOfFreedom = residualDegreesOfFreedom;
_resDev = resDev;
_nullDev = nullDev;
_AIC = aic;
_coefficients_table = coefficients_table;
_loglikelihood = loglikelihood;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsBinomialGeneric.java
|
package hex;
import water.fvec.Frame;
import water.util.TwoDimTable;
public class ModelMetricsBinomialGeneric extends ModelMetricsBinomial {
public final TwoDimTable _gainsLiftTable;
public final TwoDimTable _thresholds_and_metric_scores;
public final TwoDimTable _max_criteria_and_metric_scores;
public final TwoDimTable _confusion_matrix;
public final double _r2;
public ModelMetricsBinomialGeneric(Model model, Frame frame, long nobs, double mse, String[] domain,
double sigma, AUC2 auc, double logloss, TwoDimTable gainsLiftTable,
CustomMetric customMetric, double mean_per_class_error, TwoDimTable thresholds_and_metric_scores,
TwoDimTable max_criteria_and_metric_scores, TwoDimTable confusion_matrix, double r2,
final String description) {
super(model, frame, nobs, mse, domain, sigma, auc, logloss, null, customMetric);
_gainsLiftTable = gainsLiftTable;
_thresholds_and_metric_scores = thresholds_and_metric_scores;
_max_criteria_and_metric_scores = max_criteria_and_metric_scores;
_confusion_matrix = confusion_matrix;
_mean_per_class_error = mean_per_class_error;
_r2 = r2;
_description = description;
}
@Override
public double r2() {
return _r2;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsBinomialUplift.java
|
package hex;
import water.MRTask;
import water.Scope;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.*;
import water.util.Log;
import java.util.Arrays;
public class ModelMetricsBinomialUplift extends ModelMetricsSupervised {
public final AUUC _auuc;
public double _ate;
public double _att;
public double _atc;
public ModelMetricsBinomialUplift(Model model, Frame frame, long nobs, String[] domain,
double ate, double att, double atc, double sigma, AUUC auuc,
CustomMetric customMetric) {
super(model, frame, nobs, 0, domain, sigma, customMetric);
_ate = ate;
_att = att;
_atc = atc;
_auuc = auuc;
}
public static ModelMetricsBinomialUplift getFromDKV(Model model, Frame frame) {
ModelMetrics mm = ModelMetrics.getFromDKV(model, frame);
if( !(mm instanceof ModelMetricsBinomialUplift) )
throw new H2OIllegalArgumentException("Expected to find a Binomial ModelMetrics for model: " + model._key.toString() + " and frame: " + frame._key.toString(),
"Expected to find a ModelMetricsBinomial for model: " + model._key.toString() + " and frame: " + frame._key.toString() + " but found a: " + (mm == null ? null : mm.getClass()));
return (ModelMetricsBinomialUplift) mm;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(super.toString());
sb.append("ATE:" ).append((float) _ate).append("\n");
sb.append("ATT:" ).append((float) _att).append("\n");
sb.append("ATC:" ).append((float) _atc).append("\n");
if(_auuc != null){
sb.append("Default AUUC: ").append((float) _auuc.auuc()).append("\n");
sb.append("Qini AUUC: ").append((float) _auuc.auucByType(AUUC.AUUCType.qini)).append("\n");
sb.append("Lift AUUC: ").append((float) _auuc.auucByType(AUUC.AUUCType.lift)).append("\n");
sb.append("Gain AUUC: ").append((float) _auuc.auucByType(AUUC.AUUCType.gain)).append("\n");
sb.append("Normalized Qini AUUC: ").append((float) _auuc.auucNormalizedByType(AUUC.AUUCType.qini)).append("\n");
sb.append("Normalized Lift AUUC: ").append((float) _auuc.auucNormalizedByType(AUUC.AUUCType.lift)).append("\n");
sb.append("Normalized Gain AUUC: ").append((float) _auuc.auucNormalizedByType(AUUC.AUUCType.gain)).append("\n");
sb.append("Qini: ").append((float) _auuc.qini()).append("\n");
}
return sb.toString();
}
public double auuc() {return _auuc.auuc();}
public double qini(){return _auuc.qini();}
public double auucNormalized(){return _auuc.auucNormalized();}
public int nbins(){return _auuc._nBins;}
public double ate() {return _ate;}
public double att() {return _att;}
public double atc() {return _atc;}
@Override
protected StringBuilder appendToStringMetrics(StringBuilder sb) {
return sb;
}
/**
   * Build an uplift binomial ModelMetrics object from predicted probabilities and actual labels, with a given domain for both labels (domain[1] is the target class)
* @param predictedProbs A Vec containing predicted probabilities
* @param actualLabels A Vec containing the actual labels (can be for fewer labels than what's in domain, since the predictions can be for a small subset of the data)
* @param treatment A Vec containing the treatment values
* @param domain The two class labels (domain[0] is the non-target class, domain[1] is the target class, for which probabilities are given)
* @param auucType Type of default AUUC
   * @param auucNbins Number of bins used to calculate AUUC (-1 means the default of 1000; otherwise the number has to be greater than zero)
   * @param customAuucThresholds Custom thresholds used to calculate AUUC; if not specified, the thresholds are calculated from the prediction vector
* @return ModelMetrics object
*/
static public ModelMetricsBinomialUplift make(Vec predictedProbs, Vec actualLabels, Vec treatment, String[] domain, AUUC.AUUCType auucType, int auucNbins, double[] customAuucThresholds) {
Scope.enter();
try {
      if (actualLabels == null || predictedProbs == null || treatment == null)
        throw new IllegalArgumentException("Missing actualLabels or predicted probabilities or treatment values for uplift binomial metrics!");
      Vec labels = actualLabels.toCategoricalVec();
      if (domain == null) domain = labels.domain();
if (!predictedProbs.isNumeric())
throw new IllegalArgumentException("Predicted probabilities must be numeric per-class probabilities for uplift binomial metrics.");
if (domain.length != 2)
throw new IllegalArgumentException("Domain must have 2 class labels, but is " + Arrays.toString(domain) + " for uplift binomial metrics.");
labels = labels.adaptTo(domain);
if (labels.cardinality() != 2)
throw new IllegalArgumentException("Adapted domain must have 2 class labels, but is " + Arrays.toString(labels.domain()) + " for uplift binomial metrics.");
if (!treatment.isCategorical() || treatment.cardinality() != 2)
throw new IllegalArgumentException("Treatment values should be catecorical value and have 2 class " + Arrays.toString(treatment.domain()) + " for uplift binomial uplift metrics.");
long dataSize = treatment.length();
if (customAuucThresholds != null) {
if(customAuucThresholds.length == 0){
throw new IllegalArgumentException("Custom AUUC thresholds array should have size greater than 0.");
}
if (auucNbins != customAuucThresholds.length) {
Log.info("Custom AUUC thresholds are specified, so number of AUUC bins will equal to thresholds size.");
}
}
if (auucNbins < -1 || auucNbins == 0 || auucNbins > dataSize)
throw new IllegalArgumentException("The number of bins to calculate AUUC need to be -1 (default value) or higher than zero, but less than data size.");
if(auucNbins == -1)
auucNbins = AUUC.NBINS > dataSize ? (int) dataSize : AUUC.NBINS;
Frame fr = new Frame(predictedProbs);
fr.add("labels", labels);
fr.add("treatment", treatment);
MetricBuilderBinomialUplift mb;
if (customAuucThresholds == null) {
mb = new UpliftBinomialMetrics(labels.domain(), AUUC.calculateQuantileThresholds(auucNbins, predictedProbs)).doAll(fr)._mb;
} else {
mb = new UpliftBinomialMetrics(labels.domain(), customAuucThresholds).doAll(fr)._mb;
}
labels.remove();
ModelMetricsBinomialUplift mm = (ModelMetricsBinomialUplift) mb.makeModelMetrics(null, fr, auucType);
mm._description = "Computed on user-given predictions and labels.";
return mm;
} finally {
Scope.exit();
}
}
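  // A minimal usage sketch for make(...) above; `preds`, `labels` and `treat` are
  // assumed, illustrative Vec names, not part of this class:
  //
  //   ModelMetricsBinomialUplift mm = ModelMetricsBinomialUplift.make(
  //       preds, labels, treat, new String[]{"0", "1"},
  //       AUUC.AUUCType.qini, -1 /* default number of bins */, null /* derive thresholds */);
  //   double auuc = mm.auuc();
  //   double qini = mm.qini();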
// helper to build a ModelMetricsBinomialUplift from a Frame that contains prediction probability column and the actual label
private static class UpliftBinomialMetrics extends MRTask<UpliftBinomialMetrics> {
String[] domain;
double[] thresholds;
public MetricBuilderBinomialUplift _mb;
public UpliftBinomialMetrics(String[] domain, double[] thresholds) {
this.domain = domain;
this.thresholds = thresholds;
}
@Override public void map(Chunk[] chks) {
_mb = new MetricBuilderBinomialUplift(domain, thresholds);
Chunk uplift = chks[0];
Chunk actuals = chks[1];
Chunk treatment = chks[2];
double[] ds = new double[1];
float[] acts = new float[2];
for (int i=0; i<chks[0]._len;++i) {
ds[0] = uplift.atd(i);
acts[0] = (float) actuals.atd(i);
acts[1] = (float) treatment.atd(i);
_mb.perRow(ds, acts, 1, 0, null);
}
}
@Override public void reduce(UpliftBinomialMetrics mrt) { _mb.reduce(mrt._mb); }
}
public static class MetricBuilderBinomialUplift extends MetricBuilderSupervised<MetricBuilderBinomialUplift> {
protected AUUC.AUUCBuilder _auuc;
public double _sumTE;
public double _sumTETreatment;
public long _treatmentCount;
public MetricBuilderBinomialUplift( String[] domain, double[] thresholds) {
super(2,domain);
if(thresholds != null) {
_auuc = new AUUC.AUUCBuilder(thresholds);
}
}
@Override public double[] perRow(double[] ds, float[] yact, Model m) {
return perRow(ds, yact,1, 0, m);
}
@Override
public double[] perRow(double[] ds, float[] yact, double weight, double offset, Model m) {
assert yact.length == 2 : "Treatment must be included in `yact` when calculating AUUC";
      if(Float.isNaN(yact[0])) return ds; // No errors if actual is missing
if(weight == 0 || Double.isNaN(weight)) return ds;
int y = (int)yact[0];
if (y != 0 && y != 1) return ds; // The actual is effectively a NaN
_wY += weight * y;
_wYY += weight * y * y;
_count++;
_wcount += weight;
int treatmentGroup = (int)yact[1]; // treatment = 1, control = 0
double treatmentEffect = ds[0] * weight;
_sumTE += treatmentEffect; // result prediction
_sumTETreatment += treatmentGroup * treatmentEffect;
_treatmentCount += treatmentGroup * weight;
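      // _sumTE accumulates the predicted uplift over all rows and _sumTETreatment over
      // the treatment group only; makeModelMetrics later derives ATE = _sumTE/_wcount,
      // ATT = _sumTETreatment/_treatmentCount and ATC from their differences.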
if (_auuc != null) {
_auuc.perRow(treatmentEffect, weight, y, treatmentGroup);
}
return ds;
}
@Override public void reduce(MetricBuilderBinomialUplift mb ) {
super.reduce(mb);
if(_auuc != null) {
_auuc.reduce(mb._auuc);
}
_sumTE += mb._sumTE;
_sumTETreatment += mb._sumTETreatment;
_treatmentCount += mb._treatmentCount;
}
/**
* Create a ModelMetrics for a given model and frame
* @param m Model
* @param f Frame
* @param frameWithExtraColumns Frame that contains extra columns such as weights
     * @param preds Optional predictions (can be null), only used to compute the AUUC for uplift problems
* @return ModelMetricsBinomialUplift
*/
@Override public ModelMetrics makeModelMetrics(final Model m, final Frame f,
Frame frameWithExtraColumns, final Frame preds) {
Vec resp = null;
Vec treatment = null;
AUUC.AUUCType auucType = m==null ? AUUC.AUUCType.AUTO : m._parms._auuc_type;
if (preds!=null) {
if (frameWithExtraColumns == null)
frameWithExtraColumns = f;
        resp = (m == null && frameWithExtraColumns.vec(f.numCols()-1).isCategorical())
                ? frameWithExtraColumns.vec(f.numCols()-1) // work-around when we don't have a model: assume the last column is the actual response
                : frameWithExtraColumns.vec(m._parms._response_column);
if(m != null && m._parms._treatment_column != null){
treatment = frameWithExtraColumns.vec(m._parms._treatment_column);
}
}
int auucNbins = m==null || m._parms._auuc_nbins == -1?
AUUC.NBINS : m._parms._auuc_nbins;
return makeModelMetrics(m, f, preds, resp, treatment, auucType, auucNbins);
}
private ModelMetrics makeModelMetrics(final Model m, final Frame f, final Frame preds,
final Vec resp, final Vec treatment, AUUC.AUUCType auucType, int nbins) {
AUUC auuc = null;
if (preds != null) {
if (resp != null) {
auuc = new AUUC(preds.vec(0), resp, treatment, auucType, nbins);
}
}
return makeModelMetrics(m, f, auuc);
}
private ModelMetrics makeModelMetrics(final Model m, final Frame f, AUUC.AUUCType auucType) {
return makeModelMetrics(m, f, new AUUC(_auuc, auucType));
}
public ModelMetrics makeModelMetrics(Model m, Frame f, AUUC auuc) {
double sigma = Double.NaN;
double ate = Double.NaN;
double atc = Double.NaN;
double att = Double.NaN;
if(_wcount > 0) {
if (auuc == null) {
sigma = weightedSigma();
auuc = new AUUC(_auuc, m._parms._auuc_type);
}
ate = _sumTE/_wcount;
att = _sumTETreatment/_treatmentCount;
atc = (_sumTE-_sumTETreatment)/(_wcount-_treatmentCount);
} else {
auuc = new AUUC();
}
ModelMetricsBinomialUplift mm = new ModelMetricsBinomialUplift(m, f, _count, _domain, ate, att, atc, sigma, auuc, _customMetric);
if (m!=null) m.addModelMetrics(mm);
return mm;
}
@Override
public Frame makePredictionCache(Model m, Vec response) {
return new Frame(response.makeVolatileDoubles(1));
}
@Override
public void cachePrediction(double[] cdist, Chunk[] chks, int row, int cacheChunkIdx, Model m) {
assert cdist.length == 3;
((C8DVolatileChunk) chks[cacheChunkIdx]).getValues()[row] = cdist[0];
}
public String toString(){
return "";
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsBinomialUpliftGeneric.java
|
package hex;
import water.fvec.Frame;
import water.util.TwoDimTable;
public class ModelMetricsBinomialUpliftGeneric extends ModelMetricsBinomialUplift {
public final TwoDimTable _thresholds_and_metric_scores;
public final TwoDimTable _auuc_table;
public final TwoDimTable _aecu_table;
public ModelMetricsBinomialUpliftGeneric(Model model, Frame frame, long nobs, String[] domain, double ate, double att, double atc, double sigma, AUUC auuc, CustomMetric customMetric, TwoDimTable thresholds_and_metric_scores, TwoDimTable auuc_table, TwoDimTable aecu_table, final String description) {
super(model, frame, nobs, domain, ate, att, atc, sigma, auuc, customMetric);
_thresholds_and_metric_scores = thresholds_and_metric_scores;
_auuc_table = auuc_table;
_aecu_table = aecu_table;
_description = description;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsClustering.java
|
package hex;
import hex.ClusteringModel.ClusteringOutput;
import hex.ClusteringModel.ClusteringParameters;
import water.Key;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.Frame;
import water.util.ArrayUtils;
import water.util.TwoDimTable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class ModelMetricsClustering extends ModelMetricsUnsupervised {
public long[/*k*/] _size;
public double[/*k*/] _withinss;
public double _totss;
public double _tot_withinss;
public double _betweenss;
public double totss() { return _totss; }
public double tot_withinss() { return _tot_withinss; }
public double betweenss() { return _betweenss; }
public ModelMetricsClustering(Model model, Frame frame, CustomMetric customMetric) {
super(model, frame, 0, Double.NaN, customMetric);
_size = null;
_withinss = null;
_totss = _tot_withinss = _betweenss = Double.NaN;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(super.toString());
sb.append(" total sum of squares: " + (float)_totss + "\n");
sb.append(" total within sum of squares: " + (float)_tot_withinss + "\n");
sb.append(" total between sum of squares: " + (float)_betweenss + "\n");
if (_size != null) sb.append(" per cluster sizes: " + Arrays.toString(_size) + "\n");
if (_withinss != null) sb.append(" per cluster within sum of squares: " + Arrays.toString(_withinss) + "\n");
return sb.toString();
}
/**
* Populate TwoDimTable from members _size and _withinss
* @return TwoDimTable
*/
public TwoDimTable createCentroidStatsTable() {
if (_size == null || _withinss == null)
return null;
List<String> colHeaders = new ArrayList<>();
List<String> colTypes = new ArrayList<>();
List<String> colFormat = new ArrayList<>();
colHeaders.add("Centroid"); colTypes.add("long"); colFormat.add("%d");
colHeaders.add("Size"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Within Cluster Sum of Squares"); colTypes.add("double"); colFormat.add("%.5f");
final int K = _size.length;
assert(_withinss.length == K);
TwoDimTable table = new TwoDimTable(
"Centroid Statistics", null,
new String[K],
colHeaders.toArray(new String[0]),
colTypes.toArray(new String[0]),
colFormat.toArray(new String[0]),
"");
for (int k =0; k<K; ++k) {
int col = 0;
table.set(k, col++, k+1);
table.set(k, col++, _size[k]);
table.set(k, col, _withinss[k]);
}
return table;
}
public static class MetricBuilderClustering extends MetricBuilderUnsupervised<MetricBuilderClustering> {
public long[] _size; // Number of elements in cluster
public double[] _within_sumsqe; // Within-cluster sum of squared error
private double[/*features*/] _colSum; // Sum of each column
private double[/*features*/] _colSumSq; // Sum of squared values of each column
public MetricBuilderClustering(int ncol, int nclust) {
_work = new double[ncol];
_size = new long[nclust];
_within_sumsqe = new double[nclust];
Arrays.fill(_size, 0);
Arrays.fill(_within_sumsqe, 0);
_colSum = new double[ncol];
_colSumSq = new double[ncol];
Arrays.fill(_colSum, 0);
Arrays.fill(_colSumSq, 0);
}
// Compare row (dataRow) against centroid it was assigned to (preds[0])
@Override
public double[] perRow(double[] preds, float[] dataRow, Model m) {
assert m instanceof ClusteringModel;
assert !Double.isNaN(preds[0]);
ClusteringModel clm = (ClusteringModel) m;
boolean standardize = ((((ClusteringOutput) clm._output)._centers_std_raw) != null);
double[][] centers = standardize ? ((ClusteringOutput) clm._output)._centers_std_raw: ((ClusteringOutput) clm._output)._centers_raw;
int clus = (int)preds[0];
double [] colSum = new double[_colSum.length];
double [] colSumSq = new double[_colSumSq.length];
double sqr = hex.genmodel.GenModel.KMeans_distance(centers[clus], dataRow, ((ClusteringOutput) clm._output)._mode, colSum, colSumSq);
ArrayUtils.add(_colSum, colSum);
ArrayUtils.add(_colSumSq, colSumSq);
_count++;
_size[clus]++;
_sumsqe += sqr;
_within_sumsqe[clus] += sqr;
if (Double.isNaN(_sumsqe))
throw new H2OIllegalArgumentException("Sum of Squares is invalid (Double.NaN) - Check for missing values in the dataset.");
return preds; // Flow coding
}
@Override
public void reduce(MetricBuilderClustering mm) {
super.reduce(mm);
ArrayUtils.add(_size, mm._size);
ArrayUtils.add(_within_sumsqe, mm._within_sumsqe);
ArrayUtils.add(_colSum, mm._colSum);
ArrayUtils.add(_colSumSq, mm._colSumSq);
}
/**
* Reduce the Metric builder clustering for CV without cluster statistics.
*
* @param mm metric builder to be reduced
*/
public void reduceForCV(MetricBuilderClustering mm) {
super.reduce(mm);
ArrayUtils.add(_colSum, mm._colSum);
ArrayUtils.add(_colSumSq, mm._colSumSq);
}
@Override
public ModelMetrics makeModelMetrics(Model m, Frame f) {
assert m instanceof ClusteringModel;
ModelMetricsClustering mm = new ModelMetricsClustering(m, f, _customMetric);
setOverallStatToModelMetrics((ClusteringModel) m, f.numRows(), mm);
if(this._size != null && this._within_sumsqe != null) {
setCentroidsStatToModelMetrics(mm);
}
return m.addMetrics(mm);
}
private void setOverallStatToModelMetrics(ClusteringModel clm, long numRows, ModelMetricsClustering mm){
mm._tot_withinss = _sumsqe;
if (clm._parms._weights_column != null) numRows = _count;
// Sum-of-square distance from grand mean
if (((ClusteringParameters) clm._parms)._k == 1)
mm._totss = mm._tot_withinss;
else {
mm._totss = 0;
for (int i = 0; i < _colSum.length; i++) {
if (((ClusteringOutput) clm._output)._mode[i] == -1)
mm._totss += _colSumSq[i] - (_colSum[i] * _colSum[i]) / numRows;
else
mm._totss += _colSum[i]; // simply add x[i] != modes[i] for categoricals
}
}
mm._betweenss = mm._totss - mm._tot_withinss;
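      // ANOVA-style decomposition: _totss = _tot_withinss + _betweenss.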
}
private void setCentroidsStatToModelMetrics(ModelMetricsClustering mm){
mm._size = _size;
mm._withinss = new double[_size.length];
for (int i = 0; i < mm._withinss.length; i++)
mm._withinss[i] = _within_sumsqe[i];
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsMultinomial.java
|
package hex;
import hex.genmodel.GenModel;
import water.MRTask;
import water.Scope;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.Log;
import water.util.MathUtils;
import water.util.TwoDimTable;
import java.util.Arrays;
public class ModelMetricsMultinomial extends ModelMetricsSupervised {
public final float[] _hit_ratios; // Hit ratios
public final ConfusionMatrix _cm;
public final double _logloss;
public final double _loglikelihood;
public final double _aic;
public double _mean_per_class_error;
public MultinomialAUC _auc;
public ModelMetricsMultinomial(Model model, Frame frame, long nobs, double mse, String[] domain, double sigma,
ConfusionMatrix cm, float[] hr, double logloss, double loglikelihood, double aic,
MultinomialAUC auc, CustomMetric customMetric) {
super(model, frame, nobs, mse, domain, sigma, customMetric);
_cm = cm;
_hit_ratios = hr;
_logloss = logloss;
_loglikelihood = loglikelihood;
_aic = aic;
_mean_per_class_error = cm==null || cm.tooLarge() ? Double.NaN : cm.mean_per_class_error();
_auc = auc;
}
public ModelMetricsMultinomial(Model model, Frame frame, long nobs, double mse, String[] domain, double sigma,
ConfusionMatrix cm, float[] hr, double logloss, MultinomialAUC auc,
CustomMetric customMetric) {
this(model, frame, nobs, mse, domain, sigma, cm, hr, logloss, Double.NaN, Double.NaN, auc, customMetric);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(super.toString());
sb.append(" logloss: " + (float)_logloss + "\n");
sb.append(" loglikelihood: " + (float)_loglikelihood + "\n");
sb.append(" AIC: " + (float)_aic + "\n");
sb.append(" mean_per_class_error: " + (float)_mean_per_class_error + "\n");
sb.append(" hit ratios: " + Arrays.toString(_hit_ratios) + "\n");
sb.append(" AUC: "+auc()+ "\n");
sb.append(" pr_auc: "+ pr_auc()+ "\n");
if(_auc.getAucTable() == null){
sb.append(" AUC table: is not computed because it is disabled (model parameter 'auc_type' is set to AUTO or NONE) or due to domain size (maximum is 50 domains).\n");
sb.append(" pr_auc table: is not computed because it is disabled (model parameter 'auc_type' is set to AUTO or NONE) or due to domain size (maximum is 50 domains).\n");
} else if(_domain.length <= 20) {
sb.append(" AUC table: " + _auc.getAucTable()+"\n");
sb.append(" pr_auc table: " + _auc.getAucPrTable()+"\n");
} else {
sb.append(" AUC table: too large to print.\n");
sb.append(" pr_auc table: too large to print.\n");
}
if (cm() != null) {
if (cm().nclasses() <= 20)
sb.append(" CM: " + cm().toASCII());
else
sb.append(" CM: too large to print.\n");
}
return sb.toString();
}
public double logloss() { return _logloss; }
public double loglikelihood() { return _loglikelihood; }
public double aic() { return _aic; }
public double mean_per_class_error() { return _mean_per_class_error; }
@Override public ConfusionMatrix cm() { return _cm; }
@Override public float[] hr() { return _hit_ratios; }
public double auc() {
if(_auc != null) {
return _auc.auc();
} else {
return Double.NaN;
}
}
public double pr_auc() {
if(_auc != null) {
return _auc.pr_auc();
} else {
return Double.NaN;
}
}
public double aucpr(){
return pr_auc();
}
public static ModelMetricsMultinomial getFromDKV(Model model, Frame frame) {
ModelMetrics mm = ModelMetrics.getFromDKV(model, frame);
if (! (mm instanceof ModelMetricsMultinomial))
throw new H2OIllegalArgumentException("Expected to find a Multinomial ModelMetrics for model: " + model._key.toString() + " and frame: " + frame._key.toString(),
"Expected to find a ModelMetricsMultinomial for model: " + model._key.toString() + " and frame: " + frame._key.toString() + " but found a: " + mm.getClass());
return (ModelMetricsMultinomial) mm;
}
public static void updateHits(double w, int iact, double[] ds, double[] hits) {
updateHits(w, iact,ds,hits,null);
}
public static void updateHits(double w, int iact, double[] ds, double[] hits, double[] priorClassDistribution) {
    if (iact == ds[0]) { hits[0] += w; return; }
double before = ArrayUtils.sum(hits);
// Use getPrediction logic to see which top K labels we would have predicted
// Pick largest prob, assign label, then set prob to 0, find next-best label, etc.
double[] ds_copy = Arrays.copyOf(ds, ds.length); //don't modify original ds!
ds_copy[1+(int)ds[0]] = 0;
for (int k=1; k<hits.length; ++k) {
final int pred_labels = GenModel.getPrediction(ds_copy, priorClassDistribution, ds, 0.5 /*ignored*/); //use tie-breaking of getPrediction
ds_copy[1+pred_labels] = 0; //next iteration, we'll find the next-best label
if (pred_labels==iact) {
hits[k]+=w;
break;
}
}
// must find at least one hit if K == n_classes
if (hits.length == ds.length-1) {
double after = ArrayUtils.sum(hits);
if (after == before) hits[hits.length-1]+=w; //assume worst case
}
}
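  // Worked example for updateHits: ds = {1, 0.2, 0.5, 0.3} (predicted class 1),
  // iact = 2, w = 1. hits[0] is not incremented (1 != 2); after zeroing ds_copy[2]
  // the largest remaining probability belongs to class 2, so hits[1] += 1 and the
  // actual class counts as found within the top 2 predictions.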
public static TwoDimTable getHitRatioTable(float[] hits) {
String tableHeader = "Top-" + hits.length + " Hit Ratios";
String[] rowHeaders = new String[hits.length];
for (int k=0; k<hits.length; ++k)
rowHeaders[k] = Integer.toString(k+1);
String[] colHeaders = new String[]{"Hit Ratio"};
String[] colTypes = new String[]{"float"};
String[] colFormats = new String[]{"%f"};
String colHeaderForRowHeaders = "K";
TwoDimTable table = new TwoDimTable(tableHeader, null/*tableDescription*/, rowHeaders, colHeaders, colTypes, colFormats, colHeaderForRowHeaders);
for (int k=0; k<hits.length; ++k)
table.set(k, 0, hits[k]);
return table;
}
/**
* Build a Multinomial ModelMetrics object from per-class probabilities (in Frame preds - no labels!), from actual labels, and a given domain for all possible labels (maybe more than what's in labels)
* @param perClassProbs Frame containing predicted per-class probabilities (and no predicted labels)
* @param actualLabels A Vec containing the actual labels (can be for fewer labels than what's in domain, since the predictions can be for a small subset of the data)
   * @param aucType Type of multinomial AUC/AUCPR calculation; if NONE is set, multinomial AUC and AUCPR will not be computed
* @return ModelMetrics object
*/
static public ModelMetricsMultinomial make(Frame perClassProbs, Vec actualLabels, MultinomialAucType aucType) {
String[] names = perClassProbs.names();
String[] label = actualLabels.domain();
String[] union = ArrayUtils.union(names, label, true);
if (union.length == names.length + label.length)
throw new IllegalArgumentException("Column names of per-class-probabilities and categorical domain of actual labels have no common values!");
return make(perClassProbs, actualLabels, perClassProbs.names(), aucType);
}
static public ModelMetricsMultinomial make(Frame perClassProbs, Vec actualLabels, String[] domain, MultinomialAucType aucType) {
return make(perClassProbs, actualLabels, null, domain, aucType);
}
/**
* Build a Multinomial ModelMetrics object from per-class probabilities (in Frame preds - no labels!), from actual labels, and a given domain for all possible labels (maybe more than what's in labels)
* @param perClassProbs Frame containing predicted per-class probabilities (and no predicted labels)
* @param actualLabels A Vec containing the actual labels (can be for fewer labels than what's in domain, since the predictions can be for a small subset of the data)
* @param weights A Vec containing the observation weights.
* @param domain Ordered list of factor levels for which the probabilities are given (perClassProbs[i] are the per-observation probabilities for belonging to class domain[i])
   * @param aucType Type of multinomial AUC/AUCPR calculation; if NONE is set, multinomial AUC and AUCPR will not be computed
* @return ModelMetrics object
*/
static public ModelMetricsMultinomial make(Frame perClassProbs, Vec actualLabels, Vec weights, String[] domain, MultinomialAucType aucType) {
Scope.enter();
Vec labels = actualLabels.toCategoricalVec();
if (labels == null || perClassProbs == null)
throw new IllegalArgumentException("Missing actualLabels or predictedProbs for multinomial metrics!");
if (labels.length() != perClassProbs.numRows())
throw new IllegalArgumentException("Both arguments must have the same length for multinomial metrics (" + labels.length() + "!=" + perClassProbs.numRows() + ")!");
for (Vec p : perClassProbs.vecs()) {
if (!p.isNumeric())
throw new IllegalArgumentException("Predicted probabilities must be numeric per-class probabilities for multinomial metrics.");
if (p.min() < 0 || p.max() > 1)
throw new IllegalArgumentException("Predicted probabilities must be between 0 and 1 for multinomial metrics.");
}
if ((aucType.equals(MultinomialAucType.AUTO) || (aucType.equals(MultinomialAucType.NONE)))){
Log.info("Multinomial AUC and AUCPR will not be calculated in metric summary. The model parameter auc_type is set to \"NONE\" or \"AUTO\" or the maximum size of domain (50) was reached.");
}
int nclasses = perClassProbs.numCols();
if (domain.length!=nclasses)
throw new IllegalArgumentException("Given domain has " + domain.length + " classes, but predictions have " + nclasses + " columns (per-class probabilities) for multinomial metrics.");
labels = labels.adaptTo(domain);
Frame fr = new Frame(perClassProbs);
fr.add("labels", labels);
if (weights != null) {
fr.add("weights", weights);
}
MetricBuilderMultinomial mb = new MultinomialMetrics((labels.domain()), aucType).doAll(fr)._mb;
labels.remove();
ModelMetricsMultinomial mm = (ModelMetricsMultinomial)mb.makeModelMetrics(null, fr, null, null);
mm._description = "Computed on user-given predictions and labels.";
Scope.exit();
return mm;
}
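  // A minimal usage sketch for make(...) above; `probs` (one probability column per
  // class) and `labels` are assumed, illustrative names. With AUTO/NONE the AUC and
  // AUCPR tables are skipped, but logloss and the confusion matrix are still computed:
  //
  //   ModelMetricsMultinomial mm = ModelMetricsMultinomial.make(
  //       probs, labels, null /* no weights */, probs.names(), MultinomialAucType.NONE);
  //   System.out.println(mm.logloss() + " / " + mm.mean_per_class_error());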
// helper to build a ModelMetricsMultinomial for a N-class problem from a Frame that contains N per-class probability columns,
// and the actual label as the (N+1)-th column with optional weights column at the end of the Frame
private static class MultinomialMetrics extends MRTask<MultinomialMetrics> {
private final String[] _domain;
private final MultinomialAucType _aucType;
private MetricBuilderMultinomial _mb;
MultinomialMetrics(String[] domain, MultinomialAucType aucType) {
_domain = domain;
_aucType = aucType;
}
@Override public void map(Chunk[] chks) {
_mb = new MetricBuilderMultinomial(_domain.length, _domain, _aucType);
Chunk actuals = chks[_domain.length];
Chunk weights = chks.length == _domain.length + 2 ? chks[_domain.length + 1] : null;
double[] ds = new double[_domain.length + 1];
float[] acts = new float[1];
for (int i = 0; i < chks[0]._len; i++) {
for (int c = 0; c < ds.length - 1; c++)
ds[c + 1] = chks[c].atd(i); //per-class probs - user-given
ds[0] = GenModel.getPrediction(ds, null, ds, 0.5 /*ignored*/);
acts[0] = actuals.at8(i);
double w = weights != null ? weights.atd(i) : 1;
_mb.perRow(ds, acts, w, 0, null);
}
}
@Override public void reduce(MultinomialMetrics mrt) { _mb.reduce(mrt._mb); }
}
public static class MetricBuilderMultinomial<T extends MetricBuilderMultinomial<T>> extends MetricBuilderSupervised<T> {
double[/*nclasses*/][/*nclasses*/] _cm;
double[/*K*/] _hits; // the number of hits for hitratio, length: K
int _K; // TODO: Let user set K
double _logloss;
protected double _loglikelihood;
boolean _calculateAuc;
AUC2.AUCBuilder[/*nclasses*/][/*nclasses*/] _ovoAucs;
AUC2.AUCBuilder[/*nclasses*/] _ovrAucs;
MultinomialAucType _aucType;
public MetricBuilderMultinomial() {}
public MetricBuilderMultinomial( int nclasses, String[] domain, MultinomialAucType aucType) {
super(nclasses,domain);
int domainLength = domain.length;
_cm = domain.length > ConfusionMatrix.maxClasses() ? null : new double[domainLength][domainLength];
_K = Math.min(10,_nclasses);
_hits = new double[_K];
// matrix for pairwise AUCs
_aucType = aucType;
_calculateAuc = !_aucType.equals(MultinomialAucType.NONE) && !_aucType.equals(MultinomialAucType.AUTO) && domainLength <= MultinomialAUC.MAX_AUC_CLASSES;
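      // When AUC is enabled this allocates nclasses one-vs-rest (OvR) builders and
      // nclasses*(nclasses-1) one-vs-one (OvO) builders; the OvO diagonal stays null.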
if(_calculateAuc) {
_ovoAucs = new AUC2.AUCBuilder[domainLength][domainLength];
_ovrAucs = new AUC2.AUCBuilder[domainLength];
for (int i = 0; i < domainLength; i++) {
_ovrAucs[i] = new AUC2.AUCBuilder(AUC2.NBINS);
for (int j = 0; j < domainLength; j++) {
// diagonal is not used
if (i != j) {
_ovoAucs[i][j] = new AUC2.AUCBuilder(AUC2.NBINS);
}
}
}
}
}
public transient double [] _priorDistribution;
    // Passed a double[] sized nclasses+1; ds[0] must be the predicted class index and
    // ds[1...nclasses] the per-class probability distribution.
@Override public double[] perRow(double ds[], float[] yact, Model m) { return perRow(ds, yact, 1, 0, m); }
@Override public double[] perRow(double ds[], float[] yact, double w, double o, Model m) {
if (_cm == null) return ds;
      if( Float.isNaN(yact[0]) ) return ds; // No errors if actual is missing
if(ArrayUtils.hasNaNs(ds)) return ds;
if(w == 0 || Double.isNaN(w)) return ds;
final int iact = (int)yact[0];
_count++;
_wcount += w;
_wY += w*iact;
_wYY += w*iact*iact;
// Compute error
double err = iact+1 < ds.length ? 1-ds[iact+1] : 1; // Error: distance from predicting ycls as 1.0
_sumsqe += w*err*err; // Squared error
assert !Double.isNaN(_sumsqe);
assert iact < _cm.length : "iact = " + iact + "; _cm.length = " + _cm.length;
assert (int)ds[0] < _cm.length : "ds[0] = " + ds[0] + "; _cm.length = " + _cm.length;
// Plain Olde Confusion Matrix
_cm[iact][(int)ds[0]]++; // actual v. predicted
// Compute hit ratio
if( _K > 0 && iact < ds.length-1)
updateHits(w,iact,ds,_hits,m != null?m._output._priorClassDist:_priorDistribution);
// Compute log loss
_logloss += w*MathUtils.logloss(err);
// compute multinomial pairwise AUCs
if(_calculateAuc) {
calculateAucsPerRow(ds, iact, w);
}
if(m != null && m.isGeneric()) { // only perform for generic model, will increase run time for training if perform
_loglikelihood += m.likelihood(w, yact[0], ds);
}
return ds; // Flow coding
}
private void calculateAucsPerRow(double ds[], int iact, double w){
if (iact >= _domain.length) {
iact = _domain.length - 1;
}
for(int i = 0; i < _domain.length; i++){
// diagonal is empty
double p1 = 0, p2 = 0;
if(i < ds.length-1){
p1 = ds[i+1];
}
if(iact < ds.length-1){
p2 = ds[iact+1];
}
if(i != iact) {
_ovoAucs[iact][i].perRow(p1, 0, w);
_ovoAucs[i][iact].perRow(p2, 1, w);
_ovrAucs[i].perRow(p1, 0, w);
} else {
_ovrAucs[iact].perRow(p2, 1, w);
}
}
}
@Override public void reduce( T mb ) {
if (_cm == null) return;
super.reduce(mb);
assert mb._K == _K;
ArrayUtils.add(_cm, mb._cm);
_hits = ArrayUtils.add(_hits, mb._hits);
_logloss += mb._logloss;
_loglikelihood += mb._loglikelihood;
if(_calculateAuc) {
for (int i = 0; i < _ovoAucs.length; i++) {
_ovrAucs[i].reduce(mb._ovrAucs[i]);
for (int j = 0; j < _ovoAucs[0].length; j++) {
if (i != j) {
_ovoAucs[i][j].reduce(mb._ovoAucs[i][j]);
}
}
}
}
}
@Override public ModelMetrics makeModelMetrics(Model m, Frame f, Frame adaptedFrame, Frame preds) {
double mse = Double.NaN;
double logloss = Double.NaN;
double loglikelihood = Double.NaN;
double aic = Double.NaN;
float[] hr = new float[_K];
ConfusionMatrix cm = new ConfusionMatrix(_cm, _domain);
double sigma = weightedSigma();
if(_wcount > 0){
if (_hits != null) {
for (int i = 0; i < hr.length; i++) hr[i] = (float) (_hits[i] / _wcount);
for (int i = 1; i < hr.length; i++) hr[i] += hr[i - 1];
}
mse = _sumsqe / _wcount;
logloss = _logloss / _wcount;
if(m != null && m.getClass().toString().contains("Generic")) {
loglikelihood = -1 * _loglikelihood ; // get likelihood from negative loglikelihood
aic = m.aic(loglikelihood);
}
}
MultinomialAUC auc = new MultinomialAUC(_ovrAucs,_ovoAucs, _domain, _wcount == 0, _aucType);
ModelMetricsMultinomial mm = new ModelMetricsMultinomial(m, f, _count, mse, _domain, sigma, cm,
hr, logloss, loglikelihood, aic, auc, _customMetric);
if (m!=null) m.addModelMetrics(mm);
return mm;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsMultinomialGLMGeneric.java
|
package hex;
import water.fvec.Frame;
import water.util.TwoDimTable;
public class ModelMetricsMultinomialGLMGeneric extends ModelMetricsMultinomialGeneric {
public final long _nullDegreesOfFreedom;
public final long _residualDegreesOfFreedom;
public final double _resDev;
public final double _nullDev;
public final double _AIC;
public final double _loglikelihood;
public final TwoDimTable _coefficients_table;
public ModelMetricsMultinomialGLMGeneric(Model model, Frame frame, long nobs, double mse, String[] domain, double sigma,
TwoDimTable confusion_matrix, TwoDimTable hit_ratio_table, double logloss, CustomMetric customMetric,
double mean_per_class_error, long nullDegreesOfFreedom, long residualDegreesOfFreedom,
double resDev, double nullDev, TwoDimTable coefficients_table, double r2,
TwoDimTable multinomial_auc_table, TwoDimTable multinomial_aucpr_table, MultinomialAucType type,
final String description) {
this(model, frame, nobs, mse, domain, sigma, confusion_matrix, hit_ratio_table, logloss, customMetric,
mean_per_class_error, nullDegreesOfFreedom, residualDegreesOfFreedom, resDev, nullDev, Double.NaN,
coefficients_table, r2, multinomial_auc_table, multinomial_aucpr_table, type, description, Double.NaN);
}
public ModelMetricsMultinomialGLMGeneric(Model model, Frame frame, long nobs, double mse, String[] domain, double sigma,
TwoDimTable confusion_matrix, TwoDimTable hit_ratio_table, double logloss, CustomMetric customMetric,
double mean_per_class_error, long nullDegreesOfFreedom, long residualDegreesOfFreedom,
double resDev, double nullDev, double aic, TwoDimTable coefficients_table, double r2,
TwoDimTable multinomial_auc_table, TwoDimTable multinomial_aucpr_table, MultinomialAucType type,
final String description, double loglikelihood) {
super(model, frame, nobs, mse, domain, sigma, confusion_matrix, hit_ratio_table, logloss, customMetric, mean_per_class_error, r2,
multinomial_auc_table, multinomial_aucpr_table, type, description);
_nullDegreesOfFreedom = nullDegreesOfFreedom;
_residualDegreesOfFreedom = residualDegreesOfFreedom;
_resDev = resDev;
_nullDev = nullDev;
_AIC = aic;
_coefficients_table = coefficients_table;
_loglikelihood = loglikelihood;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsMultinomialGeneric.java
|
package hex;
import water.fvec.Frame;
import water.util.TwoDimTable;
public class ModelMetricsMultinomialGeneric extends ModelMetricsMultinomial {
public final TwoDimTable _hit_ratio_table;
public final TwoDimTable _confusion_matrix_table;
public final TwoDimTable _multinomial_auc_table;
public final TwoDimTable _multinomial_aucpr_table;
public final double _r2;
public ModelMetricsMultinomialGeneric(Model model, Frame frame, long nobs, double mse, String[] domain, double sigma,
TwoDimTable confusion_matrix, TwoDimTable hit_ratio_table, double logloss, CustomMetric customMetric,
double mean_per_class_error, double r2, TwoDimTable multinomial_auc_table, TwoDimTable multinomial_aucpr_table,
MultinomialAucType type, final String description) {
super(model, frame, nobs, mse, domain, sigma, null, null, logloss, null, customMetric);
_confusion_matrix_table = confusion_matrix;
_hit_ratio_table = hit_ratio_table;
_auc = new MultinomialAUC(multinomial_auc_table, multinomial_aucpr_table, domain, type);
_multinomial_auc_table = multinomial_auc_table;
_multinomial_aucpr_table = multinomial_aucpr_table;
_mean_per_class_error = mean_per_class_error;
_r2 = r2;
_description = description;
}
@Override
public double r2() {
return _r2;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsOrdinal.java
|
package hex;
import hex.genmodel.GenModel;
import water.MRTask;
import water.Scope;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.MathUtils;
import water.util.TwoDimTable;
import java.util.Arrays;
public class ModelMetricsOrdinal extends ModelMetricsSupervised {
public final float[] _hit_ratios; // Hit ratios
public final ConfusionMatrix _cm;
public final double _logloss;
public final double _mean_per_class_error;
public ModelMetricsOrdinal(Model model, Frame frame, long nobs, double mse, String[] domain, double sigma, ConfusionMatrix cm, float[] hr, double logloss, CustomMetric customMetric) {
super(model, frame, nobs, mse, domain, sigma, customMetric);
_cm = cm;
_hit_ratios = hr;
_logloss = logloss;
_mean_per_class_error = cm==null || cm.tooLarge() ? Double.NaN : cm.mean_per_class_error();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(super.toString());
sb.append(" logloss: " + (float)_logloss + "\n");
sb.append(" mean_per_class_error: " + (float)_mean_per_class_error + "\n");
sb.append(" hit ratios: " + Arrays.toString(_hit_ratios) + "\n");
if (cm() != null) {
if (cm().nclasses() <= 20)
sb.append(" CM: " + cm().toASCII());
else
sb.append(" CM: too large to print.\n");
}
return sb.toString();
}
public double logloss() { return _logloss; }
public double mean_per_class_error() { return _mean_per_class_error; }
@Override public ConfusionMatrix cm() { return _cm; }
@Override public float[] hr() { return _hit_ratios; }
public static ModelMetricsOrdinal getFromDKV(Model model, Frame frame) {
ModelMetrics mm = ModelMetrics.getFromDKV(model, frame);
if (! (mm instanceof ModelMetricsOrdinal))
throw new H2OIllegalArgumentException("Expected to find a Multinomial ModelMetrics for model: " + model._key.toString() + " and frame: " + frame._key.toString(),
"Expected to find a ModelMetricsMultinomial for model: " + model._key.toString() + " and frame: " + frame._key.toString() + " but found a: " + mm.getClass());
return (ModelMetricsOrdinal) mm;
}
public static void updateHits(double w, int iact, double[] ds, double[] hits) {
updateHits(w, iact,ds,hits,null);
}
public static void updateHits(double w, int iact, double[] ds, double[] hits, double[] priorClassDistribution) {
    if (iact == ds[0]) { hits[0] += w; return; }
double before = ArrayUtils.sum(hits);
// Use getPrediction logic to see which top K labels we would have predicted
// Pick largest prob, assign label, then set prob to 0, find next-best label, etc.
double[] ds_copy = Arrays.copyOf(ds, ds.length); //don't modify original ds!
ds_copy[1+(int)ds[0]] = 0;
for (int k=1; k<hits.length; ++k) {
final int pred_labels = GenModel.getPrediction(ds_copy, priorClassDistribution, ds, 0.5 /*ignored*/); //use tie-breaking of getPrediction
ds_copy[1+pred_labels] = 0; //next iteration, we'll find the next-best label
if (pred_labels==iact) {
hits[k]+=w;
break;
}
}
// must find at least one hit if K == n_classes
if (hits.length == ds.length-1) {
double after = ArrayUtils.sum(hits);
if (after == before) hits[hits.length-1]+=w; //assume worst case
}
}
public static TwoDimTable getHitRatioTable(float[] hits) {
String tableHeader = "Top-" + hits.length + " Hit Ratios";
String[] rowHeaders = new String[hits.length];
for (int k=0; k<hits.length; ++k)
rowHeaders[k] = Integer.toString(k+1);
String[] colHeaders = new String[]{"Hit Ratio"};
String[] colTypes = new String[]{"float"};
String[] colFormats = new String[]{"%f"};
String colHeaderForRowHeaders = "K";
TwoDimTable table = new TwoDimTable(tableHeader, null/*tableDescription*/, rowHeaders, colHeaders, colTypes, colFormats, colHeaderForRowHeaders);
for (int k=0; k<hits.length; ++k)
table.set(k, 0, hits[k]);
return table;
}
/**
   * Build an Ordinal ModelMetrics object from per-class probabilities (in Frame preds - no labels!), from actual labels, and a given domain for all possible labels (maybe more than what's in labels)
* @param perClassProbs Frame containing predicted per-class probabilities (and no predicted labels)
* @param actualLabels A Vec containing the actual labels (can be for fewer labels than what's in domain, since the predictions can be for a small subset of the data)
* @return ModelMetrics object
*/
static public ModelMetricsOrdinal make(Frame perClassProbs, Vec actualLabels) {
String[] names = perClassProbs.names();
String[] label = actualLabels.domain();
String[] union = ArrayUtils.union(names, label, true);
if (union.length == names.length + label.length)
throw new IllegalArgumentException("Column names of per-class-probabilities and categorical domain of actual labels have no common values!");
return make(perClassProbs, actualLabels, perClassProbs.names());
}
/**
   * Build an Ordinal ModelMetrics object from per-class probabilities (in Frame preds - no labels!), from actual labels, and a given domain for all possible labels (maybe more than what's in labels)
* @param perClassProbs Frame containing predicted per-class probabilities (and no predicted labels)
* @param actualLabels A Vec containing the actual labels (can be for fewer labels than what's in domain, since the predictions can be for a small subset of the data)
* @param domain Ordered list of factor levels for which the probabilities are given (perClassProbs[i] are the per-observation probabilities for belonging to class domain[i])
* @return ModelMetrics object
*/
static public ModelMetricsOrdinal make(Frame perClassProbs, Vec actualLabels, String[] domain) {
Scope.enter();
Vec _labels = actualLabels.toCategoricalVec();
if (_labels == null || perClassProbs == null)
throw new IllegalArgumentException("Missing actualLabels or predictedProbs for multinomial metrics!");
if (_labels.length() != perClassProbs.numRows())
throw new IllegalArgumentException("Both arguments must have the same length for multinomial metrics (" + _labels.length() + "!=" + perClassProbs.numRows() + ")!");
for (Vec p : perClassProbs.vecs()) {
if (!p.isNumeric())
throw new IllegalArgumentException("Predicted probabilities must be numeric per-class probabilities for multinomial metrics.");
if (p.min() < 0 || p.max() > 1)
throw new IllegalArgumentException("Predicted probabilities must be between 0 and 1 for multinomial metrics.");
}
int nclasses = perClassProbs.numCols();
if (domain.length!=nclasses)
throw new IllegalArgumentException("Given domain has " + domain.length + " classes, but predictions have " + nclasses + " columns (per-class probabilities) for multinomial metrics.");
_labels = _labels.adaptTo(domain);
Frame predsLabel = new Frame(perClassProbs);
predsLabel.add("labels", _labels);
MetricBuilderOrdinal mb = new OrdinalMetrics((_labels.domain())).doAll(predsLabel)._mb;
_labels.remove();
ModelMetricsOrdinal mm = (ModelMetricsOrdinal)mb.makeModelMetrics(null, predsLabel, null, null);
mm._description = "Computed on user-given predictions and labels.";
Scope.exit();
return mm;
}
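  // A minimal usage sketch for make(...) above; `probs` and `labels` are assumed,
  // illustrative names for the per-class probability Frame and the actual labels:
  //
  //   ModelMetricsOrdinal mm = ModelMetricsOrdinal.make(probs, labels, probs.names());
  //   System.out.println(mm.logloss());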
  // helper to build a ModelMetricsOrdinal for an N-class problem from a Frame that contains N per-class probability columns, and the actual label as the (N+1)-th column
private static class OrdinalMetrics extends MRTask<OrdinalMetrics> {
public OrdinalMetrics(String[] domain) { this.domain = domain; }
String[] domain;
private MetricBuilderOrdinal _mb;
@Override public void map(Chunk[] chks) {
_mb = new MetricBuilderOrdinal(domain.length, domain);
Chunk actuals = chks[chks.length-1];
double [] ds = new double[chks.length];
for (int i=0;i<chks[0]._len;++i) {
for (int c=1;c<chks.length;++c)
ds[c] = chks[c-1].atd(i); //per-class probs - user-given
ds[0] = GenModel.getPrediction(ds, null, ds, 0.5 /*ignored*/);
_mb.perRow(ds, new float[]{actuals.at8(i)}, null);
}
}
@Override public void reduce(OrdinalMetrics mrt) { _mb.reduce(mrt._mb); }
}
public static class MetricBuilderOrdinal<T extends MetricBuilderOrdinal<T>> extends MetricBuilderSupervised<T> {
double[/*nclasses*/][/*nclasses*/] _cm;
double[/*K*/] _hits; // the number of hits for hitratio, length: K
int _K; // TODO: Let user set K
double _logloss;
public MetricBuilderOrdinal( int nclasses, String[] domain ) {
super(nclasses,domain);
_cm = domain.length > ConfusionMatrix.maxClasses() ? null : new double[domain.length][domain.length];
_K = Math.min(10,_nclasses);
_hits = new double[_K];
}
public transient double [] _priorDistribution;
    // Passed a double[] sized nclasses+1; ds[0] must be the predicted class index and
    // ds[1...nclasses] the per-class probability distribution.
@Override public double[] perRow(double ds[], float[] yact, Model m) { return perRow(ds, yact, 1, 0, m); }
@Override public double[] perRow(double ds[], float[] yact, double w, double o, Model m) {
if (_cm == null) return ds;
      if( Float.isNaN(yact[0]) ) return ds; // No errors if actual is missing
if(ArrayUtils.hasNaNs(ds)) return ds;
if(w == 0 || Double.isNaN(w)) return ds;
final int iact = (int)yact[0];
_count++;
_wcount += w;
_wY += w*iact;
_wYY += w*iact*iact;
// Compute error
double err = iact+1 < ds.length ? 1-ds[iact+1] : 1; // Error: distance from predicting ycls as 1.0
_sumsqe += w*err*err; // Squared error
assert !Double.isNaN(_sumsqe);
// Plain Olde Confusion Matrix
_cm[iact][(int)ds[0]]++; // actual v. predicted
// Compute hit ratio
if( _K > 0 && iact < ds.length-1)
updateHits(w,iact,ds,_hits,m != null?m._output._priorClassDist:_priorDistribution);
// Compute log loss
_logloss += w*MathUtils.logloss(err);
return ds; // Flow coding
}
@Override public void reduce( T mb ) {
if (_cm == null) return;
super.reduce(mb);
assert mb._K == _K;
ArrayUtils.add(_cm, mb._cm);
_hits = ArrayUtils.add(_hits, mb._hits);
_logloss += mb._logloss;
}
@Override public ModelMetrics makeModelMetrics(Model m, Frame f, Frame adaptedFrame, Frame preds) {
double mse = Double.NaN;
double logloss = Double.NaN;
float[] hr = new float[_K];
ConfusionMatrix cm = new ConfusionMatrix(_cm, _domain);
double sigma = weightedSigma();
if (_wcount > 0) {
if (_hits != null) {
for (int i = 0; i < hr.length; i++) hr[i] = (float) (_hits[i] / _wcount);
for (int i = 1; i < hr.length; i++) hr[i] += hr[i - 1];
}
mse = _sumsqe / _wcount;
logloss = _logloss / _wcount;
}
ModelMetricsOrdinal mm = new ModelMetricsOrdinal(m, f, _count, mse, _domain, sigma, cm,
hr, logloss, _customMetric);
if (m!=null) m.addModelMetrics(mm);
return mm;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsOrdinalGLMGeneric.java
|
package hex;
import water.fvec.Frame;
import water.util.TwoDimTable;
public class ModelMetricsOrdinalGLMGeneric extends ModelMetricsOrdinalGeneric {
public final long _nullDegreesOfFreedom;
public final long _residualDegreesOfFreedom;
public final double _resDev;
public final double _nullDev;
public final double _AIC;
public final double _loglikelihood;
public final TwoDimTable _coefficients_table;
public final double _r2;
public ModelMetricsOrdinalGLMGeneric(Model model, Frame frame, long nobs, double mse, String[] domain, double sigma,
TwoDimTable confusionMatrix, float[] hr, double logloss, CustomMetric customMetric, double r2, long nullDegreesOfFreedom,
long residualDegreesOfFreedom, double resDev, double nullDev, double aic, double loglikelihood,
TwoDimTable coefficients_table, TwoDimTable hit_ratio_table, double meanPerClassError, String description) {
super(model, frame, nobs, mse, domain, sigma, confusionMatrix, hr, logloss, customMetric, hit_ratio_table,
meanPerClassError, description);
_nullDegreesOfFreedom = nullDegreesOfFreedom;
_residualDegreesOfFreedom = residualDegreesOfFreedom;
_resDev = resDev;
_nullDev = nullDev;
_AIC = aic;
_loglikelihood = loglikelihood;
_coefficients_table = coefficients_table;
_r2 = r2;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsOrdinalGeneric.java
|
package hex;
import water.fvec.Frame;
import water.util.TwoDimTable;
public class ModelMetricsOrdinalGeneric extends ModelMetricsOrdinal {
public final TwoDimTable _confusion_matrix;
public final TwoDimTable _hit_ratio_table;
public final double _mean_per_class_error;
public ModelMetricsOrdinalGeneric(Model model, Frame frame, long nobs, double mse, String[] domain, double sigma, TwoDimTable confusionMatrix,
float[] hr, double logloss, CustomMetric customMetric, TwoDimTable hit_ratio_table,
double meanPerClassError, String description) {
super(model, frame, nobs, mse, domain, sigma, null, hr, logloss, customMetric);
_confusion_matrix = confusionMatrix;
_hit_ratio_table = hit_ratio_table;
_description = description;
_mean_per_class_error = meanPerClassError;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsRegression.java
|
package hex;
import hex.genmodel.utils.DistributionFamily;
import water.IcedUtils;
import water.MRTask;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.MathUtils;
public class ModelMetricsRegression extends ModelMetricsSupervised {
public final double _mean_residual_deviance;
public final double _AIC;
public final double _loglikelihood;
/**
* @return {@link #mean_residual_deviance()} for all algos except GLM, for which it means "total residual deviance".
**/
public double residual_deviance() { return _mean_residual_deviance; }
public double loglikelihood() { return _loglikelihood; }
public double aic() { return _AIC; }
@SuppressWarnings("unused")
public double mean_residual_deviance() { return _mean_residual_deviance; }
public final double _mean_absolute_error;
public double mae() { return _mean_absolute_error; }
public final double _root_mean_squared_log_error;
public double rmsle() { return _root_mean_squared_log_error; }
public ModelMetricsRegression(Model model, Frame frame, long nobs, double mse, double sigma, double mae,double rmsle,
double meanResidualDeviance, CustomMetric customMetric, double loglikelihood, double aic) {
super(model, frame, nobs, mse, null, sigma, customMetric);
_mean_residual_deviance = meanResidualDeviance;
_mean_absolute_error = mae;
_root_mean_squared_log_error = rmsle;
_loglikelihood = loglikelihood;
_AIC = aic;
}
public ModelMetricsRegression(Model model, Frame frame, long nobs, double mse, double sigma, double mae,double rmsle,
double meanResidualDeviance, CustomMetric customMetric) {
this(model, frame, nobs, mse, sigma, mae, rmsle, meanResidualDeviance, customMetric, Double.NaN, Double.NaN);
}
public static ModelMetricsRegression getFromDKV(Model model, Frame frame) {
ModelMetrics mm = ModelMetrics.getFromDKV(model, frame);
if (! (mm instanceof ModelMetricsRegression))
throw new H2OIllegalArgumentException("Expected to find a Regression ModelMetrics for model: " + model._key.toString() + " and frame: " + frame._key.toString(),
"Expected to find a ModelMetricsRegression for model: " + model._key.toString() + " and frame: " + frame._key.toString() + " but found a: " + mm.getClass());
return (ModelMetricsRegression) mm;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(super.toString());
if(!Double.isNaN(_mean_residual_deviance)) {
sb.append(" mean residual deviance: " + (float) _mean_residual_deviance + "\n");
} else {
sb.append(" mean residual deviance: N/A\n");
}
sb.append(" mean absolute error: " + (float)_mean_absolute_error + "\n");
sb.append(" root mean squared log error: " + (float)_root_mean_squared_log_error + "\n");
sb.append(" loglikelihood: " + (float)_loglikelihood + "\n");
sb.append(" AIC: " + (float)_AIC + "\n");
return sb.toString();
}
static public ModelMetricsRegression make(Vec predicted, Vec actual, DistributionFamily family) {
return make(predicted, actual, null, family);
}
/**
* Build a Regression ModelMetrics object from predicted and actual targets
* @param predicted A Vec containing predicted values
* @param actual A Vec containing the actual target values
* @param weights A Vec containing the observation weights (optional)
* @param family Distribution family used to compute the deviance (gaussian is used when null)
* @return ModelMetrics object
*/
static public ModelMetricsRegression make(Vec predicted, Vec actual, Vec weights, DistributionFamily family) {
if (predicted == null || actual == null)
throw new IllegalArgumentException("Missing actual or predicted targets for regression metrics!");
if (!predicted.isNumeric())
throw new IllegalArgumentException("Predicted values must be numeric for regression metrics.");
if (!actual.isNumeric())
throw new IllegalArgumentException("Actual values must be numeric for regression metrics.");
if (family == DistributionFamily.quantile || family == DistributionFamily.tweedie || family == DistributionFamily.huber)
throw new IllegalArgumentException("Unsupported distribution family, requires additional parameters which cannot be specified right now.");
Frame fr = new Frame(predicted);
fr.add("actual", actual);
if (weights != null) {
fr.add("weights", weights);
}
family = family == null ? DistributionFamily.gaussian : family;
MetricBuilderRegression mb = new RegressionMetrics(family).doAll(fr)._mb;
ModelMetricsRegression mm = (ModelMetricsRegression) mb.makeModelMetrics(null, fr, null, null);
mm._description = "Computed on user-given predictions and targets, distribution: " + family.toString() + ".";
return mm;
}
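// Usage sketch (illustrative only; assumes `predicted` and `actual` are numeric Vecs already in the cluster):
//   ModelMetricsRegression mm = ModelMetricsRegression.make(predicted, actual, DistributionFamily.gaussian);
//   double rmse = Math.sqrt(mm.mse());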
// helper to compute regression metrics from a Frame that contains the predicted values, the actual targets, and optionally observation weights
private static class RegressionMetrics extends MRTask<RegressionMetrics> {
public MetricBuilderRegression _mb;
final Distribution _distribution;
RegressionMetrics(DistributionFamily family) {
_distribution = DistributionFactory.getDistribution(family);
}
@Override public void map(Chunk[] chks) {
_mb = new MetricBuilderRegression(_distribution);
Chunk preds = chks[0];
Chunk actuals = chks[1];
Chunk weights = chks.length == 3 ? chks[2] : null;
double[] ds = new double[1];
float[] acts = new float[1];
for (int i=0;i<chks[0]._len;++i) {
ds[0] = preds.atd(i);
acts[0] = (float) actuals.atd(i);
double w = weights != null ? weights.atd(i) : 1;
_mb.perRow(ds, acts, w, 0, null);
}
}
@Override public void reduce(RegressionMetrics mrt) { _mb.reduce(mrt._mb); }
}
public static class MetricBuilderRegression<T extends MetricBuilderRegression<T>> extends MetricBuilderSupervised<T> {
double _sumdeviance;
Distribution _dist;
double _abserror;
double _rmslerror;
protected double _loglikelihood;
public MetricBuilderRegression() {
super(1,null); // this will make _work = new double[2]
}
public MetricBuilderRegression(Distribution dist) {
super(1,null); // this will make _work = new double[2]
_dist=dist;
}
// ds[0] has the prediction and ds[1,..,N] are ignored
@Override public double[] perRow(double ds[], float[] yact, Model m) {return perRow(ds, yact, 1, 0, m);}
@Override public double[] perRow(double ds[], float[] yact, double w, double o, Model m) {
if( Float.isNaN(yact[0]) ) return ds; // No errors if actual is missing
if(ArrayUtils.hasNaNs(ds)) return ds; // No errors if prediction has missing values (can happen for GLM)
if(w == 0 || Double.isNaN(w)) return ds;
// Compute error
double err = yact[0] - ds[0]; // Error: distance from the actual
double err_msle = Math.pow(Math.log1p(ds[0]) - Math.log1p(yact[0]),2); //Squared log error
_sumsqe += w*err*err; // Squared error
_abserror += w*Math.abs(err);
_rmslerror += w*err_msle;
assert !Double.isNaN(_sumsqe);
// Deviance method is not supported in custom distribution
if((m != null && m._parms._distribution != DistributionFamily.custom) || (_dist != null && _dist._family != DistributionFamily.custom)) {
if (m != null && !m.isDistributionHuber()) {
_sumdeviance += m.deviance(w, yact[0], ds[0]);
} else if (_dist != null) {
_sumdeviance += _dist.deviance(w, yact[0], ds[0]);
}
}
if(m != null && m.isGeneric()) { // only computed for generic models; computing this during training would increase run time
_loglikelihood += m.likelihood(w, yact[0], ds);
}
_count++;
_wcount += w;
_wY += w*yact[0];
_wYY += w*yact[0]*yact[0];
return ds; // Flow coding
}
@Override public void reduce( T mb ) {
super.reduce(mb);
_sumdeviance += mb._sumdeviance;
_abserror += mb._abserror;
_rmslerror += mb._rmslerror;
_loglikelihood += mb._loglikelihood;
}
// Having computed a MetricBuilder, this method fills in a ModelMetrics
public ModelMetricsRegression makeModelMetrics(Model m, Frame f, Frame adaptedFrame, Frame preds) {
ModelMetricsRegression mm = computeModelMetrics(m, f, adaptedFrame, preds);
if (m!=null) m.addModelMetrics(mm);
return mm;
}
ModelMetricsRegression computeModelMetrics(Model m, Frame f, Frame adaptedFrame, Frame preds) {
double mse = _sumsqe / _wcount;
double mae = _abserror/_wcount; //Mean Absolute Error
double rmsle = Math.sqrt(_rmslerror/_wcount); //Root Mean Squared Log Error
double loglikelihood = Double.NaN;
double aic = Double.NaN;
if (adaptedFrame == null) adaptedFrame = f;
double meanResDeviance = 0;
if (m != null && m.isDistributionHuber()){
assert(_sumdeviance==0); // should not yet be computed
if (preds != null) {
Vec actual = adaptedFrame.vec(m._parms._response_column);
Vec weight = adaptedFrame.vec(m._parms._weights_column);
//compute huber delta based on huber alpha quantile on absolute prediction error
double huberDelta = computeHuberDelta(actual, preds.anyVec(), weight, m._parms._huber_alpha);
// make a deep copy of the model's current distribution state (huber delta)
_dist = IcedUtils.deepCopy(m._dist);
_dist.setHuberDelta(huberDelta);
meanResDeviance = new MeanResidualDeviance(_dist, preds.anyVec(), actual, weight).exec().meanResidualDeviance;
}
} else if((m != null && m._parms._distribution != DistributionFamily.custom) || (_dist != null && _dist._family != DistributionFamily.custom) ) {
meanResDeviance = _sumdeviance / _wcount; //mean residual deviance
} else {
meanResDeviance = Double.NaN;
}
if(m != null && m.getClass().toString().contains("Generic")) {
loglikelihood = -1 * _loglikelihood; // convert the accumulated negative log-likelihood into a log-likelihood
aic = m.aic(loglikelihood);
}
ModelMetricsRegression mm = new ModelMetricsRegression(m, f, _count, mse, weightedSigma(), mae, rmsle,
meanResDeviance, _customMetric, loglikelihood, aic);
return mm;
}
}
public static double computeHuberDelta(Vec actual, Vec preds, Vec weight, double huberAlpha) {
Vec absdiff = new MRTask() {
@Override
public void map(Chunk[] cs, NewChunk[] nc) {
for (int i = 0; i < cs[0].len(); ++i)
nc[0].addNum(Math.abs(cs[0].atd(i) - cs[1].atd(i)));
}
}.doAll(1, (byte) 3, new Frame(new String[]{"preds", "actual"}, new Vec[]{preds, actual})).outputFrame().anyVec();
// compute the Huber delta as the huber_alpha quantile of the absolute prediction error
double hd = MathUtils.computeWeightedQuantile(weight, absdiff, huberAlpha);
absdiff.remove();
return hd;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsRegressionCoxPH.java
|
package hex;
import water.DKV;
import water.MRTask;
import water.Scope;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import java.util.*;
import java.util.stream.DoubleStream;
import org.apache.commons.lang.ArrayUtils;
import water.rapids.Merge;
import static java.util.Arrays.stream;
import static java.util.stream.Collectors.*;
public class ModelMetricsRegressionCoxPH extends ModelMetricsRegression {
private double _concordance;
private long _concordant;
private long _discordant;
private long _tied_y;
public double concordance() { return _concordance; }
public long concordant() { return _concordant; }
public long discordant() { return _discordant; }
public long tiedY() { return _tied_y; }
public ModelMetricsRegressionCoxPH(Model model, Frame frame, long nobs, double mse, double sigma, double mae,
double rmsle, double meanResidualDeviance, CustomMetric customMetric,
double concordance, long concordant, long discordant, long tied_y) {
super(model, frame, nobs, mse, sigma, mae, rmsle, meanResidualDeviance, customMetric);
this._concordance = concordance;
this._concordant = concordant;
this._discordant = discordant;
this._tied_y = tied_y;
}
public static ModelMetricsRegressionCoxPH getFromDKV(Model model, Frame frame) {
ModelMetrics mm = ModelMetrics.getFromDKV(model, frame);
if (! (mm instanceof ModelMetricsRegressionCoxPH))
throw new H2OIllegalArgumentException("Expected to find a Regression ModelMetrics for model: " + model._key.toString() + " and frame: " + frame._key.toString(),
"Expected to find a ModelMetricsRegression for model: " + model._key.toString() + " and frame: " + frame._key.toString() + " but found a: " + mm.getClass());
return (ModelMetricsRegressionCoxPH) mm;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(super.toString());
if(!Double.isNaN(_concordance)) {
sb.append(" concordance: " + (float) _concordance + "\n");
} else {
sb.append(" concordance: N/A\n");
}
sb.append(" concordant: " + _concordant + "\n");
sb.append(" discordant: " + _discordant + "\n");
sb.append(" tied.y: " + _tied_y + "\n");
return sb.toString();
}
public static class MetricBuilderRegressionCoxPH<T extends MetricBuilderRegressionCoxPH<T>> extends MetricBuilderRegression<T> {
private final String startVecName;
private final String stopVecName;
private final boolean isStratified;
private final String[] stratifyBy;
public MetricBuilderRegressionCoxPH(String startVecName, String stopVecName, boolean isStratified, String[] stratifyByName) {
this.startVecName = startVecName;
this.stopVecName = stopVecName;
this.isStratified = isStratified;
this.stratifyBy = stratifyByName;
}
// Having computed a MetricBuilder, this method fills in a ModelMetrics
public ModelMetricsRegressionCoxPH makeModelMetrics(Model m, Frame f, Frame adaptedFrame, Frame preds) {
final ModelMetricsRegression modelMetricsRegression = super.computeModelMetrics(m, f, adaptedFrame, preds);
final Stats stats = concordance(m, f, adaptedFrame, preds);
ModelMetricsRegressionCoxPH mm = new ModelMetricsRegressionCoxPH(m, f, _count, modelMetricsRegression.mse(),
weightedSigma(), modelMetricsRegression.mae() , modelMetricsRegression.rmsle(), modelMetricsRegression.mean_residual_deviance(),
_customMetric, stats.c(), stats.nconcordant, stats.discordant(), stats.nties);
if (m!=null) m.addModelMetrics(mm);
return mm;
}
private Stats concordance(Model m, Frame fr, Frame adaptFrm, Frame scored) {
final Vec startVec = adaptFrm.vec(startVecName);
final Vec stopVec = adaptFrm.vec(stopVecName);
final Vec statusVec = adaptFrm.lastVec();
final Vec estimateVec = scored.lastVec();
final List<Vec> strataVecs =
isStratified ?
Arrays.asList(stratifyBy).stream().map(s -> fr.vec(s) ).collect(toList()) :
Collections.emptyList();
return concordance(startVec, stopVec, statusVec, strataVecs, estimateVec);
}
static class Stats {
final long ntotals;
final long nconcordant;
final long nties;
Stats() {
this(0, 0, 0);
}
Stats(long ntotals, long nconcordant, long nties) {
this.ntotals = ntotals;
this.nconcordant = nconcordant;
this.nties = nties;
}
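// Concordance index (C-index): concordant pairs get full credit, tied pairs half credit, over all comparable pairs.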
double c() {
return (nconcordant + 0.5d * nties) / ntotals;
}
long discordant() {
return ntotals - nconcordant - nties;
}
@Override
public String toString() {
return "Stats{" +
"ntotals=" + ntotals +
", nconcordant=" + nconcordant +
", ndiscordant=" + discordant() +
", nties=" + nties +
'}';
}
Stats plus(Stats s2) {
return new Stats(ntotals + s2.ntotals, nconcordant + s2.nconcordant, nties + s2.nties);
}
}
static Stats concordance(final Vec startVec, final Vec stopVec, final Vec eventVec, List<Vec> strataVecs, final Vec estimateVec) {
try {
Scope.enter();
final Vec durations = durations(startVec, stopVec);
Frame fr = prepareFrameForConcordanceComputation(eventVec, strataVecs, estimateVec, durations);
return concordanceStats(fr);
} finally {
Scope.exit();
}
}
private static Frame prepareFrameForConcordanceComputation(Vec eventVec, List<Vec> strataVecs, Vec estimateVec, Vec durations) {
final Frame fr = new Frame();
fr.add("duration", durations);
fr.add("event", eventVec);
fr.add("estimate", estimateVec);
for (int i = 0; i < strataVecs.size(); i++) {
fr.add("strata_" + i, strataVecs.get(i));
}
return fr;
}
private static Vec durations(Vec startVec, Vec stopVec) {
if (null == startVec) {
return stopVec;
}
final Frame frame = new MRTask() {
@Override
public void map(Chunk c0, Chunk c1, NewChunk nc) {
for (int i = 0; i < c0._len; i++)
nc.addNum(c1.atd(i) - c0.atd(i));
}
}.doAll(Vec.T_NUM, startVec, stopVec)
.outputFrame(new String[]{"durations"}, null);
final Vec result = frame.vec(0);
DKV.put(result);
Scope.track(result);
return result;
}
private static Stats concordanceStats(Frame fr){
final Frame withoutNas = removeNAs(fr);
final int[] stratasAndDuration = new int[withoutNas.numCols() - 2];
final int[] strataIndexes = new int[withoutNas.numCols() - 3];
for (int i = 0; i < strataIndexes.length; i++) {
stratasAndDuration[i] = i + 3;
strataIndexes[i] = i + 3;
}
stratasAndDuration[withoutNas.numCols() - 3] = 0;
if (0 == withoutNas.numRows()) {
return new Stats();
}
final Frame sorted = withoutNas.sort(stratasAndDuration);
Scope.track(sorted);
final List<Vec.Reader> strataCols = stream(strataIndexes).boxed().map(i -> sorted.vec(i).new Reader()).collect(toList());
long lastStart = 0L;
List lastRow = new ArrayList(sorted.numCols() - 3);
Stats statsAcc = new Stats();
for (long i = 0; i < sorted.numRows(); i++) {
final List row = new ArrayList(sorted.numCols() - 3);
for (Vec.Reader strataCol : strataCols) {
row.add(strataCol.at(i));
}
if (!lastRow.equals(row)) {
lastRow = row;
Stats stats = statsForAStrata(sorted.vec("duration").new Reader()
, sorted.vec("event").new Reader()
, sorted.vec("estimate").new Reader()
, lastStart
, i);
lastStart = i;
statsAcc = statsAcc.plus(stats);
}
}
Stats stats = statsForAStrata(sorted.vec("duration").new Reader()
, sorted.vec("event").new Reader()
, sorted.vec("estimate").new Reader()
, lastStart
, sorted.numRows());
return statsAcc.plus(stats);
}
private static Frame removeNAs(Frame fr) {
final int[] iDontWantNAsInThisCols = new int[]{0, 2};
final Frame withoutNas = new Merge.RemoveNAsTask(iDontWantNAsInThisCols)
.doAll(fr.types(), fr)
.outputFrame(fr.names(), fr.domains());
Scope.track(withoutNas);
stream(withoutNas.vecs()).forEach(Scope::track);
withoutNas.replace(1, withoutNas.vec("event"));
return withoutNas;
}
private static Stats statsForAStrata(Vec.Reader duration, Vec.Reader eventVec, Vec.Reader estimateVec, long firstIndex, long lastIndex) {
if (lastIndex == firstIndex) {
return new Stats();
}
int countOfCensored = 0;
int countOfDead = 0;
for (long i = firstIndex; i < lastIndex; i++) {
if (0 == eventVec.at(i)) {
countOfCensored++;
} else {
countOfDead++;
}
}
long[] indexesOfDead = new long[countOfDead];
long[] indexesOfCensored = new long[countOfCensored];
countOfCensored = 0;
countOfDead = 0;
for (long i = firstIndex; i < lastIndex; i++) {
if (0 == eventVec.at(i)) {
indexesOfCensored[countOfCensored++] = i;
} else {
indexesOfDead[countOfDead++] = i;
}
}
assert indexesOfCensored.length + indexesOfDead.length == lastIndex - firstIndex;
int diedIndex = 0;
int censoredIndex = 0;
final DoubleStream estimatesOfDead = stream(indexesOfDead).mapToDouble(i -> estimateTime(estimateVec, i));
final StatTree timesToCompare = new StatTree(estimatesOfDead.distinct().sorted().toArray());
long nTotals = 0L;
long nConcordant = 0L;
long nTied = 0L;
for(;;) {
final boolean hasMoreCensored = censoredIndex < indexesOfCensored.length;
final boolean hasMoreDead = diedIndex < indexesOfDead.length;
// Should we look at some censored indices next, or died indices?
if (hasMoreCensored && (!hasMoreDead || deadTime(duration, indexesOfDead[diedIndex]) > deadTime(duration,indexesOfCensored[censoredIndex]))) {
final PairStats pairStats = handlePairs(indexesOfCensored, estimateVec, censoredIndex, timesToCompare);
nTotals += pairStats.pairs;
nConcordant += pairStats.concordant;
nTied += pairStats.tied;
censoredIndex = pairStats.next_ix;
} else if (hasMoreDead && (!hasMoreCensored || deadTime(duration, indexesOfDead[diedIndex]) <= deadTime(duration, indexesOfCensored[censoredIndex]))) {
final PairStats pairStats = handlePairs(indexesOfDead, estimateVec, diedIndex, timesToCompare);
for (int i = diedIndex; i < pairStats.next_ix; i++) {
final double pred = estimateTime(estimateVec, indexesOfDead[i]);
timesToCompare.insert(pred);
}
nTotals += pairStats.pairs;
nConcordant += pairStats.concordant;
nTied += pairStats.tied;
diedIndex = pairStats.next_ix;
} else {
assert !(hasMoreDead || hasMoreCensored);
break;
}
}
return new Stats(nTotals, nConcordant, nTied);
}
private static double deadTime(Vec.Reader duration, long i) {
return duration.at(i);
}
private static double estimateTime(Vec.Reader estimateVec, long i) {
return -estimateVec.at(i);
}
static class PairStats {
final long pairs;
final long concordant;
final long tied;
final int next_ix;
public PairStats(long pairs, long concordant, long tied, int next_ix) {
this.pairs = pairs;
this.concordant = concordant;
this.tied = tied;
this.next_ix = next_ix;
}
@Override
public String toString() {
return "PairStats{" +
"pairs=" + pairs +
", concordant=" + concordant +
", tied=" + tied +
", next_ix=" + next_ix +
'}';
}
}
static PairStats handlePairs(long[] truth, Vec.Reader estimateVec, int first_ix, StatTree statTree) {
int next_ix = first_ix;
while (next_ix < truth.length && truth[next_ix] == truth[first_ix]) {
next_ix++;
}
final long pairs = statTree.len() * (next_ix - first_ix);
long correct = 0L;
long tied = 0L;
for (int i = first_ix; i < next_ix; i++) {
double estimateTime = estimateTime(estimateVec, truth[i]);
StatTree.RankAndCount rankAndCount = statTree.rankAndCount(estimateTime);
correct += rankAndCount.rank;
tied += rankAndCount.count;
}
PairStats pairStats = new PairStats(pairs, correct, tied, next_ix);
return pairStats;
}
}
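/**
* Implicit binary search tree over the distinct estimate values, stored in array (heap) layout:
* the root is at index 0 and the children of node i are at 2*i+1 and 2*i+2. For example, the sorted
* input {1,2,3,4,5,6,7} is stored as {4,2,6,1,3,5,7}. counts[i] tracks how many inserted values fall
* into the subtree rooted at i, which makes rank/count queries logarithmic in the number of values.
*/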
static class StatTree {
final double[] values;
final long[] counts;
StatTree(double[] possibleValues) {
assert null != possibleValues;
assert sortedAscending(possibleValues);
this.values = new double[possibleValues.length];
final int filled = fillTree(possibleValues, 0, possibleValues.length, 0);
addMissingValues(possibleValues, filled);
this.counts = new long[possibleValues.length];
assert containsAll(possibleValues, this.values);
assert isSearchTree(this.values);
assert allZeroes(this.counts);
}
private void addMissingValues(double[] possibleValues, int filled) {
final int missing = possibleValues.length - filled;
for (int i = 0; i < missing; i++) {
this.values[filled + i] = possibleValues[i * 2];
}
}
private int fillTree(final double[] inputValues, final int start, final int stop, final int rootIndex) {
int len = stop - start;
if (0 >= len) {
return 0;
}
final int lastFullRow = 32 - Integer.numberOfLeadingZeros(len + 1) - 1;
final int fillable = (1 << lastFullRow) - 1;
final int totalOverflow = len - fillable;
final int leftOverflow = Math.min(totalOverflow, (1 << (lastFullRow - 1)));
final int leftTreeSize = (1 << (lastFullRow - 1)) - 1 + leftOverflow;
this.values[rootIndex] = inputValues[start + leftTreeSize];
fillTree(inputValues, start, start + leftTreeSize, leftChild(rootIndex));
fillTree(inputValues, start + leftTreeSize + 1, stop, rightChild(rootIndex));
return fillable;
}
static private boolean sortedAscending(double[] a) {
int i = 1;
while (i < a.length) {
if (a[i - 1] > a[i]) return false;
i++;
}
return true;
}
static private boolean containsAll(double[] a, double b[]) {
for (int i = 0; i < b.length; i++) {
if (!ArrayUtils.contains(a, b[i])) {
return false;
}
}
return true;
}
static private boolean isSearchTree(double[] a) {
for (int i = 0; i < a.length; i++) {
final int leftChild = leftChild(i);
if (leftChild < a.length && a[i] < a[leftChild]){
return false;
}
final int rightChild = rightChild(i);
if (rightChild < a.length && a[i] > a[rightChild]){
return false;
}
}
return true;
}
static private boolean allZeroes(long[] a) {
for (int i = 0; i < a.length; i++) {
if (0L != a[i]){
return false;
}
}
return true;
}
void insert(final double value) {
int i = 0;
final long n = this.values.length;
while (i < n) {
double cur = this.values[i];
this.counts[i]++;
if (value < cur) {
i = leftChild(i);
} else if (value > cur) {
i = rightChild(i);
} else {
return;
}
}
throw new IllegalArgumentException("Value " + value + " not contained in tree. Tree counts now in illegal state;");
}
public int size() {
return this.values.length;
}
public long len() {
return counts[0];
}
static class RankAndCount {
final long rank;
final long count;
public RankAndCount(long rank, long count) {
this.rank = rank;
this.count = count;
}
@Override
public String toString() {
return "RankAndCount{" +
"rank=" + rank +
", count=" + count +
'}';
}
}
RankAndCount rankAndCount(double value) {
int i = 0;
int rank = 0;
long count = 0;
while (i < this.values.length) {
double cur = this.values[i];
if (value < cur) {
i = leftChild(i);
} else if (value > cur) {
rank += this.counts[i];
//subtract off the right tree if exists
final int nexti = rightChild(i);
if (nexti < this.values.length) {
rank -= this.counts[nexti];
i = nexti;
} else {
return new RankAndCount(rank,count);
}
} else { //value == cur
count = this.counts[i];
final int lefti = leftChild(i);
if (lefti < this.values.length) {
long nleft = this.counts[lefti];
count -= nleft;
rank += nleft;
final int righti = rightChild(i);
if (righti < this.values.length) {
count -= this.counts[righti];
}
}
return new RankAndCount(rank, count);
}
}
return new RankAndCount(rank, count);
}
@Override
public String toString() {
return toString(new StringBuilder()).toString();
}
private StringBuilder toString(StringBuilder strBuilder) {
int i = 0;
int to = 2;
for (;;) {
for (; i < to - 1; i++) {
if (i < this.values.length) {
strBuilder.append(this.values[i]).append('(').append(this.counts[i]).append(')').append(" ");
} else {
return strBuilder;
}
}
strBuilder.append("\n");
to*=2;
}
}
private static int leftChild(int i) {
return 2 * i + 1;
}
private static int rightChild(int i) {
return 2 * i + 2;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsRegressionCoxPHGeneric.java
|
package hex;
import water.fvec.Frame;
public class ModelMetricsRegressionCoxPHGeneric extends ModelMetricsRegressionGeneric {
public final double _concordance;
public final long _concordant;
public final long _discordant;
public final long _tied_y;
public ModelMetricsRegressionCoxPHGeneric(Model model, Frame frame, long nobs, double mse, double sigma, double mae, double rmsle,
double meanResidualDeviance, CustomMetric customMetric,
double concordance, long concordant, long discordant, long tied_y,
String description) {
super(model, frame, nobs, mse, sigma, mae, rmsle, meanResidualDeviance, customMetric, description);
_concordance = concordance;
_concordant = concordant;
_discordant = discordant;
_tied_y = tied_y;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsRegressionGLM.java
|
package hex;
import water.fvec.Frame;
/**
* Created by tomasnykodym on 4/20/15.
*/
public class ModelMetricsRegressionGLM extends ModelMetricsRegression implements GLMMetrics {
public final long _nullDegressOfFreedom;
public final long _residualDegressOfFreedom;
public final double _resDev;
public final double _nullDev;
public ModelMetricsRegressionGLM(Model model, Frame frame, long nobs, double mse, double sigma,
double mae, double rmsle, double resDev, double meanResDev,
double nullDev, double aic, long nDof, long rDof,
CustomMetric customMetric, double loglikelihood) {
super(model, frame, nobs, mse, sigma, mae, rmsle, meanResDev, customMetric, loglikelihood, aic);
_resDev = resDev;
_nullDev = nullDev;
_nullDegressOfFreedom = nDof;
_residualDegressOfFreedom = rDof;
}
@Override
public double residual_deviance() {return _resDev;}
@Override
public double null_deviance() {return _nullDev;}
@Override
public long residual_degrees_of_freedom(){
return _residualDegressOfFreedom;
}
@Override
public long null_degrees_of_freedom() {return _nullDegressOfFreedom;}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(super.toString());
sb.append(" null DOF: " + (float)_nullDegressOfFreedom + "\n");
sb.append(" residual DOF: " + (float)_residualDegressOfFreedom + "\n");
sb.append(" null deviance: " + (float)_nullDev + "\n");
sb.append(" residual deviance: " + (float)_resDev + "\n");
sb.append(" AIC: " + (float)_AIC + "\n");
return sb.toString();
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsRegressionGLMGeneric.java
|
package hex;
import water.fvec.Frame;
import water.util.TwoDimTable;
public class ModelMetricsRegressionGLMGeneric extends ModelMetricsRegressionGLM {
public final double _r2;
public TwoDimTable _coefficients_table;
public ModelMetricsRegressionGLMGeneric(Model model, Frame frame, long nobs, double mse, double sigma, double mae, double rmsle,
double meanResidualDeviance, CustomMetric customMetric, double r2, long nullDegreesOfFreedom,
long residualDegreesOfFreedom, double resDev, double nullDev, double aic, double loglikelihood,
TwoDimTable coefficients_table) {
super(model, frame, nobs, mse, sigma, mae, rmsle, resDev, meanResidualDeviance, nullDev, aic, nullDegreesOfFreedom, residualDegreesOfFreedom, customMetric, loglikelihood);
_r2 = r2;
_coefficients_table = coefficients_table;
}
@Override
public double r2() {
return _r2;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsRegressionGeneric.java
|
package hex;
import water.fvec.Frame;
public class ModelMetricsRegressionGeneric extends ModelMetricsRegression {
public ModelMetricsRegressionGeneric(Model model, Frame frame, long nobs, double mse, double sigma, double mae, double rmsle,
double meanResidualDeviance, CustomMetric customMetric, String description) {
super(model, frame, nobs, mse, sigma, mae, rmsle, meanResidualDeviance, customMetric);
_description = description;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsRegressionHGLM.java
|
package hex;
import Jama.Matrix;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.Frame;
import water.util.ArrayUtils;
import java.util.Arrays;
import static water.util.ArrayUtils.*;
public class ModelMetricsRegressionHGLM extends ModelMetricsRegression {
// the doc = document attached to https://github.com/h2oai/h2o-3/issues/8487, title HGLM_H2O_Implementation.pdf
// I will be referring to the doc and different parts of it to explain my implementation.
public static final double LOG_2PI = Math.log(2*Math.PI);
public final double[] _beta; // fixed coefficients
public final double[][] _ubeta; // random coefficients
public final double[] _icc;
public final int _iterations;
public final double[][] _tmat;
public final double _var_residual; // variance of residual error
public final double _log_likelihood; // llg from reference [2] of the doc
public final double _mse_fixed; // MSE with fixed effects only
public ModelMetricsRegressionHGLM(Model model, Frame frame, long nobs, double sigma, double loglikelihood,
CustomMetric customMetric, int iter, double[] beta, double[][] ubeta,
double[][] tmat, double varResidual, double mse, double mse_fixed, double mae,
double rmsle, double meanResidualDeviance, double aic) {
super(model, frame, nobs, mse, sigma, mae, rmsle, meanResidualDeviance, customMetric, loglikelihood, aic);
_beta = beta;
_ubeta = ubeta;
_iterations = iter;
_tmat = tmat;
_var_residual = varResidual;
_icc = calICC(tmat, varResidual);
_log_likelihood = loglikelihood;
_mse_fixed = mse_fixed;
}
/***
* This method calculates the log-likelihood as described in section II.VI of the doc. Please keep this method
* even though nobody is calling it.
*/
public static double calHGLMllg2(long nobs, double[][] tmat, double varResidual, double[][] zTTimesZ,
double yMinsXFixSqure, double[][] yMinusXFixTimesZ) {
double llg = nobs*LOG_2PI;
double oneOVar = 1.0/varResidual;
double oneOVarSq = oneOVar*oneOVar;
double[][] gMat = expandMat(tmat, yMinusXFixTimesZ.length);
double[][] tInvPlusZTT = calInnverV(gMat, zTTimesZ, oneOVar);
llg += Math.log(varResidual * new Matrix(tInvPlusZTT).det() * new Matrix(gMat).det());
double[] yMinusXFixTimesZVec = flattenArray(yMinusXFixTimesZ);
Matrix yMinusXFixTimesZMat = new Matrix(new double[][] {yMinusXFixTimesZVec}).transpose();
llg += oneOVar*yMinsXFixSqure -
yMinusXFixTimesZMat.transpose().times(new Matrix(tInvPlusZTT).inverse()).times(yMinusXFixTimesZMat).times(oneOVarSq).getArray()[0][0];
return -0.5*llg;
}
/**
* See the doc section II.V, calculates G inverse + transpose(Z)*Z/var_e.
*/
public static double[][] calInnverV(double[][] gmat, double[][] zTTimesZ, double oneOVar) {
try {
double[][] gmatInv = new Matrix(gmat).inverse().getArray();
double[][] tempzTTimesZ = copy2DArray(zTTimesZ);
ArrayUtils.mult(tempzTTimesZ, oneOVar);
ArrayUtils.add(gmatInv, tempzTTimesZ);
return gmatInv;
} catch(Exception ex) {
throw new RuntimeException("Tmat matrix is singular.");
}
}
public static ModelMetricsRegressionHGLM getFromDKV(Model model, Frame frame) {
ModelMetrics mm = ModelMetrics.getFromDKV(model, frame);
if (!(mm instanceof ModelMetricsRegressionHGLM))
throw new H2OIllegalArgumentException("Expected to find a HGLM ModelMetrics for model: " + model._key.toString()
+ " and frame: " + frame._key.toString(), "Expected to find a ModelMetricsHGLM for model: " +
model._key.toString() + " and frame: " + frame._key.toString() + " but found a: " + (mm == null ? null : mm.getClass()));
return (ModelMetricsRegressionHGLM) mm;
}
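/** Intraclass correlation for each random effect: ICC_i = T[i][i] / (var_residual + trace(T)). */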
public static double[] calICC(double[][] tmat, double varResidual) {
int numLevel2 = tmat.length;
double[] icc = new double[numLevel2];
double denom = varResidual;
denom += new Matrix(tmat).trace(); // sum of diagonal
double oOverDenom = 1.0/denom;
for (int index=0; index<numLevel2; index++)
icc[index] = tmat[index][index]*oOverDenom;
return icc;
}
public double llg() {
return _log_likelihood;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(super.toString());
sb.append(" mean square error with fixed predictor coefficients: "+_mse_fixed);
int numLevel2 = _ubeta.length;
for (int index=0; index<numLevel2; index++)
sb.append(" standard error of random effects for level 2 index " + index + ": "+_tmat[index][index]);
sb.append(" standard error of residual error: "+_var_residual);
sb.append(" ICC: "+Arrays.toString(_icc));
sb.append(" loglikelihood: "+_log_likelihood);
sb.append(" iterations taken to build model: " + _iterations);
sb.append(" coefficients for fixed effect: "+Arrays.toString(_beta));
for (int index=0; index<numLevel2; index++)
sb.append(" coefficients for random effect for level 2 index: "+index+": "+Arrays.toString(_ubeta[index]));
return sb.toString();
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsRegressionHGLMGeneric.java
|
package hex;
import water.fvec.Frame;
public class ModelMetricsRegressionHGLMGeneric extends ModelMetricsRegressionHGLM {
public ModelMetricsRegressionHGLMGeneric(Model model, Frame frame, long nobs, double sigma, double loglikelihood,
CustomMetric customMetric, int iter, double[] beta, double[][] ubeta,
double[][] tmat, double varResidual, double mse, double mse_fixed, double mae,
double rmsle, double meanresidualdeviance, double aic) {
super(model, frame, nobs, sigma, loglikelihood, customMetric, iter, beta, ubeta, tmat, varResidual, mse, mse_fixed,
mae, rmsle, meanresidualdeviance, aic);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsSupervised.java
|
package hex;
import water.fvec.Frame;
public class ModelMetricsSupervised extends ModelMetrics {
public final String[] _domain; // Names of classes
public final double _sigma; // stddev of the response (if any)
public ModelMetricsSupervised(Model model, Frame frame, long nobs, double mse, String[] domain, double sigma, CustomMetric customMetric) {
super(model, frame, nobs, mse, null, customMetric);
_domain = domain;
_sigma = sigma;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(super.toString());
return sb.toString();
}
public double r2() { // TODO: Override for GLM Regression - create new Generic & Generic V3 versions
double var = _sigma*_sigma;
return 1.0 - _MSE/var;
}
abstract public static class MetricBuilderSupervised<T extends MetricBuilderSupervised<T>> extends MetricBuilder<T> {
protected final String[] _domain;
protected final int _nclasses;
public MetricBuilderSupervised() {
_domain = null;
_nclasses = -1;
}
public MetricBuilderSupervised(int nclasses, String[] domain) {
_nclasses = nclasses;
_domain = domain;
_work = new double[_nclasses+1];
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMetricsUnsupervised.java
|
package hex;
import water.fvec.Frame;
public class ModelMetricsUnsupervised extends ModelMetrics {
public ModelMetricsUnsupervised(Model model, Frame frame, long nobs, double MSE, CustomMetric customMetric) {
super(model, frame, nobs, MSE, null, customMetric);
}
public ModelMetricsUnsupervised(Model model, Frame frame, long nobs, String description, CustomMetric customMetric) {
super(model, frame, nobs, Double.NaN, description, customMetric);
}
public static abstract class MetricBuilderUnsupervised<T extends MetricBuilderUnsupervised<T>>
extends MetricBuilder<T> {
@Override
public final ModelMetrics makeModelMetrics(Model m, Frame f, Frame adaptedFrame, Frame preds) {
return makeModelMetrics(m, f);
}
public abstract ModelMetrics makeModelMetrics(Model m, Frame f);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelMojoWriter.java
|
package hex;
import hex.genmodel.AbstractMojoWriter;
import water.api.SchemaServer;
import water.api.StreamWriteOption;
import water.api.StreamWriter;
import water.api.schemas3.ModelSchemaV3;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.zip.ZipOutputStream;
/**
* Base class for serializing models into the MOJO format.
*
* <p/> The function of a MOJO writer is simply to write the model into a Zip archive consisting of several
* text/binary files. This base class handles serialization of some parameters that are common to all `Model`s, but
* anything specific to a particular Model should be implemented in that Model's corresponding ModelMojoWriter subclass.
*
* <p/> When implementing a subclass, you have to override the single function {@link #writeModelData()}. Within
* this function you can use any of the following:
* <ul>
* <li>{@link #writekv(String, Object)} to serialize any "simple" values (those that can be represented as a
* single-line string).</li>
* <li>{@link #writeblob(String, byte[])} to add arbitrary blobs of data to the archive.</li>
* <li>{@link #startWritingTextFile(String)} / {@link #writeln(String)} / {@link #finishWritingTextFile()} to
* add text files to the archive.</li>
* </ul>
*
* After subclassing this class, you should also override the {@link Model#getMojo()} method in your model's class to
* return an instance of your new child class.
*
* @param <M> model class that your ModelMojoWriter serializes
* @param <P> model parameters class that corresponds to your model
* @param <O> model output class that corresponds to your model
*/
public abstract class ModelMojoWriter<M extends Model<M, P, O>, P extends Model.Parameters, O extends Model.Output>
extends AbstractMojoWriter
implements StreamWriter
{
protected M model;
//--------------------------------------------------------------------------------------------------------------------
// Inheritance interface: ModelMojoWriter subclasses are expected to override these methods to provide custom behavior
//--------------------------------------------------------------------------------------------------------------------
public ModelMojoWriter() {
super(null);
}
public ModelMojoWriter(M model) {
super(model.modelDescriptor());
this.model = model;
}
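// Illustrative subclass sketch (hypothetical names, not part of h2o-core):
//   public class MyAlgoMojoWriter extends ModelMojoWriter<MyAlgoModel, MyAlgoModel.Parameters, MyAlgoModel.Output> {
//     public MyAlgoMojoWriter(MyAlgoModel model) { super(model); }
//     @Override protected void writeModelData() throws IOException {
//       writekv("ntrees", 50);                  // simple single-line value
//       writeblob("weights.bin", new byte[0]);  // arbitrary binary payload
//     }
//   }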
//--------------------------------------------------------------------------------------------------------------------
// Private
//--------------------------------------------------------------------------------------------------------------------
/**
* Used from `ModelsHandler.fetchMojo()` to serialize the Mojo into a StreamingSchema.
* The structure of the zip will be the following:
* model.ini
* domains/
* d000.txt
* d001.txt
* ...
* (extra model files written by the subclasses)
* Each domain file is a plain text file with one line per category (not quoted).
*/
@Override public void writeTo(OutputStream os, StreamWriteOption... option) {
ZipOutputStream zos = new ZipOutputStream(os);
try {
writeTo(zos);
zos.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
protected abstract void writeModelData() throws IOException;
@Override
protected void writeExtraInfo() throws IOException {
super.writeExtraInfo();
writeModelDetails();
writeModelDetailsReadme();
}
/** Create file that contains model details in JSON format.
* This information is pulled from the model's schema.
*/
private void writeModelDetails() throws IOException {
ModelSchemaV3 modelSchema = (ModelSchemaV3) SchemaServer.schema(3, model).fillFromImpl(model);
startWritingTextFile("experimental/modelDetails.json");
writeln(modelSchema.toJsonString());
finishWritingTextFile();
}
private void writeModelDetailsReadme() throws IOException {
startWritingTextFile("experimental/README.md");
writeln("Outputting model information in JSON is an experimental feature and we appreciate any feedback.\n" +
"The contents of this folder may change with another version of H2O.");
finishWritingTextFile();
}
public void writeStringArrays(String[] sArrays, String title) throws IOException {
startWritingTextFile(title);
for (String sName : sArrays) {
writeln(sName);
}
finishWritingTextFile();
}
public void writeRectangularDoubleArray(double[][] array, String title) throws IOException {
assert null != array;
assert null != title;
writekv(title + "_size1", array.length);
writekv(title + "_size2", array.length > 0 ? array[0].length : 0);
writeDoubleArray(array, title);
}
public void writeDoubleArray(double[][] array, String title) throws IOException {
assert null != array;
assert null != title;
write2DArray(array, title);
}
public void write2DStringArrays(String[][] sArrays, String title) throws IOException {
startWritingTextFile(title);
int numCols = sArrays.length;
for (int index = 0; index < numCols; index++)
if (sArrays[index] != null) {
for (String sName : sArrays[index]) {
writeln(sName);
}
}
finishWritingTextFile();
}
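/** Serializes a 2-D double array as a single blob of 8-byte doubles written row by row (big-endian, ByteBuffer's default byte order). */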
public void write2DArray(double[][] array, String title) throws IOException {
int totArraySize = 0;
for (double[] row : array)
totArraySize += row.length;
ByteBuffer bb = ByteBuffer.wrap(new byte[totArraySize * 8]);
for (double[] row : array)
for (double val : row)
bb.putDouble(val);
writeblob(title, bb.array());
}
public void write3DIntArray(int[][][] array, String title) throws IOException {
int totArraySize = 0;
int outDim = array.length;
for (int index = 0; index < outDim; index++) {
for (int[] row : array[index])
totArraySize += row.length;
}
ByteBuffer bb = ByteBuffer.wrap(new byte[totArraySize * 4]);
for (int index = 0; index < outDim; index++)
for (int[] row : array[index])
for (int val : row)
bb.putInt(val);
writeblob(title, bb.array());
}
public void write3DArray(double[][][] array, String title) throws IOException {
int totArraySize = 0;
int outDim = array.length;
for (int index = 0; index < outDim; index++) {
for (double[] row : array[index])
totArraySize += row.length;
}
ByteBuffer bb = ByteBuffer.wrap(new byte[totArraySize * 8]);
for (int index = 0; index < outDim; index++)
for (double[] row : array[index])
for (double val : row)
bb.putDouble(val);
writeblob(title, bb.array());
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelParametersBuilderFactory.java
|
package hex;
import water.util.PojoUtils;
/**
* Factory for creating model parameters builders.
*
* @param <MP> type of produced model parameters object
*/
public interface ModelParametersBuilderFactory<MP extends Model.Parameters> {
/** Get parameters builder for initial parameters.
*
* <p>
* The builder modifies the passed parameters object, so the caller
* is responsible for cloning it if necessary.
* </p>
*
* @param initialParams initial model parameters which will be modified
* @return a parameters builder wrapping the given parameters
*/
ModelParametersBuilder<MP> get(MP initialParams);
/**
* Returns mapping from input parameter specification to
* attribute names of <code>MP</code>.
* @return naming strategy
*/
PojoUtils.FieldNaming getFieldNamingStrategy();
/** A generic interface to configure a given initial parameters object
* via sequence of {@link #set} method calls.
*
* <p>
* The usage is sequence of <code>set</code> calls finalized by
* <code>build</code> call which produces final version of parameters.
* </p>
*
* @param <MP> type of produced model parameters object
*/
interface ModelParametersBuilder<MP extends Model.Parameters> {
ModelParametersBuilder<MP> set(String name, Object value);
MP build();
}
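// Illustrative usage (hypothetical `factory` and `initialParams` instances):
//   MP parms = factory.get(initialParams)
//       .set("max_depth", 5)
//       .set("seed", 42L)
//       .build();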
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelPreprocessor.java
|
package hex;
import water.Key;
import water.Keyed;
import water.fvec.Frame;
/**
* WARNING!
* This is a temporary abstraction used to preprocess frames during training and scoring.
* As such, this class can be deprecated or even removed at any time, so don't extend or use directly yet.
*/
public abstract class ModelPreprocessor<T extends ModelPreprocessor> extends Keyed<T> {
public ModelPreprocessor() {
super();
}
public ModelPreprocessor(Key<T> key) {
super(key);
}
public abstract Frame processTrain(Frame fr, Model.Parameters params);
public abstract Frame processValid(Frame fr, Model.Parameters params);
public abstract Frame processScoring(Frame fr, Model model);
public abstract Model asModel();
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ModelTrainingEventsPublisher.java
|
package hex;
import java.util.concurrent.BlockingQueue;
public class ModelTrainingEventsPublisher {
public enum Event {ONE_DONE, ALL_DONE}
private final BlockingQueue<Event> _events;
public ModelTrainingEventsPublisher(BlockingQueue<Event> events) {
_events = events;
}
public void onIterationComplete() {
_events.add(Event.ONE_DONE);
}
public void onAllIterationsComplete() {
_events.add(Event.ALL_DONE);
}
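// Consumer-side sketch (assumes the same BlockingQueue instance that was passed to this publisher):
//   ModelTrainingEventsPublisher.Event e = events.take(); // blocks until an event arrives
//   boolean trainingDone = (e == ModelTrainingEventsPublisher.Event.ALL_DONE);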
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/MultiModelMojoWriter.java
|
package hex;
import java.io.IOException;
import java.util.List;
import java.util.zip.ZipOutputStream;
public abstract class MultiModelMojoWriter<M extends Model<M, P, O>, P extends Model.Parameters, O extends Model.Output>
extends ModelMojoWriter<M, P, O> {
public MultiModelMojoWriter() {}
public MultiModelMojoWriter(M model) {
super(model);
}
protected abstract List<Model> getSubModels();
protected abstract void writeParentModelData() throws IOException;
protected final void writeModelData() throws IOException {
List<Model> subModels = getSubModels();
writekv("submodel_count", subModels.size());
int modelNum = 0;
for (Model model : subModels) {
writekv("submodel_key_" + modelNum, model._key.toString());
writekv("submodel_dir_" + modelNum, getZipDirectory(model));
modelNum++;
}
writeParentModelData();
}
protected void writeTo(ZipOutputStream zos) throws IOException {
super.writeTo(zos);
for (Model model : getSubModels()) {
String zipDir = getZipDirectory(model);
ModelMojoWriter writer = model.getMojo();
writer.writeTo(zos, zipDir);
}
}
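/** Directory of a submodel inside the MOJO zip: "models/<algo>/<model_key>/". */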
private static String getZipDirectory(Model m) {
String algo = m._parms.algoName();
String key = m._key.toString();
return "models/" + algo + "/" + key + "/";
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/MultinomialAUC.java
|
package hex;
import water.Iced;
import water.util.TwoDimTable;
public class MultinomialAUC extends Iced {
public SimpleAUC[] _ovrAucs;
public PairwiseAUC[] _ovoAucs;
public final MultinomialAucType _default_auc_type;
public final String[] _domain;
public static final int MAX_AUC_CLASSES = 50;
public final boolean _calculateAuc;
// keep this final aggregate value outside to save time
public final double _macroOvrAuc;
public final double _weightedOvrAuc;
public final double _macroOvoAuc;
public final double _weightedOvoAuc;
public final double _macroOvrAucPr;
public final double _weightedOvrAucPr;
public final double _macroOvoAucPr;
public final double _weightedOvoAucPr;
public MultinomialAUC(AUC2.AUCBuilder[] ovrAucs, AUC2.AUCBuilder[][] ovoAucs, String[] domain, boolean zeroWeights, MultinomialAucType type){
_default_auc_type = type;
_domain = domain;
int domainLength = _domain.length;
_calculateAuc = !_default_auc_type.equals(MultinomialAucType.AUTO) && !_default_auc_type.equals(MultinomialAucType.NONE) && domainLength <= MAX_AUC_CLASSES;
if(_calculateAuc) {
_ovoAucs = new PairwiseAUC[(domainLength * domainLength - domainLength) / 2];
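// k classes yield k*(k-1)/2 one-vs-one pairs, e.g. 4 classes -> 6 pairwise AUCs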
_ovrAucs = new SimpleAUC[domainLength];
int aucsIndex = 0;
if (!zeroWeights) {
for (int i = 0; i < domainLength - 1; i++) {
AUC2 tmpAucObject = ovrAucs[i]._n > 0 ? new AUC2(ovrAucs[i]) : new AUC2();
_ovrAucs[i] = new SimpleAUC(tmpAucObject._auc, tmpAucObject._pr_auc, tmpAucObject._p, tmpAucObject._n+tmpAucObject._p);
for (int j = i + 1; j < domainLength; j++) {
AUC2 first = ovoAucs[i][j]._n > 0 ? new AUC2(ovoAucs[i][j]) : new AUC2();
AUC2 second = ovoAucs[j][i]._n > 0 ? new AUC2(ovoAucs[j][i]) : new AUC2();
_ovoAucs[aucsIndex++] = new PairwiseAUC(first, second, _domain[i], _domain[j]);
}
}
AUC2 tmpAucObject = ovrAucs[domainLength - 1]._n > 0 ? new AUC2(ovrAucs[domainLength - 1]) : new AUC2();
_ovrAucs[domainLength - 1] = new SimpleAUC(tmpAucObject._auc, tmpAucObject._pr_auc, tmpAucObject._p, tmpAucObject._n+tmpAucObject._p);
} else {
for (int i = 0; i < ovoAucs.length - 1; i++) {
_ovrAucs[i] = new SimpleAUC();
for (int j = i + 1; j < ovoAucs[0].length; j++) {
if (i < j) {
_ovoAucs[aucsIndex++] = new PairwiseAUC(new AUC2(), new AUC2(), _domain[i], _domain[j]);
}
}
}
_ovrAucs[domainLength - 1] = new SimpleAUC();
}
_macroOvoAuc = computeOvoMacroAuc(false);
_weightedOvoAuc = computeOvoWeightedAuc(false);
_macroOvrAuc = computeOvrMacroAuc(false);
_weightedOvrAuc = computeOvrWeightedAuc(false);
_macroOvoAucPr = computeOvoMacroAuc(true);
_weightedOvoAucPr = computeOvoWeightedAuc(true);
_macroOvrAucPr = computeOvrMacroAuc(true);
_weightedOvrAucPr = computeOvrWeightedAuc(true);
} else { // AUC not computed: the AUC type is AUTO/NONE or there are too many classes (memory limit)
_macroOvoAuc = Double.NaN;
_weightedOvoAuc = Double.NaN;
_macroOvrAuc = Double.NaN;
_weightedOvrAuc = Double.NaN;
_macroOvoAucPr = Double.NaN;
_weightedOvoAucPr = Double.NaN;
_macroOvrAucPr = Double.NaN;
_weightedOvrAucPr = Double.NaN;
}
}
public MultinomialAUC(TwoDimTable aucTable, TwoDimTable aucprTable, String[] domain, MultinomialAucType type){
_default_auc_type = type;
_domain = domain;
int domainLength = _domain.length;
_calculateAuc = !_default_auc_type.equals(MultinomialAucType.AUTO) && !_default_auc_type.equals(MultinomialAucType.NONE) && domainLength <= MAX_AUC_CLASSES;
if(_calculateAuc) {
_ovoAucs = new PairwiseAUC[(domainLength * domainLength - domainLength) / 2];
_ovrAucs = new SimpleAUC[domainLength];
int aucsIndex = 0;
for (int i = 0; i < _ovrAucs.length; i++) {
AUC2 auc = new AUC2();
auc._auc = (double) aucTable.get(i,3);
auc._pr_auc = (double) aucprTable.get(i,3);
_ovrAucs[i] = new SimpleAUC(auc._auc, auc._pr_auc, 0, 0);
}
_macroOvrAuc = (double) aucTable.get(_ovrAucs.length,3);
_weightedOvrAuc = (double) aucTable.get(_ovrAucs.length + 1,3);
_macroOvrAucPr = (double) aucprTable.get(_ovrAucs.length,3);
_weightedOvrAucPr = (double) aucprTable.get(_ovrAucs.length + 1,3);
int lastOvoIndex = _ovrAucs.length + _ovoAucs.length + 2;
for (int j = _ovrAucs.length + 2; j < lastOvoIndex; j++) {
_ovoAucs[aucsIndex++] = new PairwiseAUC((double) aucTable.get(j, 3), /*AUC*/
(double) aucprTable.get(j, 3), /*PR AUC*/
(String) aucTable.get(j, 1) /*first domain*/,
(String) aucTable.get(j, 2) /*second domain*/);
}
_macroOvoAuc = (double)aucTable.get(lastOvoIndex, 3);
_weightedOvoAuc = (double) aucTable.get(lastOvoIndex + 1, 3);
_macroOvoAucPr = (double)aucprTable.get(lastOvoIndex, 3);
_weightedOvoAucPr = (double) aucprTable.get(lastOvoIndex + 1, 3);
} else { // AUC not computed: the AUC type is AUTO/NONE or there are too many classes (memory limit)
_macroOvoAuc = Double.NaN;
_weightedOvoAuc = Double.NaN;
_macroOvrAuc = Double.NaN;
_weightedOvrAuc = Double.NaN;
_macroOvoAucPr = Double.NaN;
_weightedOvoAucPr = Double.NaN;
_macroOvrAucPr = Double.NaN;
_weightedOvrAucPr = Double.NaN;
}
}
public double auc() {
switch (_default_auc_type) {
case MACRO_OVR:
return getMacroOvrAuc();
case MACRO_OVO:
return getMacroOvoAuc();
case WEIGHTED_OVO:
return getWeightedOvoAuc();
case WEIGHTED_OVR:
return getWeightedOvrAuc();
default:
return Double.NaN;
}
}
public double pr_auc() {
switch (_default_auc_type) {
case MACRO_OVR:
return get_macroOvrAucPr();
case MACRO_OVO:
return getMacroOvoAucPr();
case WEIGHTED_OVO:
return getWeightedOvoAucPr();
default:
return getWeightedOvrAucPr();
}
}
public double computeOvrMacroAuc(boolean isPr){
double macroAuc = 0;
for(SimpleAUC ovrAuc : _ovrAucs){
macroAuc += isPr ? ovrAuc.aucpr() : ovrAuc.auc();
}
return macroAuc/_ovrAucs.length;
}
public double computeOvrWeightedAuc(boolean isPr){
double weightedAuc = 0;
double sumWeights = 0;
for(SimpleAUC ovrAuc : _ovrAucs){
double positives = ovrAuc.positives();
sumWeights += positives;
weightedAuc += isPr ? ovrAuc.aucpr() * positives : ovrAuc.auc() * positives;
}
return weightedAuc/sumWeights;
}
public double computeOvoMacroAuc(boolean isPr){
double macroAuc = 0;
for(PairwiseAUC ovoAuc : _ovoAucs){
macroAuc += isPr ? ovoAuc.getPrAuc() : ovoAuc.getAuc();
}
return macroAuc/_ovoAucs.length;
}
public double computeOvoWeightedAuc(boolean isPr){
double n = _ovrAucs[0].ncases();
double weightedAuc = 0;
double sumWeights = 0;
for(PairwiseAUC ovoAuc : _ovoAucs){
double weight = ovoAuc.getSumPositives() / n;
weightedAuc += isPr ? ovoAuc.getPrAuc() * weight : ovoAuc.getAuc() * weight;
sumWeights += weight;
}
return weightedAuc/sumWeights;
}
public double getMacroOvrAuc() {
return _macroOvrAuc;
}
public double getWeightedOvrAuc() {
return _weightedOvrAuc;
}
public double getMacroOvoAuc() {
return _macroOvoAuc;
}
public double getWeightedOvoAuc() {
return _weightedOvoAuc;
}
public double get_macroOvrAucPr() {
return _macroOvrAucPr;
}
public double getWeightedOvrAucPr() {
return _weightedOvrAucPr;
}
public double getMacroOvoAucPr() {
return _macroOvoAucPr;
}
public double getWeightedOvoAucPr() {
return _weightedOvoAucPr;
}
public TwoDimTable getAucTable(){
return getTable(false);
}
public TwoDimTable getAucPrTable(){
return getTable(true);
}
public TwoDimTable getTable(boolean isPr) {
if(_calculateAuc) {
String metric = isPr ? "auc_pr" : "AUC";
String tableHeader = "Multinomial " + metric + " values";
int rows = _ovrAucs.length + _ovoAucs.length + 4 /* 2 macro + 2 weighted aggregate rows */;
String[] rowHeaders = new String[rows];
for (int i = 0; i < _ovrAucs.length; i++)
rowHeaders[i] = _domain[i] + " vs Rest";
rowHeaders[_ovrAucs.length] = "Macro OVR";
rowHeaders[_ovrAucs.length + 1] = "Weighted OVR";
for (int i = 0; i < _ovoAucs.length; i++)
rowHeaders[_ovrAucs.length + 2 + i] = _ovoAucs[i].getPairwiseDomainsString();
rowHeaders[rows - 2] = "Macro OVO";
rowHeaders[rows - 1] = "Weighted OVO";
String[] colHeaders = new String[]{"First class domain", "Second class domain", metric};
String[] colTypes = new String[]{"String", "String", "double"};
String[] colFormats = new String[]{"%s", "%s", "%f"};
String colHeaderForRowHeaders = "Type";
TwoDimTable table = new TwoDimTable(tableHeader, null, rowHeaders, colHeaders, colTypes, colFormats, colHeaderForRowHeaders);
for (int i = 0; i < _ovrAucs.length; i++) {
SimpleAUC auc = _ovrAucs[i];
double aucValue = isPr ? auc.aucpr() : auc.auc();
table.set(i, 0, _domain[i]);
table.set(i, 2, aucValue);
}
table.set(_ovrAucs.length, 2, isPr ? _macroOvrAucPr : _macroOvrAuc);
table.set(_ovrAucs.length + 1, 2, isPr ? _weightedOvrAucPr : _weightedOvrAuc);
for (int i = 0; i < _ovoAucs.length; i++) {
PairwiseAUC auc = _ovoAucs[i];
double aucValue = isPr ? auc.getPrAuc() : auc.getAuc();
table.set(_ovrAucs.length + 2 + i, 0, auc.getDomainFirst());
table.set(_ovrAucs.length + 2 + i, 1, auc.getDomainSecond());
table.set(_ovrAucs.length + 2 + i, 2, aucValue);
}
table.set(rows - 2, 2, isPr ? _macroOvoAucPr : _macroOvoAuc);
table.set(rows - 1, 2, isPr ? _weightedOvoAucPr : _weightedOvoAuc);
return table;
} else {
return null;
}
}
}
/**
* Lightweight AUC holder storing only the AUC, PR AUC, and the counts needed for weighting, to save memory
*/
class SimpleAUC extends Iced {
private final double _auc;
private final double _aucpr;
private final double _positives;
private final double _ncases;
public SimpleAUC(double auc, double aucpr, double positives, double n) {
this._auc = auc;
this._aucpr = aucpr;
this._positives = positives;
this._ncases = n;
}
public SimpleAUC() {
this._auc = Double.NaN;
this._aucpr = Double.NaN;
this._positives = Double.NaN;
this._ncases = Double.NaN;
}
public double auc() {
return _auc;
}
public double aucpr() {
return _aucpr;
}
public double positives() {
return _positives;
}
public double ncases() {
return _ncases;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/MultinomialAucType.java
|
package hex;
public enum MultinomialAucType {
AUTO,
NONE,
MACRO_OVR,
WEIGHTED_OVR,
MACRO_OVO,
WEIGHTED_OVO
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ObjectConsistencyChecker.java
|
package hex;
import water.*;
public class ObjectConsistencyChecker extends MRTask<ObjectConsistencyChecker> {
private final Key<?> _key;
private final byte[] _bytes;
public ObjectConsistencyChecker(Key<?> key) {
_key = key;
Iced<?> pojo = DKV.getGet(key);
if (pojo == null) {
throw new IllegalArgumentException("Object with key='" + key + "' doesn't exist in DKV.");
}
_bytes = pojo.asBytes();
}
@Override
protected void setupLocal() {
Value val = H2O.STORE.get(_key);
if (val == null)
return;
if (!val.isConsistent()) {
throw new IllegalStateException("Object " + _key + " is locally modified on node " + H2O.SELF + ".");
}
}
}
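// A minimal usage sketch (hypothetical key; assumes a running H2O cloud with the object in the DKV):
//
//   Key<Frame> key = Key.make("myFrame");
//   new ObjectConsistencyChecker(key).doAllNodes();
//
// setupLocal() runs on every node; any node whose locally cached Value of the key
// has been modified without being written back throws IllegalStateException.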
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/PairwiseAUC.java
|
package hex;
import water.Iced;
public class PairwiseAUC extends Iced {
private double _auc;
private double _prAuc;
private double _sumPositives;
private String _domainFirst;
private String _domainSecond;
public PairwiseAUC(AUC2 aucFirst, AUC2 aucSecond, String domainFirst, String domainSecond) {
this._auc = (aucFirst._auc + aucSecond._auc)/2;
if(Double.isNaN(this._auc)){
this._auc = 0;
}
this._prAuc = (aucFirst._pr_auc + aucSecond._pr_auc)/2;
if(Double.isNaN(this._prAuc)){
this._prAuc = 0;
}
this._sumPositives = aucFirst._p + aucSecond._p;
this._domainFirst = domainFirst;
this._domainSecond = domainSecond;
}
public PairwiseAUC(double auc, double prauc, String domainFirst, String domainSecond) {
this._auc = auc;
this._prAuc = prauc;
this._domainFirst = domainFirst;
this._domainSecond = domainSecond;
}
public double getSumPositives(){
return _sumPositives;
}
public double getAuc(){ return _auc; }
public double getPrAuc(){ return _prAuc; }
public String getDomainFirst() { return _domainFirst; }
public String getDomainSecond() { return _domainSecond; }
public String getPairwiseDomainsString(){
return "Class "+_domainFirst+" vs. "+_domainSecond;
}
}
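// A minimal sketch using the value-based constructor (hypothetical numbers and class labels):
//
//   PairwiseAUC pa = new PairwiseAUC(0.92, 0.88, "setosa", "versicolor");
//   pa.getAuc();                    // -> 0.92
//   pa.getPairwiseDomainsString();  // -> "Class setosa vs. versicolor"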
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ParallelModelBuilder.java
|
package hex;
import jsr166y.ForkJoinTask;
import org.apache.log4j.Logger;
import water.Iced;
import water.util.IcedAtomicInt;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Dispatcher for parallel model building. Starts building models every time the run method is invoked.
* After each model finishes building, a completion listener is invoked, which in turn invokes the ModelFeeder callback.
* ModelFeeder receives the built model and can deal with it in any way - e.g. put it into a Grid, or discard it if the
* resulting model is a failure. It also has the power to invoke the training of any number of new models, or to stop
* the parallel model builder, releasing the barrier inside.
*/
public class ParallelModelBuilder extends ForkJoinTask<ParallelModelBuilder> {
private static final Logger LOG = Logger.getLogger(ParallelModelBuilder.class);
public static abstract class ParallelModelBuilderCallback<D extends ParallelModelBuilderCallback> extends Iced<D> {
public abstract void onBuildSuccess(final Model model, final ParallelModelBuilder parallelModelBuilder);
public abstract void onBuildFailure(final ModelBuildFailure modelBuildFailure, final ParallelModelBuilder parallelModelBuilder);
}
private final transient ParallelModelBuilderCallback _callback;
private final transient IcedAtomicInt _modelInProgressCounter = new IcedAtomicInt();
private final transient ParallelModelBuiltListener _parallelModelBuiltListener;
public ParallelModelBuilder(final ParallelModelBuilderCallback callback) {
Objects.requireNonNull(callback);
_callback = callback;
_parallelModelBuiltListener = new ParallelModelBuiltListener();
}
/**
* Runs given collection of {@link ModelBuilder} in parallel. After each model is finished building,
* one of the callbacks (on model failure / on model completion) is called.
*
* @param modelBuilders A {@link Collection} of {@link ModelBuilder} instances to execute in parallel.
*/
public void run(final Collection<ModelBuilder> modelBuilders) {
if (LOG.isTraceEnabled()) LOG.trace("run with " + modelBuilders.size() + " models");
for (final ModelBuilder modelBuilder : modelBuilders) {
_modelInProgressCounter.incrementAndGet();
modelBuilder.trainModel(_parallelModelBuiltListener);
}
}
private class ParallelModelBuiltListener extends ModelBuilderListener<ParallelModelBuiltListener> {
@Override
public void onModelSuccess(Model model) {
try {
_callback.onBuildSuccess(model, ParallelModelBuilder.this);
} finally {
attemptComplete();
}
}
@Override
public void onModelFailure(Throwable cause, Model.Parameters parameters) {
try {
final ModelBuildFailure modelBuildFailure = new ModelBuildFailure(cause, parameters);
_callback.onBuildFailure(modelBuildFailure, ParallelModelBuilder.this);
} finally {
attemptComplete();
}
}
}
/**
* Contains all the necessary information after a model builder has failed to build the model
*/
public static class ModelBuildFailure {
private final Throwable _throwable;
private final Model.Parameters _parameters;
public ModelBuildFailure(Throwable throwable, Model.Parameters parameters) {
this._throwable = throwable;
this._parameters = parameters;
}
public Throwable getThrowable() {
return _throwable;
}
public Model.Parameters getParameters() {
return _parameters;
}
}
private void attemptComplete() {
int modelsInProgress = _modelInProgressCounter.decrementAndGet();
if (LOG.isTraceEnabled()) LOG.trace("Completed a model, left in progress: " + modelsInProgress);
if (modelsInProgress == 0) {
complete(this);
}
}
@Override
public ParallelModelBuilder getRawResult() {
return this;
}
@Override
protected void setRawResult(ParallelModelBuilder value) {
}
@Override
protected boolean exec() {
return false;
}
}
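// A minimal callback sketch (assumes concrete ModelBuilder instances are created elsewhere):
//
//   ParallelModelBuilder.ParallelModelBuilderCallback callback =
//       new ParallelModelBuilder.ParallelModelBuilderCallback() {
//         @Override
//         public void onBuildSuccess(Model model, ParallelModelBuilder builder) {
//           // e.g. store the model, or call builder.run(...) with additional builders
//         }
//         @Override
//         public void onBuildFailure(ParallelModelBuilder.ModelBuildFailure failure, ParallelModelBuilder builder) {
//           // e.g. log failure.getThrowable() and inspect failure.getParameters()
//         }
//       };
//   ParallelModelBuilder pmb = new ParallelModelBuilder(callback);
//   pmb.run(modelBuilders); // kicks off parallel training
//   pmb.join();             // ForkJoinTask barrier, released once every model has completed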
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/PartialDependence.java
|
package hex;
import jsr166y.CountedCompleter;
import water.*;
import water.api.schemas3.KeyV3;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Rapids;
import water.util.FrameUtils.CalculateWeightMeanSTD;
import water.util.Log;
import water.util.TwoDimTable;
import java.util.Arrays;
import java.util.ArrayList;
public class PartialDependence extends Lockable<PartialDependence> {
transient final public Job _job;
public Key<Model> _model_id;
public Key<Frame> _frame_id;
public long _row_index = -1; // row index, -1 implies no specific row to use in PDP calculation
public String[] _cols;
public ArrayList<String> _cols_1d_2d; // include all columns specified for 1D pdp and 2D pdp
public int _weight_column_index = -1; // weight column index, -1 implies no weight
public boolean _add_missing_na = false; // defaults to false
public int _nbins = 20;
public String[] _targets;
public TwoDimTable[] _partial_dependence_data; //OUTPUT
public double[] _user_splits = null; // stores all user splits for all columns
public double[][] _user_split_per_col = null; // per-column view into the user splits
public int[] _num_user_splits = null; // number of user split values per column
public String[] _user_cols = null; // contains columns with user defined splits
public boolean _user_splits_present = false;
public String[][] _col_pairs_2dpdp = null;
public int _num_2D_pairs = 0; // number of 2D pdp pairs to work on
public int _num_1D = 0;
public int _predictor_column; // predictor column to use in calculating partial dependence
public int[] _predictor_columns;
public PartialDependence(Key<PartialDependence> dest, Job j) {
super(dest);
_job = j;
}
public PartialDependence(Key<PartialDependence> dest) {
this(dest, new Job<>(dest, PartialDependence.class.getName(), "PartialDependence"));
}
public PartialDependence execNested() {
checkSanityAndFillParams();
this.delete_and_lock(_job);
_frame_id.get().write_lock(_job._key);
new PartialDependenceDriver().compute2();
return this;
}
public Job<PartialDependence> execImpl() {
checkSanityAndFillParams();
delete_and_lock(_job);
_frame_id.get().write_lock(_job._key);
// Don't lock the model since the act of unlocking at the end would
// freshen the DKV version, but the live POJO must survive all the way
// to be able to delete the model metrics that got added to it.
// Note: All threads doing the scoring call model_id.get() and then
// update the _model_metrics only on the temporary live object, not in DKV.
// At the end, we call model.remove() and we need those model metrics to be
// deleted with it, so we must make sure we keep the live POJO alive.
_job.start(new PartialDependenceDriver(), _num_1D+_num_2D_pairs);
return _job;
}
private int findTargetClassPredictorIndex(Model m, String target){
int index = Arrays.asList(m._output.classNames()).indexOf(target);
if (index == -1) {
throw new IllegalArgumentException("Incorrect target class: " + target + ".");
}
return index+1;
}
private int[] findTargetClassPredictorIndices(Model m, String[] targets){
int[] result = new int[targets.length];
for(int i=0; i < targets.length; i++){
result[i] = findTargetClassPredictorIndex(m, targets[i]);
}
return result;
}
private void checkSanityAndFillParams() {
Model m = _model_id.get();
if (m == null) {
throw new IllegalArgumentException("Model not found.");
}
if (!m._output.isSupervised()) {
throw new IllegalArgumentException("Partial dependence plots are only implemented for supervised models");
}
int nclasses = m._output.nclasses();
if(nclasses <= 2 && _targets != null){
throw new IllegalArgumentException("Targets parameter is available only for multinomial classification.");
}
if(nclasses == 1){
_predictor_column = 0;
_predictor_columns = new int[]{_predictor_column};
} else if(nclasses == 2) {
_predictor_column = 2;
_predictor_columns = new int[]{_predictor_column};
} else {
if (_targets == null) {
throw new IllegalArgumentException("Targets parameter has to be set for multinomial classification.");
} else {
_predictor_columns = findTargetClassPredictorIndices(m, _targets);
}
}
if (_cols != null || _col_pairs_2dpdp != null) {
_cols_1d_2d = new ArrayList<>();
if (_cols != null)
_cols_1d_2d.addAll(Arrays.asList(_cols));
if (_col_pairs_2dpdp != null) {
_num_2D_pairs = _col_pairs_2dpdp.length * _predictor_columns.length;
for (int index = 0; index < _col_pairs_2dpdp.length; index++) { // iterate over the declared pairs only; _num_2D_pairs counts pairs x targets and would overrun the array
if (!(_cols_1d_2d.contains(_col_pairs_2dpdp[index][0])))
_cols_1d_2d.add(_col_pairs_2dpdp[index][0]);
if (!(_cols_1d_2d.contains(_col_pairs_2dpdp[index][1])))
_cols_1d_2d.add(_col_pairs_2dpdp[index][1]);
}
}
} else
_cols_1d_2d = null; // neither 1D columns nor 2D column pairs were specified
if (_cols_1d_2d==null) { // no cols or cols pairs are specified
Frame f = _frame_id.get();
if (f==null) throw new IllegalArgumentException("Frame not found.");
if (Model.GetMostImportantFeatures.class.isAssignableFrom(m.getClass())) {
_cols = ((Model.GetMostImportantFeatures) m).getMostImportantFeatures(10);
if (_cols != null) {
Log.info("Selecting the top " + _cols.length + " features from the model's variable importances.");
}
} else {
_cols = m._output._names;
if (_cols != null) {
Log.info("Selecting all features from the training data.");
}
}
_cols_1d_2d = new ArrayList<>();
_cols_1d_2d.addAll(Arrays.asList(_cols));
}
_num_1D = _cols==null ? 0 : _cols.length * _predictor_columns.length;
if (_nbins < 2) {
throw new IllegalArgumentException("_nbins must be >=2.");
}
if ((_user_splits != null) && (_user_splits.length > 0)) {
_user_splits_present = true;
int numUserSplits = _user_cols.length;
// convert one dimension info into two dimension
_user_split_per_col = new double[numUserSplits][];
int[] user_splits_start = new int[numUserSplits];
for (int cindex = 1; cindex < numUserSplits; cindex++) { // compute each column's starting offset inside _user_splits
user_splits_start[cindex] = _num_user_splits[cindex-1]+user_splits_start[cindex-1];
}
for (int cindex=0; cindex < numUserSplits; cindex++) {
int splitNum = _num_user_splits[cindex];
_user_split_per_col[cindex] = new double[splitNum];
System.arraycopy(_user_splits, user_splits_start[cindex], _user_split_per_col[cindex], 0, splitNum);
}
}
final Frame fr = _frame_id.get();
if (_weight_column_index >= 0) { // validate that the weight column is numeric
if (!fr.vec(_weight_column_index).isNumeric() || fr.vec(_weight_column_index).isCategorical())
throw new IllegalArgumentException("Weight column " + _weight_column_index + " must be a numerical column.");
}
if (! _user_splits_present) {
for (String col : _cols_1d_2d) {
Vec v = fr.vec(col);
if (v.isCategorical() && v.cardinality() > _nbins) {
throw new IllegalArgumentException("Column " + col + "'s cardinality of " + v.cardinality() + " > nbins of " + _nbins);
}
}
}
}
double[] extractColValues(String col, int actualbins, Vec v) {
double[] colVals;
if (_user_splits_present && Arrays.asList(_user_cols).contains(col)) {
int user_col_index = Arrays.asList(_user_cols).indexOf(col);
actualbins = _num_user_splits[user_col_index];
colVals = _add_missing_na?new double[_num_user_splits[user_col_index]+1]:new double[_num_user_splits[user_col_index]];
for (int rindex = 0; rindex < _num_user_splits[user_col_index]; rindex++) {
colVals[rindex] = _user_split_per_col[user_col_index][rindex];
}
} else {
if (v.isInt() && (v.max() - v.min() + 1) < _nbins) {
actualbins = (int) (v.max() - v.min() + 1);
}
colVals = _add_missing_na ? new double[actualbins+1] : new double[actualbins];
double delta = (v.max() - v.min()) / (actualbins - 1);
if (actualbins == 1) delta = 0;
for (int j = 0; j < colVals.length; ++j) {
colVals[j] = v.min() + j * delta;
}
}
if (_add_missing_na)
colVals[actualbins] = Double.NaN; // set last bin to contain nan
Log.debug("Computing PartialDependence for column " + col + " at the following values: ");
Log.debug(Arrays.toString(colVals));
return colVals;
}
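// Worked example with hypothetical values: for a numeric column with v.min() = 0,
// v.max() = 10 and actualbins = 5, delta = (10 - 0) / (5 - 1) = 2.5, so
// colVals = {0.0, 2.5, 5.0, 7.5, 10.0}; with _add_missing_na enabled an extra
// trailing NaN bin is appended.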
private class PartialDependenceDriver extends H2O.H2OCountedCompleter<PartialDependenceDriver> {
public void compute2() {
assert (_job != null);
final Frame fr = _frame_id.get();
// loop over PDPs (columns)
int num_cols_1d_2d = _num_1D+_num_2D_pairs;
_partial_dependence_data = new TwoDimTable[num_cols_1d_2d];
int column = 0;
for (int i = 0; i < num_cols_1d_2d; ++i) { // take care of the 1d pdp first, then 2d pdp
boolean workingOn1D = (i < _num_1D);
final String col = workingOn1D ? _cols[column] : _col_pairs_2dpdp[column - _num_1D][0];
final String col2 = workingOn1D ? null : _col_pairs_2dpdp[column - _num_1D][1];
final int whichPredictorColumn = i % _predictor_columns.length;
Log.debug("Computing partial dependence of model on '" + col + "'"+(_targets == null ? "." : " and class "+_targets[whichPredictorColumn]+"."));
double[] colVals = extractColValues(col, _nbins, fr.vec(col));
double[] col2Vals = workingOn1D ? null : extractColValues(col2, _nbins, fr.vec(col2));
Futures fs = new Futures();
int responseLength = workingOn1D ? colVals.length : colVals.length * col2Vals.length;
final double[] meanResponse = new double[responseLength];
final double[] stddevResponse = new double[responseLength];
final double[] stdErrorOfTheMeanResponse = new double[responseLength];
final boolean cat = fr.vec(col).isCategorical();
final boolean cat2 = workingOn1D ? false : fr.vec(col2).isCategorical();
// loop over column values (fill one PartialDependence)
if (workingOn1D) { // 1d pdp
for (int k = 0; k < colVals.length; ++k) {
final double value = colVals[k];
CalculatePdpPerBin pdp = new CalculatePdpPerBin(col, col2, value, -1, cat, cat2, k,
false, meanResponse, stddevResponse, stdErrorOfTheMeanResponse, _predictor_columns[whichPredictorColumn]); // perform actual pdp calculation
fs.add(H2O.submitTask(pdp));
}
} else { // 2d pdp
int colLen1 = colVals.length;
int colLen2 = col2Vals.length;
int totLen = colLen1*colLen2;
for (int k=0; k < totLen; k++) {
int index1 = k / colLen2;
int index2 = k % colLen2;
final double value = colVals[index1];
final double value2 = col2Vals[index2];
CalculatePdpPerBin pdp = new CalculatePdpPerBin(col, col2, value, value2, cat, cat2, k, true,
meanResponse, stddevResponse,stdErrorOfTheMeanResponse, _predictor_columns[whichPredictorColumn]); // perform actual pdp calculation
fs.add(H2O.submitTask(pdp));
}
}
fs.blockForPending();
if (workingOn1D) {
_partial_dependence_data[i] = new TwoDimTable("PartialDependence",
_row_index < 0 ? ("Partial Dependence Plot of model " + _model_id + " on column '" + col + "'" + (_targets == null ? "." : " and class "+ _targets[whichPredictorColumn])) :
("Partial Dependence Plot of model " + _model_id + " on column '" + col + "'" + (_targets == null ? "'" : " and class "+_targets[whichPredictorColumn]) +" for row index" + _row_index),
new String[colVals.length],
new String[]{col, "mean_response", "stddev_response", "std_error_mean_response"},
new String[]{cat ? "string" : "double", "double", "double", "double"},
new String[]{cat ? "%s" : "%5f", "%5f", "%5f", "%5f"}, null);
} else {
_partial_dependence_data[i] = new TwoDimTable("2D-PartialDependence",
_row_index < 0 ? ("2D Partial Dependence Plot of model " + _model_id + " on 1st column '" +
col + "' and 2nd column '" + col2 +"'") :
("Partial Dependence Plot of model " + _model_id + " on columns '" +
col + "', '"+ col2
+"' for row " + _row_index),
new String[colVals.length*col2Vals.length],
new String[]{col, col2, "mean_response",
"stddev_response", "std_error_mean_response"},
new String[]{cat ? "string" : "double", cat2 ? "string":"double", "double", "double", "double"},
new String[]{cat ? "%s" : "%5f", cat2 ? "%s" : "%5f","%5f", "%5f", "%5f"}, null);
}
for (int j = 0; j < meanResponse.length; ++j) {
int colIndex = 0;
int countval1 = workingOn1D? j : j / col2Vals.length;
if (fr.vec(col).isCategorical()) {
if (_add_missing_na && Double.isNaN(colVals[countval1]))
_partial_dependence_data[i].set(j, colIndex, ".missing(NA)"); // accomodate NA
else
_partial_dependence_data[i].set(j, colIndex, fr.vec(col).domain()[(int) colVals[countval1]]);
} else {
_partial_dependence_data[i].set(j, colIndex, colVals[countval1]);
}
colIndex++;
if (!workingOn1D) {
int countval2 = j%col2Vals.length;
if (fr.vec(col2).isCategorical()) {
if (_add_missing_na && Double.isNaN(col2Vals[countval2]))
_partial_dependence_data[i].set(j, colIndex, ".missing(NA)"); // accomodate NA
else
_partial_dependence_data[i].set(j, colIndex, fr.vec(col2).domain()[(int) col2Vals[countval2]]);
} else {
_partial_dependence_data[i].set(j, colIndex, col2Vals[countval2]);
}
colIndex++;
}
_partial_dependence_data[i].set(j, colIndex++, meanResponse[j]);
_partial_dependence_data[i].set(j, colIndex++, stddevResponse[j]);
_partial_dependence_data[i].set(j, colIndex++, stdErrorOfTheMeanResponse[j]);
}
if(_targets == null){
column++;
} else if((i+1) % _targets.length == 0){
column++;
}
_job.update(1);
update(_job);
if (_job.stop_requested())
break;
}
tryComplete();
}
public CalculateWeightMeanSTD getWeightedStat(Frame dataFrame, Frame pred, int targetIndex) {
CalculateWeightMeanSTD calMeansSTD = new CalculateWeightMeanSTD();
calMeansSTD.doAll(pred.vec(targetIndex), dataFrame.vec(_weight_column_index));
return calMeansSTD;
}
@Override
public void onCompletion(CountedCompleter caller) {
_frame_id.get().unlock(_job._key);
unlock(_job);
}
@Override
public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) {
_frame_id.get().unlock(_job._key);
unlock(_job);
return true;
}
private class CalculatePdpPerBin extends H2O.H2OCountedCompleter<CalculatePdpPerBin> {
final String _col; // column name
final String _col2; // column name for 2nd column for 2d pdp
final double _value; // value of column to keep constant
final double _value2; // value of 2nd column to keep constant for 2d pdp
final boolean _workOn2D; // true for 2d pdp, false for 1d pdp
final int _pdp_row_index; // column index into pdp frame
final boolean _col1_cat; // true if first column is enum
final boolean _col2_cat; // true if second column is enum
final double[] _meanResponse;
final double[] _stddevResponse;
final double[] _stdErrorOfTheMeanResponse;
final int _predictorColumn;
CalculatePdpPerBin(String col, String col2, double value, double value2, boolean cat, boolean cat2, int which,
boolean workon2D, double[] meanResp, double[] stddevResp, double[] stdErrMeanResp, int predictorColumn) {
_col = col;
_col2 = col2;
_value = value;
_value2 = value2;
_workOn2D = workon2D;
_pdp_row_index = which;
_col1_cat = cat;
_col2_cat = cat2;
_meanResponse = meanResp;
_stddevResponse = stddevResp;
_stdErrorOfTheMeanResponse = stdErrMeanResp;
_predictorColumn = predictorColumn;
}
public void compute2() {
Frame fr;
if (_row_index >= 0) {
fr = Rapids.exec("(rows " + _frame_id + " " + _row_index + ")").getFrame();
} else {
fr = _frame_id.get();
}
Frame test = new Frame(fr);
Vec orig = test.remove(_col);
Vec cons = orig.makeCon(_value);
if (_col1_cat) cons.setDomain(fr.vec(_col).domain());
test.add(_col, cons);
Vec cons2 = null;
if (_workOn2D) {
Vec orig2 = test.remove(_col2);
cons2 = orig2.makeCon(_value2);
if (_col2_cat) cons2.setDomain(fr.vec(_col2).domain());
test.add(_col2, cons2);
}
Frame preds = null;
try {
preds = _model_id.get().score(test, Key.make().toString(), _job, false);
if (preds == null || preds.numRows() == 0) { // this can happen if the algo does not predict on rows with NAs
_meanResponse[_pdp_row_index] = Double.NaN;
_stddevResponse[_pdp_row_index] = Double.NaN;
_stdErrorOfTheMeanResponse[_pdp_row_index] = Double.NaN;
} else {
CalculateWeightMeanSTD calMeansSTD = (_weight_column_index >= 0)?
getWeightedStat(fr, preds, _predictorColumn):null;
_meanResponse[_pdp_row_index] = (_weight_column_index >= 0)?calMeansSTD.getWeightedMean()
:preds.vec(_predictorColumn).mean();
_stddevResponse[_pdp_row_index] = (_weight_column_index >= 0)?calMeansSTD.getWeightedSigma()
:preds.vec(_predictorColumn).sigma();
_stdErrorOfTheMeanResponse[_pdp_row_index] = _stddevResponse[_pdp_row_index]/Math.sqrt(preds.numRows());
}
} finally {
if (preds != null) preds.remove();
}
cons.remove();
if (cons2!=null) cons2.remove();
if (_row_index >= 0) {
fr.remove();
}
tryComplete();
}
}
}
@Override public Class<KeyV3.PartialDependenceKeyV3> makeSchema() { return KeyV3.PartialDependenceKeyV3.class; }
}
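// A minimal usage sketch (hypothetical keys; assumes a trained model and a frame already in the DKV):
//
//   PartialDependence pdp = new PartialDependence(Key.make("my_pdp"));
//   pdp._model_id = Key.make("my_model");
//   pdp._frame_id = Key.make("my_frame");
//   pdp._cols = new String[]{"AGE"};
//   pdp._nbins = 20;
//   pdp.execImpl().get(); // blocks until the PDP job finishes
//   TwoDimTable result = pdp._partial_dependence_data[0];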
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/PojoWriter.java
|
package hex;
import water.codegen.CodeGeneratorPipeline;
import water.util.SBPrintStream;
public interface PojoWriter {
boolean toJavaCheckTooBig();
// Override in subclasses to provide some top-level model-specific goodness
SBPrintStream toJavaInit(SBPrintStream sb, CodeGeneratorPipeline fileContext);
// Override in subclasses to provide some inside-'predict'-call goodness.
// The method returns code that should be appended to the generated top-level class after
// the predict method.
void toJavaPredictBody(SBPrintStream body,
CodeGeneratorPipeline classCtx,
CodeGeneratorPipeline fileCtx,
boolean verboseCode);
// Generates optional "transform" method, transform method will have a different signature depending on the algo
// Empty by default - can be overridden by Model implementation
default SBPrintStream toJavaTransform(SBPrintStream ccsb,
CodeGeneratorPipeline fileCtx,
boolean verboseCode) {
return ccsb;
}
}
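// A minimal implementor sketch (hypothetical constant-prediction model):
//
//   class ConstantPojoWriter implements PojoWriter {
//     @Override public boolean toJavaCheckTooBig() { return false; }
//     @Override public SBPrintStream toJavaInit(SBPrintStream sb, CodeGeneratorPipeline fileContext) {
//       return sb; // nothing model-specific to emit at the top level
//     }
//     @Override public void toJavaPredictBody(SBPrintStream body, CodeGeneratorPipeline classCtx,
//                                             CodeGeneratorPipeline fileCtx, boolean verboseCode) {
//       body.p("preds[0] = 42;").nl(); // the generated predict body always returns the constant 42
//     }
//   }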
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/RegexTokenizer.java
|
package hex;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.parser.BufferedString;
/**
* RegexTokenizer splits rows of a given Frame into delimited sequences of tokens using a regular expression.
* The output structure is suitable for use in the word2vec algorithm.
*
* <p>
* Example usage:
* <pre>{@code
* final RegexTokenizer tokenizer = new RegexTokenizer.Builder()
* .setRegex("[,;]")
* .setMinLength(2)
* .setToLowercase(true)
* .create();
* final Frame tokens = tokenizer.transform(inputFrame);
* }
* </pre>
*/
public class RegexTokenizer extends MRTask<RegexTokenizer> {
private final String _regex;
private final boolean _toLowercase;
private final int _minLength;
public RegexTokenizer(String regex) {
this(regex, false, 0);
}
private RegexTokenizer(String regex, boolean toLowercase, int minLength) {
_regex = regex;
_toLowercase = toLowercase;
_minLength = minLength;
}
@Override
public void map(Chunk[] cs, NewChunk nc) {
BufferedString tmpStr = new BufferedString();
for (int row = 0; row < cs[0]._len; row++) {
for (Chunk chk : cs) {
if (chk.isNA(row))
continue; // input NAs are skipped
String str = chk.atStr(tmpStr, row).toString();
if (_toLowercase) {
str = str.toLowerCase();
}
String[] ss = str.split(_regex);
for (String s : ss) {
if (s.length() >= _minLength) {
nc.addStr(s);
}
}
}
nc.addNA(); // sequences of tokens are delimited by NAs
}
}
/**
* Tokenizes a given Frame
* @param input Input Frame is expected to only contain String columns. Each row of the Frame represents a logical
* sentence. The sentence can span one or more cells of the row.
* @return Frame made of a single String column where original sentences are split into tokens and delimited by NAs.
*/
public Frame transform(Frame input) {
return doAll(Vec.T_STR, input).outputFrame();
}
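// Worked example (hypothetical input): with regex "[,;]", minLength 2 and toLowercase
// enabled, an input cell "Hello,World;ab,x" produces the output rows
// "hello", "world", "ab" (the token "x" is dropped as too short), followed by an NA
// marking the end of the sentence.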
public static class Builder {
private String _regex;
private boolean _toLowercase;
private int _minLength;
public Builder setRegex(String regex) {
_regex = regex;
return this;
}
public Builder setToLowercase(boolean toLowercase) {
_toLowercase = toLowercase;
return this;
}
public Builder setMinLength(int minLength) {
_minLength = minLength;
return this;
}
public RegexTokenizer create() {
return new RegexTokenizer(_regex, _toLowercase, _minLength);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ScoreKeeper.java
|
package hex;
import water.H2O;
import water.Iced;
import water.exceptions.H2OIllegalArgumentException;
import water.util.ArrayUtils;
import water.util.Log;
import water.util.MathUtils;
import java.util.Arrays;
/**
* Lightweight keeper of scores,
* solely intended for display (either directly or as a helper to create the scoring-history TwoDimTable).
* Not intended to store large AUC objects, ConfusionMatrices, etc.
*/
public class ScoreKeeper extends Iced {
public double _mean_residual_deviance = Double.NaN;
public double _mse = Double.NaN;
public double _rmse = Double.NaN;
public double _mae = Double.NaN;
public double _rmsle = Double.NaN;
public double _logloss = Double.NaN;
public double _AUC = Double.NaN;
public double _pr_auc = Double.NaN;
public double _classError = Double.NaN;
public double _mean_per_class_error = Double.NaN;
public double _custom_metric = Double.NaN;
public float[] _hitratio;
public double _lift = Double.NaN; //Lift in top group
public double _r2 = Double.NaN;
public double _anomaly_score = Double.NaN;
public double _anomaly_score_normalized = Double.NaN;
public double _AUUC = Double.NaN;
public double _auuc_normalized = Double.NaN;
public double _qini = Double.NaN;
public int _auuc_nbins = 0;
public double _ate = Double.NaN;
public double _att = Double.NaN;
public double _atc = Double.NaN;
public ScoreKeeper() {}
/**
* Keep score of mean squared error <i>only</i>.
* @param mse mean squared error value to keep
*/
public ScoreKeeper(double mse) { _mse = mse; }
/**
* Keep score of a given ModelMetrics.
* @param mm ModelMetrics to keep track of.
*/
public ScoreKeeper(ModelMetrics mm) { fillFrom(mm); }
/**
* Keep score for a model using its validation_metrics if available and training_metrics if not.
* @param m model for which we should keep score
*/
public ScoreKeeper(Model m) {
if (null == m) throw new H2OIllegalArgumentException("model", "ScoreKeeper(Model model)", null);
if (null == m._output) throw new H2OIllegalArgumentException("model._output", "ScoreKeeper(Model model)", null);
if (null != m._output._cross_validation_metrics) {
fillFrom(m._output._cross_validation_metrics);
} else if (null != m._output._validation_metrics) {
fillFrom(m._output._validation_metrics);
} else {
fillFrom(m._output._training_metrics);
}
}
public boolean isEmpty() {
return Double.isNaN(_mse) &&
Double.isNaN(_logloss) &&
Double.isNaN(_anomaly_score_normalized) &&
Double.isNaN(_custom_metric); // at least one of them should always be filled
}
public void fillFrom(ModelMetrics m) {
if (m == null) return;
fillFrom(m, m._custom_metric);
}
public void fillFrom(ModelMetrics m, CustomMetric customMetric) {
if (m == null) return;
_mse = m._MSE;
_rmse = m.rmse();
if (m instanceof ModelMetricsRegression) {
_mean_residual_deviance = ((ModelMetricsRegression)m)._mean_residual_deviance;
_mae = ((ModelMetricsRegression)m)._mean_absolute_error;
_rmsle = ((ModelMetricsRegression)m)._root_mean_squared_log_error;
_r2 = ((ModelMetricsRegression)m).r2();
}
if (m instanceof ModelMetricsBinomial) {
_logloss = ((ModelMetricsBinomial)m)._logloss;
_r2 = ((ModelMetricsBinomial)m).r2();
if (((ModelMetricsBinomial)m)._auc != null) {
_AUC = ((ModelMetricsBinomial) m)._auc._auc;
_pr_auc = ((ModelMetricsBinomial) m)._auc.pr_auc();
_classError = ((ModelMetricsBinomial) m)._auc.defaultErr();
_mean_per_class_error = ((ModelMetricsBinomial)m).mean_per_class_error();
}
GainsLift gl = ((ModelMetricsBinomial)m)._gainsLift;
if (gl != null && gl.response_rates != null && gl.response_rates.length > 0) {
_lift = gl.response_rates[0] / gl.avg_response_rate;
}
}
else if (m instanceof ModelMetricsMultinomial) {
_logloss = ((ModelMetricsMultinomial)m)._logloss;
_classError = ((ModelMetricsMultinomial)m)._cm.err();
_mean_per_class_error = ((ModelMetricsMultinomial)m).mean_per_class_error();
_hitratio = ((ModelMetricsMultinomial)m)._hit_ratios;
_r2 = ((ModelMetricsMultinomial)m).r2();
_AUC = ((ModelMetricsMultinomial)m).auc();
_pr_auc = ((ModelMetricsMultinomial)m).pr_auc();
} else if (m instanceof ModelMetricsOrdinal) {
_logloss = ((ModelMetricsOrdinal)m)._logloss;
_classError = ((ModelMetricsOrdinal)m)._cm.err();
_mean_per_class_error = ((ModelMetricsOrdinal)m).mean_per_class_error();
_hitratio = ((ModelMetricsOrdinal)m)._hit_ratios;
_r2 = ((ModelMetricsOrdinal)m).r2();
} else if (m instanceof ScoreKeeperAware) {
((ScoreKeeperAware) m).fillTo(this);
} else if (m instanceof ModelMetricsBinomialUplift){
_AUUC = ((ModelMetricsBinomialUplift)m).auuc();
_auuc_normalized = ((ModelMetricsBinomialUplift)m).auucNormalized();
_qini = ((ModelMetricsBinomialUplift)m).qini();
_auuc_nbins = ((ModelMetricsBinomialUplift)m).nbins();
_ate = ((ModelMetricsBinomialUplift)m).ate();
_att = ((ModelMetricsBinomialUplift)m).att();
_atc = ((ModelMetricsBinomialUplift)m).atc();
}
if (customMetric != null ) {
_custom_metric = customMetric.value;
}
}
public interface IStoppingMetric {
int direction();
boolean isLowerBoundBy0();
IConvergenceStrategy getConvergenceStrategy();
double metricValue(ScoreKeeper sk);
}
public enum StoppingMetric implements IStoppingMetric {
AUTO(ConvergenceStrategy.AUTO, false, false),
deviance(ConvergenceStrategy.LESS_IS_BETTER, false, false),
logloss(ConvergenceStrategy.LESS_IS_BETTER, true, true),
MSE(ConvergenceStrategy.LESS_IS_BETTER, true, false),
RMSE(ConvergenceStrategy.LESS_IS_BETTER, true, false),
MAE(ConvergenceStrategy.LESS_IS_BETTER, true, false),
RMSLE(ConvergenceStrategy.LESS_IS_BETTER, true, false),
AUC(ConvergenceStrategy.MORE_IS_BETTER, true, true),
AUCPR(ConvergenceStrategy.MORE_IS_BETTER, true, true),
lift_top_group(ConvergenceStrategy.MORE_IS_BETTER, false, true),
misclassification(ConvergenceStrategy.LESS_IS_BETTER, true, true),
mean_per_class_error(ConvergenceStrategy.LESS_IS_BETTER, true, true),
anomaly_score(ConvergenceStrategy.NON_DIRECTIONAL, false, false),
AUUC(ConvergenceStrategy.MORE_IS_BETTER, false, false),
ATE(ConvergenceStrategy.MORE_IS_BETTER, false, false),
ATT(ConvergenceStrategy.MORE_IS_BETTER, false, false),
ATC(ConvergenceStrategy.MORE_IS_BETTER, false, false),
qini(ConvergenceStrategy.MORE_IS_BETTER, false, false),
custom(ConvergenceStrategy.LESS_IS_BETTER, false, false),
custom_increasing(ConvergenceStrategy.MORE_IS_BETTER, false, false),
;
private final ConvergenceStrategy _convergence;
private final boolean _lowerBoundBy0;
private final boolean _classificationOnly;
StoppingMetric(ConvergenceStrategy convergence, boolean lowerBoundBy0, boolean classificationOnly) {
_convergence = convergence;
_lowerBoundBy0 = lowerBoundBy0;
_classificationOnly = classificationOnly;
}
public int direction() {
return _convergence._direction;
}
public boolean isLowerBoundBy0() {
return _lowerBoundBy0;
}
public boolean isClassificationOnly() {
return _classificationOnly;
}
public ConvergenceStrategy getConvergenceStrategy() {
return _convergence;
}
@Override
public double metricValue(ScoreKeeper skj) {
double val;
switch (this) {
case AUC:
val = skj._AUC;
break;
case MSE:
val = skj._mse;
break;
case RMSE:
val = skj._rmse;
break;
case MAE:
val = skj._mae;
break;
case RMSLE:
val = skj._rmsle;
break;
case deviance:
val = skj._mean_residual_deviance;
break;
case logloss:
val = skj._logloss;
break;
case misclassification:
val = skj._classError;
break;
case AUCPR:
val = skj._pr_auc;
break;
case mean_per_class_error:
val = skj._mean_per_class_error;
break;
case lift_top_group:
val = skj._lift;
break;
case custom:
case custom_increasing:
val = skj._custom_metric;
break;
case anomaly_score:
val = skj._anomaly_score_normalized;
break;
case AUUC:
val = skj._AUUC;
break;
case ATE:
val = skj._ate;
break;
case ATT:
val = skj._att;
break;
case ATC:
val = skj._atc;
break;
case qini:
val = skj._qini;
break;
default:
throw H2O.unimpl("Undefined stopping criterion.");
}
return val;
}
}
public enum ProblemType {
regression(StoppingMetric.deviance),
classification(StoppingMetric.logloss),
anomaly_detection(StoppingMetric.anomaly_score),
autoencoder(StoppingMetric.MSE),
uplift(StoppingMetric.AUUC);
private final StoppingMetric _defaultMetric;
ProblemType(StoppingMetric defaultMetric) {
_defaultMetric = defaultMetric;
}
public StoppingMetric defaultMetric() {
return _defaultMetric;
}
public static ProblemType forSupervised(boolean isClassifier, boolean isUplift) {
return isClassifier ? isUplift ? uplift : classification : regression;
}
public static ProblemType forSupervised(boolean isClassifier) {
return forSupervised(isClassifier,false);
}
}
/** Based on the given array of ScoreKeeper and the stopping criterion, what is the best scoring iteration of the last k iterations? */
public static int best(ScoreKeeper[] sk, final int k, IStoppingMetric criterion) {
int best = sk.length - 1;
ScoreKeeper.IConvergenceStrategy cs = criterion.getConvergenceStrategy();
if (cs != ConvergenceStrategy.LESS_IS_BETTER && cs != ConvergenceStrategy.MORE_IS_BETTER) {
return best;
}
double bestVal = criterion.metricValue(sk[best]);
for (int i = 1; i < k; i++) {
int idx = sk.length - i - 1;
if (idx < 0)
break;
double val = criterion.metricValue(sk[idx]);
if (cs == ConvergenceStrategy.LESS_IS_BETTER) {
if (val < bestVal) {
best = idx;
bestVal = val;
}
} else {
if (val > bestVal) {
best = idx;
bestVal = val;
}
}
}
return best;
}
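// Worked example with hypothetical metric values: for metrics {0.60, 0.70, 0.65, 0.68},
// k = 3 and a MORE_IS_BETTER criterion, the method starts at the last index 3 (0.68),
// then inspects indices 2 (0.65) and 1 (0.70) and returns 1, the index of the best value.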
/** Based on the given array of ScoreKeeper and the stopping criterion, should we stop early? */
public static boolean stopEarly(ScoreKeeper[] sk, int k, ProblemType type, IStoppingMetric criterion, double rel_improvement, String what, boolean verbose) {
if (k == 0) return false;
int len = sk.length - 1; //how many "full"/"conservative" scoring events we have (skip the first)
if (len < 2*k) return false; //need at least k for SMA and another k to tell whether the model got better or not
if (StoppingMetric.AUTO.equals(criterion)) {
criterion = type.defaultMetric();
}
IConvergenceStrategy convergenceStrategy = criterion.getConvergenceStrategy();
double[] movingAvg = new double[k+1]; //need one moving average value for the last k+1 scoring events
double lastBeforeK = Double.MAX_VALUE;
double minInLastK = Double.MAX_VALUE;
double maxInLastK = -Double.MAX_VALUE;
for (int i=0;i<movingAvg.length;++i) {
movingAvg[i] = 0;
// compute k+1 simple moving averages of window size k
// need to go back 2*k steps
// Example: 20 scoring events, k=3
// need to go back from idx 19 to idx 14
// movingAvg[0] is based on scoring events indices 14,15,16 <- reference
// movingAvg[1] is based on scoring events indices 15,16,17 <- first "new" smooth score
// movingAvg[2] is based on scoring events indices 16,17,18 <- second "new" smooth score
// movingAvg[3] is based on scoring events indices 17,18,19 <- third "new" smooth score
// Example: 18 scoring events, k=2
// need to go back from idx 17 to idx 14
// movingAvg[0] is based on scoring events indices 14,15 <- reference
// movingAvg[1] is based on scoring events indices 15,16 <- first "new" smooth score
// movingAvg[2] is based on scoring events indices 16,17 <- second "new" smooth score
// Example: 18 scoring events, k=1
// need to go back from idx 17 to idx 16
// movingAvg[0] is based on scoring event index 16 <- reference
// movingAvg[1] is based on scoring event index 17 <- first "new" score
int startIdx = sk.length-2*k+i;
for (int j = 0; j < k; ++j) {
ScoreKeeper skj = sk[startIdx+j];
double val = criterion.metricValue(skj);
movingAvg[i] += val;
}
movingAvg[i]/=k;
if (Double.isNaN(movingAvg[i])) return false;
if (i==0)
lastBeforeK = movingAvg[i];
else {
minInLastK = Math.min(movingAvg[i], minInLastK);
maxInLastK = Math.max(movingAvg[i], maxInLastK);
}
}
assert(lastBeforeK != Double.MAX_VALUE);
assert(maxInLastK != -Double.MAX_VALUE);
assert(minInLastK != Double.MAX_VALUE);
if (verbose)
Log.info("Windowed averages (window size " + k + ") of " + what + " " + (k+1) + " " + criterion.toString() + " metrics: " + Arrays.toString(movingAvg));
if (criterion.isLowerBoundBy0() && lastBeforeK == 0.0) {
Log.info("Checking convergence with " + criterion.toString() + " metric: " + lastBeforeK + " (metric converged to its lower bound).");
return true;
}
final double extremePoint = convergenceStrategy.extremePoint(lastBeforeK, minInLastK, maxInLastK);
// a zero-crossing can happen for residual deviance or r^2 -> don't mark stopEarly yet; this avoids division by 0 and nonsensical relative-improvement math below
if (Math.signum(ArrayUtils.maxValue(movingAvg)) != Math.signum(ArrayUtils.minValue(movingAvg))) return false;
if (Math.signum(extremePoint) != Math.signum(lastBeforeK))
return false;
boolean stopEarly = convergenceStrategy.stopEarly(lastBeforeK, minInLastK, maxInLastK, rel_improvement);
if (verbose)
Log.info("Checking convergence with " + criterion.toString() + " metric: " + lastBeforeK + " --> " + extremePoint + (stopEarly ? " (converged)." : " (still improving)."));
return stopEarly;
} // stopEarly
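// Worked example with hypothetical numbers: for an AUC-based (MORE_IS_BETTER) criterion with
// lastBeforeK = 0.800, maxInLastK = 0.805 and rel_improvement = 0.01, the ratio
// 0.805 / 0.800 = 1.00625 <= 1.01, so the metric is considered converged and stopEarly returns true.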
interface IConvergenceStrategy {
double extremePoint(double lastBeforeK, double minInLastK, double maxInLastK);
boolean stopEarly(double lastBeforeK, double minInLastK, double maxInLastK, double rel_improvement);
}
enum ConvergenceStrategy implements IConvergenceStrategy {
AUTO(0), // dummy - should never be actually used (meant to be assigned to AUTO metric)
MORE_IS_BETTER(1) {
@Override
public double extremePoint(double lastBeforeK, double minInLastK, double maxInLastK) {
return maxInLastK;
}
@Override
public boolean stopEarly(double lastBeforeK, double minInLastK, double maxInLastK, double rel_improvement) {
double ratio = maxInLastK / lastBeforeK;
if (Double.isNaN(ratio))
return false;
return ratio <= 1 + rel_improvement;
}
},
LESS_IS_BETTER(-1) {
@Override
public double extremePoint(double lastBeforeK, double minInLastK, double maxInLastK) {
return minInLastK;
}
@Override
public boolean stopEarly(double lastBeforeK, double minInLastK, double maxInLastK, double rel_improvement) {
double ratio = minInLastK / lastBeforeK;
if (Double.isNaN(ratio))
return false;
return ratio >= 1 - rel_improvement;
}
},
NON_DIRECTIONAL(0) {
@Override
public double extremePoint(double lastBeforeK, double minInLastK, double maxInLastK) {
return Math.abs(lastBeforeK - minInLastK) > Math.abs(lastBeforeK - maxInLastK) ? minInLastK : maxInLastK;
}
@Override
public boolean stopEarly(double lastBeforeK, double minInLastK, double maxInLastK, double rel_change) {
double extreme = extremePoint(lastBeforeK, minInLastK, maxInLastK);
double ratio = extreme / lastBeforeK;
if (Double.isNaN(ratio))
return false;
return ratio >= 1 - rel_change && ratio <= 1 + rel_change;
}
};
final int _direction;
ConvergenceStrategy(int direction) {
_direction = direction;
}
@Override
public double extremePoint(double lastBeforeK, double minInLastK, double maxInLastK) {
throw new IllegalStateException("Should overridden in Strategy implementation");
}
@Override
public boolean stopEarly(double lastBeforeK, double minInLastK, double maxInLastK, double rel_improvement) {
throw new IllegalStateException("Should overridden in Strategy implementation");
}
}
/**
* Compare this ScoreKeeper with another ScoreKeeper.
* @param that the other ScoreKeeper
* @return true if they are equal (up to 1e-6 absolute and relative error, or both contain NaN for the same values)
*/
@Override public boolean equals(Object that) {
if (! (that instanceof ScoreKeeper)) return false;
ScoreKeeper o = (ScoreKeeper)that;
if (_hitratio == null && o._hitratio != null) return false;
if (_hitratio != null && o._hitratio == null) return false;
if (_hitratio != null && o._hitratio != null) {
if (_hitratio.length != o._hitratio.length) return false;
for (int i=0; i<_hitratio.length; ++i) {
if (!MathUtils.compare(_hitratio[i], o._hitratio[i], 1e-6, 1e-6)) return false;
}
}
return MathUtils.compare(_mean_residual_deviance, o._mean_residual_deviance, 1e-6, 1e-6)
&& MathUtils.compare(_mse, o._mse, 1e-6, 1e-6)
&& MathUtils.compare(_mae, o._mae, 1e-6, 1e-6)
&& MathUtils.compare(_rmsle, o._rmsle, 1e-6, 1e-6)
&& MathUtils.compare(_logloss, o._logloss, 1e-6, 1e-6)
&& MathUtils.compare(_classError, o._classError, 1e-6, 1e-6)
&& MathUtils.compare(_mean_per_class_error, o._mean_per_class_error, 1e-6, 1e-6)
&& MathUtils.compare(_r2, o._r2, 1e-6, 1e-6)
&& MathUtils.compare(_lift, o._lift, 1e-6, 1e-6);
}
@Override
public String toString() {
return "ScoreKeeper{" +
" _mean_residual_deviance=" + _mean_residual_deviance +
", _rmse=" + _rmse +
",_mae=" + _mae +
",_rmsle=" + _rmsle +
", _logloss=" + _logloss +
", _AUC=" + _AUC +
", _pr_auc="+_pr_auc+
", _classError=" + _classError +
", _mean_per_class_error=" + _mean_per_class_error +
", _hitratio=" + Arrays.toString(_hitratio) +
", _lift=" + _lift +
", _anomaly_score_normalized=" + _anomaly_score_normalized +
", _custom_metric=" + _custom_metric +
'}';
}
public interface ScoreKeeperAware {
void fillTo(ScoreKeeper sk);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ScoringInfo.java
|
package hex;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import water.H2O;
import water.Iced;
import water.util.PrettyPrint;
import water.util.TwoDimTable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
/**
* Lightweight scoring history snapshot, for things like displaying the scoring history.
*/
public class ScoringInfo extends Iced<ScoringInfo> {
public long time_stamp_ms; //absolute time the model metrics were computed
public long total_training_time_ms; //total training time until this scoring event (including checkpoints)
public long total_scoring_time_ms; //total scoring time until this scoring event (including checkpoints)
public long total_setup_time_ms; //total setup time until this scoring event (including checkpoints)
public long this_scoring_time_ms; //scoring time for this scoring event (only)
public boolean is_classification;
public boolean is_autoencoder;
public boolean validation;
public boolean cross_validation;
public ScoreKeeper scored_train = new ScoreKeeper();
public ScoreKeeper scored_valid = new ScoreKeeper();
public ScoreKeeper scored_xval = new ScoreKeeper();
public VarImp variable_importances;
public interface HasEpochs{ public double epoch_counter(); }
public interface HasSamples { public double training_samples(); public long score_training_samples(); public long score_validation_samples(); }
public interface HasIterations { public int iterations(); }
/**
* Add a new ScoringInfo to the end of the given array and return the new, larger array. Note: this has no side effects.
* @param scoringInfo the scoring event to add
* @param scoringInfos the existing scoring history (may be null)
* @return a new array containing the old entries followed by the new one
*/
public static ScoringInfo[] prependScoringInfo(ScoringInfo scoringInfo, ScoringInfo[] scoringInfos) {
if (scoringInfos == null) {
return new ScoringInfo[]{ scoringInfo };
} else {
ScoringInfo[] bigger = new ScoringInfo[scoringInfos.length + 1];
System.arraycopy(scoringInfos, 0, bigger, 0, scoringInfos.length);
bigger[bigger.length - 1] = scoringInfo;
return bigger;
}
}
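// A minimal sketch, growing a history of hypothetical scoring events:
//
//   ScoringInfo[] history = null;
//   history = ScoringInfo.prependScoringInfo(first, history);  // -> [first]
//   history = ScoringInfo.prependScoringInfo(second, history); // -> [first, second]
//
// Despite the name, the new event is appended at the end of the returned array.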
/** For a given array of ScoringInfo return an array of the cross-validation, validation or training ScoreKeepers, as available. */
public static ScoreKeeper[] scoreKeepers(ScoringInfo[] scoring_history) {
ScoreKeeper[] sk = new ScoreKeeper[scoring_history.length];
for (int i=0;i<sk.length;++i) {
sk[i] = scoring_history[i].cross_validation ? scoring_history[i].scored_xval
: scoring_history[i].validation ? scoring_history[i].scored_valid
: scoring_history[i].scored_train;
}
return sk;
}
public double metric(ScoreKeeper.StoppingMetric criterion) {
switch (criterion) {
case AUC: { return cross_validation ? scored_xval._AUC : validation ? scored_valid._AUC : scored_train._AUC; }
case MSE: { return cross_validation ? scored_xval._mse : validation ? scored_valid._mse : scored_train._mse; }
case RMSE: { return cross_validation ? scored_xval._rmse : validation ? scored_valid._rmse : scored_train._rmse; }
case MAE: { return cross_validation ? scored_xval._mae : validation ? scored_valid._mae : scored_train._mae; }
case RMSLE: { return cross_validation ? scored_xval._rmsle : validation ? scored_valid._rmsle : scored_train._rmsle; }
case deviance: { return cross_validation ? scored_xval._mean_residual_deviance : validation ? scored_valid._mean_residual_deviance : scored_train._mean_residual_deviance; }
case logloss: { return cross_validation ? scored_xval._logloss : validation ? scored_valid._logloss : scored_train._logloss; }
case misclassification: { return cross_validation ? scored_xval._classError : validation ? scored_valid._classError : scored_train._classError; }
case AUCPR: { return cross_validation ? scored_xval._pr_auc : validation ? scored_valid._pr_auc : scored_train._pr_auc; }
case lift_top_group: { return cross_validation ? scored_xval._lift : validation ? scored_valid._lift : scored_train._lift; }
case mean_per_class_error: { return cross_validation ? scored_xval._mean_per_class_error : validation ? scored_valid._mean_per_class_error : scored_train._mean_per_class_error; }
// case r2: { return cross_validation ? scored_xval._r2 : validation ? scored_valid._r2 : scored_train._r2; }
default: throw H2O.unimpl("Undefined stopping criterion: " + criterion);
}
}
/**
* Create a java.util.Comparator which allows us to sort an array of ScoringInfo based
* on a stopping criterion / metric. Uses cross-validation or validation metrics if
* available, otherwise falls back to training metrics. Understands whether more is
* better for the given criterion and will order the array so that the best models are
* last (to fit into the behavior of a model that improves over time)
* @param criterion scalar model metric / stopping criterion by which to sort
* @return a Comparator on a stopping criterion / metric
*/
public static Comparator<ScoringInfo> comparator(final ScoreKeeper.StoppingMetric criterion) {
final int direction = criterion.direction();
return new Comparator<ScoringInfo>() {
@Override
public int compare(ScoringInfo o1, ScoringInfo o2) {
return direction * (int)Math.signum(o1.metric(criterion) - o2.metric(criterion));
}
};
}
/**
* Sort an array of ScoringInfo based on a stopping criterion / metric. Uses
* cross-validation or validation metrics if available, otherwise falls back to training
* metrics. Understands whether more is better for the given criterion and will order
* the array so that the best models are last
* @param scoringInfos array of ScoringInfo to sort
* @param criterion scalar model metric / stopping criterion by which to sort
*/
public static void sort(ScoringInfo[] scoringInfos, ScoreKeeper.StoppingMetric criterion) {
if (null == scoringInfos) return;
if (scoringInfos.length == 0) return;
// handle StoppingMetric.AUTO
if (criterion == ScoreKeeper.StoppingMetric.AUTO)
criterion = scoringInfos[0].is_classification ? ScoreKeeper.StoppingMetric.logloss
: scoringInfos[0].is_autoencoder ? ScoreKeeper.StoppingMetric.RMSE
: ScoreKeeper.StoppingMetric.deviance;
Arrays.sort(scoringInfos, ScoringInfo.comparator(criterion));
}
/**
* Create a TwoDimTable to display the scoring history from an array of scoringInfo.
* @param scoringInfos array of ScoringInfo to render
* @param hasValidation do we have validation metrics?
* @param hasCrossValidation do we have cross-validation metrics?
* @param modelCategory the category for the model or models
* @param isAutoencoder is the model or are the models autoencoders?
* @return a TwoDimTable rendering of the scoring history
*/
public static TwoDimTable createScoringHistoryTable(ScoringInfo[] scoringInfos, boolean hasValidation, boolean hasCrossValidation, ModelCategory modelCategory, boolean isAutoencoder, boolean hasCustomMetric) {
boolean hasEpochs = (scoringInfos instanceof HasEpochs[]);
boolean hasSamples = (scoringInfos instanceof HasSamples[]);
boolean hasIterations = (scoringInfos instanceof HasIterations[]) || (scoringInfos != null &&
scoringInfos.length > 0 && scoringInfos[0] instanceof HasIterations);
boolean isClassifier = (modelCategory == ModelCategory.Binomial || modelCategory == ModelCategory.Multinomial
|| modelCategory == ModelCategory.Ordinal);
List<String> colHeaders = new ArrayList<>();
List<String> colTypes = new ArrayList<>();
List<String> colFormat = new ArrayList<>();
colHeaders.add("Timestamp"); colTypes.add("string"); colFormat.add("%s");
colHeaders.add("Duration"); colTypes.add("string"); colFormat.add("%s");
if (hasSamples) { colHeaders.add("Training Speed"); colTypes.add("string"); colFormat.add("%s"); }
if (hasEpochs) { colHeaders.add("Epochs"); colTypes.add("double"); colFormat.add("%.5f"); }
if (hasIterations) { colHeaders.add("Iterations"); colTypes.add("int"); colFormat.add("%d"); }
if (hasSamples) { colHeaders.add("Samples"); colTypes.add("double"); colFormat.add("%f"); }
colHeaders.add("Training RMSE"); colTypes.add("double"); colFormat.add("%.5f");
if (modelCategory == ModelCategory.Regression) {
colHeaders.add("Training Deviance"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Training MAE"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Training r2"); colTypes.add("double"); colFormat.add("%.5f");
}
if (isClassifier) {
colHeaders.add("Training LogLoss"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Training r2"); colTypes.add("double"); colFormat.add("%.5f");
}
if (modelCategory == ModelCategory.Binomial) {
colHeaders.add("Training AUC"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Training pr_auc"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Training Lift"); colTypes.add("double"); colFormat.add("%.5f");
}
if (isClassifier) {
colHeaders.add("Training Classification Error"); colTypes.add("double"); colFormat.add("%.5f");
}
if(modelCategory == ModelCategory.Multinomial){
colHeaders.add("Training AUC"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Training pr_auc"); colTypes.add("double"); colFormat.add("%.5f");
}
if(modelCategory == ModelCategory.AutoEncoder) {
colHeaders.add("Training MSE"); colTypes.add("double"); colFormat.add("%.5f");
}
if (hasCustomMetric) {
colHeaders.add("Training Custom"); colTypes.add("double"); colFormat.add("%.5f");
}
if (hasValidation) {
colHeaders.add("Validation RMSE"); colTypes.add("double"); colFormat.add("%.5f");
if (modelCategory == ModelCategory.Regression) {
colHeaders.add("Validation Deviance"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Validation MAE"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Validation r2"); colTypes.add("double"); colFormat.add("%.5f");
}
if (isClassifier) {
colHeaders.add("Validation LogLoss"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Validation r2"); colTypes.add("double"); colFormat.add("%.5f");
}
if (modelCategory == ModelCategory.Binomial) {
colHeaders.add("Validation AUC"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Validation pr_auc"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Validation Lift"); colTypes.add("double"); colFormat.add("%.5f");
}
if (isClassifier) {
colHeaders.add("Validation Classification Error"); colTypes.add("double"); colFormat.add("%.5f");
}
if (modelCategory == ModelCategory.Multinomial) {
colHeaders.add("Validation AUC"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Validation pr_auc"); colTypes.add("double"); colFormat.add("%.5f");
}
if(modelCategory == ModelCategory.AutoEncoder) {
colHeaders.add("Validation MSE"); colTypes.add("double"); colFormat.add("%.5f");
}
if (hasCustomMetric) {
colHeaders.add("Validation Custom"); colTypes.add("double"); colFormat.add("%.5f");
}
} // (hasValidation)
if (hasCrossValidation) {
colHeaders.add("Cross-Validation RMSE"); colTypes.add("double"); colFormat.add("%.5f");
if (modelCategory == ModelCategory.Regression) {
colHeaders.add("Cross-Validation Deviance"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Cross-Validation MAE"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Cross-Validation r2"); colTypes.add("double"); colFormat.add("%.5f");
}
if (isClassifier) {
colHeaders.add("Cross-Validation LogLoss"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Cross-Validation r2"); colTypes.add("double"); colFormat.add("%.5f");
}
if (modelCategory == ModelCategory.Binomial) {
colHeaders.add("Cross-Validation AUC"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Cross-Validation pr_auc"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Cross-Validation Lift"); colTypes.add("double"); colFormat.add("%.5f");
}
if (isClassifier) {
colHeaders.add("Cross-Validation Classification Error"); colTypes.add("double"); colFormat.add("%.5f");
}
if (modelCategory == ModelCategory.Multinomial) {
colHeaders.add("Cross-Validation AUC"); colTypes.add("double"); colFormat.add("%.5f");
colHeaders.add("Cross-Validation pr_auc"); colTypes.add("double"); colFormat.add("%.5f");
}
if(modelCategory == ModelCategory.AutoEncoder) {
colHeaders.add("Cross-Validation MSE"); colTypes.add("double"); colFormat.add("%.5f");
}
if (hasCustomMetric) {
colHeaders.add("Cross-Validation Custom"); colTypes.add("double"); colFormat.add("%.5f");
}
} // (hasCrossValidation)
final int rows = scoringInfos == null ? 0 : scoringInfos.length;
String[] s = new String[0];
TwoDimTable table = new TwoDimTable(
"Scoring History", null,
new String[rows],
colHeaders.toArray(s),
colTypes.toArray(s),
colFormat.toArray(s),
"");
int row = 0;
if (null == scoringInfos)
return table;
for (ScoringInfo si : scoringInfos) {
int col = 0;
assert (row < table.getRowDim());
assert (col < table.getColDim());
DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss");
table.set(row, col++, fmt.print(si.time_stamp_ms));
table.set(row, col++, PrettyPrint.msecs(si.total_training_time_ms, true));
if (hasSamples) {
// Log.info("1st speed: (samples: " + si.training_samples + ", total_run_time: " + si.total_training_time_ms + ", total_scoring_time: " + si.total_scoring_time_ms + ", total_setup_time: " + si.total_setup_time_ms + ")");
float speed = (float) (((HasSamples)si).training_samples() / ((1.+si.total_training_time_ms - si.total_scoring_time_ms - si.total_setup_time_ms) / 1e3));
assert (speed >= 0) : "Speed should not be negative! " + speed + " = (float)(" + ((HasSamples)si).training_samples() + "/((" + si.total_training_time_ms + "-" + si.total_scoring_time_ms + "-" + si.total_setup_time_ms + ")/1e3)";
table.set(row, col++, si.total_training_time_ms == 0 ? null : (
speed>10 ? String.format("%d", (int)speed) : String.format("%g", speed)
) + " obs/sec");
}
if (hasEpochs) table.set(row, col++, ((HasEpochs)si).epoch_counter());
if (hasIterations) table.set(row, col++, ((HasIterations)si).iterations());
if (hasSamples) table.set(row, col++, ((HasSamples)si).training_samples());
table.set(row, col++, si.scored_train != null ? si.scored_train._rmse : Double.NaN);
if (modelCategory == ModelCategory.Regression) {
table.set(row, col++, si.scored_train != null ? si.scored_train._mean_residual_deviance : Double.NaN);
table.set(row, col++, si.scored_train != null ? si.scored_train._mae : Double.NaN);
table.set(row, col++, si.scored_train != null ? si.scored_train._r2 : Double.NaN);
}
if (isClassifier) {
table.set(row, col++, si.scored_train != null ? si.scored_train._logloss : Double.NaN);
table.set(row, col++, si.scored_train != null ? si.scored_train._r2 : Double.NaN);
}
if (modelCategory == ModelCategory.Binomial) {
table.set(row, col++, si.scored_train != null ? si.scored_train._AUC : Double.NaN);
table.set(row, col++, si.scored_train != null ? si.scored_train._pr_auc : Double.NaN);
table.set(row, col++, si.scored_train != null ? si.scored_train._lift : Double.NaN);
}
if (isClassifier) {
table.set(row, col++, si.scored_train != null ? si.scored_train._classError : Double.NaN);
}
if (modelCategory == ModelCategory.Multinomial) {
table.set(row, col++, si.scored_train != null ? si.scored_train._AUC : Double.NaN);
table.set(row, col++, si.scored_train != null ? si.scored_train._pr_auc : Double.NaN);
}
if (isAutoencoder) {
table.set(row, col++, si.scored_train != null ? si.scored_train._mse : Double.NaN);
}
if (hasCustomMetric) {
table.set(row, col++, si.scored_train != null ? si.scored_train._custom_metric : Double.NaN);
}
if (hasValidation) {
table.set(row, col++, si.scored_valid != null ? si.scored_valid._rmse : Double.NaN);
if (modelCategory == ModelCategory.Regression) {
table.set(row, col++, si.scored_valid != null ? si.scored_valid._mean_residual_deviance : Double.NaN);
table.set(row, col++, si.scored_valid != null ? si.scored_valid._mae : Double.NaN);
table.set(row, col++, si.scored_valid != null ? si.scored_valid._r2 : Double.NaN);
}
if (isClassifier) {
table.set(row, col++, si.scored_valid != null ? si.scored_valid._logloss : Double.NaN);
table.set(row, col++, si.scored_valid != null ? si.scored_valid._r2 : Double.NaN);
}
if (modelCategory == ModelCategory.Binomial) {
table.set(row, col++, si.scored_valid != null ? si.scored_valid._AUC : Double.NaN);
table.set(row, col++, si.scored_valid != null ? si.scored_valid._pr_auc : Double.NaN);
table.set(row, col++, si.scored_valid != null ? si.scored_valid._lift : Double.NaN);
}
if (isClassifier) {
table.set(row, col++, si.scored_valid != null ? si.scored_valid._classError : Double.NaN);
}
if (modelCategory == ModelCategory.Multinomial) {
table.set(row, col++, si.scored_valid != null ? si.scored_valid._AUC : Double.NaN);
table.set(row, col++, si.scored_valid != null ? si.scored_valid._pr_auc : Double.NaN);
}
if (isAutoencoder) {
table.set(row, col++, si.scored_valid != null ? si.scored_valid._mse : Double.NaN);
}
if (hasCustomMetric) {
table.set(row, col++, si.scored_valid != null ? si.scored_valid._custom_metric : Double.NaN);
}
} // hasValidation
if (hasCrossValidation) {
table.set(row, col++, si.scored_xval != null ? si.scored_xval._rmse : Double.NaN);
if (modelCategory == ModelCategory.Regression) {
table.set(row, col++, si.scored_xval != null ? si.scored_xval._mean_residual_deviance : Double.NaN);
table.set(row, col++, si.scored_xval != null ? si.scored_xval._mae : Double.NaN);
table.set(row, col++, si.scored_xval != null ? si.scored_xval._r2 : Double.NaN);
}
if (isClassifier) {
table.set(row, col++, si.scored_xval != null ? si.scored_xval._logloss : Double.NaN);
table.set(row, col++, si.scored_xval != null ? si.scored_xval._r2 : Double.NaN);
}
if (modelCategory == ModelCategory.Binomial) {
table.set(row, col++, si.scored_xval != null ? si.scored_xval._AUC : Double.NaN);
table.set(row, col++, si.scored_xval != null ? si.scored_xval._pr_auc : Double.NaN);
table.set(row, col++, si.scored_xval != null ? si.scored_xval._lift : Double.NaN);
}
if (isClassifier) {
table.set(row, col++, si.scored_xval != null ? si.scored_xval._classError : Double.NaN);
}
if (modelCategory == ModelCategory.Multinomial) {
table.set(row, col++, si.scored_xval != null ? si.scored_xval._AUC : Double.NaN);
table.set(row, col++, si.scored_xval != null ? si.scored_xval._pr_auc : Double.NaN);
}
if (isAutoencoder) {
table.set(row, col++, si.scored_xval != null ? si.scored_xval._mse : Double.NaN);
}
if (hasCustomMetric) {
table.set(row, col++, si.scored_xval != null ? si.scored_xval._custom_metric : Double.NaN);
}
} // hasCrossValidation
row++;
}
return table;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/SignificantRulesCollector.java
|
package hex;
import water.fvec.Frame;
import water.util.TwoDimTable;
/**
* Implementors of this interface support collecting significant rules.
*/
public interface SignificantRulesCollector {
TwoDimTable getRuleImportanceTable();
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/SplitFrame.java
|
package hex;
import water.*;
import water.fvec.*;
import water.util.ArrayUtils;
import static water.util.FrameUtils.generateNumKeys;
/**
* Split a given frame based on the given ratios.
*
* If a single ratio is given, the frame is split into two frames (FIXME: will throw exception);
* if N ratios are given, N splits are produced.
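*
* Usage sketch (assumes {@code train} is an existing in-memory Frame; the names are hypothetical):
* <pre>{@code
* Frame[] splits = SplitFrame.splitFrame(train, 0.75, 0.25);
* Frame trainPart = splits[0];
* Frame validPart = splits[1];
* }</pre>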
*/
public class SplitFrame extends Transformer<SplitFrame.Frames> {
/** Input dataset to split */
public Frame _dataset;
/** Split ratios */
public double[] _ratios;
/** Output destination keys. */
public Key<Frame>[] _destination_frames;
public SplitFrame(Frame dataset, double... ratios) {
this(dataset, ratios, null);
}
public SplitFrame(Frame dataset, double[] ratios, Key<Frame>[] destination_frames) {
this();
_dataset = dataset;
_ratios = ratios;
_destination_frames = destination_frames;
}
public SplitFrame() { super(null, "hex.SplitFrame$Frames", "SplitFrame"); }
@Override public Job<Frames> execImpl() {
if (_ratios.length == 0) throw new IllegalArgumentException("No ratio specified!");
if (_ratios.length > 100) throw new IllegalArgumentException("Too many frame splits demanded!");
// Check the case for single ratio - FIXME in /4 version change this to throw exception
for (double r : _ratios)
if (r <= 0.0) throw new IllegalArgumentException("Ratio must be > 0!");
if (_ratios.length == 1)
if( _ratios[0] < 0.0 || _ratios[0] > 1.0 ) throw new IllegalArgumentException("Ratio must be between 0 and 1!");
if (_destination_frames != null &&
!((_ratios.length == 1 && _destination_frames.length == 2) || (_ratios.length == _destination_frames.length)))
throw new IllegalArgumentException("Number of destination keys has to match to a number of split ratios!");
// If array of ratios is given scale them and take first n-1 and pass them to FrameSplitter
final double[] computedRatios;
if (_ratios.length > 1) {
double sum = ArrayUtils.sum(_ratios);
if (sum <= 0.0) throw new IllegalArgumentException("Ratios sum has to be > 0!");
if( sum < 1 ) computedRatios = _ratios;
else {
computedRatios = new double[_ratios.length - 1];
for (int i = 0; i < _ratios.length - 1; i++) computedRatios[i] = _ratios[i] / sum;
}
} else {
computedRatios = _ratios;
}
// Create destination keys if not specified
if (_destination_frames == null) _destination_frames = generateNumKeys(_dataset._key, computedRatios.length+1);
FrameSplitter fs = new FrameSplitter(_dataset, computedRatios, _destination_frames, _job._key);
return _job.start(fs, computedRatios.length + 1);
}
public static class Frames extends Keyed { public Key<Frame>[] _keys; }
public static Frame[] splitFrame(Frame fr, double... ratios) {
SplitFrame sf = new SplitFrame(fr, ratios);
sf.exec().get();
Frame[] frames = new Frame[sf._destination_frames.length];
for (int i = 0; i < sf._destination_frames.length; i++) {
frames[i] = sf._destination_frames[i].get();
}
return frames;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/SplitValueHistogram.java
|
package hex;
import org.apache.commons.lang.mutable.MutableInt;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
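/**
* Histogram of split values: maps each split value to the number of times it was recorded.
*
* <p>A minimal usage sketch (the values are hypothetical):
* <pre>{@code
* SplitValueHistogram h = new SplitValueHistogram();
* h.addValue(0.5, 2);
* h.addValue(0.5, 1); // counts accumulate: h.get(0.5).intValue() == 3
* }</pre>
*/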
public class SplitValueHistogram {
private final TreeMap<Double, MutableInt> map;
public SplitValueHistogram() {
this.map = new TreeMap<>();
}
public void addValue(double splitValue, int count) {
if (!map.containsKey(splitValue)) {
map.put(splitValue, new MutableInt(0));
}
map.get(splitValue).add(count);
}
public void merge(SplitValueHistogram histogram) {
for (Map.Entry<Double, MutableInt> entry: histogram.entrySet()) {
this.addValue(entry.getKey(), entry.getValue().intValue());
}
}
public Set<Map.Entry<Double, MutableInt>> entrySet() {
return map.entrySet();
}
public MutableInt get(Object key) {
return map.get(key);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/StringPair.java
|
package hex;
import water.Iced;
public class StringPair extends Iced<StringPair> {
public StringPair() {}
public StringPair(String a, String b) {
_a = a;
_b = b;
}
public String _a;
public String _b;
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/SubModelBuilder.java
|
package hex;
import org.apache.log4j.Logger;
import water.H2O;
import water.Job;
import water.ParallelizationTask;
/**
* Execute build of a collection of sub-models (CV models, main model) in parallel
*
* This class is conceptually similar to CVModelBuilder and follows the same API. As opposed to CVModelBuilder,
* it is not limited to building just CV models but can build a mixture of CV and main models.
* It also uses a more efficient technique of model parallelization that works better when different sub-models
* (e.g. CV folds) take vastly different times to complete.
*
* It currently lacks the prepare/finished feature of CVModelBuilder.
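*
* Usage sketch (assumes {@code job} and an array of prepared {@code builders} exist; the
* parallelization level of 4 is an arbitrary example value):
* <pre>{@code
* new SubModelBuilder(job, builders, 4).bulkBuildModels();
* }</pre>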
*/
public class SubModelBuilder {
private static final Logger LOG = Logger.getLogger(SubModelBuilder.class);
private final Job<?> job;
private final ModelBuilder<?, ?, ?>[] modelBuilders;
private final int parallelization;
/**
* @param job parent job (processing will be stopped if stop of a parent job was requested)
* @param modelBuilders list of model builders to run in bulk
* @param parallelization level of parallelization (how many models can be built at the same time)
*/
public SubModelBuilder(
Job<?> job, ModelBuilder<?, ?, ?>[] modelBuilders, int parallelization
) {
this.job = job;
this.modelBuilders = modelBuilders;
this.parallelization = parallelization;
}
public void bulkBuildModels() {
TrainModelTask[] tasks = new TrainModelTask[modelBuilders.length];
for (int i = 0; i < modelBuilders.length; i++) {
tasks[i] = new TrainModelTask(modelBuilders[i]);
}
H2O.submitTask(new ParallelizationTask<>(tasks, parallelization, job)).join();
}
private static class TrainModelTask extends H2O.H2OCountedCompleter<TrainModelTask> {
private final ModelBuilder<?, ?, ?> _mb;
TrainModelTask(ModelBuilder<?, ?, ?> mb) {
_mb = mb;
}
@Override
public void compute2() {
LOG.info("Building " + _mb._desc + ".");
boolean success = false;
try {
_mb.startClock();
_mb.submitTrainModelTask().join();
success = true;
} finally {
LOG.info(_mb._desc + (success ? " completed successfully." : " failed."));
}
tryComplete();
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/ToEigenVec.java
|
package hex;
import water.fvec.Vec;
/**
* Created by arno on 7/8/16.
*/
public interface ToEigenVec {
Vec toEigenVec(Vec src);
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/Transformer.java
|
package hex;
import water.Iced;
import water.Job;
import water.Key;
import water.Keyed;
/**
* Representation of transformation from type X to Y.
*
* Experimental API (to support nice Java/Scala API) and share common code with ModelBuilder.
*/
abstract public class Transformer<T extends Keyed> extends Iced {
public final Job<T> _job;
public Transformer(Key<T> dest, String clz_of_T, String desc) { _job = new Job<>(dest, clz_of_T, desc); }
/** Execution endpoint for transformations. */
public final Job<T> exec() { return execImpl(); }
/** Implementation endpoint for transformations. */
protected abstract Job<T> execImpl();
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/VarImp.java
|
package hex;
import water.Iced;
import water.util.ArrayUtils;
import java.util.HashMap;
import java.util.Map;
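/**
* Variable importances of individual variables, with helpers for scaling and summarizing.
*
* <p>A minimal usage sketch (the importances are hypothetical):
* <pre>{@code
* VarImp vi = new VarImp(new float[]{2f, 1f, 0f}, new String[]{"a", "b", "c"});
* float[] scaled = vi.scaled_values();   // {1.0f, 0.5f, 0.0f} -- largest value is 1
* float[] percent = vi.summary();        // approx. {66.7f, 33.3f, 0.0f} -- sums to 100
* int used = vi.numberOfUsedVariables(); // 2
* }</pre>
*/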
public class VarImp extends Iced {
final public float[] _varimp; // Variable importance of individual variables, unscaled
final public String[] _names; // Names of variables.
public VarImp(float[] varimp, String[] names) { _varimp = varimp; _names = names; }
// Scaled, so largest value is 1.0
public float[] scaled_values() { return ArrayUtils.div (_varimp.clone(),ArrayUtils.maxValue(_varimp)); }
// Scaled so all elements total to 100%
public float[] summary() { return ArrayUtils.mult(_varimp.clone(),100.0f/ArrayUtils.sum(_varimp)); }
public Map<String, Float> toMap() {
Map<String, Float> varImpMap = new HashMap<>(_varimp.length);
for (int i = 0; i < _varimp.length; i++) {
varImpMap.put(_names[i], _varimp[i]);
}
return varImpMap;
}
public int numberOfUsedVariables() {
int numberOfUsedVariables = 0;
for (float varimp : _varimp) {
if (varimp != 0) {
numberOfUsedVariables++;
}
}
return numberOfUsedVariables;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe/CreateFrameColumnMaker.java
|
package hex.createframe;
import water.Iced;
import water.fvec.NewChunk;
import java.util.Random;
/**
* Base class for all "column makers" used by the CreateFrameExecutor to construct the frame.
* Typically a subclass creates just a single column with a certain type, name and
* distribution of values. However, it is also possible to create a subclass that
* constructs 0 columns (i.e. just modifies the previously constructed ones), or one that
* creates more than one column at once.
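*
* <p>A minimal sketch of a single-column subclass (hypothetical, for illustration only):
* <pre>{@code
* public class ConstColumnCfcm extends CreateFrameColumnMaker {
*   @Override public void exec(int nrows, NewChunk[] ncs, Random rng) {
*     for (int row = 0; row < nrows; ++row) ncs[index].addNum(42); // a constant column
*   }
*   @Override public byte[] columnTypes() { return new byte[]{Vec.T_NUM}; }
*   @Override public String[] columnNames() { return new String[]{"const"}; }
* }
* }</pre>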
*/
public abstract class CreateFrameColumnMaker extends Iced<CreateFrameColumnMaker> {
protected int index;
/**
* Implement this method in a subclass to actually build the columns.
*
* @param nrows Number of rows in the current chunk. If the method creates new columns,
* then it should add this many rows.
* @param ncs The `NewChunk`s array passed down from the `map()` method in `MRTask`.
* A subclass is expected to know which NewChunks it is allowed to touch,
* usually with the help of the {@link #index} variable.
* @param rng Random number generator that the subclass may use to fill the columns
* randomly. Do NOT use any other random generator as doing so will break
* the reproducibility promise of the CreateFrame service.
*/
public abstract void exec(int nrows, NewChunk[] ncs, Random rng);
/**
* Number of columns described by this column maker. Usually this is 1, however it is possible that some tasks
* may create either 0 columns (i.e. they only modify existing ones), or several columns at once (for example
* if you're trying to create a one-hot encoded categorical).
*/
public int numColumns() {
return 1;
}
/**
* Types of the columns produced by the column maker. The returned array should have
* exactly the same number of elements as given by {@link #numColumns()}.
*/
public abstract byte[] columnTypes();
/**
* Names of the columns produced by this column maker. Should also have the same
* number of elements as given by {@link #numColumns()}.
*/
public abstract String[] columnNames();
/**
* Domains for categorical columns being created (if any).
*/
public String[][] columnDomains() {
return null;
}
//--------------------------------------------------------------------------------------------------------------------
/**
* Index of the first column that this column maker will be creating. This
* method is used by the executor, and the {@link #index} variable it sets
* can be used to determine which columns in the <code>ncs</code> array to
* fill during the {@link #exec(int, NewChunk[], Random)} step.
*/
public void setIndex(int i) {
index = i;
}
/**
* Estimated byte size of a single row created by this column maker. This
* estimate is later used to determine optimal chunk size for the produced
* frame, thus it doesn't have to be very precise.
*/
public float byteSizePerRow() {
return 4;
}
/**
* <p>Relative amount of work this column maker performs to fill a chunk. The
* base amount of 100 corresponds to a method that draws a single random
* number per row and then uses simple arithmetic before adding a value to
* the NewChunk.
* <p>The output will be used to inform the {@link water.Job} about progress
* being made. It needn't be very precise.
*/
public int workAmount() {
return 100;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe/CreateFrameExecutor.java
|
package hex.createframe;
import water.*;
import water.fvec.*;
import water.util.RandomUtils;
import java.util.ArrayList;
import java.util.Random;
/**
* <p>This class carries out the frame creation job.</p>
*
* <p>Frame creation is conceptually done in 3 stages: First, a build "recipe"
* is prepared. This recipe is the detailed specification of how the frame is
* to be constructed. Second, an MRTask is run that actually creates the frame,
* according to the specification in the recipe. In this step all "column
* makers" are executed in order they were added, for each chunk-row being
* created. Finally, a set of postprocessing steps are performed on the
* resulting frame.</p>
*
* <p>Usage example:
* <pre>{@code
* Job<Frame> job = new Job<>(destination_key, Frame.class.getName(), "CreateFrame");
* CreateFrameExecutor cfe = new CreateFrameExecutor(job);
* cfe.setNumRows(10000);
* cfe.setSeed(0xDECAFC0FEE);
* cfe.addColumnMaker(new RealColumnCfcm("col0", -1, 1));
* cfe.addColumnMaker(new IntegerColumnCfcm("col1", 0, 100));
* cfe.addPostprocessStep(new MissingInserterCfps(0.05));
* job.start(cfe, cfe.workAmount());
* }</pre></p>
*/
public class CreateFrameExecutor extends H2O.H2OCountedCompleter<CreateFrameExecutor> {
private Job<Frame> job;
private ArrayList<CreateFrameColumnMaker> columnMakers;
private ArrayList<CreateFramePostprocessStep> postprocessSteps;
private int workAmountPerRow;
private int workAmountPostprocess;
private float bytesPerRow;
private int numRows;
private int numCols;
private long seed;
/**
* Make a new CreateFrameExecutor.
* @param job The {@link Job} instance which is wrapping this executor. This
* instance will be used to update it with the current task
* progress.
*/
public CreateFrameExecutor(Job<Frame> job) {
this.job = job;
columnMakers = new ArrayList<>(10);
postprocessSteps = new ArrayList<>(2);
seed = -1;
}
/**
* Set the number of rows to be created in the resulting frame. (However, a
* postprocess step may remove some of the rows.)
*/
public void setNumRows(int n) {
numRows = n;
}
/**
* Set the seed for the random number generator. Two frames created from the
* same seed will be identical. Seed value of -1 (the default) means that a
* random seed will be issued.
*/
public void setSeed(long s) {
seed = s;
}
/**
* Add a "column maker" task, responsible for creation of a single (rarely
* married or widowed) column.
*/
public void addColumnMaker(CreateFrameColumnMaker maker) {
maker.setIndex(numCols);
columnMakers.add(maker);
workAmountPerRow += maker.workAmount();
bytesPerRow += maker.byteSizePerRow();
numCols += maker.numColumns();
}
/**
* Add a step to be performed in the end after the frame has been created.
* This step can then modify the frame in any way.
*/
public void addPostprocessStep(CreateFramePostprocessStep step) {
postprocessSteps.add(step);
workAmountPostprocess += step.workAmount();
}
/**
* Return total amount of work that will be performed by the executor. This
* is needed externally in the Job execution context to determine the
* progress of the task if it is long-running.
*/
public int workAmount() {
return numRows * workAmountPerRow + workAmountPostprocess;
}
/**
* Estimated size of the frame (in bytes), to be used in determining the
* optimal chunk size. This estimate may not be absolutely precise.
*/
public long estimatedByteSize() {
return (long)(numRows * bytesPerRow);
}
//--------------------------------------------------------------------------------------------------------------------
// Private
//--------------------------------------------------------------------------------------------------------------------
@Override public void compute2() {
int logRowsPerChunk = (int) Math.ceil(Math.log1p(rowsPerChunk()));
Vec dummyVec = Vec.makeCon(0, numRows, logRowsPerChunk, false);
if (seed == -1)
seed = Double.doubleToLongBits(Math.random());
// Create types, names & domains
byte[] types = new byte[numCols];
String[] names = new String[numCols];
String[][] domains = new String[numCols][];
int i = 0;
for (CreateFrameColumnMaker maker : columnMakers) {
int it = 0, in = 0, id = 0;
for (byte t : maker.columnTypes()) types[i + it++] = t;
for (String n : maker.columnNames()) names[i + in++] = n;
String[][] colDomains = maker.columnDomains();
if (colDomains != null) {
for (String[] d : colDomains)
domains[i + id++] = d;
} // otherwise don't do anything and leave those entries in `domains` as nulls.
assert in == it && (id == it || id == 0) && it == maker.numColumns();
i += it;
}
// Make the frame
Frame out = new ActualFrameCreator(columnMakers, seed, job)
.doAll(types, dummyVec)
.outputFrame(job._result, names, domains);
// Post-process the frame
Random rng = RandomUtils.getRNG(seed + 40245345791L);
rng.setSeed(rng.nextLong());
for (CreateFramePostprocessStep step: postprocessSteps) {
long nextSeed = rng.nextLong();
step.exec(out, rng);
rng.setSeed(nextSeed);
job.update(step.workAmount());
}
// Clean up
DKV.put(out);
dummyVec.remove();
tryComplete();
}
/** Compute optimal number of rows per chunk in the resulting frame. */
private int rowsPerChunk() {
return FileVec.calcOptimalChunkSize(
estimatedByteSize(),
numCols,
numCols * 4,
H2ORuntime.availableProcessors(),
H2O.getCloudSize(),
false,
false
);
}
private static class ActualFrameCreator extends MRTask<ActualFrameCreator> {
private long seed;
private ArrayList<CreateFrameColumnMaker> columnMakers;
private Job<Frame> job;
public ActualFrameCreator(ArrayList<CreateFrameColumnMaker> columnMakers, long seed, Job<Frame> job) {
this.columnMakers = columnMakers;
this.seed = seed;
this.job = job;
}
@Override public void map(Chunk[] cs, NewChunk[] ncs) {
if (job.stop_requested()) return;
int numRowsInChunk = cs[0]._len;
long chunkPosition = cs[0].start();
Random rng = RandomUtils.getRNG(0);
long taskIndex = 0;
for (CreateFrameColumnMaker colTask : columnMakers) {
rng.setSeed(seed + chunkPosition * 138457623L + (taskIndex++) * 967058L);
rng.setSeed(rng.nextLong());
colTask.exec(numRowsInChunk, ncs, rng);
job.update(colTask.workAmount());
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe/CreateFramePostprocessStep.java
|
package hex.createframe;
import water.Iced;
import water.fvec.Frame;
import java.util.Random;
/**
* <p>Base class for any "postprocessing" steps that should be undertaken after
* the frame has been created using {@link CreateFrameColumnMaker}s.</p>
*
* <p>Each postprocess step takes a frame as an input, and then modifies it
* in-place. Examples of such postprocessing tasks could be: column renaming /
* reordering; removal of some temporary columns; etc.</p>
*/
public abstract class CreateFramePostprocessStep extends Iced<CreateFramePostprocessStep> {
/**
* This method performs the actual work of the postprocessing task.
*
* @param fr Frame that the task modifies.
* @param rng Random number generator to use if the task needs to modify the
* frame randomly.
*/
public abstract void exec(Frame fr, Random rng);
/**
* Approximate work amount for this step. The default value of 100 is the
* same as each column maker's amount of work per chunk.
*/
public int workAmount() {
return 100;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe/CreateFrameRecipe.java
|
package hex.createframe;
import water.H2O;
import water.Iced;
import water.Job;
import water.Key;
import water.fvec.Frame;
import water.util.Log;
import java.util.Random;
/**
* Base class for all frame creation recipes.
*/
public abstract class CreateFrameRecipe<T extends CreateFrameRecipe<T>> extends Iced<T> {
public Key<Frame> dest;
public long seed = -1;
//--------------------------------------------------------------------------------------------------------------------
// Inheritance interface
//--------------------------------------------------------------------------------------------------------------------
/**
* Test whether the input parameters are valid, and throw an error if they
* aren't. You may use the {@link #check(boolean, String)} helper function
* to make this somewhat easier.
*/
protected abstract void checkParametersValidity();
/**
* Set up the provided {@link CreateFrameExecutor} so that it knows how to
* construct the frame corresponding to the recipe being built.
*/
protected abstract void buildRecipe(CreateFrameExecutor cfe);
//--------------------------------------------------------------------------------------------------------------------
// Other
//--------------------------------------------------------------------------------------------------------------------
/**
* This function will be called by the REST API handler to initiate making
* of the recipe. It returns a {@link Job} which will hold the created frame
* once it is finished.
*/
public Job<Frame> exec() {
fillMissingParameters();
Job<Frame> job = new Job<>(dest, Frame.class.getName(), "CreateFrame:original");
CreateFrameExecutor cfe = new CreateFrameExecutor(job);
checkParametersValidity();
buildRecipe(cfe);
checkParametersValidity2(cfe);
return job.start(cfe, cfe.workAmount());
}
/**
* Resolve parameter values that cannot be initialized to static defaults.
* If you're overriding this method, please make sure to invoke the super
* implementation as well.
*/
protected void fillMissingParameters() {
if (dest == null) {
dest = Key.make();
}
if (seed == -1) {
seed = new Random().nextLong();
Log.info("Generated seed: " + seed);
}
}
/**
* Final step of parameter testing, after the {@link CreateFrameExecutor}
* has been set up, but just before the actual frame creation commences.
* This method shall only be used to perform checks that cannot be done
* without the {@link CreateFrameExecutor} instance.
*/
protected void checkParametersValidity2(CreateFrameExecutor cfe) {
long byteEstimate = cfe.estimatedByteSize();
long clusterFreeMem = H2O.CLOUD.free_mem();
double gb = (double) (1 << 30);
check(byteEstimate <= clusterFreeMem,
String.format("Frame is expected to require %.3fGb, which will not fit into H2O's free memory of %.3fGb",
byteEstimate/gb, clusterFreeMem/gb));
}
/** Simple helper function for parameter testing. */
protected void check(boolean test, String msg) {
if (!test) throw new IllegalArgumentException(msg);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe/columns/BinaryColumnCfcm.java
|
package hex.createframe.columns;
import hex.createframe.CreateFrameColumnMaker;
import water.fvec.NewChunk;
import water.fvec.Vec;
import java.util.Random;
/**
* Random binary column.
*/
public class BinaryColumnCfcm extends CreateFrameColumnMaker {
private String name;
private double p;
public BinaryColumnCfcm() {
}
public BinaryColumnCfcm(String colName, double ones_fraction) {
name = colName;
p = ones_fraction;
}
@Override public void exec(int nrows, NewChunk[] ncs, Random rng) {
for (int row = 0; row < nrows; ++row)
ncs[index].addNum(rng.nextFloat() <= p? 1 : 0);
}
@Override public String[] columnNames() {
return new String[]{name};
}
@Override public byte[] columnTypes() {
return new byte[]{Vec.T_NUM};
}
@Override public float byteSizePerRow() {
return 0.125f;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe/columns/CategoricalColumnCfcm.java
|
package hex.createframe.columns;
import hex.createframe.CreateFrameColumnMaker;
import water.fvec.NewChunk;
import water.fvec.Vec;
import java.util.Random;
/**
* Random categorical column.
*/
public class CategoricalColumnCfcm extends CreateFrameColumnMaker {
private String name;
private int numFactors;
private String[] domain;
public CategoricalColumnCfcm() {
}
public CategoricalColumnCfcm(String colName, int nFactors) {
name = colName;
numFactors = nFactors;
if (name.equals("response"))
prepareAnimalDomain();
else
prepareSimpleDomain();
}
@Override public void exec(int nrows, NewChunk[] ncs, Random rng) {
for (int row = 0; row < nrows; ++row)
ncs[index].addNum((int)(rng.nextDouble() * numFactors));
}
@Override public String[] columnNames() {
return new String[]{name};
}
@Override public byte[] columnTypes() {
return new byte[]{Vec.T_CAT};
}
@Override public String[][] columnDomains() {
return new String[][]{domain};
}
@Override public float byteSizePerRow() {
return numFactors < 128 ? 1 : numFactors < 32768 ? 2 : 4;
}
private void prepareSimpleDomain() {
domain = new String[numFactors];
for (int i = 0; i < numFactors; ++i) {
domain[i] = "c" + index + ".l" + i;
}
}
private static String[] _animals =
new String[]{"cat", "dog", "fish", "cow", "horse", "pig", "bird", "lion", "sheep", "rhino", "bull", "eagle",
"crab", "wolf", "duck", "crow", "fox", "bear", "hare", "camel", "bat", "frog", "ant", "otter",
"tiger", "rat", "snake", "zebra", "seal", "bison", "newt", "deer", "mouse", "turkey"};
private void prepareAnimalDomain() {
domain = new String[numFactors];
System.arraycopy(_animals, 0, domain, 0, Math.min(numFactors, _animals.length));
if (numFactors > _animals.length) {
int k = _animals.length;
OUTER:
for (int i = 0; i < _animals.length; i++)
for (int j = 0; j < _animals.length; j++) {
if (i == j) continue;
domain[k++] = _animals[i] + _animals[j];
if (k == numFactors) break OUTER;
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe/columns/IntegerColumnCfcm.java
|
package hex.createframe.columns;
import hex.createframe.CreateFrameColumnMaker;
import water.fvec.NewChunk;
import water.fvec.Vec;
import java.util.Random;
/**
* Integer-valued random column.
*/
public class IntegerColumnCfcm extends CreateFrameColumnMaker {
private String name;
private long lowerBound;
private long upperBound;
public IntegerColumnCfcm() {}
public IntegerColumnCfcm(String colName, int lBound, int uBound) {
name = colName;
lowerBound = lBound;
upperBound = uBound;
}
@Override public void exec(int nrows, NewChunk[] ncs, Random rng) {
long span = upperBound - lowerBound + 1;
if (span == 1) {
for (int row = 0; row < nrows; ++row)
ncs[index].addNum(lowerBound);
} else {
for (int row = 0; row < nrows; ++row)
ncs[index].addNum(lowerBound + (long)(rng.nextDouble()*span));
}
}
@Override public String[] columnNames() {
return new String[]{name};
}
@Override public byte[] columnTypes() {
return new byte[]{Vec.T_NUM};
}
@Override public float byteSizePerRow() {
long integer_range = Math.max(Math.abs(upperBound), Math.abs(lowerBound));
if (integer_range < 128) return 1;
if (integer_range < 32768) return 2;
if (integer_range < 1L << 31) return 4;
return 8;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe/columns/RealColumnCfcm.java
|
package hex.createframe.columns;
import hex.createframe.CreateFrameColumnMaker;
import water.fvec.NewChunk;
import water.fvec.Vec;
import java.util.Random;
/**
* Real-valued random column.
*/
public class RealColumnCfcm extends CreateFrameColumnMaker {
private String name;
private double lowerBound;
private double upperBound;
public RealColumnCfcm() {}
public RealColumnCfcm(String colName, double lBound, double uBound) {
name = colName;
lowerBound = lBound;
upperBound = uBound;
}
@Override public void exec(int nrows, NewChunk[] ncs, Random rng) {
double span = upperBound - lowerBound;
for (int row = 0; row < nrows; ++row)
ncs[index].addNum(lowerBound + (span == 0? 0 : rng.nextDouble() * span));
}
@Override public String[] columnNames() {
return new String[]{name};
}
@Override public float byteSizePerRow() {
return 8;
}
@Override public byte[] columnTypes() {
return new byte[]{Vec.T_NUM};
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe/columns/StringColumnCfcm.java
|
package hex.createframe.columns;
import hex.createframe.CreateFrameColumnMaker;
import water.fvec.NewChunk;
import water.fvec.Vec;
import java.util.Random;
/**
* Random string column.
*/
public class StringColumnCfcm extends CreateFrameColumnMaker {
private String name;
private int len;
public StringColumnCfcm() {}
public StringColumnCfcm(String colName, int length) {
name = colName;
len = length;
}
@Override public void exec(int nrows, NewChunk[] ncs, Random rng) {
byte[] buf = new byte[len];
for (int row = 0; row < nrows; ++row) {
for (int i = 0; i < len; ++i)
buf[i] = (byte)(65 + rng.nextInt(25));
ncs[index].addStr(new String(buf));
}
}
@Override public String[] columnNames() {
return new String[]{name};
}
@Override public byte[] columnTypes() {
return new byte[]{Vec.T_STR};
}
@Override public float byteSizePerRow() {
return len;
}
@Override public int workAmount() {
return 60 + len * 50;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe/columns/TimeColumnCfcm.java
|
package hex.createframe.columns;
import hex.createframe.CreateFrameColumnMaker;
import water.fvec.NewChunk;
import water.fvec.Vec;
import java.util.Random;
/**
* Time-valued random column.
*/
public class TimeColumnCfcm extends CreateFrameColumnMaker {
private String name;
private long lowerBound;
private long upperBound;
public TimeColumnCfcm() {}
public TimeColumnCfcm(String colName, long lBound, long uBound) {
name = colName;
lowerBound = lBound;
upperBound = uBound;
}
@Override public void exec(int nrows, NewChunk[] ncs, Random rng) {
long span = upperBound - lowerBound + 1;
for (int row = 0; row < nrows; ++row)
ncs[index].addNum(lowerBound + (long)(rng.nextDouble() * span));
}
@Override public String[] columnNames() {
return new String[]{name};
}
@Override public byte[] columnTypes() {
return new byte[]{Vec.T_TIME};
}
@Override public float byteSizePerRow() {
return 8;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe/postprocess/MissingInserterCfps.java
|
package hex.createframe.postprocess;
import hex.createframe.CreateFramePostprocessStep;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.util.RandomUtils;
import java.util.Random;
/**
* This action randomly injects missing values into the dataframe.
*/
public class MissingInserterCfps extends CreateFramePostprocessStep {
private double p;
public MissingInserterCfps() {}
/**
* @param p Fraction of values to be converted into NAs.
*/
public MissingInserterCfps(double p) {
assert p >= 0 && p < 1 : "p should be in the range [0, 1), got " + p;
this.p = p;
}
/** Execute this post-processing step. */
@Override
public void exec(Frame fr, Random rng) {
// No need to do anything if p == 0
if (p > 0)
new InsertNAs(p, rng).doAll(fr);
}
/**
* Task that does the actual job of imputing missing values.
*
* Typically the fraction p of values to be replaced with missing values is fairly small, therefore it is
* inefficient to visit each value individually and decide whether to flip it to NA by comparing a uniform
* random number against p. Instead we rely on the fact that the distribution of gaps between "successes" in a
* Bernoulli experiment follows the Geometric(p) distribution. Drawing from such a distribution is also easy:
* if u is uniform on (0,1) then <code>floor(log(u)/log(1-p))</code> is geometric with parameter p. For example,
* with p = 0.05 the expected gap is about 20 rows, so roughly one random draw is made per 20 rows instead of one per row.
*/
private static class InsertNAs extends MRTask<InsertNAs> {
private long seed;
private double p;
public InsertNAs(double prob, Random random) {
p = prob;
seed = random.nextLong();
}
@Override
public void map(Chunk[] cs) {
int numRows = cs[0]._len;
long chunkStart = cs[0].start();
double denom = Math.log(1 - p);
Random rng = RandomUtils.getRNG(0);
for (int i = 0; i < cs.length; i++) {
rng.setSeed(seed + i * 35602489 + chunkStart * 47582);
int l = 0;
while (true) {
l += (int) Math.floor(Math.log(rng.nextDouble()) / denom);
if (l < numRows)
cs[i].set(l++, Double.NaN);
else
break;
}
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe/postprocess/ShuffleColumnsCfps.java
|
package hex.createframe.postprocess;
import hex.createframe.CreateFramePostprocessStep;
import water.DKV;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.ArrayUtils;
import java.util.HashMap;
import java.util.Random;
/**
* Action to shuffle the columns of the frame.
*/
public class ShuffleColumnsCfps extends CreateFramePostprocessStep {
private boolean reassignNames;
private boolean responseFirst;
public ShuffleColumnsCfps() {}
/**
* @param reassignNames If true, the columns will be renamed within each group starting with a common alpha-prefix.
* E.g. if the original frame had columns [A1, A3, B2, B5] then after shuffling it may look like
* this: [B1, A1, A2, B2]. In this new frame column "A1" may have been either "A1" or "A3" in
* the original frame.
* If false, each vec will keep its name.
* @param responseFirst If true, the "response" column will be moved to the beginning of the frame. Otherwise it
* will be shuffled together with the rest of the columns.
*/
public ShuffleColumnsCfps(boolean reassignNames, boolean responseFirst) {
this.reassignNames = reassignNames;
this.responseFirst = responseFirst;
}
@Override
public void exec(Frame fr, Random rng) {
// Initial shuffle
int numCols = fr.numCols();
if (numCols == 0) return;
int[] idx = ArrayUtils.seq(0, numCols);
ArrayUtils.shuffleArray(idx, rng);
// Move the response column to the beginning of the frame
if (responseFirst) {
int responseIndex = ArrayUtils.find(fr.names(), "response");
if (responseIndex == -1) responseIndex = ArrayUtils.find(fr.names(), "Response");
if (responseIndex >= 0) {
int shuffledIndex = ArrayUtils.find(idx, responseIndex);
idx[shuffledIndex] = idx[0];
idx[0] = responseIndex;
}
}
// Construct shuffled arrays of names and vecs
Vec[] newVecs = new Vec[numCols];
String[] newNames = new String[numCols];
for (int i = 0; i < numCols; ++i) {
newVecs[i] = fr.vec(idx[i]);
newNames[i] = fr.name(idx[i]);
}
// Rename columns in order to hide the fact that they were shuffled
if (reassignNames) {
HashMap<String, Integer> prefixCounts = new HashMap<>();
for (int i = 0; i < numCols; i++){
String prefix = removeNumericSuffix(newNames[i]);
int count = prefixCounts.containsKey(prefix)? prefixCounts.get(prefix) + 1 : 1;
prefixCounts.put(prefix, count);
if (!newNames[i].equals("response"))
newNames[i] = prefix + count;
}
}
// Reshape the original dataframe
fr.restructure(newNames, newVecs);
DKV.put(fr);
}
/**
* Helper function which strips any numeric suffix from the end of the provided name.
* Equivalent to <code>name.rstrip("0123456789")</code> in Python.
*/
public static String removeNumericSuffix(String name) {
int i = name.length();
while (--i >= 0) {
char ch = name.charAt(i);
if (ch < '0' || ch > '9') break;
}
return name.substring(0, i + 1);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe/recipes/OriginalCreateFrameRecipe.java
|
package hex.createframe.recipes;
import hex.createframe.CreateFrameExecutor;
import hex.createframe.CreateFrameRecipe;
import hex.createframe.columns.*;
import hex.createframe.postprocess.MissingInserterCfps;
import hex.createframe.postprocess.ShuffleColumnsCfps;
/**
* This recipe tries to match the behavior of the original hex.CreateFrame class.
*/
public class OriginalCreateFrameRecipe extends CreateFrameRecipe<OriginalCreateFrameRecipe> {
private int rows = 10000;
private int cols = 10;
private double real_range = 100;
private double categorical_fraction = 0.2;
private int factors = 100;
private boolean randomize = true;
private long value = 0;
private double integer_fraction = 0.2;
private double time_fraction = 0.0;
private double string_fraction = 0.0;
private int integer_range = 100;
private double binary_fraction = 0.1;
private double binary_ones_fraction = 0.02;
private double missing_fraction = 0.01;
private int response_factors = 2;
private boolean positive_response = false; // only for response_factors == 1
private boolean has_response = false;
@Override
protected void checkParametersValidity() {
double total_fraction = integer_fraction + binary_fraction + categorical_fraction + time_fraction + string_fraction;
check(total_fraction < 1.00000001, "Integer, binary, categorical, time and string fractions must add up to <= 1");
check(missing_fraction >= 0 && missing_fraction < 1, "Missing fraction must be between 0 and 1");
check(integer_fraction >= 0 && integer_fraction <= 1, "Integer fraction must be between 0 and 1");
check(binary_fraction >= 0 && binary_fraction <= 1, "Binary fraction must be between 0 and 1");
check(time_fraction >= 0 && time_fraction <= 1, "Time fraction must be between 0 and 1");
check(string_fraction >= 0 && string_fraction <= 1, "String fraction must be between 0 and 1");
check(binary_ones_fraction >= 0 && binary_ones_fraction <= 1, "Binary ones fraction must be between 0 and 1");
check(categorical_fraction >= 0 && categorical_fraction <= 1, "Categorical fraction must be between 0 and 1");
check(categorical_fraction == 0 || factors >= 2, "Factors must be at least 2 for categorical data");
check(response_factors >= 1, "Response factors must be either 1 (real-valued response), or >=2 (factor levels)");
check(response_factors <= 1024, "Response factors must be <= 1024");
check(factors <= 10000000, "Number of factors must be <= 10,000,000");
check(cols > 0 && rows > 0, "Must have number of rows and columns > 0");
check(real_range >= 0, "Real range must be a nonnegative number");
check(integer_range >= 0, "Integer range must be a nonnegative number");
check(dest != null, "Destination frame must have a key");
if (positive_response)
check(response_factors == 1, "positive_response can only be requested for real-valued response column");
if (randomize)
check(value == 0, "Cannot set data to a constant value if randomize is true");
else {
check(!has_response, "Cannot have response column if randomize is false");
check(total_fraction == 0,
"Cannot have integer, categorical, string, binary or time columns if randomize is false");
}
}
@Override
protected void buildRecipe(CreateFrameExecutor cfe) {
cfe.setSeed(seed);
cfe.setNumRows(rows);
// Sometimes the client requests, say, 0.3 categorical columns. By the time this number arrives here, it becomes
// something like 0.299999999997. If we just multiply by the number of columns (say 10000) and take integer part,
// we'd have 2999 columns only -- not what the client expects. This is why we add 0.1 to each count before taking
// the floor part.
int catcols = (int)(categorical_fraction * cols + 0.1);
int intcols = (int)(integer_fraction * cols + 0.1);
int bincols = (int)(binary_fraction * cols + 0.1);
int timecols = (int)(time_fraction * cols + 0.1);
int stringcols = (int)(string_fraction * cols + 0.1);
int realcols = cols - catcols - intcols - bincols - timecols - stringcols;
// At this point we might have accidentally allocated too many columns -- in such case adjust their counts.
if (realcols < 0 && catcols > 0) { catcols--; realcols++; }
if (realcols < 0 && intcols > 0) { intcols--; realcols++; }
if (realcols < 0 && bincols > 0) { bincols--; realcols++; }
if (realcols < 0 && timecols > 0) { timecols--; realcols++; }
if (realcols < 0 && stringcols > 0) { stringcols--; realcols++; }
assert catcols >= 0 && intcols >= 0 && bincols >= 0 && realcols >= 0 && timecols >= 0 && stringcols >= 0;
// Create response column
if (has_response) {
if (response_factors == 1)
cfe.addColumnMaker(new RealColumnCfcm("response", positive_response? 0 : -real_range, real_range));
else
cfe.addColumnMaker(new CategoricalColumnCfcm("response", response_factors));
}
// Create "feature" columns
if (randomize) {
int j = 0;
for (int i = 0; i < intcols; i++)
cfe.addColumnMaker(new IntegerColumnCfcm("C" + (++j), -integer_range, integer_range));
for (int i = 0; i < realcols; i++)
cfe.addColumnMaker(new RealColumnCfcm("C" + (++j), -real_range, real_range));
for (int i = 0; i < catcols; i++)
cfe.addColumnMaker(new CategoricalColumnCfcm("C" + (++j), factors));
for (int i = 0; i < bincols; i++)
cfe.addColumnMaker(new BinaryColumnCfcm("C" + (++j), binary_ones_fraction));
for (int i = 0; i < timecols; i++)
cfe.addColumnMaker(new TimeColumnCfcm("C" + (++j), 0, 50L * 365 * 24 * 3600 * 1000)); // 1970...2020
for (int i = 0; i < stringcols; i++)
cfe.addColumnMaker(new StringColumnCfcm("C" + (++j), 8));
} else {
assert catcols + intcols + bincols + timecols + stringcols == 0;
for (int i = 0; i < realcols; i++)
cfe.addColumnMaker(new RealColumnCfcm("C" + (i+1), value, value));
}
// Add post-processing steps
cfe.addPostprocessStep(new MissingInserterCfps(missing_fraction));
cfe.addPostprocessStep(new ShuffleColumnsCfps(true, true));
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/createframe/recipes/SimpleCreateFrameRecipe.java
|
package hex.createframe.recipes;
import hex.createframe.CreateFrameExecutor;
import hex.createframe.CreateFrameRecipe;
import hex.createframe.columns.*;
import hex.createframe.postprocess.MissingInserterCfps;
import hex.createframe.postprocess.ShuffleColumnsCfps;
/**
* Similar to {@link OriginalCreateFrameRecipe}, except that this recipe
* requires the number of columns of each type to be specified explicitly (not
* as fractions). It also uses a different naming scheme, so that columns of
* different types have names according to that type: integer columns are
* {@code I1, I2, ...}, binary are {@code B1, B2, ...}, and so on.
*/
public class SimpleCreateFrameRecipe extends CreateFrameRecipe<SimpleCreateFrameRecipe> {
public int nrows = 100;
public int ncols_real = 0;
public int ncols_int = 0;
public int ncols_enum = 0;
public int ncols_bool = 0;
public int ncols_str = 0;
public int ncols_time = 0;
public double real_lb = -100;
public double real_ub = 100;
public int int_lb = -100;
public int int_ub = 100;
public int enum_nlevels = 10;
public double bool_p = 0.3;
public long time_lb = 365L * 24 * 3600 * 1000 * (2000 - 1970); // ~ 2000-01-01
public long time_ub = 365L * 24 * 3600 * 1000 * (2020 - 1970); // ~ 2020-01-01
public int str_length = 8;
public double missing_fraction = 0;
public ResponseType response_type = ResponseType.NONE;
public double response_lb = 0;
public double response_ub = 10;
public double response_p = 0.6;
public int response_nlevels = 25;
public enum ResponseType {
NONE, REAL, INT, ENUM, BOOL, TIME
}
protected void checkParametersValidity() {
check(nrows > 0, "Number of rows must be greater than 0");
check(ncols_real >= 0, "Number of real columns cannot be negative");
check(ncols_int >= 0, "Number of integer columns cannot be negative");
check(ncols_bool >= 0, "Number of bool (binary) columns cannot be negative");
check(ncols_enum >= 0, "Number of enum (categorical) columns cannot be negative");
check(ncols_str >= 0, "Number of string columns cannot be negative");
check(ncols_time >= 0, "Number of time columns cannot be negative");
check(!Double.isNaN(real_lb), "Real range's lower bound cannot be NaN");
check(!Double.isNaN(real_ub), "Real range's upper bound cannot be NaN");
check(!Double.isInfinite(real_lb), "Real range's lower bound cannot be infinite");
check(!Double.isInfinite(real_ub), "Real range's upper bound cannot be infinite");
check(real_lb <= real_ub, "Invalid real range interval: lower bound exceeds the upper bound");
check(int_lb <= int_ub, "Invalid integer range interval: lower bound exceeds the upper bound");
check(!Double.isNaN(bool_p), "Boolean frequency parameter cannot be NaN");
check(bool_p >= 0 && bool_p <= 1, "Boolean frequency parameter must be in the range 0..1");
check(time_lb <= time_ub, "Invalid time range interval: lower bound exceeds the upper bound");
check(enum_nlevels > 0, "Number of levels for enum (categorical) columns must be positive");
check(str_length > 0, "Length of string values should be positive");
check(!Double.isNaN(missing_fraction), "Missing fraction cannot be NaN");
check(missing_fraction >= 0 && missing_fraction <= 1, "Missing fraction must be in the range 0..1");
check(!Double.isNaN(response_lb), "Response column's lower bound cannot be NaN");
check(!Double.isNaN(response_ub), "Response column's upper bound cannot be NaN");
check(!Double.isInfinite(response_lb), "Response column's lower bound cannot be infinite");
check(!Double.isInfinite(response_ub), "Response column's upper bound cannot be infinite");
check(response_lb <= response_ub, "Invalid interval for response column: lower bound exceeds the upper bound");
check(!Double.isNaN(response_p), "Response binary frequency parameter (response_p) cannot be NaN");
check(response_p >= 0 && response_p <= 1, "Response binary frequency (response_p) should be in the range 0..1");
check(response_nlevels >= 2, "Number of categorical levels for the response column must be 2 or more");
}
protected void buildRecipe(CreateFrameExecutor cfe) {
cfe.setSeed(seed);
cfe.setNumRows(nrows);
switch (response_type) {
case REAL:
cfe.addColumnMaker(new RealColumnCfcm("response", response_lb, response_ub));
break;
case INT:
cfe.addColumnMaker(new IntegerColumnCfcm("response", (int)response_lb, (int)response_ub));
break;
case ENUM:
cfe.addColumnMaker(new CategoricalColumnCfcm("response", response_nlevels));
break;
case BOOL:
cfe.addColumnMaker(new BinaryColumnCfcm("response", response_p));
break;
case TIME:
cfe.addColumnMaker(new TimeColumnCfcm("response", (long)response_lb, (long)response_ub));
break;
}
for (int i = 1; i <= ncols_real; i++)
cfe.addColumnMaker(new RealColumnCfcm("R" + i, real_lb, real_ub));
for (int i = 1; i <= ncols_int; i++)
cfe.addColumnMaker(new IntegerColumnCfcm("I" + i, int_lb, int_ub));
for (int i = 0; i < ncols_enum; i++)
cfe.addColumnMaker(new CategoricalColumnCfcm("E" + i, enum_nlevels));
for (int i = 1; i <= ncols_bool; i++)
cfe.addColumnMaker(new BinaryColumnCfcm("B" + i, bool_p));
for (int i = 0; i < ncols_time; i++)
cfe.addColumnMaker(new TimeColumnCfcm("T" + i, time_lb, time_ub));
for (int i = 0; i < ncols_str; i++)
cfe.addColumnMaker(new StringColumnCfcm("S" + i, str_length));
cfe.addPostprocessStep(new MissingInserterCfps(missing_fraction));
cfe.addPostprocessStep(new ShuffleColumnsCfps(true, true));
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/faulttolerance/Recoverable.java
|
package hex.faulttolerance;
import hex.ModelExportOption;
import water.Key;
import water.Keyed;
import java.util.List;
import java.util.Set;
/**
* @param <T> type of object to be recovered
*/
public interface Recoverable<T extends Keyed> {
/**
* @return key of this keyed object
*/
Key<T> getKey();
/**
* @param location directory where this recoverable will be written into a single file
* @return path to where data was written
*/
List<String> exportBinary(String location, boolean includingModels, ModelExportOption... options);
/**
* @return list of all keys of objects this recoverable needs to resume operation after recovery
*/
Set<Key<?>> getDependentKeys();
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/faulttolerance/Recovery.java
|
package hex.faulttolerance;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import hex.Model;
import hex.grid.Grid;
import hex.grid.GridSearch;
import org.apache.log4j.Logger;
import water.*;
import water.api.GridSearchHandler;
import water.fvec.Frame;
import water.fvec.persist.FramePersist;
import water.fvec.persist.PersistUtils;
import water.util.FileUtils;
import water.util.IcedHashMap;
import java.io.IOException;
import java.net.URI;
import java.util.*;
/**
* <h2>H2O Auto-Recovery Support</h2>
*
* <p>This class encapsulates the core of H2O's (auto)recovery support. It manages
* storing of data needed to recover a {@link Recoverable} process as well as performing
* the actual recovery and re-start of such process.</p>
*
* <h3>Preparing for recovery</h3>
*
* <p>A {@link Recoverable} will instantiate a {@link Recovery} instance at start and call
* the {@link Recovery#onStart(Recoverable, Job)} method first. This should provide sufficient
* data to re-instantiate the Recoverable later on. Every time a {@link Model} is successfully
* built, the Recoverable should call the {@link Recovery#onModel(Recoverable, Key)} method so that
* this Model can later be recovered and does not need to be trained again. If a Recoverable
* process finishes successfully it will call {@link Recovery#onDone(Recoverable)}; this will
* lead to all stored data being cleaned up.</p>
*
* <h3>Recovering manually</h3>
*
* <p>Recoverable objects may use this class to restore their state in case the user has sent such a
* request. This is useful since this class implements mechanisms not implemented by other
* components in the system (such as storing parameter references).</p>
*
* <h3>Auto-Recovery</h3>
*
* <p>Calling {@link Recovery#autoRecover(Optional)} will trigger an auto-recovery. The method
* will check if there is any recovery data present in the supplied directory and if there is
* it will load all the stored data, references and models and resume the Recoverable process.
* It is the responsibility of the Recoverable to check in what state it was stored (models
* already trained) and continue on a best-effort basis with its job such that no unnecessary
* repetition of work is done.</p>
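*
* <p>Typical lifecycle sketch (the storage path is hypothetical; {@code grid} stands for any
* Recoverable and {@code job} for its parent Job):
* <pre>{@code
* Recovery<Grid> recovery = new Recovery<>("/tmp/h2o-recovery");
* recovery.onStart(grid, job);      // persist initial state, references and metadata
* recovery.onModel(grid, modelKey); // persist each successfully built model
* recovery.onDone(grid);            // on success, delete all snapshot files
* }</pre>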
*
* @param <T> Type of object to be recovered
*/
public class Recovery<T extends Keyed> {
private static final Logger LOG = Logger.getLogger(Recovery.class);
public static final String REFERENCES_META_FILE_SUFFIX = "_references";
public static final String RECOVERY_META_FILE = "recovery.json";
public static final String INFO_CLASS = "class";
public static final String INFO_RESULT_KEY = "resultKey";
public static final String INFO_JOB_KEY = "jobKey";
/**
* Will check the supplied directory for presence of recovery metadata and,
* if found, trigger a recovery of the interrupted Recoverable process.
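*
* <p>Usage sketch (the directory path is hypothetical):
* <pre>{@code
* Recovery.autoRecover(Optional.of("/tmp/h2o-recovery"));
* }</pre>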
*
* @param autoRecoveryDirOpt directory from which to load recovery data
*/
public static void autoRecover(Optional<String> autoRecoveryDirOpt) {
if (!autoRecoveryDirOpt.isPresent() || autoRecoveryDirOpt.get().length() == 0) {
LOG.debug("Auto recovery dir not configured.");
} else {
String autoRecoveryDir = autoRecoveryDirOpt.get();
LOG.info("Initializing auto recovery from " + autoRecoveryDir);
H2O.submitTask(new H2O.H2OCountedCompleter(H2O.MIN_PRIORITY) {
@Override
public void compute2() {
new Recovery(autoRecoveryDir).autoRecover();
tryComplete();
}
});
}
}
/**
* {@link Frame} objects referenced by params are stored in a distributed manner, hence we need to
* distinguish them from other types of {@link Keyed} references.
*/
public enum ReferenceType {
FRAME, KEYED
}
private final String storagePath;
private final List<String> writtenFiles = new ArrayList<>();
/**
* @param storagePath directory to use as base for recovery snapshots
*/
public Recovery(String storagePath) {
this.storagePath = storagePath;
}
private String recoveryFile(String f) {
return storagePath + "/" + f;
}
private String recoveryFile(Key key) {
return recoveryFile(key.toString());
}
public String referencesMetaFile(Recoverable<T> r) {
return recoveryFile(r.getKey().toString() + REFERENCES_META_FILE_SUFFIX);
}
public String recoveryMetaFile() {
return recoveryFile(RECOVERY_META_FILE);
}
/**
* Called when the training begins, so that initial state can be persisted
*
* @param r a Recoverable to persist
*/
public void onStart(final Recoverable<T> r, final Job job) {
writtenFiles.addAll(r.exportBinary(storagePath, true));
exportReferences(r);
writeRecoveryInfo(r, job.getKey());
}
/**
* Called by the Recoverable to notify that a new model was trained and needs to be persisted
*
* @param r a Recoverable to update
* @param modelKey key of the newly trained model
*/
public void onModel(final Recoverable<T> r, Key<Model> modelKey) {
try {
String modelFile = recoveryFile(modelKey);
modelKey.get().exportBinaryModel(modelFile, true);
writtenFiles.add(modelFile);
r.exportBinary(storagePath, false);
} catch (IOException e) {
// this should not happen since storagePath should be writable because
// grid was already written to it
throw new RuntimeException("Failed to store model for fault tolerance.", e);
}
}
/**
* Called by the Recoverable when the training has finished successfully. This means that
* the recovery snapshots (persisted data) are no longer needed and can be deleted.
*/
public void onDone(Recoverable<T> r) {
final URI storageUri = FileUtils.getURI(storagePath);
for (String path : writtenFiles) {
URI pathUri = FileUtils.getURI(path);
H2O.getPM().getPersistForURI(storageUri).delete(pathUri.toString());
}
}
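// Lifecycle sketch (Grid is the Recoverable that autoRecover() knows how to resume;
// variable names are illustrative):
//
//   Recovery<Grid> recovery = new Recovery<>("/recovery");
//   recovery.onStart(grid, job);       // snapshot params, references and recovery.json
//   recovery.onModel(grid, modelKey);  // called after each successfully trained model
//   recovery.onDone(grid);             // success: all snapshot files are deleted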
/**
 * Saves all of the keyed objects used by the Recoverable's params. Files are named after the objects' keys.
*/
public void exportReferences(final Recoverable<T> r) {
final Set<Key<?>> keys = r.getDependentKeys();
final IcedHashMap<String, String> referenceKeyTypeMap = new IcedHashMap<>();
for (Key<?> k : keys) {
persistObj(k.get(), referenceKeyTypeMap);
}
final URI referencesUri = FileUtils.getURI(referencesMetaFile(r));
writtenFiles.add(referencesUri.toString());
PersistUtils.write(referencesUri, ab -> ab.put(referenceKeyTypeMap));
}
private void writeRecoveryInfo(final Recoverable<T> r, Key<Job> jobKey) {
Map<String, String> info = new HashMap<>();
info.put(INFO_CLASS, r.getClass().getName());
info.put(INFO_JOB_KEY, jobKey.toString());
info.put(INFO_RESULT_KEY, r.getKey().toString());
final URI infoUri = FileUtils.getURI(recoveryMetaFile());
writtenFiles.add(infoUri.toString());
PersistUtils.writeStream(infoUri, w -> w.write(new Gson().toJson(info)));
}
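// The resulting recovery.json is a flat JSON object serialized with Gson; for a grid
// search it might look like this (the key literals are illustrative):
//
//   {"class":"hex.grid.Grid","jobKey":"$03...job","resultKey":"Grid_GBM_1"}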
private void persistObj(
final Keyed<?> o,
Map<String, String> referenceKeyTypeMap
) {
if (o instanceof Frame) {
referenceKeyTypeMap.put(o._key.toString(), ReferenceType.FRAME.toString());
String[] writtenFrameFiles = new FramePersist((Frame) o).saveToAndWait(storagePath, true);
writtenFiles.addAll(Arrays.asList(writtenFrameFiles));
} else if (o != null) {
referenceKeyTypeMap.put(o._key.toString(), ReferenceType.KEYED.toString());
String destFile = storagePath + "/" + o._key;
URI dest = FileUtils.getURI(destFile);
PersistUtils.write(dest, ab -> ab.putKey(o._key));
writtenFiles.add(destFile);
}
}
/**
* Will locate a references metadata file and load all objects mentioned in this file.
*
* @param r a Recoverable whose references are to be loaded
*/
public void loadReferences(final Recoverable<T> r) {
final URI referencesUri = FileUtils.getURI(storagePath + "/" + r.getKey() + REFERENCES_META_FILE_SUFFIX);
Map<String, String> referencesMap = PersistUtils.read(referencesUri, AutoBuffer::get);
final Futures fs = new Futures();
referencesMap.forEach((key, type) -> {
switch (ReferenceType.valueOf(type)) {
case FRAME:
FramePersist.loadFrom(Key.make(key), storagePath).get();
break;
case KEYED:
PersistUtils.read(URI.create(storagePath + "/" + key), ab -> ab.getKey(Key.make(key), fs));
break;
default:
throw new IllegalStateException("Unknown reference type " + type);
}
});
fs.blockForPending();
}
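// The references metadata written by exportReferences() is an IcedHashMap serialized
// in H2O's binary AutoBuffer form; conceptually it maps key names to reference types,
// e.g. (hypothetical keys):
//
//   "train.hex"    -> FRAME
//   "myWeightsVec" -> KEYED
//
// FRAME entries are re-materialized via FramePersist; KEYED entries are read back
// from their single AutoBuffer file into the DKV.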
void autoRecover() {
URI recoveryMetaUri = FileUtils.getURI(recoveryMetaFile());
if (!PersistUtils.exists(recoveryMetaUri)) {
LOG.info("No auto-recovery information found.");
return;
}
Map<String, String> recoveryInfo = PersistUtils.readStream(
recoveryMetaUri,
r -> new Gson().fromJson(r, new TypeToken<Map<String, String>>(){}.getType())
);
String className = recoveryInfo.get(INFO_CLASS);
Key<Job> jobKey = Key.make(recoveryInfo.get(INFO_JOB_KEY));
Key<?> resultKey = Key.make(recoveryInfo.get(INFO_RESULT_KEY));
if (Grid.class.getName().equals(className)) {
LOG.info("Auto-recovering previously interrupted grid search.");
Grid grid = Grid.importBinary(recoveryFile(resultKey), true);
GridSearch.resumeGridSearch(
jobKey, grid,
new GridSearchHandler.DefaultModelParametersBuilderFactory(),
(Recovery<Grid>) this
);
} else {
LOG.error("Unable to recover object of class " + className);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex
|
java-sources/ai/h2o/h2o-core/3.46.0.7/hex/grid/Grid.java
|
package hex.grid;
import hex.*;
import hex.faulttolerance.Recoverable;
import hex.faulttolerance.Recovery;
import water.*;
import water.api.schemas3.KeyV3;
import water.fvec.Frame;
import water.fvec.persist.PersistUtils;
import water.persist.Persist;
import water.util.*;
import water.util.PojoUtils.FieldNaming;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Array;
import java.net.URI;
import java.util.*;
import java.util.stream.Collectors;
import static hex.grid.GridSearch.IGNORED_FIELDS_PARAM_HASH;
/**
* A Grid of Models representing result of hyper-parameter space exploration.
* Lazily filled in, this object represents the potentially infinite variety
* of hyperparameters of a given model & dataset.
*
* @param <MP> type of model build parameters
*/
public class Grid<MP extends Model.Parameters> extends Lockable<Grid<MP>> implements ModelContainer<Model>, Recoverable<Grid<MP>> {
/**
* Publicly available Grid prototype - used by REST API.
*
* @see hex.schemas.GridSchemaV99
*/
public static final Grid GRID_PROTO = new Grid(null, null, null, new HashMap<>(), null, null, 0);
// A cache of double[] hyper-parameters mapping to Models.
private final IcedHashMap<IcedLong, Key<Model>> _models = new IcedHashMap<>();
private final IcedHashMap<Key<Model>, SearchFailure> _failures;
// Used "based" model parameters for this grid search.
private final MP _params;
// Names of the hyper-parameters used for this grid search.
private final String[] _hyper_names;
private HyperParameters _hyper_params;
private int _parallelism;
private HyperSpaceSearchCriteria _search_criteria;
private final FieldNaming _field_naming_strategy;
private ScoringInfo[] _scoring_infos = null;
/**
* A special key to identify failures of models that did not become part of the {@link Grid}
*/
private static final Key<Model> NO_MODEL_FAILURES_KEY = Key.makeUserHidden("GridSearchFailureEmptyModelKey");
/**
* Failure that occurred during hyperspace exploration.
*/
public static final class SearchFailure<MP extends Model.Parameters> extends Iced<SearchFailure> {
// Failed model parameters - represent points in hyper space for which model
// generation failed. If an element is null, the corresponding entry in
// <code>_failed_raw_params</code> describes the failing point.
private MP[] _failed_params;
// Detailed messages about a failure for given failed model parameters in
// <code>_failed_params</code>.
private String[] _failure_details;
// Collected stack trace for failure.
private String[] _failure_stack_traces;
// Contains "raw" representation of parameters which fail The parameters are
// represented in textual form, since simple <code>java.lang.Object</code>
// cannot be serialized by H2O serialization.
private String[][] _failed_raw_params;
// Collected warning messages.
private String[] _warning_details;
private SearchFailure(final Class<MP> paramsClass) {
_failed_params = paramsClass != null ? (MP[]) Array.newInstance(paramsClass, 0) : null;
_failure_details = new String[]{};
_failed_raw_params = new String[][]{};
_failure_stack_traces = new String[]{};
_warning_details = new String[]{};
}
/**
* This method appends a new item to the list of failed model parameters.
* <p/>
* <p> The failed parameters object represents a point in hyper space which cannot be used for
* model building. </p>
*
* @param params model parameters which caused model builder failure, can be null
* @param rawParams array of "raw" parameter values
* @param failureDetails textual description of model building failure
 * @param stackTrace stringified stack trace of the failure
*/
private void appendFailedModelParameters(MP params, String[] rawParams, String failureDetails, String stackTrace) {
assert rawParams != null : "API has to always pass rawParams";
// Append parameter
MP[] a = _failed_params;
MP[] na = Arrays.copyOf(a, a.length + 1);
na[a.length] = params;
_failed_params = na;
// Append message
String[] m = _failure_details;
String[] nm = Arrays.copyOf(m, m.length + 1);
nm[m.length] = failureDetails;
_failure_details = nm;
// Append raw params
String[][] rp = _failed_raw_params;
String[][] nrp = Arrays.copyOf(rp, rp.length + 1);
nrp[rp.length] = rawParams;
_failed_raw_params = nrp;
// Append stack trace
String[] st = _failure_stack_traces;
String[] nst = Arrays.copyOf(st, st.length + 1);
nst[st.length] = stackTrace;
_failure_stack_traces = nst;
}
private void appendWarningMessage(String[] hyper_parameter, String checkField) {
if (hyper_parameter != null && Arrays.asList(hyper_parameter).contains(checkField)) {
String warningMessage = null;
if ("alpha".equals(checkField)) {
warningMessage = "Adding alpha array to hyperparameter runs slower with gridsearch. " +
"This is due to the fact that the algo has to run initialization for every alpha value. " +
"Setting the alpha array as a model parameter will skip the initialization and run faster overall.";
}
if (warningMessage != null) {
Log.warn(warningMessage);
// Append message
String[] m = _warning_details;
String[] nm = Arrays.copyOf(m, m.length+1);
nm[m.length] = warningMessage;
_warning_details = nm;
}
}
}
public void appendFailedModelParameters(final MP[] params, final String[][] rawParams,
final String[] failureDetails, final String[] stackTraces) {
assert rawParams != null : "API has to always pass rawParams";
_failed_params = ArrayUtils.append(_failed_params, params);
_failed_raw_params = ArrayUtils.append(_failed_raw_params, rawParams);
_failure_details = ArrayUtils.append(_failure_details, failureDetails);
_failure_stack_traces = ArrayUtils.append(_failure_stack_traces, stackTraces);
}
/**
* This method appends a new item to the list of failed hyper-parameters.
* <p/>
* <p> The failed parameters object represents a point in hyper space which cannot be used to
 * construct new model parameters.</p>
* <p/>
* <p> Should be used only from <code>GridSearch</code> job.</p>
*
* @param rawParams list of "raw" hyper values which caused a failure to prepare model parameters
* @param e exception causing a failure
*/
/* package */ void appendFailedModelParameters(Object[] rawParams, Exception e) {
assert rawParams != null : "Raw parameters should be always != null !";
appendFailedModelParameters(null, ArrayUtils.toString(rawParams), e.getMessage(), StringUtils.toString(e));
}
public Model.Parameters[] getFailedParameters() {
return _failed_params;
}
public String[] getFailureDetails() {
return _failure_details;
}
public String[] getWarningDetails() {
return _warning_details;
}
public String[] getFailureStackTraces() {
return _failure_stack_traces;
}
public String[][] getFailedRawParameters() {
return _failed_raw_params;
}
public int getFailureCount() {
return _failed_params.length;
}
}
/**
* Construct a new grid object to store results of grid search.
*
* @param key reference to this object
* @param params initial parameters used by grid search
* @param hyperNames names of used hyper parameters
*/
protected Grid(
Key key, MP params,
String[] hyperNames,
Map<String, Object[]> hyperParams,
HyperSpaceSearchCriteria searchCriteria,
FieldNaming fieldNaming,
int parallelism
) {
super(key);
_params = params != null ? (MP) params.clone() : null;
_hyper_names = hyperNames;
_failures = new IcedHashMap<>();
_field_naming_strategy = fieldNaming;
update(hyperParams, searchCriteria, parallelism);
}
protected Grid(Key key, HyperSpaceWalker<MP, ?> walker, int parallelism) {
this(
key,
walker.getParams(),
walker.getAllHyperParamNames(),
walker.getHyperParams(),
walker.search_criteria(),
walker.getParametersBuilderFactory().getFieldNamingStrategy(),
parallelism
);
}
public void update(Map<String,Object[]> hyperParams, HyperSpaceSearchCriteria searchCriteria, int parallelism) {
_hyper_params = new HyperParameters(hyperParams);
_search_criteria = searchCriteria;
_parallelism = parallelism;
}
public Map<String, Object[]> getHyperParams() {
return _hyper_params.getValues();
}
public HyperSpaceSearchCriteria getSearchCriteria() {
return _search_criteria;
}
public int getParallelism() {
return _parallelism;
}
/**
 * Returns the name of the model included in this object. Note: this is only sensible for
 * Grids which search over a single class of Models.
*
* @return name of model (for example, "DRF", "GBM")
*/
public String getModelName() {
return _params.algoName();
}
public ScoringInfo[] getScoringInfos() {
return _scoring_infos;
}
public void setScoringInfos(ScoringInfo[] scoring_infos) {
this._scoring_infos = scoring_infos;
}
/*
* Ask the Grid for a suggested next hyperparameter value, given an existing Model as a starting
* point and the complete set of hyperparameter limits. Returning a NaN signals there is no next
* suggestion, which is reasonable if the obvious "next" value does not exist (e.g. exhausted all
 * possibilities of a categorical). It is OK if a Model for the suggested value already exists; this
* will be checked before building any model.
*
* @param h The h-th hyperparameter
* @param m A model to act as a starting point
* @param hyperLimits Upper bounds for this search
* @return Suggested next value for hyperparameter h or NaN if no next value
protected double suggestedNextHyperValue(int h, Model m, double[] hyperLimits) {
throw H2O.fail();
}*/
/**
* Returns the data frame used to train all these models. <p> All models are trained on the same
* data frame, but might be validated on multiple different frames. </p>
*
* @return training frame shared among all models
*/
public Frame getTrainingFrame() {
return _params.train();
}
/**
* Returns model for given combination of model parameters or null if the model does not exist.
*
* @param params parameters of the model
* @return A model run with these parameters, or null if the model does not exist.
*/
public Model getModel(MP params) {
Key<Model> mKey = getModelKey(params);
return mKey != null ? mKey.get() : null;
}
public Key<Model> getModelKey(MP params) {
long checksum = params.checksum(IGNORED_FIELDS_PARAM_HASH);
return getModelKey(checksum);
}
Key<Model> getModelKey(long paramsChecksum) {
Key<Model> mKey = _models.get(IcedLong.valueOf(paramsChecksum));
return mKey;
}
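// Lookup sketch (params stands for a fully specified MP instance): models are keyed
// by the checksum of their parameters, so an equivalent parameter object finds the
// already-built model.
//
//   Key<Model> k = grid.getModelKey(params); // null if this point was never built
//   Model m = grid.getModel(params);         // resolves the key against the DKV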
/* FIXME: should pass model parameters instead of checksum, but model
 * parameters are not immutable and model builder modifies them! */
/* package */
synchronized Key<Model> putModel(long checksum, Key<Model> modelKey) {
return _models.put(IcedLong.valueOf(checksum), modelKey);
}
/**
* This method appends a new item to the list of failed model parameters.
* <p/>
* <p> The failed parameters object represents a point in hyper space which cannot be used for
* model building. </p>
*
* @param modelKey Model the failures are related to
* @param params model parameters which caused model builder failure, can be null
* @param rawParams array of "raw" parameter values
* @param t the exception causing a failure
*/
private void appendFailedModelParameters(final Key<Model> modelKey, final MP params, final String[] rawParams,
final Throwable t) {
final String failureDetails = isJobCanceled(t) ? "Job Canceled" : t.getMessage();
final String stackTrace = StringUtils.toString(t);
final Key<Model> searchedKey = modelKey != null ? modelKey : NO_MODEL_FAILURES_KEY;
SearchFailure searchFailure = _failures.get(searchedKey);
if (searchFailure == null) {
searchFailure = new SearchFailure(_params.getClass());
_failures.put(searchedKey, searchFailure);
}
searchFailure.appendFailedModelParameters(params, rawParams, failureDetails, stackTrace);
searchFailure.appendWarningMessage(_hyper_names, "alpha");
}
static boolean isJobCanceled(final Throwable t) {
for (Throwable ex = t; ex != null; ex = ex.getCause()) {
if (ex instanceof Job.JobCancelledException) {
return true;
}
}
return false;
}
/**
* This method appends a new item to the list of failed model parameters.
* <p/>
* <p> The failed parameters object represents a point in hyper space which cannot be used for
* model building.</p>
* <p/>
* <p> Should be used only from <code>GridSearch</code> job.</p>
*
* @param params model parameters which caused model builder failure
* @param t the exception causing a failure
*/
void appendFailedModelParameters(final Key<Model> modelKey, final MP params, final Throwable t) {
assert params != null : "Model parameters should be always != null !";
String[] rawParams = ArrayUtils.toString(getHyperValues(params));
appendFailedModelParameters(modelKey, params, rawParams, t);
}
/**
* This method appends a new item to the list of failed hyper-parameters.
* <p/>
* <p> The failed parameters object represents a point in hyper space which cannot be used to
 * construct new model parameters.</p>
* <p/>
* <p> Should be used only from <code>GridSearch</code> job.</p>
*
* @param rawParams list of "raw" hyper values which caused a failure to prepare model parameters
* @param e the exception causing a failure
*/
void appendFailedModelParameters(final Key<Model> modelKey, final Object[] rawParams, final Exception e) {
assert rawParams != null : "Raw parameters should be always != null !";
appendFailedModelParameters(modelKey, null, ArrayUtils.toString(rawParams), e);
}
/**
* Returns keys of all models included in this object.
*
* @return list of model keys sorted lexically
*/
@Override
public Key<Model>[] getModelKeys() {
Key<Model>[] keys = _models.values().toArray(new Key[_models.size()]);
Arrays.sort(keys);
return keys;
}
/**
* Return all models included in this grid object.
*
* @return all models in this grid
*/
@Override
public Model[] getModels() {
Collection<Key<Model>> modelKeys = _models.values();
Model[] models = new Model[modelKeys.size()];
int i = 0;
for (Key<Model> mKey : modelKeys) {
models[i] = mKey != null ? mKey.get() : null;
i++;
}
return models;
}
/**
* Returns number of models in this grid.
*/
@Override
public int getModelCount() {
return _models.size();
}
/**
* Returns all failures currently listed in this Grid instance, including failures related to models not present in
* the grid that failed during the last run.
*
* @return An instance of {@link SearchFailure} with all failures currently linked to this {@link Grid}.
* An empty {@link SearchFailure} instance is returned if there are no failures listed.
*/
public SearchFailure getFailures() {
final Collection<SearchFailure> values = _failures.values();
// Original failures should be left intact. Also guard against mutation by outside callers.
final SearchFailure searchFailure = new SearchFailure(_params != null ? _params.getClass() : null);
for (SearchFailure f : values) {
searchFailure.appendFailedModelParameters(f._failed_params, f._failed_raw_params, f._failure_details,
f._failure_stack_traces);
}
searchFailure.appendWarningMessage(_hyper_names, "alpha");
return searchFailure;
}
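// Inspection sketch: the merged SearchFailure can be walked with its accessors
// (the loop index below is illustrative).
//
//   Grid.SearchFailure f = grid.getFailures();
//   for (int i = 0; i < f.getFailureCount(); i++)
//     Log.warn(f.getFailureDetails()[i] + "\n" + f.getFailureStackTraces()[i]);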
public int countTotalFailures() {
return _failures.values().stream().mapToInt(SearchFailure::getFailureCount).sum();
}
/**
* Removes failures found while walking the hyperspace related to models not present in Grid.
*/
protected void clearNonRelatedFailures(){
_failures.remove(NO_MODEL_FAILURES_KEY);
}
/**
 * Returns the values of the hyper parameters used for this grid search.
*
* @param parms model parameters
* @return values of hyper parameters used by grid search producing this grid object.
*/
public Object[] getHyperValues(MP parms) {
Object[] result = new Object[_hyper_names.length];
for (int i = 0; i < _hyper_names.length; i++) {
result[i] = PojoUtils.getFieldValue(parms, _hyper_names[i], _field_naming_strategy);
}
return result;
}
/**
* Returns an array of used hyper parameters names.
*
* @return names of hyper parameters used in this hyper search
*/
public String[] getHyperNames() {
return _hyper_names;
}
// Cleanup models and grid
@Override
protected Futures remove_impl(final Futures fs, boolean cascade) {
if (cascade) {
for (Key<Model> k : _models.values())
Keyed.remove(k, fs, true);
}
_models.clear();
return super.remove_impl(fs, cascade);
}
/**
* Write out K/V pairs
*/
@Override
protected AutoBuffer writeAll_impl(AutoBuffer ab) {
for (Key<Model> k : _models.values())
ab.putKey(k);
return super.writeAll_impl(ab);
}
@Override
protected Keyed readAll_impl(AutoBuffer ab, Futures fs) {
throw H2O.unimpl();
}
@Override
protected long checksum_impl() {
throw H2O.unimpl();
}
@Override
public Class<KeyV3.GridKeyV3> makeSchema() {
return KeyV3.GridKeyV3.class;
}
public TwoDimTable createSummaryTable(Key<Model>[] model_ids, String sort_by, boolean decreasing) {
if (_hyper_names == null || model_ids == null || model_ids.length == 0) return null;
int extra_len = sort_by != null ? 2 : 1;
String[] colTypes = new String[_hyper_names.length + extra_len];
String[] colFormats = new String[_hyper_names.length + extra_len];
// Set the default type to string
Arrays.fill(colTypes, "string");
Arrays.fill(colFormats, "%s");
// Change where appropriate (and only the hyper params)
for (int i = 0; i < _hyper_names.length; i++) {
Object[] objects = _hyper_params.getValues().get(_hyper_names[i]);
if (objects != null && objects.length > 0) {
Object obj = objects[0];
if (obj instanceof Double || obj instanceof Float) {
colTypes[i] = "double";
colFormats[i] = "%.5f";
} else if (obj instanceof Integer || obj instanceof Long) {
colTypes[i] = "long";
colFormats[i] = "%d";
}
}
}
if (sort_by != null) {
colTypes[colTypes.length-1] = "double";
colFormats[colFormats.length-1] = "%.5f";
}
String[] colNames = Arrays.copyOf(_hyper_names, _hyper_names.length + extra_len);
colNames[_hyper_names.length] = "model_ids";
if (sort_by != null)
colNames[_hyper_names.length + 1] = sort_by;
TwoDimTable table = new TwoDimTable("Hyper-Parameter Search Summary",
sort_by != null ? "ordered by " + (decreasing ? "decreasing " : "increasing ") + sort_by : null,
new String[_models.size()], colNames, colTypes, colFormats, "");
int i = 0;
for (Key<Model> km : model_ids) {
Model m = DKV.getGet(km);
Model.Parameters parms = m._parms;
int j;
for (j = 0; j < _hyper_names.length; ++j) {
Object paramValue = PojoUtils.getFieldValue(parms, _hyper_names[j], _field_naming_strategy);
if (paramValue.getClass().isArray()) {
// E.g., GLM alpha/lambda parameters can be arrays with one value
if (paramValue instanceof float[] && ((float[])paramValue).length == 1) paramValue = ((float[]) paramValue)[0];
else if (paramValue instanceof double[] && ((double[])paramValue).length == 1) paramValue = ((double[]) paramValue)[0];
else if (paramValue instanceof int[] && ((int[])paramValue).length == 1) paramValue = ((int[]) paramValue)[0];
else if (paramValue instanceof long[] && ((long[])paramValue).length == 1) paramValue = ((long[]) paramValue)[0];
else if (paramValue instanceof Object[] && ((Object[])paramValue).length == 1) paramValue = ((Object[]) paramValue)[0];
}
table.set(i, j, paramValue);
}
table.set(i, j, km.toString());
if (sort_by != null) table.set(i, j + 1, ModelMetrics.getMetricFromModel(km, sort_by));
i++;
}
Log.info(table);
return table;
}
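// Usage sketch ("logloss" is an illustrative metric name; it must be a metric that
// ModelMetrics.getMetricFromModel can resolve for these models):
//
//   TwoDimTable summary = grid.createSummaryTable(grid.getModelKeys(), "logloss", true);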
public TwoDimTable createScoringHistoryTable() {
if (0 == _models.values().size()) {
return ScoringInfo.createScoringHistoryTable(_scoring_infos, false, false, ModelCategory.Binomial, false, false);
}
Key<Model> k = null;
for (Key<Model> foo : _models.values()) {
k = foo;
break;
}
Model m = k.get();
if (null == m) {
Log.warn("Cannot create grid scoring history table; Model has been removed: " + k);
return ScoringInfo.createScoringHistoryTable(_scoring_infos, false, false, ModelCategory.Binomial, false, false);
}
ScoringInfo scoring_info = _scoring_infos != null && _scoring_infos.length > 0 ? _scoring_infos[0] : null;
return ScoringInfo.createScoringHistoryTable(_scoring_infos, (scoring_info != null ? scoring_info.validation : false), (scoring_info != null ? scoring_info.cross_validation: false), m._output.getModelCategory(), (scoring_info != null ? scoring_info.is_autoencoder : false), m._parms.hasCustomMetricFunc());
}
/**
 * Exports this Grid in a binary format using {@link AutoBuffer}. Related models are exported
 * only when {@code exportModels} is true.
 *
 * @param gridExportDir Full path to the folder this {@link Grid} should be saved to
 * @param exportModels whether the grid's models should be exported as well
 * @return Paths of all files written
*/
public List<String> exportBinary(final String gridExportDir, final boolean exportModels, ModelExportOption... options) {
Objects.requireNonNull(gridExportDir);
assert _key != null;
final String gridFilePath = gridExportDir + "/" + _key;
final URI gridUri = FileUtils.getURI(gridFilePath);
PersistUtils.write(gridUri, (ab) -> ab.put(this));
List<String> result = new ArrayList<>();
result.add(gridFilePath);
if (exportModels) {
exportModelsBinary(result, gridExportDir, options);
}
return result;
}
private void exportModelsBinary(final List<String> files, final String exportDir, ModelExportOption... options) {
Objects.requireNonNull(exportDir);
for (Model model : getModels()) {
try {
String modelFile = exportDir + "/" + model._key.toString();
files.add(modelFile);
model.exportBinaryModel(modelFile, true, options);
} catch (IOException e) {
throw new RuntimeException("Failed to write grid model " + model._key.toString(), e);
}
}
}
public static Grid importBinary(final String gridPath, final boolean loadReferences) {
final URI gridUri = FileUtils.getURI(gridPath);
if (!PersistUtils.exists(gridUri)) {
throw new IllegalArgumentException("Grid file not found " + gridUri);
}
final Persist persist = H2O.getPM().getPersistForURI(gridUri);
final String gridDirectory = persist.getParent(gridUri.toString());
final Grid grid = readGridBinary(gridUri, persist);
final Recovery<Grid> recovery = new Recovery<>(gridDirectory);
URI gridReferencesUri = FileUtils.getURI(recovery.referencesMetaFile(grid));
if (loadReferences && !PersistUtils.exists(gridReferencesUri)) {
throw new IllegalArgumentException("Requested to load with references, but the grid was saved without references.");
}
grid.importModelsBinary(gridDirectory);
if (loadReferences) {
recovery.loadReferences(grid);
}
DKV.put(grid);
return grid;
}
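// Export/import round trip, a minimal sketch (the directory is hypothetical). Note that
// importBinary() expects the path of the grid file itself, i.e. the export directory
// plus the grid's key:
//
//   List<String> written = grid.exportBinary("/checkpoints/grid1", true);
//   Grid restored = Grid.importBinary("/checkpoints/grid1/" + grid._key, false);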
private static Grid readGridBinary(final URI gridUri, Persist persist) {
try (final InputStream inputStream = persist.open(gridUri.toString())) {
final AutoBuffer gridAutoBuffer = new AutoBuffer(inputStream);
final Freezable freezable = gridAutoBuffer.get();
if (!(freezable instanceof Grid)) {
throw new IllegalArgumentException(String.format("Given file '%s' is not a Grid", gridUri.toString()));
}
return (Grid) freezable;
} catch (IOException e) {
throw new IllegalStateException("Failed to open grid file.", e);
}
}
private void importModelsBinary(final String exportDir) {
for (Key<Model> k : _models.values()) {
String modelFile = exportDir + "/" + k.toString();
try {
final Model<?, ?, ?> model = Model.importBinaryModel(modelFile);
assert model != null;
} catch (IOException e) {
throw new IllegalStateException("Unable to load model from " + modelFile, e);
}
}
}
@Override
public Set<Key<?>> getDependentKeys() {
return _params.getDependentKeys();
}
public MP getParams() {
return _params;
}
}
|