index
int64 | repo_id
string | file_path
string | content
string |
|---|---|---|---|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/SharedTreeSubgraph.java
|
package hex.genmodel.algos.tree;
import hex.genmodel.tools.PrintMojo;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
/**
 * Subgraph for representing a single tree.
 * A subgraph contains nodes; node ids index into {@link #nodesArray}.
 */
public class SharedTreeSubgraph {
    public final int subgraphNumber;
    public final String name;
    public SharedTreeNode rootNode;
    public int fontSize = 14; // default size
    public boolean setDecimalPlaces = false; // default to not change tree split threshold decimal places
    public int nPlaces = -1; // number of decimal places used when setDecimalPlaces is true

    // Even though all the nodes are reachable from rootNode, keep a second handy list of nodes.
    // For some bookkeeping tasks. Node ids assigned at creation index into this list.
    public ArrayList<SharedTreeNode> nodesArray;

    /**
     * Create a new tree object.
     * @param sn Tree number
     * @param n Tree name
     */
    SharedTreeSubgraph(int sn, String n) {
        subgraphNumber = sn;
        name = n;
        nodesArray = new ArrayList<>();
    }

    /**
     * Make the root node in the tree.
     * @return The node
     */
    public SharedTreeNode makeRootNode() {
        assert nodesArray.size() == 0;
        SharedTreeNode n = new SharedTreeNode(0, null, subgraphNumber, 0);
        n.setInclusiveNa(true); // root directs NAs down by default
        nodesArray.add(n);
        rootNode = n;
        return n;
    }

    /**
     * Round split thresholds to the given number of decimal places when printing.
     * @param nplaces number of decimal places
     */
    public void setDecimalPlace(int nplaces) {
        setDecimalPlaces = true;
        nPlaces = nplaces;
    }

    /** Set the font size used when rendering nodes. */
    public void setFontSize(int fontsize) {
        fontSize = fontsize;
    }

    /**
     * Make the left child of a node.
     * @param parent Parent node
     * @return The new child node
     */
    public SharedTreeNode makeLeftChildNode(SharedTreeNode parent) {
        SharedTreeNode child = new SharedTreeNode(nodesArray.size(), parent, subgraphNumber, parent.getDepth() + 1);
        nodesArray.add(child);
        makeLeftEdge(parent, child);
        return child;
    }

    /**
     * Make the right child of a node.
     * @param parent Parent node
     * @return The new child node
     */
    public SharedTreeNode makeRightChildNode(SharedTreeNode parent) {
        SharedTreeNode child = new SharedTreeNode(nodesArray.size(), parent, subgraphNumber, parent.getDepth() + 1);
        nodesArray.add(child);
        makeRightEdge(parent, child);
        return child;
    }

    private void makeLeftEdge(SharedTreeNode parent, SharedTreeNode child) {
        parent.setLeftChild(child);
    }

    private void makeRightEdge(SharedTreeNode parent, SharedTreeNode child) {
        parent.setRightChild(child);
    }

    /**
     * Walk from the root following a path of 'L' (left) / 'R' (right) characters.
     * @param path path specification, e.g. "LRL"
     * @return the node reached, or null if the path descends past a leaf
     * @throws IllegalArgumentException if the path contains any character other than 'L' and 'R'
     */
    public SharedTreeNode walkNodes(final String path) {
        SharedTreeNode n = rootNode;
        for (int i = 0; i < path.length(); i++) {
            if (n == null)
                return null;
            switch (path.charAt(i)) {
                case 'L':
                    n = n.getLeftChild();
                    break;
                case 'R':
                    n = n.getRightChild();
                    break;
                default:
                    throw new IllegalArgumentException("Invalid path specification '" + path +
                            "'. Paths must only be made of 'L' and 'R' characters.");
            }
        }
        return n;
    }

    /**
     * Score a single row by descending from the root to a leaf.
     * @param data input row
     * @return prediction value of the reached leaf
     */
    public float scoreTree(double[] data) {
        SharedTreeNode n = rootNode;
        while (!n.isLeaf()) {
            int id = n.next(data);
            n = nodesArray.get(id); // node ids index into nodesArray
        }
        return n.getPredValue();
    }

    /** Debug print of all nodes and edges to stdout. */
    void print() {
        System.out.println("");
        System.out.println("    ----- " + name + " -----");
        System.out.println("    Nodes");
        for (SharedTreeNode n : nodesArray) {
            n.print();
        }
        System.out.println("");
        System.out.println("    Edges");
        rootNode.printEdges();
    }

    /**
     * Print this tree as a non-directed Graphviz subgraph.
     * Delegates to the directed-aware overload; with {@code isDirected == false} the
     * output is identical to the previous standalone implementation.
     */
    void printDot(PrintStream os, int maxLevelsToPrintPerEdge, boolean detail, String optionalTitle, PrintMojo.PrintTreeOptions treeOptions) {
        printDot(os, maxLevelsToPrintPerEdge, detail, optionalTitle, treeOptions, false);
    }

    /**
     * Print this tree in Graphviz dot format.
     * @param os output stream
     * @param maxLevelsToPrintPerEdge limit on edge-label detail depth
     * @param detail whether to print detailed node information
     * @param optionalTitle title to use instead of the tree name (may be null)
     * @param treeOptions print options (font size, decimal places, ...)
     * @param isDirected emit a standalone "digraph" instead of a "subgraph cluster_"
     */
    public void printDot(PrintStream os, int maxLevelsToPrintPerEdge, boolean detail, String optionalTitle, PrintMojo.PrintTreeOptions treeOptions, boolean isDirected) {
        os.println("");
        os.println((isDirected ? "digraph " : "subgraph " + "cluster_") + subgraphNumber + " {");
        os.println("/* Nodes */");
        // Find the deepest level so nodes can be emitted level by level (keeps ranks aligned).
        int maxLevel = -1;
        for (SharedTreeNode n : nodesArray) {
            if (n.getDepth() > maxLevel) {
                maxLevel = n.getDepth();
            }
        }
        for (int level = 0; level <= maxLevel; level++) {
            os.println("");
            os.println("/* Level " + level + " */");
            os.println("{");
            rootNode.printDotNodesAtLevel(os, level, detail, treeOptions);
            os.println("}");
        }
        os.println("");
        os.println("/* Edges */");
        for (SharedTreeNode n : nodesArray) {
            n.printDotEdges(os, maxLevelsToPrintPerEdge, rootNode.getWeight(), detail, treeOptions);
        }
        os.println("");
        os.println("fontsize="+40); // fix title label to be 40pts
        String title = SharedTreeNode.escapeQuotes((optionalTitle != null) ? optionalTitle : name);
        os.println("label=\"" + title + "\"");
        os.println("}");
    }

    /** @return JSON-ready map with the tree index, name and recursive node structure. */
    Map<String, Object> toJson() {
        Map<String, Object> json = new HashMap<>();
        json.put("index", subgraphNumber);
        json.put("name", name);
        json.put("root", rootNode.toJson());
        return json;
    }

    /** @return all nodes of this tree as an array (snapshot of nodesArray). */
    public SharedTreeNode[] getNodes() {
        return this.nodesArray.toArray(new SharedTreeNode[0]);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        SharedTreeSubgraph that = (SharedTreeSubgraph) o;
        return subgraphNumber == that.subgraphNumber &&
                Objects.equals(name, that.name) &&
                Objects.equals(rootNode, that.rootNode) &&
                Objects.equals(nodesArray, that.nodesArray);
    }

    @Override
    public int hashCode() {
        // Hashing only subgraphNumber is consistent with equals (equal objects share it)
        // and avoids recursing through the whole tree.
        return Objects.hash(subgraphNumber);
    }

    @Override
    public String toString() {
        return "SharedTreeSubgraph{" +
                "subgraphNumber=" + subgraphNumber +
                ", name='" + name + '\'' +
                '}';
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/TreeBackedMojoModel.java
|
package hex.genmodel.algos.tree;
/**
 * Common interface of MOJO models that are backed by an ensemble of decision trees.
 * Exposes the tree layout and per-row tree-traversal information.
 */
public interface TreeBackedMojoModel extends SharedTreeGraphConverter {
/** @return number of tree groups in the model */
int getNTreeGroups();
/** @return number of trees within each tree group */
int getNTreesPerGroup();
/** @return the model's initial prediction value as stored in the MOJO */
double getInitF();
/**
 * Traces a row through every tree of the model.
 * @param row input row
 * @return one decision-path String per tree — presumably sequences of branch choices; confirm format with SharedTreeMojoModel
 */
String[] getDecisionPath(final double[] row);
/**
 * @param row input row
 * @return leaf-node assignments of the given row across all trees
 */
SharedTreeMojoModel.LeafNodeAssignments getLeafNodeAssignments(final double[] row);
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/TreeSHAP.java
|
// Code in this file started as 1-1 conversion of Native XGBoost implementation in C++ to Java
// please see:
// https://github.com/dmlc/xgboost/blob/master/src/tree/tree_model.cc
// All credit for this implementation goes to XGBoost Contributors:
// https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md
// Licensed under Apache License Version 2.0
package hex.genmodel.algos.tree;
import ai.h2o.algos.tree.INode;
import ai.h2o.algos.tree.INodeStat;
import java.io.Serializable;
import java.util.Arrays;
import java.util.BitSet;
/**
 * TreeSHAP computation for a single decision tree.
 * Started as a 1-1 conversion of the native XGBoost C++ implementation
 * (see the file-header credits); variable naming intentionally follows it.
 *
 * @param <R> row type
 * @param <N> node type
 * @param <S> node-statistics type (provides weights)
 */
public class TreeSHAP<R, N extends INode<R>, S extends INodeStat> implements TreeSHAPPredictor<R> {

    private final int rootNodeId;
    private final N[] nodes;                // tree nodes, indexed by node id
    private final S[] stats;                // per-node weight statistics, parallel to nodes
    private final float expectedTreeValue;  // weighted mean leaf value of the tree (bias term)

    @SuppressWarnings("unchecked")
    public TreeSHAP(N[] nodes) {
        // Convenience constructor for node types that implement INodeStat themselves;
        // the unchecked cast is safe only under that assumption.
        this(nodes, (S[]) nodes, 0);
    }

    /**
     * @param nodes tree nodes indexed by node id
     * @param stats per-node statistics, parallel to nodes
     * @param rootNodeId id of the root node
     */
    public TreeSHAP(N[] nodes, S[] stats, int rootNodeId) {
        this.rootNodeId = rootNodeId;
        this.nodes = nodes;
        this.stats = stats;
        this.expectedTreeValue = treeMeanValue();
    }

    /** One element of the decision path maintained by the TreeSHAP recursion. */
    private static class PathElement implements Serializable {
        int feature_index;    // feature split on at this path position
        float zero_fraction;  // fraction of paths flowing through when the feature is "absent"
        float one_fraction;   // fraction of paths flowing through when the feature is "present"
        float pweight;        // permutation weight of this path prefix

        void reset() {
            feature_index = 0;
            zero_fraction = 0;
            one_fraction = 0;
            pweight = 0;
        }
    }

    // extend our decision path with a fraction of one and zero extensions
    private void extendPath(PathPointer unique_path, int unique_depth,
                            float zero_fraction, float one_fraction,
                            int feature_index) {
        unique_path.get(unique_depth).feature_index = feature_index;
        unique_path.get(unique_depth).zero_fraction = zero_fraction;
        unique_path.get(unique_depth).one_fraction = one_fraction;
        unique_path.get(unique_depth).pweight = (unique_depth == 0 ? 1.0f : 0.0f);
        // propagate weights down the path (order matters: i+1 before i)
        for (int i = unique_depth - 1; i >= 0; i--) {
            unique_path.get(i+1).pweight += one_fraction * unique_path.get(i).pweight * (i + 1)
                    / (float) (unique_depth + 1);
            unique_path.get(i).pweight = zero_fraction * unique_path.get(i).pweight * (unique_depth - i)
                    / (float) (unique_depth + 1);
        }
    }

    // undo a previous extension of the decision path
    private void unwindPath(PathPointer unique_path, int unique_depth,
                            int path_index) {
        final float one_fraction = unique_path.get(path_index).one_fraction;
        final float zero_fraction = unique_path.get(path_index).zero_fraction;
        float next_one_portion = unique_path.get(unique_depth).pweight;
        for (int i = unique_depth - 1; i >= 0; --i) {
            if (one_fraction != 0) {
                final float tmp = unique_path.get(i).pweight;
                unique_path.get(i).pweight = next_one_portion * (unique_depth + 1)
                        / ((i + 1) * one_fraction);
                next_one_portion = tmp - unique_path.get(i).pweight * zero_fraction * (unique_depth - i)
                        / (float) (unique_depth + 1);
            } else if (zero_fraction != 0) {
                unique_path.get(i).pweight = (unique_path.get(i).pweight * (unique_depth + 1))
                        / (zero_fraction * (unique_depth - i));
            } else {
                unique_path.get(i).pweight = 0;
            }
        }
        // shift the remaining elements over the removed one
        for (int i = path_index; i < unique_depth; ++i) {
            unique_path.get(i).feature_index = unique_path.get(i+1).feature_index;
            unique_path.get(i).zero_fraction = unique_path.get(i+1).zero_fraction;
            unique_path.get(i).one_fraction = unique_path.get(i+1).one_fraction;
        }
    }

    // determine what the total permutation getWeight would be if
    // we unwound a previous extension in the decision path
    private float unwoundPathSum(final PathPointer unique_path, int unique_depth,
                                 int path_index) {
        final float one_fraction = unique_path.get(path_index).one_fraction;
        final float zero_fraction = unique_path.get(path_index).zero_fraction;
        float next_one_portion = unique_path.get(unique_depth).pweight;
        float total = 0;
        for (int i = unique_depth - 1; i >= 0; --i) {
            if (one_fraction != 0) {
                final float tmp = next_one_portion * (unique_depth + 1)
                        / ((i + 1) * one_fraction);
                total += tmp;
                next_one_portion = unique_path.get(i).pweight - tmp * zero_fraction * ((unique_depth - i)
                        / (float)(unique_depth + 1));
            } else if (zero_fraction != 0) {
                total += (unique_path.get(i).pweight / zero_fraction) / ((unique_depth - i)
                        / (float)(unique_depth + 1));
            } else {
                if (unique_path.get(i).pweight != 0)
                    throw new IllegalStateException("Unique path " + i + " must have zero getWeight");
            }
        }
        return total;
    }

    // recursive computation of SHAP values for a decision tree
    private void treeShap(R feat, float[] phi,
                          N node, S nodeStat, int unique_depth,
                          PathPointer parent_unique_path,
                          float parent_zero_fraction,
                          float parent_one_fraction, int parent_feature_index,
                          int condition, int condition_feature,
                          float condition_fraction) {
        // stop if we have no getWeight coming down to us
        if (condition_fraction == 0) return;

        // extend the unique path
        PathPointer unique_path = parent_unique_path.move(unique_depth);

        if (condition == 0 || condition_feature != parent_feature_index) {
            extendPath(unique_path, unique_depth, parent_zero_fraction,
                    parent_one_fraction, parent_feature_index);
        }
        final int split_index = node.getSplitIndex();

        // leaf node
        if (node.isLeaf()) {
            for (int i = 1; i <= unique_depth; ++i) {
                final float w = unwoundPathSum(unique_path, unique_depth, i);
                final PathElement el = unique_path.get(i);
                phi[el.feature_index] += w * (el.one_fraction - el.zero_fraction)
                        * node.getLeafValue() * condition_fraction;
            }
        // internal node
        } else {
            // find which branch is "hot" (meaning x would follow it)
            final int hot_index = node.next(feat);
            final int cold_index = hot_index == node.getLeftChildIndex() ? node.getRightChildIndex() : node.getLeftChildIndex();
            final float w = nodeStat.getWeight();
            // if w == 0 then weights in child nodes are 0 as well (are identical) -> that is why we split hot and cold evenly (0.5 fraction)
            final float zero_weight_fraction = 0.5f;
            final float hot_zero_fraction = w != 0 ? stats[hot_index].getWeight() / w : zero_weight_fraction;
            final float cold_zero_fraction = w != 0 ? stats[cold_index].getWeight() / w : zero_weight_fraction;
            float incoming_zero_fraction = 1;
            float incoming_one_fraction = 1;

            // see if we have already split on this feature,
            // if so we undo that split so we can redo it for this node
            int path_index = 0;
            for (; path_index <= unique_depth; ++path_index) {
                if (unique_path.get(path_index).feature_index == split_index)
                    break;
            }
            if (path_index != unique_depth + 1) {
                incoming_zero_fraction = unique_path.get(path_index).zero_fraction;
                incoming_one_fraction = unique_path.get(path_index).one_fraction;
                unwindPath(unique_path, unique_depth, path_index);
                unique_depth -= 1;
            }

            // divide up the condition_fraction among the recursive calls
            float hot_condition_fraction = condition_fraction;
            float cold_condition_fraction = condition_fraction;
            if (condition > 0 && split_index == condition_feature) {
                cold_condition_fraction = 0;
                unique_depth -= 1;
            } else if (condition < 0 && split_index == condition_feature) {
                hot_condition_fraction *= hot_zero_fraction;
                cold_condition_fraction *= cold_zero_fraction;
                unique_depth -= 1;
            }

            treeShap(feat, phi, nodes[hot_index], stats[hot_index], unique_depth + 1, unique_path,
                    hot_zero_fraction * incoming_zero_fraction, incoming_one_fraction,
                    split_index, condition, condition_feature, hot_condition_fraction);
            treeShap(feat, phi, nodes[cold_index], stats[cold_index], unique_depth + 1, unique_path,
                    cold_zero_fraction * incoming_zero_fraction, 0,
                    split_index, condition, condition_feature, cold_condition_fraction);
        }
    }

    /**
     * A movable view into a shared PathElement array; move(len) copies the current
     * unique path forward so that each recursion level can extend its own copy.
     */
    public static class PathPointer implements TreeSHAPPredictor.Workspace {
        PathElement[] path;
        int position;

        PathPointer(PathElement[] path) {
            this.path = path;
        }

        PathPointer(PathElement[] path, int position) {
            this.path = path;
            this.position = position;
        }

        PathElement get(int i) {
            return path[position + i];
        }

        PathPointer move(int len) {
            // copy the first `len` elements past the current window, then point there
            for (int i = 0; i < len; i++) {
                path[position + len + i].feature_index = path[position + i].feature_index;
                path[position + len + i].zero_fraction = path[position + i].zero_fraction;
                path[position + len + i].one_fraction = path[position + i].one_fraction;
                path[position + len + i].pweight = path[position + i].pweight;
            }
            return new PathPointer(path, position + len);
        }

        void reset() {
            path[0].reset();
        }

        @Override
        public int getSize() {
            return path.length;
        }
    }

    @Override
    public float[] calculateContributions(final R feat, float[] out_contribs) {
        return calculateContributions(feat, out_contribs, 0, -1, makeWorkspace());
    }

    @Override
    public float[] calculateContributions(final R feat,
                                          float[] out_contribs, int condition, int condition_feature,
                                          TreeSHAP.Workspace workspace) {
        // find the expected value of the tree's predictions
        if (condition == 0) {
            out_contribs[out_contribs.length - 1] += expectedTreeValue; // bias term in the last slot
        }

        PathPointer uniquePathWorkspace = (PathPointer) workspace;
        uniquePathWorkspace.reset();

        treeShap(feat, out_contribs, nodes[rootNodeId], stats[rootNodeId], 0, uniquePathWorkspace,
                1, 1, -1, condition, condition_feature, 1);
        return out_contribs;
    }

    @Override
    public double[] calculateInterventionalContributions(R feat, R background, double[] out_contribs, int[] catOffsets, boolean expand) {
        interventionalTreeShap(feat, background, out_contribs, nodes[rootNodeId], new BitSet(out_contribs.length), new BitSet(out_contribs.length), catOffsets, expand);
        return out_contribs;
    }

    private double w(int k, int d) {
        // assume d > k
        // (k! (d-k-1)! / d!) =>
        // lets denote a = min(k, d-k-1), b = max(k, d-k-1), then
        // (a!b!)/d! => a!/((b+1)(b+2)...(d-1)(d))
        assert k >= 0;
        assert d >= k;
        int a = Math.min(k, d-k-1);
        int b = Math.max(k, d-k-1);
        double nom=1, denom=1;
        for (int i = 2; i <= a; i++) {
            nom *= i;
        }
        for (int i = b+1; i <= d; i++) {
            denom *= i;
        }
        return nom/denom;
    }

    /**
     * Maps a model feature index to an index in the contributions output,
     * optionally expanding categorical features via catOffsets.
     * With catOffsets == null the index is returned unchanged.
     */
    int mapToOutputSpace(int featureIdx, double featureVal, int[] catOffsets, boolean expand) {
        if (null == catOffsets)
            return featureIdx;
        if (expand) {
            if (catOffsets[featureIdx+1] - catOffsets[featureIdx] == 1) {
                // Numerical variable
                return catOffsets[featureIdx];
            }
            // Categorical variable
            if (Double.isNaN(featureVal))
                return catOffsets[featureIdx+1]-1; // NA level maps to the last slot
            return catOffsets[featureIdx] + (int)featureVal;
        } else {
            if (catOffsets[catOffsets.length - 1] < featureIdx)
                return featureIdx - catOffsets[catOffsets.length - 1] + catOffsets.length - 1;
            int outputIdx = Arrays.binarySearch(catOffsets, featureIdx);
            if (outputIdx < 0)
                return -outputIdx - 2; // insertion point - 1 = enclosing categorical feature
            return outputIdx;
        }
    }

    /**
     * Interventional TreeSHAP recursion.
     * If catOffsets == null calculate contributions for one hot encoded feature with an assumption that cardinality can change
     * @param feat row being explained (X)
     * @param background background row (Z)
     * @param out_contribs accumulated contributions; last element is the bias term
     * @param node current node
     * @param sX features already decided along X's branch
     * @param sZ features already decided along Z's branch
     * @param catOffsets categorical offsets (may be null)
     * @param expand whether to expand categoricals into one-hot output slots
     */
    void interventionalTreeShap(R feat, R background, double[] out_contribs, N node, BitSet sX, BitSet sZ, int[] catOffsets, boolean expand) {
        // Notation here follows [1]. X denotes data point for which we calculate the contribution (feat) and Z denotes the data point from the background distribution.
        // [1] LABERGE, Gabriel and PEQUIGNOT, Yann, 2022. Understanding Interventional TreeSHAP: How and Why it Works. arXiv:2209.15123.
        if (node.isLeaf()) { // is leaf
            final int sXCard = sX.cardinality();
            final int sZCard = sZ.cardinality();
            double wPos = sXCard == 0 ? 0 : w(sXCard - 1, sXCard + sZCard);
            double wNeg = w(sXCard, sXCard + sZCard);
            for (int i = sX.nextSetBit(0); i >= 0; i = sX.nextSetBit(i + 1)) {
                out_contribs[i] += wPos * node.getLeafValue();
            }
            for (int i = sZ.nextSetBit(0); i >= 0; i = sZ.nextSetBit(i + 1)) {
                out_contribs[i] -= wNeg * node.getLeafValue();
            }
            if (sXCard == 0) // Bias Term (reuse the cardinality computed above)
                out_contribs[out_contribs.length-1] += node.getLeafValue();
        } else { // not a leaf node
            final int nextX = node.next(feat);
            final int nextZ = node.next(background);
            final int iN = mapToOutputSpace(node.getSplitIndex(), feat instanceof double[] ? ((double[])feat)[node.getSplitIndex()]: -1, catOffsets, expand);
            if (nextX == nextZ) {
                // this feature (iN) is present in both paths (for X and Z) => no change in contributions
                interventionalTreeShap(feat, background, out_contribs, nodes[nextX], sX, sZ, catOffsets, expand);
            } else if (sX.get(iN)) { // this feature (iN) was already seen in this path -> go the same way to keep the traversal disjoint
                interventionalTreeShap(feat, background, out_contribs, nodes[nextX], sX, sZ, catOffsets, expand);
            } else if (sZ.get(iN)) { // this feature (iN) was already seen in this path -> go the same way to keep the traversal disjoint
                interventionalTreeShap(feat, background, out_contribs, nodes[nextZ], sX, sZ, catOffsets, expand);
            } else { // this feature (iN) wasn't seen before go down both ways
                BitSet newSx = (BitSet) sX.clone();
                BitSet newSz = (BitSet) sZ.clone();
                newSx.set(iN);
                newSz.set(iN);
                interventionalTreeShap(feat, background, out_contribs, nodes[nextX], newSx, sZ, catOffsets, expand);
                interventionalTreeShap(feat, background, out_contribs, nodes[nextZ], sX, newSz, catOffsets, expand);
            }
        }
    }

    @Override
    public PathPointer makeWorkspace() {
        int wsSize = getWorkspaceSize();
        PathElement[] unique_path_data = new PathElement[wsSize];
        for (int i = 0; i < unique_path_data.length; i++) {
            unique_path_data[i] = new PathElement();
        }
        return new PathPointer(unique_path_data);
    }

    @Override
    public int getWorkspaceSize() {
        // triangular number: each recursion level keeps its own copy of the growing path
        final int maxd = treeDepth() + 2;
        return (maxd * (maxd + 1)) / 2;
    }

    private int treeDepth() {
        return nodeDepth(nodes, 0);
    }

    private static <N extends INode> int nodeDepth(N[] nodes, int node) {
        final N n = nodes[node];
        if (n.isLeaf()) {
            return 1;
        } else {
            return 1 + Math.max(nodeDepth(nodes, n.getLeftChildIndex()), nodeDepth(nodes, n.getRightChildIndex()));
        }
    }

    private float treeMeanValue() {
        return nodeMeanValue(nodes, stats, 0);
    }

    // weight-averaged leaf value over the whole (sub)tree; 0 for zero-weight subtrees
    private static <N extends INode, S extends INodeStat> float nodeMeanValue(N[] nodes, S[] stats, int node) {
        final N n = nodes[node];
        if (stats[node].getWeight() == 0) {
            return 0;
        } else if (n.isLeaf()) {
            return n.getLeafValue();
        } else {
            return (stats[n.getLeftChildIndex()].getWeight() * nodeMeanValue(nodes, stats, n.getLeftChildIndex()) +
                    stats[n.getRightChildIndex()].getWeight() * nodeMeanValue(nodes, stats, n.getRightChildIndex())) / stats[node].getWeight();
        }
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/TreeSHAPEnsemble.java
|
package hex.genmodel.algos.tree;
import java.util.Collection;
/**
 * TreeSHAP predictor that sums the contributions of an ensemble of per-tree predictors
 * and adds a single initial-prediction (bias) term.
 */
public class TreeSHAPEnsemble<R> implements TreeSHAPPredictor<R> {

    private final TreeSHAPPredictor<R>[] _predictors;
    private final float _initPred;       // bias added once per scored row
    private final int _wsMakerIndex;     // predictor whose workspace is big enough for all

    @SuppressWarnings("unchecked")
    public TreeSHAPEnsemble(Collection<TreeSHAPPredictor<R>> predictors, float initPred) {
        _predictors = predictors.toArray(new TreeSHAPPredictor[0]);
        _initPred = initPred;
        _wsMakerIndex = findWorkspaceMaker(_predictors);
    }

    @Override
    public float[] calculateContributions(R feat, float[] out_contribs) {
        return calculateContributions(feat, out_contribs, 0, -1, makeWorkspace());
    }

    @Override
    public float[] calculateContributions(R feat, float[] out_contribs, int condition, int condition_feature, TreeSHAP.Workspace workspace) {
        if (condition == 0) {
            out_contribs[out_contribs.length - 1] += _initPred; // bias term
        }
        // accumulate every tree's contributions into the shared output array
        for (int t = 0; t < _predictors.length; t++) {
            _predictors[t].calculateContributions(feat, out_contribs, condition, condition_feature, workspace);
        }
        return out_contribs;
    }

    @Override
    public double[] calculateInterventionalContributions(R feat, R background, double[] out_contribs, int[] catOffsets, boolean expand) {
        out_contribs[out_contribs.length - 1] += _initPred; // bias term
        for (int t = 0; t < _predictors.length; t++) {
            _predictors[t].calculateInterventionalContributions(feat, background, out_contribs, catOffsets, expand);
        }
        return out_contribs;
    }

    @Override
    public TreeSHAPPredictor.Workspace makeWorkspace() {
        if (_wsMakerIndex < 0) {
            return null; // empty ensemble
        }
        return _predictors[_wsMakerIndex].makeWorkspace();
    }

    @Override
    public int getWorkspaceSize() {
        if (_wsMakerIndex < 0) {
            return 0; // empty ensemble
        }
        return _predictors[_wsMakerIndex].getWorkspaceSize();
    }

    /** @return index of the predictor needing the largest workspace, or -1 for an empty ensemble */
    private static int findWorkspaceMaker(TreeSHAPPredictor<?>[] predictors) {
        if (predictors.length == 0)
            return -1;
        int bestIndex = 0;
        int bestSize = 0;
        for (int idx = 0; idx < predictors.length; idx++) {
            final int size = predictors[idx].getWorkspaceSize();
            if (size > bestSize) {
                bestSize = size;
                bestIndex = idx;
            }
        }
        return bestIndex;
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/TreeSHAPPredictor.java
|
package hex.genmodel.algos.tree;
import java.io.Serializable;
/**
 * Computes SHAP contribution values for a single row.
 * @param <R> type of the input row
 */
public interface TreeSHAPPredictor<R> extends Serializable {
/**
 * Calculates SHAP contributions for a row.
 * @param feat input row
 * @param out_contribs accumulator; the last element receives the bias term
 * @return out_contribs
 */
float[] calculateContributions(final R feat, float[] out_contribs);
/**
 * Calculates SHAP contributions, optionally conditioned on a feature,
 * reusing a pre-allocated workspace.
 */
float[] calculateContributions(final R feat,
float[] out_contribs, int condition, int condition_feature,
Workspace workspace);
/**
 * Calculates interventional SHAP contributions of a row against a background row.
 * @param catOffsets categorical offsets for mapping features to output slots (may be null)
 * @param expand whether to expand categoricals into one-hot output slots
 */
double[] calculateInterventionalContributions(final R feat, final R background, double[] out_contribs, int[] catOffsets, boolean expand);
/** @return a workspace sized for this predictor, reusable across calls */
Workspace makeWorkspace();
/** @return number of workspace elements this predictor requires */
int getWorkspaceSize();
/** Opaque reusable scratch space for contribution calculations. */
interface Workspace {
int getSize();
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/upliftdrf/UpliftDrfMojoModel.java
|
package hex.genmodel.algos.upliftdrf;
import hex.ModelCategory;
import hex.genmodel.algos.tree.SharedTreeMojoModel;
/**
 * MOJO scoring model for Distributed Uplift Random Forest.
 * preds[0] = uplift (preds[1] - preds[2]); preds[1] and preds[2] are
 * per-group averaged predictions — presumably treatment vs. control; confirm with the training side.
 */
public class UpliftDrfMojoModel extends SharedTreeMojoModel {

    protected double[] _thresholds;

    public UpliftDrfMojoModel(String[] columns, String[][] domains, String responseColumn, String treatmentColumn) {
        super(columns, domains, responseColumn, treatmentColumn);
    }

    @Override
    public double[] unifyPreds(double[] row, double offset, double[] preds) {
        assert _nclasses == 2;
        // Average the accumulated per-tree sums over the number of tree groups.
        preds[1] = preds[1] / _ntree_groups;
        preds[2] = preds[2] / _ntree_groups;
        // The uplift is the difference of the two averaged predictions.
        preds[0] = preds[1] - preds[2];
        return preds;
    }

    @Override
    public double[] score0(double[] row, double[] preds) {
        super.scoreAllTrees(row, preds);
        return unifyPreds(row, 0, preds);
    }

    @Override
    public double getInitF() {
        return 0;
    }

    /** @return uplift thresholds read from the MOJO */
    public double[] getThresholds() {
        return _thresholds;
    }

    @Override
    public int getPredsSize() {
        return 3; // uplift + two averaged predictions
    }

    @Override
    public int getPredsSize(ModelCategory mc) {
        return getPredsSize();
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/upliftdrf/UpliftDrfMojoReader.java
|
package hex.genmodel.algos.upliftdrf;
import hex.genmodel.algos.tree.SharedTreeMojoReader;
import java.io.IOException;
/**
 * MOJO reader for Distributed Uplift Random Forest models.
 */
public class UpliftDrfMojoReader extends SharedTreeMojoReader<UpliftDrfMojoModel> {

    @Override
    public String getModelName() {
        return "Distributed Uplift Random Forest";
    }

    @Override
    protected void readModelData() throws IOException {
        super.readModelData();
        // uplift-specific additions on top of the shared tree data
        _model._treatmentColumn = readkv("treatment_column");
        _model._thresholds = readkv("thresholds");
    }

    @Override
    protected UpliftDrfMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
        // NOTE(review): uplift models always carry a treatment column, so this overload
        // deliberately produces no model — confirm the framework only invokes the 4-arg variant.
        return null;
    }

    @Override
    protected UpliftDrfMojoModel makeModel(String[] columns, String[][] domains, String responseColumn, String treatmentColumn) {
        return new UpliftDrfMojoModel(columns, domains, responseColumn, treatmentColumn);
    }

    @Override
    public String mojoVersion() {
        return "1.40";
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/word2vec/Word2VecMojoModel.java
|
package hex.genmodel.algos.word2vec;
import hex.genmodel.MojoModel;
import java.util.HashMap;
/**
 * MOJO scoring model for Word2Vec word embeddings.
 * Supports vector lookup only; row scoring via score0 is unsupported.
 */
public class Word2VecMojoModel extends MojoModel implements WordEmbeddingModel {

    int _vecSize;                          // dimensionality of the embedding vectors
    HashMap<String, float[]> _embeddings;  // word -> embedding vector

    Word2VecMojoModel(String[] columns, String[][] domains, String responseName) {
        super(columns, domains, responseName);
    }

    @Override
    public int getVecSize() {
        return _vecSize;
    }

    @Override
    public float[] transform0(String word, float[] output) {
        float[] embedding = _embeddings.get(word);
        if (embedding != null) {
            // copies output.length floats; output is expected to be pre-allocated to getVecSize()
            System.arraycopy(embedding, 0, output, 0, output.length);
            return output;
        }
        return null; // out-of-dictionary word
    }

    @Override
    public double[] score0(double[] row, double[] preds) {
        throw new UnsupportedOperationException("Word2Vec Model doesn't support scoring using score0() function");
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/word2vec/Word2VecMojoReader.java
|
package hex.genmodel.algos.word2vec;
import hex.genmodel.ModelMojoReader;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Iterator;
/**
 * MOJO reader for Word2Vec models: restores the embedding table from
 * the "vectors" binary blob and the "vocabulary" text resource.
 */
public class Word2VecMojoReader extends ModelMojoReader<Word2VecMojoModel> {

    @Override
    public String getModelName() {
        return "Word2Vec";
    }

    /**
     * Reads vocabSize * vecSize big-endian floats (ByteBuffer default order)
     * and pairs them one vector per vocabulary word.
     * @throws IOException if the stored sizes and the actual data are inconsistent
     */
    @Override
    protected void readModelData() throws IOException {
        final int vocabSize = readkv("vocab_size", -1);
        final int vecSize = readkv("vec_size", -1);
        // readkv defaults to -1 when a key is missing; fail fast instead of
        // producing a nonsensical size comparison below.
        if (vocabSize < 0 || vecSize < 0)
            throw new IOException("Corrupted model, missing vocab_size/vec_size (" + vocabSize + ", " + vecSize + ")");
        _model._vecSize = vecSize;
        _model._embeddings = new HashMap<>(vocabSize);
        byte[] rawVectors = readblob("vectors");
        // compare in long: vocabSize * vecSize * 4 can overflow int for large models
        if (rawVectors.length != (long) vocabSize * vecSize * 4L)
            throw new IOException("Corrupted vector representation, unexpected size: " + rawVectors.length);
        ByteBuffer bb = ByteBuffer.wrap(rawVectors);
        Iterator<String> vocabulary = readtext("vocabulary", true).iterator();
        while (vocabulary.hasNext()) {
            float[] vec = new float[vecSize];
            for (int i = 0; i < vecSize; i++)
                vec[i] = bb.getFloat();
            _model._embeddings.put(vocabulary.next(), vec);
        }
        // duplicate vocabulary entries or a short word list would surface here
        if (_model._embeddings.size() != vocabSize)
            throw new IOException("Corrupted model, unexpected number of words: " + _model._embeddings.size());
    }

    @Override
    protected Word2VecMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
        return new Word2VecMojoModel(columns, domains, responseColumn);
    }

    @Override
    public String mojoVersion() {
        return "1.00";
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/word2vec/WordEmbeddingModel.java
|
package hex.genmodel.algos.word2vec;
/**
* Interface for models implementing Word Embeddings
*/
public interface WordEmbeddingModel {
/**
 * Dimensionality of the vector space of this Word Embedding model
 * @return length of word embeddings
 */
int getVecSize();
/**
 * Transforms a given word into a word vector
 * @param word input word
 * @param output pre-allocated word vector embedding (length should match getVecSize())
 * @return word vector embedding or null if the word is an out-of-dictionary word
 */
float[] transform0(String word, float[] output);
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/annotations/ModelPojo.java
|
package hex.genmodel.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Annotation to simplify identification of model pojos.
*/
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface ModelPojo {
/** Model name - in most cases the name of the generated class. */
String name();
/** Model algorithm name - drf, gbm, deeplearning, ... */
String algorithm();
/** Model memory requirements: estimated size of a model instance in memory
 * (presumably bytes; -1 when unknown). Helps the model pojo user
 * optimize pojo memory utilization. */
long requiredMemory() default -1L;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/DeepLearningModelAttributes.java
|
package hex.genmodel.attributes;
import com.google.gson.JsonObject;
import hex.genmodel.MojoModel;
import hex.genmodel.attributes.parameters.VariableImportancesHolder;
/**
 * Attributes of a Deep Learning MOJO model: the common {@link ModelAttributes}
 * plus variable importances parsed from the model JSON.
 */
public class DeepLearningModelAttributes extends ModelAttributes implements VariableImportancesHolder {
private final VariableImportances _variableImportances;
public DeepLearningModelAttributes(MojoModel model, JsonObject modelJson) {
super(model, modelJson);
// importances are extracted straight from the model's JSON representation
_variableImportances = VariableImportances.extractFromJson(modelJson);
}
/** @return variable importances from the model JSON (may be null if absent — TODO confirm extractFromJson's contract) */
public VariableImportances getVariableImportances(){
return _variableImportances;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/ModelAttributes.java
|
package hex.genmodel.attributes;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import hex.genmodel.MojoModel;
import hex.genmodel.algos.glm.GlmMojoModel;
import hex.genmodel.algos.glm.GlmMultinomialMojoModel;
import hex.genmodel.algos.glm.GlmOrdinalMojoModel;
import hex.genmodel.attributes.metrics.*;
import hex.genmodel.attributes.parameters.ModelParameter;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
 * Attributes of a MOJO model extracted from the MOJO itself
 * (the "experimental/modelDetails.json" dump - see {@link ModelJsonReader}).
 */
public class ModelAttributes implements Serializable {
  private final Table _modelSummary;
  private final Table _scoring_history;
  private final MojoModelMetrics _trainingMetrics;
  private final MojoModelMetrics _validation_metrics;
  private final MojoModelMetrics _cross_validation_metrics;
  private final Table _cross_validation_metrics_summary;
  private final ModelParameter[] _model_parameters;

  /**
   * Extracts all common model attributes from the model's JSON representation.
   * Missing sections (metrics, parameters, ...) simply yield null / empty values.
   *
   * @param model     MOJO model the attributes belong to; used to pick the right metrics class
   * @param modelJson parsed modelDetails.json of the MOJO
   */
  public ModelAttributes(MojoModel model, final JsonObject modelJson) {
    _modelSummary = ModelJsonReader.readTable(modelJson, "output.model_summary");
    _scoring_history = ModelJsonReader.readTable(modelJson, "output.scoring_history");

    if (ModelJsonReader.elementExists(modelJson, "output.training_metrics")) {
      _trainingMetrics = determineModelMetricsType(model);
      ModelJsonReader.fillObject(_trainingMetrics, modelJson, "output.training_metrics");
    } else _trainingMetrics = null;

    if (ModelJsonReader.elementExists(modelJson, "output.validation_metrics")) {
      _validation_metrics = determineModelMetricsType(model);
      ModelJsonReader.fillObject(_validation_metrics, modelJson, "output.validation_metrics");
    } else _validation_metrics = null;

    if (ModelJsonReader.elementExists(modelJson, "output.cross_validation_metrics")) {
      _cross_validation_metrics_summary = ModelJsonReader.readTable(modelJson, "output.cross_validation_metrics_summary");
      _cross_validation_metrics = determineModelMetricsType(model);
      ModelJsonReader.fillObject(_cross_validation_metrics, modelJson, "output.cross_validation_metrics");
    } else {
      _cross_validation_metrics = null;
      _cross_validation_metrics_summary = null;
    }

    if (ModelJsonReader.elementExists(modelJson, "parameters")) {
      final JsonArray jsonParameters = ModelJsonReader.findInJson(modelJson, "parameters").getAsJsonArray();
      final ArrayList<ModelParameter> modelParameters = new ArrayList<>(jsonParameters.size());
      for (int i = 0; i < jsonParameters.size(); i++) {
        modelParameters.add(new ModelParameter());
      }
      ModelJsonReader.fillObjects(modelParameters, jsonParameters);
      // Drop the "model_id" entry - it identifies the model itself and is not a real
      // hyper-parameter. Iterate backwards: forward iteration with remove(i) would skip
      // the element immediately following each removed one.
      for (int i = modelParameters.size() - 1; i >= 0; i--) {
        if ("model_id".equals(modelParameters.get(i).getName())) {
          modelParameters.remove(i);
        }
      }
      _model_parameters = modelParameters.toArray(new ModelParameter[modelParameters.size()]);
    } else {
      _model_parameters = new ModelParameter[0];
    }
  }

  /**
   * Chooses the most specific metrics holder for the given model's category
   * (and, for GLM variants, its concrete MOJO class).
   */
  private static MojoModelMetrics determineModelMetricsType(final MojoModel mojoModel) {
    switch (mojoModel.getModelCategory()) {
      case Binomial:
        if (mojoModel instanceof GlmMojoModel) {
          return new MojoModelMetricsBinomialGLM();
        } else return new MojoModelMetricsBinomial();
      case Multinomial:
        if (mojoModel instanceof GlmMultinomialMojoModel) {
          return new MojoModelMetricsMultinomialGLM();
        } else return new MojoModelMetricsMultinomial();
      case Regression:
        if (mojoModel instanceof GlmMojoModel) {
          return new MojoModelMetricsRegressionGLM();
        } else return new MojoModelMetricsRegression();
      case AnomalyDetection:
        return new MojoModelMetricsAnomaly();
      case Ordinal:
        if (mojoModel instanceof GlmOrdinalMojoModel) {
          return new MojoModelMetricsOrdinalGLM();
        } else return new MojoModelMetricsOrdinal();
      case CoxPH:
        return new MojoModelMetricsRegressionCoxPH();
      case BinomialUplift:
        return new MojoModelMetricsBinomialUplift();
      case Unknown:
      case Clustering:
      case AutoEncoder:
      case DimReduction:
      case WordEmbedding:
      default:
        return new MojoModelMetrics(); // Basic model metrics if nothing else is available
    }
  }

  /**
   * Model summary might vary not only per model, but per each version of the model.
   *
   * @return A {@link Table} with summary information about the underlying model.
   */
  public Table getModelSummary() {
    return _modelSummary;
  }

  /**
   * Retrieves model's scoring history.
   * @return A {@link Table} with model's scoring history, if existing. Otherwise null.
   */
  public Table getScoringHistory() {
    return _scoring_history;
  }

  /**
   * @return A {@link MojoModelMetrics} instance with training metrics. If available, otherwise null.
   */
  public MojoModelMetrics getTrainingMetrics() {
    return _trainingMetrics;
  }

  /**
   * @return A {@link MojoModelMetrics} instance with validation metrics. If available, otherwise null.
   */
  public MojoModelMetrics getValidationMetrics() {
    return _validation_metrics;
  }

  /**
   * @return A {@link MojoModelMetrics} instance with cross-validation metrics. If available, otherwise null.
   */
  public MojoModelMetrics getCrossValidationMetrics() {
    return _cross_validation_metrics;
  }

  /**
   * @return A {@link Table} instance with summary table of the cross-validation metrics. If available, otherwise null.
   */
  public Table getCrossValidationMetricsSummary() {
    return _cross_validation_metrics_summary;
  }

  /**
   * @return An array of {@link ModelParameter}. If there are no parameters, returns an empty array.
   *         Never null.
   */
  public ModelParameter[] getModelParameters() {
    return _model_parameters;
  }

  /**
   * Looks a parameter up by name.
   *
   * @param name parameter name (without the leading underscore)
   * @return the parameter's actual value, or null if no parameter with that name exists
   */
  public Object getParameterValueByName(String name) {
    for (ModelParameter parameter : _model_parameters) {
      if (parameter.name.equals(name)) {
        return parameter.actual_value;
      }
    }
    return null;
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/ModelAttributesGLM.java
|
package hex.genmodel.attributes;
import com.google.gson.JsonObject;
import hex.genmodel.MojoModel;
import hex.genmodel.attributes.parameters.VariableImportancesHolder;
public class ModelAttributesGLM extends ModelAttributes implements VariableImportancesHolder {
public final Table _coefficients_table;
private final VariableImportances _variableImportances;
public ModelAttributesGLM(MojoModel model, JsonObject modelJson) {
super(model, modelJson);
_coefficients_table = ModelJsonReader.readTable(modelJson, "output.coefficients_table");
_variableImportances = VariableImportances.extractFromJson(modelJson);
}
public VariableImportances getVariableImportances(){
return _variableImportances;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/ModelJsonReader.java
|
package hex.genmodel.attributes;
import com.google.gson.*;
import hex.genmodel.*;
import hex.genmodel.attributes.parameters.ColumnSpecifier;
import hex.genmodel.attributes.parameters.KeyValue;
import hex.genmodel.attributes.parameters.ParameterKey;
import hex.genmodel.attributes.parameters.StringPair;
import water.logging.Logger;
import water.logging.LoggerFactory;
import java.io.BufferedReader;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.List;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Utility class for extracting model details from JSON
*/
public class ModelJsonReader {
private static final Logger LOG = LoggerFactory.getLogger(ModelJsonReader.class);
public static final String MODEL_DETAILS_FILE = "experimental/modelDetails.json";
/**
* @param mojoReaderBackend
* @return {@link JsonObject} representing the deserialized Json.
*/
public static JsonObject parseModelJson(final MojoReaderBackend mojoReaderBackend) {
try (BufferedReader fileReader = mojoReaderBackend.getTextFile(MODEL_DETAILS_FILE)) {
final Gson gson = new GsonBuilder().create();
return gson.fromJson(fileReader, JsonObject.class);
} catch (Exception e){
return null;
}
}
/**
* Extracts a Table array from H2O's model serialized into JSON.
*
* @param modelJson Full JSON representation of a model
* @param tablePath Path in the given JSON to the desired table array. Levels are dot-separated.
* @return An instance of {@link Table} [], if there was a table array found by following the given path. Otherwise null.
*/
public static Table[] readTableArray(final JsonObject modelJson, final String tablePath) {
Table[] tableArray;
Objects.requireNonNull(modelJson);
JsonElement jsonElement = findInJson(modelJson, tablePath);
if (jsonElement.isJsonNull())
return null;
JsonArray jsonArray = jsonElement.getAsJsonArray();
tableArray = new Table[jsonArray.size()];
for (int i = 0; i < jsonArray.size(); i++) {
Table table = readTableJson(jsonArray.get(i).getAsJsonObject());
tableArray[i] = table;
}
return tableArray;
}
private static Table readTableJson(JsonObject tableJson){
final int rowCount = tableJson.get("rowcount").getAsInt();
final String[] columnHeaders;
final String[] columnFormats;
final Table.ColumnType[] columnTypes;
final Object[][] data;
// Extract column attributes
final JsonArray columns = findInJson(tableJson, "columns").getAsJsonArray();
final int columnCount = columns.size();
columnHeaders = new String[columnCount];
columnTypes = new Table.ColumnType[columnCount];
columnFormats = new String[columnCount];
for (int i = 0; i < columnCount; i++) {
final JsonObject column = columns.get(i).getAsJsonObject();
columnHeaders[i] = column.get("description").getAsString();
columnTypes[i] = Table.ColumnType.extractType(column.get("type").getAsString());
columnFormats[i] = column.get("format").getAsString();
}
// Extract data
JsonArray dataColumns = findInJson(tableJson, "data").getAsJsonArray();
data = new Object[columnCount][rowCount];
for (int i = 0; i < columnCount; i++) {
JsonArray column = dataColumns.get(i).getAsJsonArray();
for (int j = 0; j < rowCount; j++) {
final JsonElement cellValue = column.get(j);
if (cellValue == null || !cellValue.isJsonPrimitive()) {
data[i][j] = null;
continue;
}
JsonPrimitive primitiveValue = cellValue.getAsJsonPrimitive();
switch (columnTypes[i]) {
case LONG:
if (primitiveValue.isNumber()) {
data[i][j] = primitiveValue.getAsLong();
} else {
data[i][j] = null;
}
break;
case DOUBLE:
if (!primitiveValue.isJsonNull()) { // isNumber skips NaNs
data[i][j] = primitiveValue.getAsDouble();
} else {
data[i][j] = null;
}
break;
case FLOAT:
if (!primitiveValue.isJsonNull()) { // isNumber skips NaNs
data[i][j] = primitiveValue.getAsFloat();
} else {
data[i][j] = null;
}
case INT:
if (primitiveValue.isNumber()) {
data[i][j] = primitiveValue.getAsInt();
} else {
data[i][j] = null;
}
case STRING:
data[i][j] = primitiveValue.getAsString();
break;
}
}
}
return new Table(tableJson.get("name").getAsString(), tableJson.get("description").getAsString(),
new String[rowCount], columnHeaders, columnTypes, null, columnFormats, data);
}
/**
* Extracts a Table from H2O's model serialized into JSON.
*
* @param modelJson Full JSON representation of a model
* @param tablePath Path in the given JSON to the desired table. Levels are dot-separated.
* @return An instance of {@link Table}, if there was a table found by following the given path. Otherwise null.
*/
public static Table readTable(final JsonObject modelJson, final String tablePath) {
Objects.requireNonNull(modelJson);
JsonElement potentialTableJson = findInJson(modelJson, tablePath);
if (potentialTableJson.isJsonNull()) {
LOG.debug(String.format("Table '%s' doesn't exist in MojoModel dump.", tablePath));
return null;
}
return readTableJson(potentialTableJson.getAsJsonObject());
}
public static <T> void fillObjects(final List<T> objects, final JsonArray from) {
for (int i = 0; i < from.size(); i++) {
final JsonElement jsonElement = from.get(i);
fillObject(objects.get(i), jsonElement, "");
}
}
public static void fillObject(final Object object, final JsonElement from, final String elementPath) {
Objects.requireNonNull(object);
Objects.requireNonNull(elementPath);
final JsonElement jsonSourceObject = findInJson(from, elementPath);
if (jsonSourceObject.isJsonNull()) {
LOG.warn(String.format("Element '%s' not found in JSON. Skipping. Object '%s' is not populated by values.",
elementPath, object.getClass().getName()));
return;
}
final JsonObject jsonSourceObj = jsonSourceObject.getAsJsonObject();
final Class<?> aClass = object.getClass();
final Field[] declaredFields = aClass.getFields();
for (int i = 0; i < declaredFields.length; i++) {
Field field = declaredFields[i];
if (Modifier.isTransient(field.getModifiers())) continue;
final Class<?> type = field.getType();
final SerializedName serializedName = field.getAnnotation(SerializedName.class);
final String fieldName;
if (serializedName == null) {
String name = field.getName();
fieldName = name.charAt(0) == '_' ? name.substring(1) : name;
} else {
fieldName = serializedName.value();
}
try {
field.setAccessible(true);
assert field.isAccessible();
Object value = null;
if (type.isAssignableFrom(Object.class)) {
final JsonElement jsonElement = jsonSourceObj.get(fieldName);
if (jsonElement != null) {
// There might be a "type" element at the same leven in the JSON tree, serving as a hint.
// Especially useful for numeric types.
final JsonElement typeElement = jsonSourceObj.get("type");
final TypeHint typeHint;
if (!typeElement.isJsonNull()) {
typeHint = TypeHint.fromStringIgnoreCase(typeElement.getAsString());
} else {
typeHint = null;
}
value = convertBasedOnJsonType(jsonElement, typeHint);
}
} else if (type.isAssignableFrom(double.class) || type.isAssignableFrom(Double.class)) {
final JsonElement jsonElement = jsonSourceObj.get(fieldName);
if (jsonElement != null && !jsonElement.isJsonNull()) value = jsonElement.getAsDouble();
} else if (type.isAssignableFrom(int.class) || type.isAssignableFrom(Integer.class)) {
final JsonElement jsonElement = jsonSourceObj.get(fieldName);
if (jsonElement != null && !jsonElement.isJsonNull()) value = jsonElement.getAsInt();
} else if (type.isAssignableFrom(long.class) || type.isAssignableFrom(Long.class)) {
final JsonElement jsonElement = jsonSourceObj.get(fieldName);
if (jsonElement != null && !jsonElement.isJsonNull()) value = jsonElement.getAsLong();
} else if (type.isAssignableFrom(String.class)) {
final JsonElement jsonElement = jsonSourceObj.get(fieldName);
if (jsonElement != null && !jsonElement.isJsonNull()) value = jsonElement.getAsString();
} else if (type.isAssignableFrom(Table.class)) {
final JsonElement jsonElement = jsonSourceObj.get(fieldName);
if (jsonElement != null && !jsonElement.isJsonNull()) value = readTable(jsonElement.getAsJsonObject(), serializedName != null ? serializedName.insideElementPath() : "");
}
if (value != null) field.set(object, value);
} catch (IllegalAccessException e) {
System.err.println(String.format("Field '%s' could not be accessed. Ignoring.", fieldName));
} catch (ClassCastException | UnsupportedOperationException e) {
System.err.println(String.format("Field '%s' could not be casted to '%s'. Ignoring.", fieldName, type.toString()));
}
}
}
private static final Pattern ARRAY_PATTERN = Pattern.compile("\\[\\]");
/**
* TypeHint contained in the model's JSON. There might be more types contained than listed here - these are the ones
* used.
*/
private enum TypeHint {
INT, FLOAT, DOUBLE, LONG, DOUBLE_ARR, FLOAT_ARR, STRING_ARR, STRING_ARR_ARR, INT_ARR, LONG_ARR, OBJECT_ARR;
private static TypeHint fromStringIgnoreCase(final String from) {
final Matcher matcher = ARRAY_PATTERN.matcher(from);
final boolean isArray = matcher.find();
final String transformedType = matcher.replaceAll("_ARR");
try {
return valueOf(transformedType.toUpperCase());
} catch (IllegalArgumentException e) {
if (isArray) {
return OBJECT_ARR;
} else {
return null;
}
}
}
}
/**
* Convers a {@link JsonElement} to a corresponding Java class instance, covering all basic "primitive" types (String, numbers. ..)
* + selected Iced classes.
*
* @param convertFrom JsonElement to convert from
* @param typeHint Optional {@link TypeHint} value. Might be null.
* @return
*/
private static Object convertBasedOnJsonType(final JsonElement convertFrom, final TypeHint typeHint) {
final Object convertTo;
if (convertFrom.isJsonNull()) {
convertTo = null;
} else if (convertFrom.isJsonArray()) {
final JsonArray array = convertFrom.getAsJsonArray();
if (typeHint == null) {
convertTo = null;
} else {
switch (typeHint) {
case OBJECT_ARR:
final Object[] arrO = new Object[array.size()];
for (int i = 0; i < array.size(); i++) {
JsonElement e = array.get(i);
if (e.isJsonPrimitive()) {
arrO[i] = convertBasedOnJsonType(e, null);
} else {
arrO[i] = convertJsonObject(e.getAsJsonObject());
}
}
convertTo = arrO;
break;
case DOUBLE_ARR:
final double[] arrD = new double[array.size()];
for (int i = 0; i < array.size(); i++) {
arrD[i] = array.get(i).getAsDouble();
}
convertTo = arrD;
break;
case FLOAT_ARR:
final double[] arrF = new double[array.size()];
for (int i = 0; i < array.size(); i++) {
arrF[i] = array.get(i).getAsDouble();
}
convertTo = arrF;
break;
case STRING_ARR:
final String[] arrS = new String[array.size()];
for (int i = 0; i < array.size(); i++) {
arrS[i] = array.get(i).getAsString();
}
convertTo = arrS;
break;
case STRING_ARR_ARR:
final String[][] arrSS = new String[array.size()][];
for (int i = 0; i < array.size(); i++) {
final JsonArray arr2 = array.get(i).getAsJsonArray();
arrSS[i] = new String[arr2.size()];
for (int j = 0; j < arr2.size(); j++) {
arrSS[i][j] = arr2.get(j).getAsString();
}
}
convertTo = arrSS;
break;
case INT_ARR:
final int[] arrI = new int[array.size()];
for (int i = 0; i < array.size(); i++) {
arrI[i] = array.get(i).getAsInt();
}
convertTo = arrI;
break;
case LONG_ARR:
final long[] arrL = new long[array.size()];
for (int i = 0; i < array.size(); i++) {
arrL[i] = array.get(i).getAsLong();
}
convertTo = arrL;
break;
default:
convertTo = null;
break;
}
}
} else if (convertFrom.isJsonPrimitive()) {
final JsonPrimitive convertedPrimitive = convertFrom.getAsJsonPrimitive();
if (convertedPrimitive.isBoolean()) {
convertTo = convertedPrimitive.getAsBoolean();
} else if (convertedPrimitive.isString()) {
convertTo = convertedPrimitive.getAsString();
} else if (convertedPrimitive.isNumber()) {
if (typeHint == null) {
convertTo = convertedPrimitive.getAsDouble();
} else {
switch (typeHint) {
case INT:
convertTo = convertedPrimitive.getAsInt();
break;
case FLOAT:
convertTo = convertedPrimitive.getAsFloat();
break;
case DOUBLE:
convertTo = convertedPrimitive.getAsDouble();
break;
case LONG:
convertTo = convertedPrimitive.getAsLong();
break;
default:
convertTo = convertedPrimitive.getAsDouble();
}
}
} else {
convertTo = null;
}
} else if (convertFrom.isJsonObject()) {
convertTo = convertJsonObject(convertFrom.getAsJsonObject());
} else {
convertTo = null;
}
return convertTo;
}
private static Object convertJsonObject(final JsonObject convertFrom) {
final JsonElement meta = convertFrom.get("__meta");
if (meta == null || meta.isJsonNull()) return null;
final String schemaName = findInJson(meta, "schema_name").getAsString();
if ("FrameKeyV3".equals(schemaName) || "ModelKeyV3".equals(schemaName)) {
final String name = convertFrom.get("name").getAsString();
final String type = convertFrom.get("type").getAsString();
final ParameterKey.Type convertedType = convertKeyType(type);
final String url = convertFrom.get("URL").getAsString();
return new ParameterKey(name, convertedType, url);
} else if ("ColSpecifierV3".equals(schemaName)) {
final String columnName = convertFrom.get("column_name").getAsString();
final JsonElement is_member_of_frames = convertFrom.get("is_member_of_frames");
final String[] memberOfFrames;
if (is_member_of_frames.isJsonArray()) {
memberOfFrames = convertStringJsonArray(convertFrom.get("is_member_of_frames").getAsJsonArray());
} else {
memberOfFrames = null;
}
return new ColumnSpecifier(columnName, memberOfFrames);
} else if ("KeyValueV3".equals(schemaName)) {
return new KeyValue(
convertFrom.get("key").getAsString(),
convertFrom.get("value").getAsDouble()
);
} else if ("StringPairV3".equals(schemaName)) {
return new StringPair(
convertFrom.get("a").getAsString(),
convertFrom.get("b").getAsString()
);
} else {
LOG.error(String.format("Error reading MOJO JSON: Object not supported: \n %s ", convertFrom.toString()));
return null;
}
}
private static String[] convertStringJsonArray(final JsonArray jsonArray) {
Objects.requireNonNull(jsonArray);
if (jsonArray.isJsonNull()) return null;
final String[] strings = new String[jsonArray.size()];
for (int i = 0; i < jsonArray.size(); i++) {
final JsonElement potentialStringMember = jsonArray.get(i);
if (!potentialStringMember.isJsonNull()) {
strings[i] = jsonArray.get(i).getAsString();
}
}
return strings;
}
/**
* Converts a string key type to enum representation. All unknown keys are considered to be
* Type.Generic.
*
* @param type A Key type in String representation to be converted
* @return An instance of {@link ParameterKey.Type} enum
*/
private static final ParameterKey.Type convertKeyType(final String type) {
if ("Key<Frame>".equals(type)) {
return ParameterKey.Type.FRAME;
} else if ("Key<Model>".equals(type)) {
return ParameterKey.Type.MODEL;
} else return ParameterKey.Type.GENERIC;
}
private static final Pattern JSON_PATH_PATTERN = Pattern.compile("\\.|\\[|\\]");
/**
* Finds an element in GSON's JSON document representation
*
* @param jsonElement A (potentially complex) element to search in
* @param jsonPath Path in the given JSON to the desired table. Levels are dot-separated.
* E.g. 'model._output.variable_importances'.
* @return JsonElement, if found. Otherwise {@link JsonNull}.
*/
protected static JsonElement findInJson(final JsonElement jsonElement, final String jsonPath) {
final String[] route = JSON_PATH_PATTERN.split(jsonPath);
JsonElement result = jsonElement;
for (String key : route) {
key = key.trim();
if (key.isEmpty())
continue;
if (result == null) {
break;
}
if (result.isJsonObject()) {
result = ((JsonObject) result).get(key);
} else if (result.isJsonArray()) {
int value = Integer.valueOf(key) - 1;
result = ((JsonArray) result).get(value);
} else break;
}
if (result == null) {
return JsonNull.INSTANCE;
} else {
return result;
}
}
/**
*
* @param jsonElement A (potentially complex) element to search in
* @param jsonPath Path in the given JSON to the desired table. Levels are dot-separated.
* E.g. 'model._output.variable_importances'.
* @return True if the element exists under the given path in the target JSON, otherwise false
*/
public static boolean elementExists(JsonElement jsonElement, String jsonPath){
final boolean isEmpty = findInJson(jsonElement, jsonPath).isJsonNull();
return !isEmpty;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/SerializedName.java
|
package hex.genmodel.attributes;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import static java.lang.annotation.ElementType.*;
/**
 * Maps a public field to a differently-named element in the MOJO model JSON.
 * Read reflectively by ModelJsonReader.fillObject: when present, {@link #value()} is used
 * as the JSON key instead of the field name (with its leading underscore stripped).
 */
@Retention(RetentionPolicy.RUNTIME)
@Target({FIELD})
public @interface SerializedName {
  /** Name of the JSON element the annotated field is populated from. */
  String value();
  /** Optional path inside the named element; used by fillObject when reading nested Table values. */
  String insideElementPath() default "";
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/SharedTreeModelAttributes.java
|
package hex.genmodel.attributes;
import com.google.gson.JsonObject;
import hex.genmodel.MojoModel;
import hex.genmodel.attributes.parameters.VariableImportancesHolder;
public class SharedTreeModelAttributes extends ModelAttributes implements VariableImportancesHolder {
private final VariableImportances _variableImportances;
public SharedTreeModelAttributes(JsonObject modelJson, MojoModel model) {
super(model, modelJson);
_variableImportances = VariableImportances.extractFromJson(modelJson);
}
/**
* @return A {@link VariableImportances} instance with variable importances for each feature used.
*/
public VariableImportances getVariableImportances() {
return _variableImportances;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/Table.java
|
package hex.genmodel.attributes;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Objects;
/**
 * A two-dimensional table capable of containing generic values in each cell.
 * Useful for description of various models. Cell values are stored column-major:
 * _cellValues[column][row].
 */
public class Table implements Serializable {
  private String _tableHeader;
  private String _tableDescription;
  private String[] _rowHeaders;
  private String[] _colHeaders;
  private ColumnType[] _colTypes;
  private Object[][] _cellValues;
  private String _colHeaderForRowHeaders;
  private String[] _colFormats;

  /**
   * @param tableHeader            table name; null is replaced by ""
   * @param tableDescription       table description; null is replaced by ""
   * @param rowHeaders             per-row headers (required); null entries are replaced by ""
   * @param columnHeaders          per-column headers (required); null entries are replaced by ""
   * @param columnTypes            per-column types; null means "all STRING"
   * @param colHeaderForRowHeaders header of the row-header column (may be null)
   * @param colFormats             per-column printf-style formats
   * @param cellValues             cell data, column-major (required)
   */
  public Table(String tableHeader, String tableDescription, String[] rowHeaders, String[] columnHeaders,
               ColumnType[] columnTypes, String colHeaderForRowHeaders, String[] colFormats, Object[][] cellValues) {
    Objects.requireNonNull(columnHeaders);
    Objects.requireNonNull(rowHeaders);
    Objects.requireNonNull(cellValues);
    _colHeaderForRowHeaders = colHeaderForRowHeaders;

    // Fill row headers
    for (int r = 0; r < rowHeaders.length; ++r) {
      if (rowHeaders[r] == null)
        rowHeaders[r] = "";
    }
    // Fill column headers
    for (int c = 0; c < columnHeaders.length; ++c) {
      if (columnHeaders[c] == null)
        columnHeaders[c] = "";
    }
    if (columnTypes == null) {
      columnTypes = new ColumnType[columnHeaders.length];
      // BUGFIX: originally filled the still-null field _colTypes (guaranteed NPE);
      // the local array is the one that must be defaulted.
      Arrays.fill(columnTypes, ColumnType.STRING);
    }
    // BUGFIX: the original set "" for null header/description first and then
    // unconditionally overwrote both fields with the (possibly null) arguments.
    _tableHeader = tableHeader == null ? "" : tableHeader;
    _tableDescription = tableDescription == null ? "" : tableDescription;
    _rowHeaders = rowHeaders;
    _colHeaders = columnHeaders;
    _colTypes = columnTypes;
    _cellValues = cellValues;
    _colFormats = colFormats;
  }

  /** Column cell types; unknown type strings fall back to STRING. */
  public enum ColumnType {
    LONG,
    DOUBLE,
    FLOAT,
    INT,
    STRING;

    /**
     * Parses a type name (case-insensitive, surrounding whitespace ignored).
     *
     * @param type type name, e.g. "double"; null or unrecognized yields STRING
     * @return the matching ColumnType, or STRING as the fallback
     */
    public static ColumnType extractType(final String type) {
      if (type == null) return ColumnType.STRING;
      String formattedType = type.trim().toUpperCase();
      try {
        return ColumnType.valueOf(formattedType);
      } catch (IllegalArgumentException e) {
        return ColumnType.STRING;
      }
    }
  }

  public String getTableHeader() {
    return _tableHeader;
  }

  public String getTableDescription() {
    return _tableDescription;
  }

  public String[] getRowHeaders() {
    return _rowHeaders;
  }

  public String[] getColHeaders() {
    return _colHeaders;
  }

  public ColumnType[] getColTypes() {
    return _colTypes;
  }

  /** @return column types as lowercase strings (e.g. "double"), parallel to getColTypes(). */
  public String[] getColTypesString() {
    String[] colTypesString = new String[_colTypes.length];
    for (int i = 0; i < colTypesString.length; i++) {
      colTypesString[i] = _colTypes[i].toString().toLowerCase();
    }
    return colTypesString;
  }

  public Object[][] getCellValues() {
    return _cellValues;
  }

  public String getColHeaderForRowHeaders() {
    return _colHeaderForRowHeaders;
  }

  /** @return number of columns (data is stored column-major). */
  public int columns() {
    return _cellValues.length;
  }

  /** @return number of rows; 0 for a table with no columns or a null first column. */
  public int rows() {
    // Guard against zero columns - indexing _cellValues[0] would throw otherwise.
    return _cellValues.length > 0 && _cellValues[0] != null ? _cellValues[0].length : 0;
  }

  /** @return the value at (column, row); note the column-first argument order. */
  public Object getCell(final int column, final int row) {
    return _cellValues[column][row];
  }

  public String[] getColumnFormats() {
    return _colFormats;
  }

  /**
   * @param columnName exact column header to look for
   * @return index of the first column with that header, or -1 if not present
   */
  public int findColumnIndex(final String columnName) {
    for (int i = 0; i < _colHeaders.length; i++) {
      if (_colHeaders[i].equals(columnName)) return i;
    }
    return -1;
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/VariableImportances.java
|
package hex.genmodel.attributes;
import com.google.gson.JsonObject;
import hex.genmodel.attributes.parameters.KeyValue;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Comparator;
/**
 * Represents model's variables and their relative importances in the model.
 * The structure is model-independent.
 */
public class VariableImportances implements Serializable {

  // Index is the shared key to both. A record under index {i} in _variables is the name of the
  // variable whose relative importance is _importances[i].
  public final String[] _variables;
  public final double[] _importances;

  public VariableImportances(String[] variableNames, double[] relativeImportances) {
    _variables = variableNames;
    _importances = relativeImportances;
  }

  /**
   * Reads the "output.variable_importances" table from the model JSON.
   *
   * @param modelJson full JSON representation of a model
   * @return extracted importances, or null when the table or its expected
   *         "Variable"/"Relative Importance" columns are absent
   */
  protected static VariableImportances extractFromJson(final JsonObject modelJson) {
    final Table table = ModelJsonReader.readTable(modelJson, "output.variable_importances");
    if (table == null) return null;
    final double[] relativeVarimps = new double[table.rows()];
    final String[] varNames = new String[table.rows()];
    final int varImportanceCol = table.findColumnIndex("Relative Importance");
    final int varNameCol = table.findColumnIndex("Variable");
    if (varImportanceCol == -1) return null;
    if (varNameCol == -1) return null;
    for (int i = 0; i < table.rows(); i++) {
      relativeVarimps[i] = (double) table.getCell(varImportanceCol, i);
      varNames[i] = (String) table.getCell(varNameCol, i);
    }
    return new VariableImportances(varNames, relativeVarimps);
  }

  /**
   * @param n how many variables is in the output. If n >= number of variables or n <= 0 then all variables are returned.
   * @return descending sorted array of String -> double.
   * Where String is variable and double is relative importance of the variable.
   */
  public KeyValue[] topN(int n) {
    if (n <= 0 || n > _importances.length) {
      n = _importances.length;
    }
    final KeyValue[] sortedImportances = new KeyValue[_importances.length];
    for (int i = 0; i < _importances.length; i++) {
      sortedImportances[i] = new KeyValue(_variables[i], _importances[i]);
    }
    // BUGFIX: the original comparator returned only -1 or 0 and was therefore not
    // antisymmetric, violating the Comparator contract (Arrays.sort may throw
    // "Comparison method violates its general contract!"). Double.compare gives a
    // total descending order.
    Arrays.sort(sortedImportances, new Comparator<KeyValue>() {
      @Override
      public int compare(KeyValue o1, KeyValue o2) {
        return Double.compare(o2.getValue(), o1.getValue());
      }
    });
    return Arrays.copyOfRange(sortedImportances, 0, n);
  }

  /** @return count of variables with a non-zero relative importance. */
  public int numberOfUsedVariables() {
    int numberOfUsedVariables = 0;
    for (double importance : _importances) {
      if (importance != 0) {
        numberOfUsedVariables++;
      }
    }
    return numberOfUsedVariables;
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/metrics/MojoModelMetrics.java
|
package hex.genmodel.attributes.metrics;
import java.io.Serializable;
/**
 * Common metrics shared by all MOJO model categories. Public fields are populated
 * reflectively from the MOJO's modelDetails.json (field names match JSON keys after the
 * leading underscore is stripped); fields missing from the JSON keep their default values.
 */
public class MojoModelMetrics implements Serializable {
  public long _frame_checksum;   // checksum of the scored frame - TODO confirm against the serializer
  public String _description;
  public String _model_category; // e.g. "Binomial", "Regression"
  public long _scoring_time;
  public String _custom_metric_name;
  public double _custom_metric_value;
  public double _r2;             // coefficient of determination (R^2)
  public double _mae;            // mean absolute error
  public double _MSE;            // mean squared error
  public double _RMSE;           // root mean squared error
  public long _nobs;             // number of observations
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/metrics/MojoModelMetricsAnomaly.java
|
package hex.genmodel.attributes.metrics;
/**
 * Anomaly detection metrics. Fields are populated reflectively from the MOJO's
 * modelDetails.json (see ModelJsonReader.fillObject).
 */
public class MojoModelMetricsAnomaly extends MojoModelMetrics {
  public double _mean_score;            // mean anomaly score over the scored frame
  public double _mean_normalized_score; // mean of the normalized anomaly scores
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/metrics/MojoModelMetricsBinomial.java
|
package hex.genmodel.attributes.metrics;
import hex.genmodel.attributes.SerializedName;
import hex.genmodel.attributes.Table;
/**
 * Metrics for binomial (two-class) classification models. Populated reflectively from the
 * MOJO's modelDetails.json; {@code @SerializedName} overrides the JSON key where it differs
 * from the underscore-stripped field name.
 */
public class MojoModelMetricsBinomial extends MojoModelMetricsSupervised {
  @SerializedName("AUC")
  public double _auc;                    // area under the ROC curve
  public double _pr_auc;                 // area under the precision-recall curve
  @SerializedName("Gini")
  public double _gini;                   // Gini coefficient
  public double _mean_per_class_error;
  public double _logloss;
  public Table _gains_lift_table;
  public Table _thresholds_and_metric_scores;
  public Table _max_criteria_and_metric_scores;
  // Confusion matrix; read from the nested "table" element inside the JSON "cm" object.
  @SerializedName(value = "cm", insideElementPath = "table")
  public Table _confusion_matrix;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/metrics/MojoModelMetricsBinomialGLM.java
|
package hex.genmodel.attributes.metrics;
import hex.genmodel.attributes.SerializedName;
/**
 * GLM-specific additions to binomial metrics: deviance statistics, degrees of freedom,
 * AIC and log-likelihood. Populated reflectively from the MOJO's modelDetails.json.
 */
public class MojoModelMetricsBinomialGLM extends MojoModelMetricsBinomial {
  @SerializedName("null_degrees_of_freedom")
  public long _nullDegreesOfFreedom;
  @SerializedName("residual_degrees_of_freedom")
  public long _residualDegreesOfFreedom;
  @SerializedName("residual_deviance")
  public double _resDev;
  @SerializedName("null_deviance")
  public double _nullDev;
  public double _AIC;            // Akaike information criterion
  public double _loglikelihood;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/metrics/MojoModelMetricsBinomialUplift.java
|
package hex.genmodel.attributes.metrics;
import hex.genmodel.attributes.SerializedName;
import hex.genmodel.attributes.Table;
/**
 * Metrics for binomial uplift models. Populated reflectively from the MOJO's
 * modelDetails.json; {@code @SerializedName} overrides the JSON key where needed.
 */
public class MojoModelMetricsBinomialUplift extends MojoModelMetricsSupervised {
  @SerializedName("AUUC")
  public double _auuc;            // area under the uplift curve
  public double _normalized_auuc;
  @SerializedName("Qini")
  public double _qini;            // Qini coefficient
  public double _ate;             // presumably average treatment effect - TODO confirm
  public double _att;             // presumably ATE on the treated - TODO confirm
  public double _atc;             // presumably ATE on the control - TODO confirm
  public Table _thresholds_and_metric_scores;
  public Table _auuc_table;
  public Table _aecu_table;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/metrics/MojoModelMetricsMultinomial.java
|
package hex.genmodel.attributes.metrics;
import hex.genmodel.attributes.SerializedName;
import hex.genmodel.attributes.Table;
/**
 * Model metrics for multinomial classification models read from a MOJO.
 */
public class MojoModelMetricsMultinomial extends MojoModelMetricsSupervised {
// Confusion matrix; stored under "cm", nested inside its "table" element.
@SerializedName(value = "cm", insideElementPath = "table")
public Table _confusion_matrix;
@SerializedName("hit_ratio_table")
public Table _hit_ratios;
@SerializedName("multinomial_auc_table")
public Table _multinomial_auc;
@SerializedName("multinomial_aucpr_table")
public Table _multinomial_aucpr;
public double _logloss;
public double _mean_per_class_error;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/metrics/MojoModelMetricsMultinomialGLM.java
|
package hex.genmodel.attributes.metrics;
import hex.genmodel.attributes.SerializedName;
/**
 * Metrics for multinomial GLM models: adds GLM-specific goodness-of-fit
 * statistics (deviances, degrees of freedom, AIC, log-likelihood) to the
 * multinomial metrics.
 */
public class MojoModelMetricsMultinomialGLM extends MojoModelMetricsMultinomial {
@SerializedName("null_degrees_of_freedom")
public long _nullDegreesOfFreedom;
@SerializedName("residual_degrees_of_freedom")
public long _residualDegreesOfFreedom;
@SerializedName("residual_deviance")
public double _resDev;
@SerializedName("null_deviance")
public double _nullDev;
public double _AIC;
public double _loglikelihood;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/metrics/MojoModelMetricsOrdinal.java
|
package hex.genmodel.attributes.metrics;
import hex.genmodel.attributes.SerializedName;
import hex.genmodel.attributes.Table;
/**
 * Model metrics for ordinal classification models read from a MOJO.
 */
public class MojoModelMetricsOrdinal extends MojoModelMetricsSupervised {
public float[] _hit_ratios;
// Confusion matrix; stored under "cm", nested inside its "table" element.
@SerializedName(value = "cm", insideElementPath = "table")
public Table _cm;
public Table _hit_ratio_table;
public double _logloss;
public double _mean_per_class_error;
// Domain (class labels) of the response column.
public String[] _domain;
// NOTE(review): shadows MojoModelMetricsSupervised._sigma - confirm this is intentional.
public double _sigma;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/metrics/MojoModelMetricsOrdinalGLM.java
|
package hex.genmodel.attributes.metrics;
import hex.genmodel.attributes.SerializedName;
/**
 * Metrics for ordinal GLM models: adds GLM-specific goodness-of-fit
 * statistics (deviances, degrees of freedom, AIC, log-likelihood) to the
 * ordinal metrics.
 */
public class MojoModelMetricsOrdinalGLM extends MojoModelMetricsOrdinal {
@SerializedName("null_degrees_of_freedom")
public long _nullDegreesOfFreedom;
@SerializedName("residual_degrees_of_freedom")
public long _residualDegreesOfFreedom;
@SerializedName("residual_deviance")
public double _resDev;
@SerializedName("null_deviance")
public double _nullDev;
public double _AIC;
public double _loglikelihood;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/metrics/MojoModelMetricsRegression.java
|
package hex.genmodel.attributes.metrics;
import hex.genmodel.attributes.SerializedName;
/**
 * Model metrics for regression models read from a MOJO.
 */
public class MojoModelMetricsRegression extends MojoModelMetricsSupervised {
public double _mean_residual_deviance;
// Root mean squared logarithmic error; serialized as "rmsle".
@SerializedName("rmsle")
public double _root_mean_squared_log_error;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/metrics/MojoModelMetricsRegressionCoxPH.java
|
package hex.genmodel.attributes.metrics;
/**
 * Metrics for Cox Proportional Hazards models: concordance statistics on
 * top of the common regression metrics.
 */
public class MojoModelMetricsRegressionCoxPH extends MojoModelMetricsRegression {
// Concordance index (fraction of concordant pairs, with ties accounted for).
public double _concordance;
public long _concordant;
public long _discordant;
// Number of pairs tied on the response.
public long _tied_y;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/metrics/MojoModelMetricsRegressionGLM.java
|
package hex.genmodel.attributes.metrics;
import hex.genmodel.attributes.SerializedName;
/**
 * Metrics for regression GLM models: adds GLM-specific goodness-of-fit
 * statistics (deviances, degrees of freedom, AIC, log-likelihood) to the
 * regression metrics.
 */
public class MojoModelMetricsRegressionGLM extends MojoModelMetricsRegression {
@SerializedName("null_degrees_of_freedom")
public long _nullDegreesOfFreedom;
@SerializedName("residual_degrees_of_freedom")
public long _residualDegreesOfFreedom;
@SerializedName("residual_deviance")
public double _resDev;
@SerializedName("null_deviance")
public double _nullDev;
public double _AIC;
public double _loglikelihood;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/metrics/MojoModelMetricsSupervised.java
|
package hex.genmodel.attributes.metrics;
/**
 * Common base for metrics of supervised models.
 */
public class MojoModelMetricsSupervised extends MojoModelMetrics {
// NOTE(review): presumably the standard deviation of the response - confirm against the writer side.
public double _sigma;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/parameters/ColumnSpecifier.java
|
package hex.genmodel.attributes.parameters;
import java.io.Serializable;
import java.util.Objects;
/**
 * Identifies a column by name together with the (optional) list of frames
 * the column is a member of. Immutable; the column name is never null.
 */
public class ColumnSpecifier implements Serializable {

  private final String columnName;
  private final String[] is_member_of_frames;

  public ColumnSpecifier(String columnName, String[] is_member_of_frames) {
    // Objects.requireNonNull returns its argument, so validation and
    // assignment collapse into a single statement.
    this.columnName = Objects.requireNonNull(columnName);
    this.is_member_of_frames = is_member_of_frames;
  }

  /** @return the column name; never null */
  public String getColumnName() {
    return columnName;
  }

  /** @return names of frames this column belongs to; may be null */
  public String[] getIsMemberOfFrames() {
    return is_member_of_frames;
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/parameters/FeatureContribution.java
|
package hex.genmodel.attributes.parameters;
import java.io.Serializable;
/**
 * Immutable pair of a feature (column) name and its Shapley contribution value.
 */
public class FeatureContribution implements Serializable {

  public final String columnName;
  public final double shapleyContribution;

  public FeatureContribution(String columnName, double shapleyContribution) {
    this.columnName = columnName;
    this.shapleyContribution = shapleyContribution;
  }

  /** @return a human-readable rendering of the name/contribution pair */
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("{ColumnName: ");
    sb.append(columnName)
      .append(", ShapleyContribution: ")
      .append(shapleyContribution)
      .append("}");
    return sb.toString();
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/parameters/KeyValue.java
|
package hex.genmodel.attributes.parameters;
import java.io.Serializable;
/**
 * Immutable association of a string key with a double value.
 */
public class KeyValue implements Serializable {

  public final String key;
  public final double value;

  public KeyValue(String key, double value) {
    this.key = key;
    this.value = value;
  }

  /** @return the key of this pair */
  public String getKey() {
    return key;
  }

  /** @return the numeric value of this pair */
  public double getValue() {
    return value;
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("{Key: ");
    sb.append(key).append(", Value: ").append(value).append("}");
    return sb.toString();
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/parameters/ModelParameter.java
|
package hex.genmodel.attributes.parameters;
import java.io.Serializable;
/**
 * Describes a single model (hyper-)parameter: its identity, typing, help
 * text, and the default/actual/input values recorded with the model.
 * Plain mutable data holder with read accessors.
 */
public class ModelParameter implements Serializable {
public String name;
// Human-readable label of the parameter.
public String label;
// Help/description text for the parameter.
public String help;
public boolean required;
// Type of the parameter as a string; the runtime type of the value fields depends on it.
public String type;
public Object default_value;
public Object actual_value;
public Object input_value;
public String level;
// Allowed values for enum-like parameters.
public String[] values;
public String[] is_member_of_frames;
// Names of parameters that cannot be used together with this one.
public String[] is_mutually_exclusive_with;
// Whether this parameter can be varied in a grid search.
public boolean gridable;
public String getName() {
return name;
}
public String getLabel() {
return label;
}
public String getHelp() {
return help;
}
public boolean isRequired() {
return required;
}
public String getType() {
return type;
}
public Object getDefaultValue() {
return default_value;
}
public Object getActualValue() {
return actual_value;
}
public Object getInputValue() {
return input_value;
}
public String getLevel() {
return level;
}
public String[] getValues() {
return values;
}
public String[] getIsMemberOfFrames() {
return is_member_of_frames;
}
public String[] getIsMutuallyExclusiveWith() {
return is_mutually_exclusive_with;
}
public boolean isGridable() {
return gridable;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/parameters/ParameterKey.java
|
package hex.genmodel.attributes.parameters;
import java.io.Serializable;
import java.util.Objects;
/**
 * Immutable reference to an H2O object (model, frame, or generic key),
 * identified by name, kind, and URL. All components are non-null.
 */
public class ParameterKey implements Serializable {

  /** Kind of object the key refers to. */
  public enum Type implements Serializable {
    MODEL, FRAME, GENERIC
  }

  private final String name;
  private final ParameterKey.Type type;
  private final String URL;

  public ParameterKey(String name, Type type, String URL) {
    // requireNonNull returns its argument, letting validation and assignment share a line.
    this.name = Objects.requireNonNull(name);
    this.type = Objects.requireNonNull(type);
    this.URL = Objects.requireNonNull(URL);
  }

  /** @return the key's name; never null */
  public String getName() {
    return name;
  }

  /** @return the kind of referenced object; never null */
  public Type getType() {
    return type;
  }

  /** @return the key's URL; never null */
  public String getURL() {
    return URL;
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/parameters/StringPair.java
|
package hex.genmodel.attributes.parameters;
import java.io.Serializable;
import java.util.Objects;
/**
 * Immutable, serializable ordered pair of strings.
 *
 * The constructor accepts null components and {@link Objects#hash} in
 * {@code hashCode} tolerates them, so {@code equals} must be null-safe too;
 * the original implementation called {@code _a.equals(...)} directly and
 * threw {@link NullPointerException} when a component was null.
 */
public class StringPair implements Serializable {

  public final String _a;
  public final String _b;

  public StringPair(String a, String b) {
    _a = a;
    _b = b;
  }

  @Override
  public String toString() {
    return "(" + _a + ":" + _b + ")";
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    StringPair that = (StringPair) o;
    // Objects.equals is null-safe, keeping equals consistent with hashCode
    // for pairs holding null components.
    return Objects.equals(_a, that._a) && Objects.equals(_b, that._b);
  }

  @Override
  public int hashCode() {
    return Objects.hash(_a, _b);
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/attributes/parameters/VariableImportancesHolder.java
|
package hex.genmodel.attributes.parameters;
import hex.genmodel.attributes.VariableImportances;
/**
 * Implemented by objects that can expose variable importances.
 */
public interface VariableImportancesHolder {
/** @return the variable importances held by this object */
VariableImportances getVariableImportances();
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/descriptor/ModelDescriptor.java
|
package hex.genmodel.descriptor;
import hex.ModelCategory;
/**
 * Internal structure providing basic information about a model. Used primarily, but not exclusively, by the MOJO pipeline.
 * Every MOJO is guaranteed to provide the information defined by this interface.
 */
public interface ModelDescriptor {
/**
 * Domains of categorical features. For each feature, there is a record. If the feature is not categorical, the value is null.
 *
 * @return An array of {@link String} representing the complete domain of each feature. Null if the feature is not categorical.
 */
String[][] scoringDomains();
/**
 * E.g. "3.24.0.1"
 *
 * @return A {@link String} representing version of H2O Open Source Machine Learning platform project.
 */
String projectVersion();
/**
 * @return A string with human-readable shortcut of the algorithm enveloped by this MOJO. Never null.
 */
String algoName();
/**
 * @return A string with human-readable, full textual representation of the algorithm. Never null.
 */
String algoFullName();
/**
 * @return A {@link String} with the name of the offset column used. Null if there was no offset column used during training.
 */
String offsetColumn();
/**
 * @return A {@link String} with the name of the weights column used. Null if there were no weights applied to the dataset.
 */
String weightsColumn();
/**
 * @return A {@link String} with the name of the fold column used. Null if there was no folding done using a fold column as a key.
 */
String foldColumn();
/**
 * @return A {@link String} with the name of the treatment column used. Null if there was no treatment used during training.
 */
String treatmentColumn();
/**
 * Model's category.
 *
 * @return One of {@link ModelCategory} values. Never null.
 */
ModelCategory getModelCategory();
/**
 * @return True for supervised learning models, false for unsupervised.
 */
boolean isSupervised();
/**
 * @return An integer representing a total count of features used for training of the model.
 */
int nfeatures();
/**
 * @return An array {@link String} representing the names of the features used for model training.
 */
String[] features();
/**
 * @return Domain cardinality of the response column, Only meaningful if the model has a categorical response and the model is supervised.
 */
int nclasses();
/**
 * @return An array of {@link String} representing the column names in the model. The very last one is the response column name,
 * if the model has a response column.
 */
String[] columnNames();
/**
 * @return True if class balancing was enabled during training.
 */
boolean balanceClasses();
/**
 * Default threshold for assigning class labels to the target class. Applicable to binomial models only.
 *
 * @return A double primitive type with the default threshold value
 */
double defaultThreshold();
/**
 * @return Prior class distribution.
 */
double[] priorClassDist();
/**
 * @return Class distribution used by the model.
 */
double[] modelClassDist();
/**
 * @return A {@link String} with the unique id of this model.
 */
String uuid();
/**
 * @return A {@link String} timestamp of the model; may be null if not recorded.
 */
String timestamp();
/**
 * @return Names of the original (pre-transformation) columns.
 */
String[] getOrigNames();
/**
 * @return Domains of the original (pre-transformation) columns.
 */
String[][] getOrigDomains();
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/descriptor/ModelDescriptorBuilder.java
|
package hex.genmodel.descriptor;
import hex.ModelCategory;
import hex.genmodel.GenModel;
import hex.genmodel.MojoModel;
import hex.genmodel.attributes.ModelAttributes;
import hex.genmodel.attributes.parameters.ColumnSpecifier;
import hex.genmodel.utils.ArrayUtils;
import java.io.Serializable;
import java.util.Arrays;
/**
 * Factory for {@link ModelDescriptor} instances built from either a MOJO or a POJO model.
 *
 * Fixes over the previous revision: removed a stray double semicolon in the
 * treatment-column lookup and merged the two separate {@code modelAttributes != null}
 * checks in {@link MojoModelDescriptor}'s constructor into one.
 */
public class ModelDescriptorBuilder {

  /**
   * Builds an instance of {@link ModelDescriptor}, using information provided by the serialized model and
   * which corresponding implementations of {@link hex.genmodel.ModelMojoReader} are able to provide.
   *
   * @param mojoModel A MojoModel to extract the model description from
   * @param fullAlgorithmName A full name of the algorithm
   * @param modelAttributes Optional model attributes (may be null)
   * @return A new instance of {@link ModelDescriptor}
   */
  public static ModelDescriptor makeDescriptor(final MojoModel mojoModel, final String fullAlgorithmName,
                                               final ModelAttributes modelAttributes) {
    return new MojoModelDescriptor(mojoModel, fullAlgorithmName, modelAttributes);
  }

  /**
   * Builds an instance of {@link ModelDescriptor} for a POJO model.
   *
   * @param pojoModel the POJO model to describe
   * @return A new instance of {@link ModelDescriptor}
   */
  public static ModelDescriptor makeDescriptor(final GenModel pojoModel) {
    return new PojoModelDescriptor(pojoModel);
  }

  /**
   * {@link ModelDescriptor} backed by a {@link MojoModel} and, optionally, its model attributes.
   */
  public static class MojoModelDescriptor implements ModelDescriptor, Serializable {
    // Mandatory
    private final String _h2oVersion;
    private final hex.ModelCategory _category;
    private final String _uuid;
    private final boolean _supervised;
    private final int _nfeatures;
    private final int _nclasses;
    private final boolean _balanceClasses;
    private final double _defaultThreshold;
    private final double[] _priorClassDistrib;
    private final double[] _modelClassDistrib;
    private final String _offsetColumn;
    private final String _foldColumn;
    private final String _weightsColumn;
    private final String _treatmentColumn;
    private final String[][] _domains;
    private final String[][] _origDomains;
    private final String[] _names;
    private final String[] _origNames;
    private final String _algoName;
    private final String _fullAlgoName;

    private MojoModelDescriptor(final MojoModel mojoModel, final String fullAlgorithmName,
                                final ModelAttributes modelAttributes) {
      _category = mojoModel._category;
      _uuid = mojoModel._uuid;
      _supervised = mojoModel.isSupervised();
      _nfeatures = mojoModel.nfeatures();
      _nclasses = mojoModel._nclasses;
      _balanceClasses = mojoModel._balanceClasses;
      _defaultThreshold = mojoModel._defaultThreshold;
      _priorClassDistrib = mojoModel._priorClassDistrib;
      _modelClassDistrib = mojoModel._modelClassDistrib;
      _h2oVersion = mojoModel._h2oVersion;
      _offsetColumn = mojoModel._offsetColumn;
      _foldColumn = mojoModel._foldColumn;
      _domains = mojoModel._domains;
      _origDomains = mojoModel.getOrigDomainValues();
      _names = mojoModel._names;
      _origNames = mojoModel.getOrigNames();
      _algoName = mojoModel._algoName;
      _fullAlgoName = fullAlgorithmName;
      // Weights and treatment columns are only recorded in the optional model attributes.
      if (modelAttributes != null) {
        ColumnSpecifier weightsColSpec = (ColumnSpecifier) modelAttributes.getParameterValueByName("weights_column");
        _weightsColumn = weightsColSpec != null ? weightsColSpec.getColumnName() : null;
        _treatmentColumn = (String) modelAttributes.getParameterValueByName("treatment_column");
      } else {
        _weightsColumn = null;
        _treatmentColumn = null;
      }
    }

    @Override
    public String[][] scoringDomains() {
      return _domains;
    }

    @Override
    public String projectVersion() {
      return _h2oVersion;
    }

    @Override
    public String algoName() {
      return _algoName;
    }

    @Override
    public String algoFullName() {
      return _fullAlgoName;
    }

    @Override
    public String offsetColumn() {
      return _offsetColumn;
    }

    @Override
    public String weightsColumn() {
      return _weightsColumn;
    }

    @Override
    public String foldColumn() {
      return _foldColumn;
    }

    @Override
    public String treatmentColumn() {
      return _treatmentColumn;
    }

    @Override
    public ModelCategory getModelCategory() {
      return _category;
    }

    @Override
    public boolean isSupervised() {
      return _supervised;
    }

    @Override
    public int nfeatures() {
      return _nfeatures;
    }

    @Override
    public String[] features() {
      // Feature names are the leading entries of columnNames() (response column, if any, is last).
      return Arrays.copyOf(columnNames(), nfeatures());
    }

    @Override
    public int nclasses() {
      return _nclasses;
    }

    @Override
    public String[] columnNames() {
      return _names;
    }

    @Override
    public boolean balanceClasses() {
      return _balanceClasses;
    }

    @Override
    public double defaultThreshold() {
      return _defaultThreshold;
    }

    @Override
    public double[] priorClassDist() {
      return _priorClassDistrib;
    }

    @Override
    public double[] modelClassDist() {
      return _modelClassDistrib;
    }

    @Override
    public String uuid() {
      return _uuid;
    }

    @Override
    public String timestamp() {
      // MOJOs do not carry a timestamp here.
      return null;
    }

    @Override
    public String[] getOrigNames() {
      return _origNames;
    }

    @Override
    public String[][] getOrigDomains() {
      return _origDomains;
    }
  }

  /**
   * {@link ModelDescriptor} backed by a generated POJO model. Several attributes
   * (uuid, timestamp, class distributions, ...) are unavailable for POJOs and
   * are reported as null/placeholder values.
   */
  public static class PojoModelDescriptor implements ModelDescriptor, Serializable {
    // Mandatory
    private final hex.ModelCategory _category;
    private final boolean _supervised;
    private final int _nfeatures;
    private final int _nclasses;
    private final String _offsetColumn;
    private final String[][] _domains;
    private final String[][] _origDomains;
    private final String[] _names;
    private final String[] _origNames;

    private PojoModelDescriptor(final GenModel mojoModel) {
      _category = mojoModel.getModelCategory();
      _supervised = mojoModel.isSupervised();
      _nfeatures = mojoModel.nfeatures();
      _nclasses = mojoModel.getNumResponseClasses();
      _offsetColumn = mojoModel.getOffsetName();
      _domains = mojoModel.getDomainValues();
      _origDomains = mojoModel.getOrigDomainValues();
      String[] names = mojoModel.getNames();
      // Append the response column when it is missing from getNames() so that
      // columnNames() keeps the convention "response column is last".
      if (names.length == _domains.length - 1 && mojoModel.isSupervised() &&
          !names[names.length - 1].equals(mojoModel._responseColumn)) {
        names = ArrayUtils.append(names, mojoModel._responseColumn);
      }
      _names = names;
      _origNames = mojoModel.getOrigNames();
    }

    @Override
    public String[][] scoringDomains() {
      return _domains;
    }

    @Override
    public String projectVersion() {
      return "unknown";
    }

    @Override
    public String algoName() {
      return "pojo";
    }

    @Override
    public String algoFullName() {
      return "POJO Scorer";
    }

    @Override
    public String offsetColumn() {
      return _offsetColumn;
    }

    @Override
    public String weightsColumn() {
      return null;
    }

    @Override
    public String foldColumn() {
      return null;
    }

    @Override
    public String treatmentColumn() {
      return null;
    }

    @Override
    public ModelCategory getModelCategory() {
      return _category;
    }

    @Override
    public boolean isSupervised() {
      return _supervised;
    }

    @Override
    public int nfeatures() {
      return _nfeatures;
    }

    @Override
    public String[] features() {
      return Arrays.copyOf(columnNames(), nfeatures());
    }

    @Override
    public int nclasses() {
      return _nclasses;
    }

    @Override
    public String[] columnNames() {
      return _names;
    }

    @Override
    public boolean balanceClasses() {
      return false;
    }

    @Override
    public double defaultThreshold() {
      return Double.NaN;
    }

    @Override
    public double[] priorClassDist() {
      return null;
    }

    @Override
    public double[] modelClassDist() {
      return null;
    }

    @Override
    public String uuid() {
      return null;
    }

    @Override
    public String timestamp() {
      return null;
    }

    @Override
    public String[] getOrigNames() {
      return _origNames;
    }

    @Override
    public String[][] getOrigDomains() {
      return _origDomains;
    }
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/BinaryColumnMapper.java
|
package hex.genmodel.easy;
import hex.genmodel.GenModel;
import hex.genmodel.utils.MathUtils;
import java.util.HashMap;
import java.util.Map;
/**
 * Computes, for a binary-encoded model, the offset of each original column
 * in the raw data array.
 */
public class BinaryColumnMapper {

  private final GenModel _m;

  public BinaryColumnMapper(GenModel m) {
    _m = m;
  }

  /**
   * Builds a mapping from original column name to the position of the
   * column's first slot in the raw data array. Non-categorical columns are
   * laid out first (one slot each), followed by categorical columns (one
   * slot per binary digit, with one extra code reserved for NAs).
   *
   * @return column name to raw-data offset
   */
  public Map<String, Integer> create() {
    final String[] names = _m.getOrigNames();
    final String[][] domains = _m.getOrigDomainValues();
    final int nCols = _m.getOrigNumCols();
    final Map<String, Integer> mapping = new HashMap<>(names.length);
    int offset = 0;
    // First pass: non-categorical columns occupy a single slot each.
    for (int col = 0; col < nCols; col++) {
      if (domains[col] == null) {
        mapping.put(names[col], offset++);
      }
    }
    // Second pass: categorical columns; each needs enough binary digits to
    // encode all its levels plus the value reserved for NAs.
    for (int col = 0; col < nCols; col++) {
      final String[] domain = domains[col];
      if (domain != null) {
        mapping.put(names[col], offset);
        offset += 1 + MathUtils.log2(domain.length - 1 + 1/* for NAs */);
      }
    }
    return mapping;
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/BinaryDomainMapConstructor.java
|
package hex.genmodel.easy;
import hex.genmodel.GenModel;
import java.util.HashMap;
import java.util.Map;
/**
 * Builds the domain map for models using binary categorical encoding:
 * one {@link BinaryEncoder} per categorical column.
 */
public class BinaryDomainMapConstructor extends DomainMapConstructor {

  public BinaryDomainMapConstructor(GenModel m, Map<String, Integer> columnNameToIndex) {
    super(m, columnNameToIndex);
  }

  /**
   * @return map from a categorical column's raw-data offset to the encoder
   *         that writes its binary representation
   */
  @Override
  public Map<Integer, CategoricalEncoder> create() {
    final String[] names = _m.getOrigNames();
    final String[][] domains = _m.getOrigDomainValues();
    final Map<Integer, CategoricalEncoder> encoders = new HashMap<>();
    for (int col = 0; col < _m.getOrigNumCols(); col++) {
      final String[] domain = domains[col];
      if (domain == null)
        continue; // non-categorical column - nothing to encode
      final int offset = _columnNameToIndex.get(names[col]);
      encoders.put(offset, new BinaryEncoder(names[col], offset, domain));
    }
    return encoders;
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/BinaryEncoder.java
|
package hex.genmodel.easy;
import hex.genmodel.utils.MathUtils;
import java.util.HashMap;
import java.util.Map;
/**
 * Encodes a categorical column as a sequence of binary digits in the raw
 * data array. The code 0 is reserved for NA; level {@code j} is written as
 * the binary digits of {@code j + 1}, least significant digit first.
 */
public class BinaryEncoder implements CategoricalEncoder {

  private final String columnName;
  private final int targetIndex;
  private final Map<String, Integer> domainMap;
  private final int binaryCategorySizes;

  BinaryEncoder(String columnName, int targetIndex, String[] domainValues) {
    this.columnName = columnName;
    this.targetIndex = targetIndex;
    // Digits needed to represent every level plus the code reserved for NAs.
    this.binaryCategorySizes = 1 + MathUtils.log2(domainValues.length - 1 + 1/* for NAs */);
    domainMap = new HashMap<>(domainValues.length);
    int level = 0;
    for (String value : domainValues) {
      domainMap.put(value, level++);
    }
  }

  @Override
  public boolean encodeCatValue(String levelName, double[] rawData) {
    final Integer levelIndex = domainMap.get(levelName);
    if (levelIndex == null)
      return false; // unseen level: caller decides how to handle
    makeBinary(levelIndex, rawData);
    return true;
  }

  @Override
  public void encodeNA(double[] rawData) {
    makeBinary(-1, rawData); // -1 + 1 == 0, the reserved NA code
  }

  /** Writes the binary digits of {@code index + 1} into rawData, LSB first. */
  private void makeBinary(int index, double[] rawData) {
    long remaining = index + 1; //0 is used for NA
    for (int digit = 0; digit < binaryCategorySizes; digit++) {
      rawData[targetIndex + digit] = remaining & 1;
      remaining >>>= 1;
    }
  }

  @Override
  public String toString() {
    return "BinaryEncoder{" +
        "columnName='" + columnName + '\'' +
        ", targetIndex=" + targetIndex +
        ", domainMap=" + domainMap +
        '}';
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/CategoricalEncoder.java
|
package hex.genmodel.easy;
import java.io.Serializable;
public interface CategoricalEncoder extends Serializable {
/**
 * Encodes a given categorical level into a raw array onto the right position.
 * @param level categorical level
 * @param rawData raw input to score0
 * @return true if provided categorical level was valid and was properly encoded, false if nothing was written to raw data
 */
boolean encodeCatValue(String level, double[] rawData);
/**
 * Encode NA (missing level) into raw data.
 * Implementations choose the representation of a missing level (e.g. a reserved code).
 * @param rawData target raw data array
 */
void encodeNA(double[] rawData);
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/DomainMapConstructor.java
|
package hex.genmodel.easy;
import hex.genmodel.GenModel;
import java.util.Map;
/**
* Create map from input variable domain information.
*/
abstract class DomainMapConstructor {
// Model whose categorical columns are being mapped.
protected final GenModel _m;
// Maps a column name to the column's target offset in the raw data array.
protected final Map<String, Integer> _columnNameToIndex;
DomainMapConstructor(GenModel m, Map<String, Integer> columnNameToIndex) {
_m = m;
_columnNameToIndex = columnNameToIndex;
}
/**
 * Builds a map from a column's raw-data offset to the {@link CategoricalEncoder}
 * responsible for writing that column's encoded value.
 */
abstract protected Map<Integer, CategoricalEncoder> create();
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/EasyPredictModelWrapper.java
|
package hex.genmodel.easy;
import hex.ModelCategory;
import hex.genmodel.*;
import hex.genmodel.algos.deeplearning.DeeplearningMojoModel;
import hex.genmodel.algos.drf.DrfMojoModel;
import hex.genmodel.algos.glrm.GlrmMojoModel;
import hex.genmodel.algos.targetencoder.TargetEncoderMojoModel;
import hex.genmodel.algos.tree.SharedTreeMojoModel;
import hex.genmodel.algos.tree.TreeBackedMojoModel;
import hex.genmodel.algos.word2vec.WordEmbeddingModel;
import hex.genmodel.attributes.ModelAttributes;
import hex.genmodel.attributes.VariableImportances;
import hex.genmodel.attributes.parameters.FeatureContribution;
import hex.genmodel.attributes.parameters.KeyValue;
import hex.genmodel.attributes.parameters.VariableImportancesHolder;
import hex.genmodel.easy.error.VoidErrorConsumer;
import hex.genmodel.easy.exception.PredictException;
import hex.genmodel.easy.prediction.*;
import java.io.IOException;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import static hex.genmodel.utils.ArrayUtils.nanArray;
/**
* An easy-to-use prediction wrapper for generated models. Instantiate as follows. The following two are equivalent.
*
* EasyPredictModelWrapper model = new EasyPredictModelWrapper(rawModel);
*
* EasyPredictModelWrapper model = new EasyPredictModelWrapper(
* new EasyPredictModelWrapper.Config()
* .setModel(rawModel)
* .setConvertUnknownCategoricalLevelsToNa(false));
*
* Note that for any given model, you must use the exact one correct predict method below based on the
* model category.
*
* By default, unknown categorical levels result in a thrown PredictUnknownCategoricalLevelException.
* The API was designed with this default to make the simplest possible setup inform the user if there are concerns
* with the data quality.
* An alternate behavior is to automatically convert unknown categorical levels to N/A. To do this, use
* setConvertUnknownCategoricalLevelsToNa(true) instead.
*
* Detection of unknown categoricals may be observed by registering an implementation of {@link ErrorConsumer}
* in the process of {@link Config} creation.
*
* Advanced scoring features are disabled by default for performance reasons. Configuration flags
* allow the user to output also
* - leaf node assignment,
* - GLRM reconstructed matrix,
* - staged probabilities,
* - prediction contributions (SHAP values).
*
* Deprecation note: Total number of unknown categorical variables is newly accessible by registering {@link hex.genmodel.easy.error.CountingErrorConsumer}.
*
*
* <p></p>
* See the top-of-tree master version of this file <a href="https://github.com/h2oai/h2o-3/blob/master/h2o-genmodel/src/main/java/hex/genmodel/easy/EasyPredictModelWrapper.java" target="_blank">here on github</a>.
*/
public class EasyPredictModelWrapper implements Serializable {
// These private members are read-only after the constructor.
public final GenModel m;
private final RowToRawDataConverter rowDataConverter;
private final boolean useExtendedOutput;
private final boolean enableLeafAssignment;
private final boolean enableGLRMReconstruct; // if set true, will return the GLRM resconstructed value, A_hat=X*Y instead of just X
private final boolean enableStagedProbabilities; // if set true, staged probabilities from tree agos are returned
private final boolean enableContributions; // if set to true, will return prediction contributions (SHAP values) - for GBM & XGBoost
private final int glrmIterNumber; // allow user to set GLRM mojo iteration number in constructing x.
private final PredictContributions predictContributions;
public boolean getEnableLeafAssignment() { return enableLeafAssignment; }
public boolean getEnableGLRMReconstruct() { return enableGLRMReconstruct; }
public boolean getEnableStagedProbabilities() { return enableStagedProbabilities; }
public boolean getEnableContributions() { return enableContributions; }
/**
* Observer interface with methods corresponding to errors during the prediction.
*/
public static abstract class ErrorConsumer implements Serializable {
/**
* Observe transformation error for data from the predicted dataset.
*
* @param columnName Name of the column for which the error is raised
* @param value Original value that could not be transformed properly
* @param message Transformation error message
*/
public abstract void dataTransformError(String columnName, Object value, String message);
/**
* Previously unseen categorical level has been detected
*
* @param columnName Name of the column to which the categorical value belongs
* @param value Original value
* @param message Reason and/or actions taken
*/
public abstract void unseenCategorical(String columnName, Object value, String message);
}
/**
* Configuration builder for instantiating a Wrapper.
*/
public static class Config {
private GenModel model;
private boolean convertUnknownCategoricalLevelsToNa = false;
private boolean convertInvalidNumbersToNa = false;
private boolean useExtendedOutput = false;
private ErrorConsumer errorConsumer;
private boolean enableLeafAssignment = false; // default to false
private boolean enableGLRMReconstrut = false;
private boolean enableStagedProbabilities = false;
private boolean enableContributions = false;
private boolean useExternalEncoding = false;
private int glrmIterNumber = 100; // default set to 100
/**
* Specify model object to wrap.
*
* @param value model
* @return this config object
*/
public Config setModel(GenModel value) {
model = value;
return this;
}
/**
* @return model object being wrapped
*/
public GenModel getModel() { return model; }
/**
* Specify how to handle unknown categorical levels.
*
* @param value false: throw exception; true: convert to N/A
* @return this config object
*/
public Config setConvertUnknownCategoricalLevelsToNa(boolean value) {
convertUnknownCategoricalLevelsToNa = value;
return this;
}
/**
 * Enables or disables reporting of leaf node assignments during prediction.
 * Only valid for tree-backed models (GBM, DRF, Isolation Forest, XGBoost).
 *
 * @param val true to enable leaf assignment output
 * @return this config object
 * @throws IOException if no model was set yet, or the model is not tree-backed
 */
public Config setEnableLeafAssignment(boolean val) throws IOException {
if (val && (model==null))
throw new IOException("enableLeafAssignment cannot be set with null model. Call setModel() first.");
if (val && !(model instanceof TreeBackedMojoModel))
throw new IOException("enableLeafAssignment can be set to true only with TreeBackedMojoModel," +
" i.e. with GBM, DRF, Isolation forest or XGBoost.");
enableLeafAssignment = val;
return this;
}
/**
 * Enables or disables returning the GLRM reconstructed matrix (A_hat = X*Y)
 * instead of just X. Only valid for GLRM models.
 *
 * @param value true to enable GLRM reconstruction output
 * @return this config object
 * @throws IOException if no model was set yet, or the model is not a GLRM model
 */
public Config setEnableGLRMReconstrut(boolean value) throws IOException {
if (value && (model==null))
throw new IOException("Cannot set enableGLRMReconstruct for a null model. Call config.setModel() first.");
if (value && !(model instanceof GlrmMojoModel))
throw new IOException("enableGLRMReconstruct shall only be used with GlrmMojoModels.");
enableGLRMReconstrut = value;
return this;
}
/**
 * Sets the GLRM iteration number used when constructing X. Only valid for
 * GLRM models; must be positive.
 *
 * @param value iteration count (&gt; 0)
 * @return this config object
 * @throws IOException if no model was set yet, or the model is not a GLRM model
 * @throws IllegalArgumentException if the value is not positive
 */
public Config setGLRMIterNumber(int value) throws IOException {
if (model==null)
throw new IOException("Cannot set glrmIterNumber for a null model. Call config.setModel() first.");
if (!(model instanceof GlrmMojoModel))
throw new IOException("glrmIterNumber shall only be used with GlrmMojoModels.");
if (value <= 0)
throw new IllegalArgumentException("GLRMIterNumber must be positive.");
glrmIterNumber = value;
return this;
}
/**
 * Enable/disable staged (per-tree) probability output; only valid for SharedTree models.
 *
 * @param val true to report cumulative predictions after each tree
 * @return this config object (for chaining)
 * @throws IOException when no model is set yet, or the model is not a SharedTree model
 */
public Config setEnableStagedProbabilities(boolean val) throws IOException {
    if (val) {
        if (model == null)
            throw new IOException("enableStagedProbabilities cannot be set with null model. Call setModel() first.");
        if (!(model instanceof SharedTreeMojoModel))
            throw new IOException("enableStagedProbabilities can be set to true only with SharedTreeMojoModel," +
                    " i.e. with GBM or DRF.");
    }
    enableStagedProbabilities = val;
    return this;
}

/** @return whether GLRM reconstruction output is enabled */
public boolean getEnableGLRMReconstrut() {
    return enableGLRMReconstrut;
}

/**
 * Enable/disable SHAP contribution output; only valid for models that can build a
 * contributions predictor, and not yet supported for multinomial or DRF with
 * binomial_double_trees.
 *
 * @param val true to compute contributions alongside predictions
 * @return this config object (for chaining)
 * @throws IOException when the model is null, unsupported, or uses an unsupported configuration
 */
public Config setEnableContributions(boolean val) throws IOException {
    if (val) {
        if (model == null)
            throw new IOException("setEnableContributions cannot be set with null model. Call setModel() first.");
        if (!(model instanceof PredictContributionsFactory))
            throw new IOException("setEnableContributions can be set to true only with DRF, GBM, or XGBoost models.");
        if (ModelCategory.Multinomial.equals(model.getModelCategory()))
            throw new IOException("setEnableContributions is not yet supported for multinomial classification models.");
        if (model instanceof DrfMojoModel && ((DrfMojoModel) model).isBinomialDoubleTrees())
            throw new IOException("setEnableContributions is not yet supported for model with binomial_double_trees parameter set.");
    }
    enableContributions = val;
    return this;
}

/** @return whether SHAP contribution output is enabled */
public boolean getEnableContributions() {
    return enableContributions;
}
/**
 * Allows to switch on/off applying categorical encoding in EasyPredictModelWrapper.
 * In the current implementation only AUTO encoding is supported by the Wrapper; users are
 * required to set this flag to true if they want to use POJOs/MOJOs with encodings other than AUTO.
 *
 * This requirement will be removed in https://github.com/h2oai/h2o-3/issues/8707
 * @param val if true, user needs to provide already encoded input in the RowData structure
 * @return self
 */
public Config setUseExternalEncoding(boolean val) {
    this.useExternalEncoding = val;
    return this;
}

/** @return whether the caller supplies already-encoded categorical input */
public boolean getUseExternalEncoding() {
    return this.useExternalEncoding;
}

/**
 * @return Setting for unknown categorical levels handling
 */
public boolean getConvertUnknownCategoricalLevelsToNa() {
    return this.convertUnknownCategoricalLevelsToNa;
}

/** @return the number of GLRM scoring iterations (default 100) */
public int getGLRMIterNumber() {
    return this.glrmIterNumber;
}

/**
 * Specify the default action when a string value cannot be converted to a number.
 *
 * @param value if true, then an N/A value will be produced, if false an
 *              exception will be thrown.
 * @return this config object (for chaining)
 */
public Config setConvertInvalidNumbersToNa(boolean value) {
    this.convertInvalidNumbersToNa = value;
    return this;
}

/** @return whether invalid numeric strings become N/A instead of raising an exception */
public boolean getConvertInvalidNumbersToNa() {
    return this.convertInvalidNumbersToNa;
}
/**
 * Specify whether to include additional metadata in the prediction output.
 * This feature needs to be supported by a particular model and the type of metadata
 * is model specific.
 *
 * @param value if true, then the Prediction result will contain extended information
 *              about the prediction (this will be specific to a particular model).
 * @return this config object (for chaining)
 */
public Config setUseExtendedOutput(boolean value) {
    this.useExtendedOutput = value;
    return this;
}

/** @return whether extended prediction output is requested */
public boolean getUseExtendedOutput() {
    return this.useExtendedOutput;
}

/** @return whether leaf-node-assignment output is enabled */
public boolean getEnableLeafAssignment() {
    return this.enableLeafAssignment;
}

/** @return whether staged (per-tree) probability output is enabled */
public boolean getEnableStagedProbabilities() {
    return this.enableStagedProbabilities;
}

/**
 * @return An instance of ErrorConsumer used to build the {@link EasyPredictModelWrapper}. Null if there is no instance.
 */
public ErrorConsumer getErrorConsumer() {
    return this.errorConsumer;
}

/**
 * Specify an instance of {@link ErrorConsumer} the {@link EasyPredictModelWrapper} is going to call
 * whenever an error defined by the {@link ErrorConsumer} instance occurs.
 *
 * @param errorConsumer An instance of {@link ErrorConsumer}
 * @return This {@link Config} object
 */
public Config setErrorConsumer(final ErrorConsumer errorConsumer) {
    this.errorConsumer = errorConsumer;
    return this;
}
}
/**
 * Create a wrapper for a generated model.
 *
 * Copies all scoring options out of the config so the wrapper is self-contained,
 * builds the contributions predictor up-front when requested, and selects the
 * row-to-raw-data converter based on the model's categorical encoding.
 *
 * @param config The wrapper configuration (a model must already be set via Config.setModel)
 */
public EasyPredictModelWrapper(Config config) {
m = config.getModel();
// Ensure an error consumer is always instantiated to avoid missing null-check errors.
ErrorConsumer errorConsumer = config.getErrorConsumer() == null ? new VoidErrorConsumer() : config.getErrorConsumer();
// Snapshot the configured options.
useExtendedOutput = config.getUseExtendedOutput();
enableLeafAssignment = config.getEnableLeafAssignment();
enableGLRMReconstruct = config.getEnableGLRMReconstrut();
enableStagedProbabilities = config.getEnableStagedProbabilities();
enableContributions = config.getEnableContributions();
glrmIterNumber = config.getGLRMIterNumber();
// GLRM models need the iteration count pushed into the model itself before scoring.
if (m instanceof GlrmMojoModel)
((GlrmMojoModel)m)._iterNumber=glrmIterNumber;
// Build the SHAP predictor eagerly so misconfiguration fails at construction, not at predict time.
if (enableContributions) {
if (!(m instanceof PredictContributionsFactory)) {
throw new IllegalStateException("Model " + m.getClass().getName() + " cannot be used to predict contributions.");
}
predictContributions = ((PredictContributionsFactory) m).makeContributionsPredictor();
} else {
predictContributions = null;
}
// With external encoding the caller supplies already-encoded input, so the wrapper falls back to AUTO.
CategoricalEncoding categoricalEncoding = config.getUseExternalEncoding() ?
CategoricalEncoding.AUTO : m.getCategoricalEncoding();
Map<String, Integer> columnMapping = categoricalEncoding.createColumnMapping(m);
Map<Integer, CategoricalEncoder> domainMap = categoricalEncoding.createCategoricalEncoders(m, columnMapping);
// Models may provide their own converter factory; otherwise use the generic row converter.
if (m instanceof ConverterFactoryProvidingModel) {
rowDataConverter = ((ConverterFactoryProvidingModel) m).makeConverterFactory(columnMapping, domainMap, errorConsumer, config);
} else {
rowDataConverter = new RowToRawDataConverter(m, columnMapping, domainMap, errorConsumer, config);
}
}
/**
 * Create a wrapper for a generated model using a default configuration
 * (all optional outputs disabled).
 *
 * @param model The generated model
 */
public EasyPredictModelWrapper(GenModel model) {
this(new Config()
.setModel(model));
}
/**
 * Make a prediction on a new data point.
 *
 * The type of prediction returned depends on the model type.
 * The caller needs to know what type of prediction to expect.
 *
 * This call is convenient for generically automating model deployment.
 * For specific applications (where the kind of model is known and doesn't change), it is recommended to call
 * specific prediction calls like predictBinomial() directly.
 *
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @param mc The model category to dispatch on (usually the model's own category).
 * @return The prediction.
 * @throws PredictException when the category is unknown or unhandled
 */
public AbstractPrediction predict(RowData data, ModelCategory mc) throws PredictException {
    switch (mc) {
        case AutoEncoder:
            return predictAutoEncoder(data);
        case Binomial:
            return predictBinomial(data);
        case Multinomial:
            return predictMultinomial(data);
        case Ordinal:
            return predictOrdinal(data);
        case Clustering:
            return predictClustering(data);
        case Regression:
            return predictRegression(data);
        case DimReduction:
            return predictDimReduction(data);
        case WordEmbedding:
            return predictWord2Vec(data);
        case TargetEncoder:
            return predictTargetEncoding(data);
        case AnomalyDetection:
            return predictAnomalyDetection(data);
        case KLime:
            return predictKLime(data);
        case CoxPH:
            return predictCoxPH(data);
        case BinomialUplift:
            return predictUpliftBinomial(data);
        case Unknown:
            throw new PredictException("Unknown model category");
        default:
            // Bug fix: report the category that was actually switched on (mc), not the model's
            // own category -- the two can differ because callers may pass an explicit category.
            throw new PredictException("Unhandled model category (" + mc + ") in switch statement");
    }
}
/**
 * Make a prediction on a new data point.
 *
 * This method has the same input as predict. The only difference is that
 * it returns an array instead of a prediction object.
 *
 * The meaning of the returned values can be decoded by calling getOutputNames
 * and if any returned values are categorical - method getOutputDomain can be
 * used to find mapping of indexes to categorical values for the particular column.
 *
 * @param data A new data point. Column names are case-sensitive.
 * @param offset Value of offset (use 0 if the model was trained without offset).
 * @return An array representing a prediction.
 * @throws PredictException if prediction cannot be made (eg.: input is invalid)
 */
public double[] predictRaw(RowData data, double offset) throws PredictException {
return preamble(m.getModelCategory(), data, offset);
}
/**
 * See {@link #predict(RowData, ModelCategory)}; dispatches on the model's own category.
 */
public AbstractPrediction predict(RowData data) throws PredictException {
return predict(data, m.getModelCategory());
}
// Package-private accessor for the converter's error consumer (presumably used by tests/collaborators).
ErrorConsumer getErrorConsumer() {
return rowDataConverter.getErrorConsumer();
}
/**
 * Returns names of contributions for prediction results with contributions enabled.
 * @return array of contribution names (array has the same length as the actual contributions, last is BiasTerm)
 * @throws IllegalStateException if contributions were not enabled in the Config
 */
public String[] getContributionNames() {
if (predictContributions == null) {
throw new IllegalStateException(
"Contributions were not enabled using in EasyPredictModelWrapper (use setEnableContributions).");
}
return predictContributions.getContributionNames();
}
/**
 * Make a prediction on a new data point using an AutoEncoder model.
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @return The prediction (original 1-hot expanded input, reconstruction, and MSE for Deep Learning models).
 * @throws PredictException
 */
public AutoEncoderModelPrediction predictAutoEncoder(RowData data) throws PredictException {
    validateModelCategory(ModelCategory.AutoEncoder);
    double[] rawData = fillRawData(data, nanArray(m.nfeatures()));
    double[] reconstruction = m.score0(rawData, new double[m.getPredsSize(ModelCategory.AutoEncoder)]);
    AutoEncoderModelPrediction p = new AutoEncoderModelPrediction();
    p.original = expandRawData(rawData, reconstruction.length);
    p.reconstructed = reconstruction;
    p.reconstructedRowData = reconstructedToRowData(reconstruction);
    // Only Deep Learning models expose a per-row reconstruction error.
    if (m instanceof DeeplearningMojoModel) {
        p.mse = ((DeeplearningMojoModel) m).calculateReconstructionErrorPerRowData(p.original, p.reconstructed);
    }
    return p;
}
/**
 * Creates a 1-hot encoded representation of the input data.
 * Numeric columns pass through unchanged; each categorical column expands into
 * (domain length + 1) slots, the last one representing a missing (NaN) value.
 *
 * @param data raw input as seen by the score0 function
 * @param size target size of the output array
 * @return 1-hot encoded data
 */
private double[] expandRawData(double[] data, int size) {
    double[] expanded = new double[size];
    int offset = 0;
    for (int col = 0; col < data.length; col++) {
        String[] domain = m._domains[col];
        if (domain == null) {
            // numeric column: copied through as-is
            expanded[offset++] = data[col];
        } else {
            // categorical column: NaN maps to the extra "missing" slot past the domain
            int level = Double.isNaN(data[col]) ? domain.length : (int) data[col];
            expanded[offset + level] = 1.0;
            offset += domain.length + 1;
        }
    }
    return expanded;
}
/**
 * Converts output of AutoEncoder to a RowData structure. Categorical fields are represented by
 * a map of domain values -> reconstructed values; the missing domain value is represented by a
 * {@code null} key.
 *
 * @param reconstructed raw output of AutoEncoder
 * @return reconstructed RowData structure
 */
private RowData reconstructedToRowData(double[] reconstructed) {
    RowData result = new RowData();
    int offset = 0;
    for (int col = 0; col < m.nfeatures(); col++) {
        String[] domain = m._domains[col];
        Object value;
        if (domain == null) {
            // numeric column: single slot
            value = reconstructed[offset];
            offset += 1;
        } else {
            // categorical column: (domain length + 1) slots, last one for "missing"
            value = catValuesAsMap(domain, reconstructed, offset);
            offset += domain.length + 1;
        }
        result.put(m._names[col], value);
    }
    return result;
}

/**
 * Builds a level-name -> reconstructed-value map for one categorical column;
 * the slot after the last level (the "missing" slot) is stored under the {@code null} key.
 */
private static Map<String, Double> catValuesAsMap(String[] cats, double[] reconstructed, int offset) {
    Map<String, Double> byLevel = new HashMap<>(cats.length + 1);
    for (int i = 0; i < cats.length; i++) {
        byLevel.put(cats[i], reconstructed[offset + i]);
    }
    byLevel.put(null, reconstructed[offset + cats.length]);
    return byLevel;
}
/**
 * Make a prediction on a new data point using a Dimension Reduction model (PCA, GLRM)
 * @param data A new data point. Unknown column name is treated as a NaN. Column names are case sensitive.
 * @return The prediction (projected dimensions; reconstructed data only for GLRM with reconstruction enabled).
 * @throws PredictException
 */
public DimReductionModelPrediction predictDimReduction(RowData data) throws PredictException {
double[] preds = preamble(ModelCategory.DimReduction, data); // preds contains the x factor
DimReductionModelPrediction p = new DimReductionModelPrediction();
p.dimensions = preds;
// Reconstruction requires the raw archetypes, which are only present in MOJO version 1.10 or higher.
if (m instanceof GlrmMojoModel && ((GlrmMojoModel) m)._archetypes_raw != null && this.enableGLRMReconstruct) // only for version 1.10 or higher
p.reconstructed = ((GlrmMojoModel) m).impute_data(preds, new double[m.nfeatures()], ((GlrmMojoModel) m)._nnums,
((GlrmMojoModel) m)._ncats, ((GlrmMojoModel) m)._permutation, ((GlrmMojoModel) m)._reverse_transform,
((GlrmMojoModel) m)._normMul, ((GlrmMojoModel) m)._normSub, ((GlrmMojoModel) m)._losses,
((GlrmMojoModel) m)._transposed, ((GlrmMojoModel) m)._archetypes_raw, ((GlrmMojoModel) m)._catOffsets,
((GlrmMojoModel) m)._numLevels);
return p;
}
/**
 * Calculate an aggregated word-embedding for a given input sentence (sequence of words).
 *
 * @param sentence array of words forming a sentence
 * @return word-embedding for the given sentence calculated by averaging the embeddings of the
 *         input words; all-NaN if no word had an embedding
 * @throws PredictException if model is not a WordEmbedding model
 */
public float[] predictWord2Vec(String[] sentence) throws PredictException {
    final WordEmbeddingModel weModel = asWordEmbeddingModel();
    final int vecSize = weModel.getVecSize();
    final float[] sum = new float[vecSize];
    final float[] buffer = new float[vecSize];
    int found = 0;
    for (String word : sentence) {
        final float[] vec = weModel.transform0(word, buffer);
        if (vec == null)
            continue; // out-of-vocabulary words are skipped
        found++;
        for (int i = 0; i < vecSize; i++)
            sum[i] += vec[i];
    }
    if (found == 0) {
        // no word had an embedding -> signal with NaNs rather than a zero vector
        Arrays.fill(sum, Float.NaN);
    } else {
        for (int i = 0; i < vecSize; i++)
            sum[i] /= (float) found;
    }
    return sum;
}
/**
 * Lookup word embeddings for a given word (or set of words). The result is a dictionary of
 * words mapped to their respective embeddings.
 *
 * @param data RawData structure, every key with a String value will be translated to an embedding;
 *             note: the keys' only purpose is to link the output embedding to the input word.
 * @return The prediction
 * @throws PredictException if model is not a WordEmbedding model
 */
public Word2VecPrediction predictWord2Vec(RowData data) throws PredictException {
    final WordEmbeddingModel weModel = asWordEmbeddingModel();
    final int vecSize = weModel.getVecSize();
    HashMap<String, float[]> wordToEmbedding = new HashMap<>(data.size());
    for (String key : data.keySet()) {
        Object candidate = data.get(key);
        // only String values are treated as words; everything else is silently skipped
        if (candidate instanceof String) {
            wordToEmbedding.put(key, weModel.transform0((String) candidate, new float[vecSize]));
        }
    }
    Word2VecPrediction p = new Word2VecPrediction();
    p.wordEmbeddings = wordToEmbedding;
    return p;
}

/** Validates and casts the wrapped model to a WordEmbeddingModel. */
private WordEmbeddingModel asWordEmbeddingModel() throws PredictException {
    validateModelCategory(ModelCategory.WordEmbedding);
    if (m instanceof WordEmbeddingModel)
        return (WordEmbeddingModel) m;
    throw new PredictException("Model is not of the expected type, class = " + m.getClass().getSimpleName());
}
/**
 * Make a prediction on a new data point using an Anomaly Detection model.
 *
 * @param data A new data point. Unknown column name is treated as a NaN. Column names are case sensitive.
 * @return The prediction.
 * @throws PredictException
 */
public AnomalyDetectionPrediction predictAnomalyDetection(RowData data) throws PredictException {
    double[] preds = preamble(ModelCategory.AnomalyDetection, data, 0.0);
    AnomalyDetectionPrediction p = new AnomalyDetectionPrediction(preds);
    // only get leaf node assignment if enabled
    if (enableLeafAssignment) {
        SharedTreeMojoModel.LeafNodeAssignments assignments = leafNodeAssignmentExtended(data);
        p.leafNodeAssignments = assignments._paths;
        p.leafNodeAssignmentIds = assignments._nodeIds;
    }
    if (enableStagedProbabilities) {
        double[] rawData = fillRawData(data, nanArray(m.nfeatures()));
        p.stageProbabilities = ((SharedTreeMojoModel) m).scoreStagedPredictions(rawData, preds.length);
    }
    return p;
}
/**
 * Make a prediction on a new data point using a Binomial model.
 * Convenience overload: delegates to {@link #predictBinomial(RowData, double)} with zero offset.
 *
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @return The prediction.
 * @throws PredictException
 */
public BinomialModelPrediction predictBinomial(RowData data) throws PredictException {
return predictBinomial(data, 0.0);
}
/**
 * Make a prediction on a new data point using a Binomial model.
 *
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @param offset An offset for the prediction.
 * @return The prediction.
 * @throws PredictException
 */
public BinomialModelPrediction predictBinomial(RowData data, double offset) throws PredictException {
double[] preds = preamble(ModelCategory.Binomial, data, offset);
BinomialModelPrediction p = new BinomialModelPrediction();
if (enableLeafAssignment) { // only get leaf node assignment if enabled
SharedTreeMojoModel.LeafNodeAssignments assignments = leafNodeAssignmentExtended(data);
p.leafNodeAssignments = assignments._paths;
p.leafNodeAssignmentIds = assignments._nodeIds;
}
// preds[0] is the predicted label index; preds[1..] hold the per-class probabilities
double d = preds[0];
p.labelIndex = (int) d;
String[] domainValues = m.getDomainValues(m.getResponseIdx());
if (domainValues == null && m.getNumResponseClasses() == 2)
domainValues = new String[]{"0", "1"}; // quasibinomial
p.label = domainValues[p.labelIndex];
p.classProbabilities = new double[m.getNumResponseClasses()];
System.arraycopy(preds, 1, p.classProbabilities, 0, p.classProbabilities.length);
// NOTE(review): calibrateClassProbabilities presumably rewrites preds in place -- the raw
// probabilities must be copied out (above) before this call; keep this ordering.
if (m.calibrateClassProbabilities(preds)) {
p.calibratedClassProbabilities = new double[m.getNumResponseClasses()];
System.arraycopy(preds, 1, p.calibratedClassProbabilities, 0, p.calibratedClassProbabilities.length);
}
if (enableStagedProbabilities) {
double[] rawData = nanArray(m.nfeatures());
rawData = fillRawData(data, rawData);
p.stageProbabilities = ((SharedTreeMojoModel) m).scoreStagedPredictions(rawData, preds.length);
}
if (enableContributions) {
p.contributions = predictContributions(data);
}
return p;
}
/**
 * Make a prediction on a new data point using Uplift Binomial model.
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @return The prediction, carrying the raw prediction array as produced by the model.
 * @throws PredictException
 */
public UpliftBinomialModelPrediction predictUpliftBinomial(RowData data) throws PredictException {
double[] preds = preamble(ModelCategory.BinomialUplift, data, 0);
UpliftBinomialModelPrediction p = new UpliftBinomialModelPrediction();
p.predictions = preds;
return p;
}
/**
 * Kept as a thin delegate for backwards compatibility.
 * @deprecated Use {@link #predictTargetEncoding(RowData)} instead.
 */
@Deprecated
public TargetEncoderPrediction transformWithTargetEncoding(RowData data) throws PredictException{
return predictTargetEncoding(data);
}
/**
 * Perform target encoding based on TargetEncoderMojoModel
 * @param data RowData structure with data for which we want to produce transformations.
 *             Unknown column name is treated as a NaN. Column names are case sensitive.
 * @return TargetEncoderPrediction with transformations ordered in accordance with corresponding
 *         categorical columns' indices in training data
 * @throws PredictException
 */
public TargetEncoderPrediction predictTargetEncoding(RowData data) throws PredictException {
    if (!(m instanceof TargetEncoderMojoModel))
        throw new PredictException("Model is not of the expected type, class = " + m.getClass().getSimpleName());
    TargetEncoderMojoModel tem = (TargetEncoderMojoModel) this.m;
    TargetEncoderPrediction prediction = new TargetEncoderPrediction();
    prediction.transformations = predict(data, 0, new double[tem.getPredsSize()]);
    return prediction;
}
/** Returns the decision path (as strings) each tree took for the given row. */
@SuppressWarnings("unused") // not used in this class directly, kept for backwards compatibility
public String[] leafNodeAssignment(RowData data) throws PredictException {
    double[] rawData = fillRawData(data, nanArray(m.nfeatures()));
    return ((TreeBackedMojoModel) m).getDecisionPath(rawData);
}

/** Returns both the decision paths and the leaf node ids each tree assigned to the given row. */
public SharedTreeMojoModel.LeafNodeAssignments leafNodeAssignmentExtended(RowData data) throws PredictException {
    double[] rawData = fillRawData(data, nanArray(m.nfeatures()));
    return ((TreeBackedMojoModel) m).getLeafNodeAssignments(rawData);
}
/**
 * Make a prediction on a new data point using a Multinomial model.
 * Convenience overload: delegates to {@link #predictMultinomial(RowData, double)} with zero offset.
 *
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @return The prediction.
 * @throws PredictException
 */
public MultinomialModelPrediction predictMultinomial(RowData data) throws PredictException {
return predictMultinomial(data, 0D);
}
/**
 * Make a prediction on a new data point using a Multinomial model.
 *
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @param offset Prediction offset
 * @return The prediction.
 * @throws PredictException
 */
public MultinomialModelPrediction predictMultinomial(RowData data, double offset) throws PredictException {
    double[] preds = preamble(ModelCategory.Multinomial, data, offset);
    MultinomialModelPrediction p = new MultinomialModelPrediction();
    // only get leaf node assignment if enabled
    if (enableLeafAssignment) {
        SharedTreeMojoModel.LeafNodeAssignments assignments = leafNodeAssignmentExtended(data);
        p.leafNodeAssignments = assignments._paths;
        p.leafNodeAssignmentIds = assignments._nodeIds;
    }
    // preds[0] is the predicted label index; preds[1..] hold the per-class probabilities
    p.labelIndex = (int) preds[0];
    p.label = m.getDomainValues(m.getResponseIdx())[p.labelIndex];
    p.classProbabilities = new double[m.getNumResponseClasses()];
    System.arraycopy(preds, 1, p.classProbabilities, 0, p.classProbabilities.length);
    if (enableStagedProbabilities) {
        double[] rawData = fillRawData(data, nanArray(m.nfeatures()));
        p.stageProbabilities = ((SharedTreeMojoModel) m).scoreStagedPredictions(rawData, preds.length);
    }
    return p;
}
/**
 * Make a prediction on a new data point using an Ordinal model.
 * Convenience overload: delegates to {@link #predictOrdinal(RowData, double)} with zero offset.
 *
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @return The prediction.
 * @throws PredictException
 */
public OrdinalModelPrediction predictOrdinal(RowData data) throws PredictException {
return predictOrdinal(data, 0D);
}
/**
 * Make a prediction on a new data point using an Ordinal model.
 *
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @param offset Prediction offset
 * @return The prediction.
 * @throws PredictException
 */
public OrdinalModelPrediction predictOrdinal(RowData data, double offset) throws PredictException {
double[] preds = preamble(ModelCategory.Ordinal, data, offset);
OrdinalModelPrediction p = new OrdinalModelPrediction();
// preds[0] is the predicted label index; preds[1..] hold the per-class probabilities
p.classProbabilities = new double[m.getNumResponseClasses()];
p.labelIndex = (int) preds[0];
String[] domainValues = m.getDomainValues(m.getResponseIdx());
p.label = domainValues[p.labelIndex];
System.arraycopy(preds, 1, p.classProbabilities, 0, p.classProbabilities.length);
return p;
}
/**
 * Pairs each class label with its probability and sorts the pairs in descending order.
 */
private SortedClassProbability[] sortByDescendingClassProbability(String[] domainValues, double[] classProbabilities) {
    assert (classProbabilities.length == domainValues.length);
    SortedClassProbability[] ranked = new SortedClassProbability[domainValues.length];
    for (int i = 0; i < ranked.length; i++) {
        SortedClassProbability entry = new SortedClassProbability();
        entry.name = domainValues[i];
        entry.probability = classProbabilities[i];
        ranked[i] = entry;
    }
    Arrays.sort(ranked, Collections.reverseOrder());
    return ranked;
}
/**
 * A helper function to return an array of binomial class probabilities for a prediction in sorted order.
 * The returned array has the most probable class in position 0.
 *
 * @param p The prediction.
 * @return An array with class probabilities sorted in descending order.
 */
public SortedClassProbability[] sortByDescendingClassProbability(BinomialModelPrediction p) {
String[] domainValues = m.getDomainValues(m.getResponseIdx());
double[] classProbabilities = p.classProbabilities;
return sortByDescendingClassProbability(domainValues, classProbabilities);
}
/**
 * Make a prediction on a new data point using a Clustering model.
 * With extended output enabled (and a model that supports it) the per-cluster distances
 * are reported as well; otherwise only the assigned cluster is returned.
 *
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @return The prediction.
 * @throws PredictException
 */
public ClusteringModelPrediction predictClustering(RowData data) throws PredictException {
    ClusteringModelPrediction p = new ClusteringModelPrediction();
    if (useExtendedOutput && m instanceof IClusteringModel) {
        IClusteringModel cm = (IClusteringModel) m;
        double[] rawData = fillRawData(data, nanArray(m.nfeatures()));
        // get cluster assignment & distances
        p.distances = new double[cm.getNumClusters()];
        p.cluster = cm.distances(rawData, p.distances);
    } else {
        double[] preds = preamble(ModelCategory.Clustering, data);
        p.cluster = (int) preds[0];
    }
    return p;
}
/**
 * Make a prediction on a new data point using a Regression model.
 * Convenience overload: delegates to {@link #predictRegression(RowData, double)} with zero offset.
 *
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @return The prediction.
 * @throws PredictException
 */
public RegressionModelPrediction predictRegression(RowData data) throws PredictException {
return predictRegression(data, 0D);
}
/**
 * Make a prediction on a new data point using a Regression model.
 *
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @param offset Prediction offset
 * @return The prediction.
 * @throws PredictException
 */
public RegressionModelPrediction predictRegression(RowData data, double offset) throws PredictException {
    double[] preds = preamble(ModelCategory.Regression, data, offset);
    RegressionModelPrediction p = new RegressionModelPrediction();
    if (enableLeafAssignment) { // only get leaf node assignment if enabled
        SharedTreeMojoModel.LeafNodeAssignments assignments = leafNodeAssignmentExtended(data);
        p.leafNodeAssignments = assignments._paths;
        p.leafNodeAssignmentIds = assignments._nodeIds;
    }
    p.value = preds[0];
    if (enableStagedProbabilities) {
        double[] rawData = nanArray(m.nfeatures());
        rawData = fillRawData(data, rawData);
        p.stageProbabilities = ((SharedTreeMojoModel) m).scoreStagedPredictions(rawData, preds.length);
    }
    if (enableContributions) {
        // Consistency: reuse the predictContributions(RowData) helper (as predictBinomial does)
        // instead of duplicating the raw-data conversion inline.
        p.contributions = predictContributions(data);
    }
    return p;
}
/**
 * Make a prediction on a new data point using a K-LIME model.
 *
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @return The prediction (value, assigned cluster, and per-feature reason codes).
 * @throws PredictException
 */
public KLimeModelPrediction predictKLime(RowData data) throws PredictException {
    double[] preds = preamble(ModelCategory.KLime, data);
    KLimeModelPrediction p = new KLimeModelPrediction();
    // layout of preds: [prediction, cluster id, reason codes...]
    p.value = preds[0];
    p.cluster = (int) preds[1];
    p.reasonCodes = new double[preds.length - 2];
    System.arraycopy(preds, 2, p.reasonCodes, 0, p.reasonCodes.length);
    return p;
}

/** Make a prediction on a new data point using a CoxPH model with an explicit offset. */
public CoxPHModelPrediction predictCoxPH(RowData data, double offset) throws PredictException {
    CoxPHModelPrediction p = new CoxPHModelPrediction();
    final double[] preds = preamble(ModelCategory.CoxPH, data, offset);
    p.value = preds[0];
    return p;
}

/** Make a prediction on a new data point using a CoxPH model (zero offset). */
public CoxPHModelPrediction predictCoxPH(RowData data) throws PredictException {
    return predictCoxPH(data, 0);
}
/**
 * Calculate SHAP contributions for a single row.
 * Requires contributions to be enabled via the Config (setEnableContributions);
 * the names of the returned elements are available from {@link #getContributionNames()}.
 *
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @return per-feature contributions (last element is the BiasTerm)
 * @throws PredictException when the row cannot be converted to raw data
 */
public float[] predictContributions(RowData data) throws PredictException {
double[] rawData = nanArray(m.nfeatures());
rawData = fillRawData(data, rawData);
return predictContributions.calculateContributions(rawData);
}
/**
 * Calculate and sort shapley values.
 *
 * @param data A new data point. Unknown or missing column name is treated as a NaN or ignored. Column names are case sensitive.
 * @param topN Return only #topN highest contributions + bias;
 *             if topN is negative, all SHAP values are sorted in descending order.
 * @param bottomN Return only #bottomN lowest contributions + bias;
 *                if bottomN is negative, all SHAP values are sorted in ascending order.
 *                If topN and bottomN are both non-negative, an array of #topN + #bottomN + bias is returned;
 *                if both are negative, all SHAP values are sorted in descending order.
 * @param compareAbs True to compare absolute values of contributions
 * @return Sorted FeatureContribution array of contributions of size #topN + #bottomN + bias
 * @throws PredictException When #data cannot be properly translated to raw data.
 */
public FeatureContribution[] predictContributions(RowData data, int topN, int bottomN, boolean compareAbs) throws PredictException {
double[] rawData = nanArray(m.nfeatures());
rawData = fillRawData(data, rawData);
return predictContributions.calculateContributions(rawData, topN, bottomN, compareAbs);
}
/**
 * See {@link #varimp(int)}.
 * @return all variables in the model, sorted descending by relative importance
 */
public KeyValue[] varimp() {
    return varimp(-1);
}

/**
 * See {@link VariableImportances#topN(int)}.
 * Requires the MOJO to have been loaded with metadata.
 *
 * @param n number of top variables to return (negative for all)
 * @return the top-n variable importances
 * @throws IllegalStateException when the model has no attributes or does not support variable importance
 */
public KeyValue[] varimp(int n) {
    if (m instanceof MojoModel) {
        ModelAttributes attributes = ((MojoModel) m)._modelAttributes;
        if (attributes == null) {
            throw new IllegalStateException("Model attributes are not available. Did you load metadata from model? MojoModel.load(\"model\", true)");
        }
        if (attributes instanceof VariableImportancesHolder) {
            return ((VariableImportancesHolder) attributes).getVariableImportances().topN(n);
        }
    }
    throw new IllegalStateException("Model does not support variable importance");
}
//----------------------------------------------------------------------
// Transparent methods passed through to GenModel.
//----------------------------------------------------------------------
/**
 * @return the wrapped model object
 */
public GenModel getModel() {
return m;
}
/**
 * Get the category (type) of model.
 * @return The category.
 */
public ModelCategory getModelCategory() {
return m.getModelCategory();
}
/**
 * Get the array of levels for the response column.
 * "Domain" just means list of level names for a categorical (aka factor, enum) column.
 * If the response column is numerical and not categorical, this will return null.
 *
 * @return The array.
 */
public String[] getResponseDomainValues() {
return m.getDomainValues(m.getResponseIdx());
}
/**
 * Pass-through to the generated model's header. Presumably the CSV header for
 * autoencoder output -- exact semantics are defined by the generated model (TODO confirm).
 * @return CSV header for autoencoder.
 */
public String getHeader() {
return m.getHeader();
}
//----------------------------------------------------------------------
// Private methods below this line.
//----------------------------------------------------------------------
/** Throws if the wrapped model does not support the requested prediction category. */
private void validateModelCategory(ModelCategory c) throws PredictException {
    if (!m.getModelCategories().contains(c))
        throw new PredictException(c + " prediction type is not supported for this model.");
}

// This should have been called predict(), because that's what it does
protected double[] preamble(ModelCategory c, RowData data) throws PredictException {
    return preamble(c, data, 0.0);
}

/** Validates the category, then scores the row into a freshly-sized prediction array. */
protected double[] preamble(ModelCategory c, RowData data, double offset) throws PredictException {
    validateModelCategory(c);
    return predict(data, offset, new double[m.getPredsSize(c)]);
}

/** Converts a RowData row into the raw double array layout expected by score0. */
protected double[] fillRawData(RowData data, double[] rawData) throws PredictException {
    return rowDataConverter.convert(data, rawData);
}
/**
 * Converts the row to raw data and scores it, passing the offset through
 * only when the model requires one or a non-zero offset was supplied.
 */
protected double[] predict(RowData data, double offset, double[] preds) throws PredictException {
    double[] rawData = fillRawData(data, nanArray(m.nfeatures()));
    return (m.requiresOffset() || offset != 0)
            ? m.score0(rawData, offset, preds)
            : m.score0(rawData, preds);
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/EigenEncoder.java
|
package hex.genmodel.easy;
import java.util.HashMap;
import java.util.Map;
/**
 * Categorical encoder that replaces a level with its eigen-projection value
 * (one scalar per level), as produced during model training.
 */
public class EigenEncoder implements CategoricalEncoder {
    private final String columnName;
    private final int targetIndex;
    private final Map<String, Integer> domainMap;
    private final double[] projectionEigenVec;

    public EigenEncoder(String columnName, int targetIndex, String[] domainValues, double[] projectionEigenVec) {
        this.columnName = columnName;
        this.targetIndex = targetIndex;
        this.projectionEigenVec = projectionEigenVec;
        this.domainMap = new HashMap<>();
        int level = 0;
        for (String value : domainValues) {
            domainMap.put(value, level++);
        }
    }

    @Override
    public boolean encodeCatValue(String levelName, double[] rawData) {
        Integer levelIndex = domainMap.get(levelName);
        if (levelIndex == null) {
            return false;
        }
        // Cast to float for cross-platform reproducibility of the projection value.
        rawData[targetIndex] = (float) this.projectionEigenVec[levelIndex];
        return true;
    }

    @Override
    public void encodeNA(double[] rawData) {
        rawData[targetIndex] = Double.NaN;
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/EigenEncoderColumnMapper.java
|
package hex.genmodel.easy;
import hex.genmodel.GenModel;
import java.util.HashMap;
import java.util.Map;
/**
 * Builds a column-name to raw-array-index lookup over the model's original
 * (pre-transformation) column names.
 */
public class EigenEncoderColumnMapper {
    protected final GenModel _m;

    public EigenEncoderColumnMapper(GenModel m) {
        _m = m;
    }

    /** @return the original (pre-transformation) model column names */
    public String[] getModelColumnNames() {
        return _m.getOrigNames();
    }

    /** @return map of column name to its position in the column array */
    public Map<String, Integer> create() {
        String[] names = getModelColumnNames();
        Map<String, Integer> nameToIndex = new HashMap<>(names.length);
        int pos = 0;
        for (String name : names) {
            nameToIndex.put(name, pos++);
        }
        return nameToIndex;
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/EigenEncoderDomainMapConstructor.java
|
package hex.genmodel.easy;
import hex.genmodel.GenModel;
import java.util.HashMap;
import java.util.Map;
/**
 * Builds one {@link EigenEncoder} per original categorical column, slicing each
 * column's projection values out of the model's flat projection array.
 */
public class EigenEncoderDomainMapConstructor extends DomainMapConstructor {
    public EigenEncoderDomainMapConstructor(GenModel m, Map<String, Integer> columnNameToIndex) {
        super(m, columnNameToIndex);
    }

    @Override
    public Map<Integer, CategoricalEncoder> create() {
        Map<Integer, CategoricalEncoder> encoders = new HashMap<>();
        String[] names = _m.getOrigNames();
        int offset = 0; // running offset into the flat projection array
        for (int col = 0; col < _m.getOrigNumCols(); col++) {
            String[] levels = _m.getOrigDomainValues()[col];
            if (levels == null) {
                continue; // numeric column: no projection values, no encoder
            }
            String name = names[col];
            Integer index = _columnNameToIndex.get(name);
            // Slice this column's eigen-projection values out of the flat array.
            double[] projection = new double[levels.length];
            System.arraycopy(_m.getOrigProjectionArray(), offset, projection, 0, levels.length);
            offset += levels.length;
            encoders.put(index, new EigenEncoder(name, index, levels, projection));
        }
        return encoders;
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/EnumEncoder.java
|
package hex.genmodel.easy;
import java.util.HashMap;
import java.util.Map;
/**
 * Plain label/index categorical encoder: writes the level's index within the
 * training domain into the target slot of the raw data array.
 */
public class EnumEncoder implements CategoricalEncoder {
    private final String columnName;
    private final int targetIndex;
    private final Map<String, Integer> domainMap;

    public EnumEncoder(String columnName, int targetIndex, String[] domainValues) {
        this.columnName = columnName;
        this.targetIndex = targetIndex;
        domainMap = new HashMap<>();
        int code = 0;
        for (String level : domainValues) {
            domainMap.put(level, code++);
        }
    }

    @Override
    public boolean encodeCatValue(String levelName, double[] rawData) {
        Integer code = domainMap.get(levelName);
        if (code == null) {
            // Retry with the "<column>.<level>" qualified form of the level name.
            code = domainMap.get(columnName + "." + levelName);
        }
        if (code == null) {
            return false;
        }
        rawData[targetIndex] = code;
        return true;
    }

    @Override
    public void encodeNA(double[] rawData) {
        rawData[targetIndex] = Double.NaN;
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/EnumEncoderColumnMapper.java
|
package hex.genmodel.easy;
import hex.genmodel.GenModel;
import java.util.HashMap;
import java.util.Map;
/**
 * Builds a column-name to raw-array-index lookup over the model's (encoded)
 * column names.
 */
public class EnumEncoderColumnMapper {
    protected final GenModel _m;

    public EnumEncoderColumnMapper(GenModel m) {
        _m = m;
    }

    /** @return the model's column names */
    public String[] getModelColumnNames() {
        return _m.getNames();
    }

    /** @return map of column name to its position in the column array */
    public Map<String, Integer> create() {
        String[] names = getModelColumnNames();
        Map<String, Integer> nameToIndex = new HashMap<>(names.length);
        int pos = 0;
        for (String name : names) {
            nameToIndex.put(name, pos++);
        }
        return nameToIndex;
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/EnumEncoderDomainMapConstructor.java
|
package hex.genmodel.easy;
import hex.genmodel.GenModel;
import java.util.HashMap;
import java.util.Map;
/**
 * Builds one {@link EnumEncoder} per categorical column, keyed by the column's
 * raw-array index; numeric columns are omitted.
 */
public class EnumEncoderDomainMapConstructor extends DomainMapConstructor {
    public EnumEncoderDomainMapConstructor(GenModel m, Map<String, Integer> columnNameToIndex) {
        super(m, columnNameToIndex);
    }

    @Override
    public Map<Integer, CategoricalEncoder> create() {
        Map<Integer, CategoricalEncoder> encoders = new HashMap<>();
        String[] names = _m.getNames();
        for (int col = 0; col < _m.getNumCols(); col++) {
            String[] levels = _m.getDomainValues(col);
            if (levels == null) {
                continue; // numeric column: nothing to encode
            }
            String name = names[col];
            Integer index = _columnNameToIndex.get(name);
            encoders.put(index, new EnumEncoder(name, index, levels));
        }
        return encoders;
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/EnumLimitedEncoder.java
|
package hex.genmodel.easy;
import java.util.HashMap;
import java.util.Map;
/**
 * Categorical encoder for the EnumLimited scheme: the most frequent levels keep
 * their own indices while every remaining original level shares the index of
 * the synthetic "other" level.
 */
public class EnumLimitedEncoder implements CategoricalEncoder {
    private final String columnName;
    private final int targetIndex;
    private final Map<String, Integer> domainMap = new HashMap<>();

    EnumLimitedEncoder(String columnName, int targetIndex, String[] domainValues, String[] newDomainValues) {
        this.columnName = columnName;
        this.targetIndex = targetIndex;
        int idx = 0;
        for (String level : newDomainValues) {
            domainMap.put(level, idx++);
        }
        Integer otherIndex = domainMap.get("other");
        if (otherIndex != null) {
            // Map every original level absent from the reduced domain onto the shared "other" slot.
            for (String level : domainValues) {
                if (!domainMap.containsKey(level)) {
                    domainMap.put(level, otherIndex);
                }
            }
            domainMap.remove("other");
        }
    }

    @Override
    public boolean encodeCatValue(String levelName, double[] rawData) {
        Integer code = domainMap.get(levelName);
        if (code == null) {
            // Fall back to the mangled "<column>.top_<level>_levels" naming variant.
            code = domainMap.get(columnName + "." + "top_" + levelName + "_levels");
        }
        if (code == null) {
            return false;
        }
        rawData[targetIndex] = code;
        return true;
    }

    @Override
    public void encodeNA(double[] rawData) {
        rawData[targetIndex] = Double.NaN;
    }

    @Override
    public String toString() {
        return "EnumLimited{" +
                "columnName='" + columnName + '\'' +
                ", targetIndex=" + targetIndex +
                ", domainMap=" + domainMap +
                '}';
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/EnumLimitedEncoderColumnMapper.java
|
package hex.genmodel.easy;
import hex.genmodel.GenModel;
public class EnumLimitedEncoderColumnMapper extends EnumEncoderColumnMapper {
    public EnumLimitedEncoderColumnMapper(GenModel m) {
        super(m);
    }
    /**
     * For EnumLimited encoding the mapping is built over the original
     * (pre-encoding) column names rather than the transformed ones.
     */
    @Override
    public String[] getModelColumnNames() {
        return _m.getOrigNames();
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/EnumLimitedEncoderDomainMapConstructor.java
|
package hex.genmodel.easy;
import hex.genmodel.GenModel;
import java.util.HashMap;
import java.util.Map;
/**
 * Builds one {@link EnumLimitedEncoder} per original categorical column,
 * pairing the full original domain with its reduced ("limited") counterpart.
 */
public class EnumLimitedEncoderDomainMapConstructor extends DomainMapConstructor {
    public EnumLimitedEncoderDomainMapConstructor(GenModel m, Map<String, Integer> columnNameToIndex) {
        super(m, columnNameToIndex);
    }

    @Override
    public Map<Integer, CategoricalEncoder> create() {
        Map<Integer, CategoricalEncoder> encoders = new HashMap<>();
        String[][] limitedDomains = _m.getDomainValues();  // reduced ("limited") domains
        String[] names = _m.getOrigNames();
        String[][] fullDomains = _m.getOrigDomainValues(); // original, full domains
        for (int col = 0; col < _m.getOrigNumCols(); col++) {
            String[] fullDomain = fullDomains[col];
            // Lookup kept unconditional to match prior behavior (NPE surfaces on a missing name).
            int index = _columnNameToIndex.get(names[col]);
            if (fullDomain != null) {
                encoders.put(index, new EnumLimitedEncoder(names[col], index, fullDomain, limitedDomains[col]));
            }
        }
        return encoders;
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/LabelEncoder.java
|
package hex.genmodel.easy;
import java.util.HashMap;
import java.util.Map;
/**
 * Label encoder: writes either the numeric value of the level name itself (when
 * the name parses as an int) or the level's index within the training domain.
 */
public class LabelEncoder implements CategoricalEncoder {
    private final int targetIndex;
    private final Map<String, Integer> domainMap;

    /**
     * @param targetIndex  slot in the raw data array this column encodes into
     * @param domainValues training-time level names, in domain order
     */
    public LabelEncoder(int targetIndex, String[] domainValues) {
        this.targetIndex = targetIndex;
        domainMap = new HashMap<>(domainValues.length);
        for (int j = 0; j < domainValues.length; j++) {
            domainMap.put(domainValues[j], j);
        }
    }

    @Override
    public boolean encodeCatValue(String levelName, double[] rawData) {
        Integer levelIndex = domainMap.get(levelName);
        if (levelIndex == null)
            return false;
        // If the level name itself is numeric, encode that numeric value directly;
        // otherwise fall back to the level's index in the domain.
        // (Fix: previously the name was parsed twice — once to probe, once to encode.)
        try {
            rawData[targetIndex] = Integer.parseInt(levelName);
        } catch (NumberFormatException ignored) {
            rawData[targetIndex] = levelIndex;
        }
        return true;
    }

    @Override
    public void encodeNA(double[] rawData) {
        rawData[targetIndex] = Double.NaN;
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/LabelEncoderDomainMapConstructor.java
|
package hex.genmodel.easy;
import hex.genmodel.GenModel;
import java.util.HashMap;
import java.util.Map;
/**
 * Builds one {@link LabelEncoder} per categorical column of the original
 * (pre-encoding) domain, keyed by the column's raw-array index.
 */
public class LabelEncoderDomainMapConstructor extends DomainMapConstructor {
    public LabelEncoderDomainMapConstructor(GenModel m, Map<String, Integer> columnNameToIndex) {
        super(m, columnNameToIndex);
    }

    @Override
    public Map<Integer, CategoricalEncoder> create() {
        Map<Integer, CategoricalEncoder> domainMap = new HashMap<>();
        String[] columnNames = _m.getNames();
        for (int i = 0; i < _m.getNumCols(); i++) {
            // Fix: the previous ternary fell back to _m._responseColumn when
            // i >= getNumCols(), but that condition is always false inside this
            // loop, so the fallback was dead code.
            String colName = columnNames[i];
            Integer colIndex = _columnNameToIndex.get(colName);
            String[] domainValues = _m.getOrigDomainValues()[i];
            if (domainValues != null) {
                domainMap.put(colIndex, new LabelEncoder(colIndex, domainValues));
            }
        }
        return domainMap;
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/OneHotEncoder.java
|
package hex.genmodel.easy;
import java.util.HashMap;
import java.util.Map;
/**
 * One-hot categorical encoder: writes a 1 into the slot of the observed level
 * and 0 into all other slots of the column's block. The block is domain size
 * plus one slot; the extra slot encodes a missing (NA) value.
 */
public class OneHotEncoder implements CategoricalEncoder {
    private final String columnName;
    private final int targetIndex;
    private final Map<String, Integer> domainMap;

    OneHotEncoder(String columnName, int targetIndex, String[] domainValues) {
        this.columnName = columnName;
        this.targetIndex = targetIndex;
        domainMap = new HashMap<>(domainValues.length);
        int code = 0;
        for (String level : domainValues) {
            domainMap.put(level, code++);
        }
    }

    @Override
    public boolean encodeCatValue(String levelName, double[] rawData) {
        Integer levelIndex = domainMap.get(levelName);
        if (levelIndex == null) {
            return false;
        }
        makeHot(levelIndex, rawData);
        return true;
    }

    @Override
    public void encodeNA(double[] rawData) {
        // The NA slot sits one past the last real level.
        makeHot(domainMap.size(), rawData);
    }

    /** Sets the block to all zeros except a 1 at {@code hotIndex}. */
    private void makeHot(int hotIndex, double[] rawData) {
        int width = domainMap.size() + 1; // one slot per level plus one for NA
        for (int offset = 0; offset < width; offset++) {
            rawData[targetIndex + offset] = (offset == hotIndex) ? 1 : 0;
        }
    }

    @Override
    public String toString() {
        return "OneHotEncoder{" +
                "columnName='" + columnName + '\'' +
                ", targetIndex=" + targetIndex +
                ", domainMap=" + domainMap +
                '}';
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/OneHotEncoderColumnMapper.java
|
package hex.genmodel.easy;
import hex.genmodel.GenModel;
import java.util.HashMap;
import java.util.Map;
/**
 * Maps original column names to their starting offsets in the one-hot expanded
 * raw-data layout: all numeric columns first (one slot each), then each
 * categorical column occupying (domain size + 1) slots — the extra slot is NA.
 */
public class OneHotEncoderColumnMapper {
    private final GenModel _m;

    public OneHotEncoderColumnMapper(GenModel m) {
        _m = m;
    }

    public Map<String, Integer> create() {
        String[] names = _m.getOrigNames();
        String[][] domains = _m.getOrigDomainValues();
        Map<String, Integer> mapping = new HashMap<>(names.length);
        int offset = 0;
        // Numeric columns come first, one slot each.
        for (int col = 0; col < _m.getOrigNumCols(); col++) {
            if (domains[col] == null) {
                mapping.put(names[col], offset++);
            }
        }
        // Categorical columns follow, each spanning domain size + 1 slots.
        for (int col = 0; col < _m.getOrigNumCols(); col++) {
            String[] levels = domains[col];
            if (levels != null) {
                mapping.put(names[col], offset);
                offset += levels.length + 1;
            }
        }
        return mapping;
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/OneHotEncoderDomainMapConstructor.java
|
package hex.genmodel.easy;
import hex.genmodel.GenModel;
import java.util.HashMap;
import java.util.Map;
/**
 * Builds one {@link OneHotEncoder} per original categorical column, keyed by
 * the column's starting offset in the one-hot expanded layout.
 */
public class OneHotEncoderDomainMapConstructor extends DomainMapConstructor {
    public OneHotEncoderDomainMapConstructor(GenModel m, Map<String, Integer> columnNameToIndex) {
        super(m, columnNameToIndex);
    }

    @Override
    public Map<Integer, CategoricalEncoder> create() {
        Map<Integer, CategoricalEncoder> encoders = new HashMap<>();
        String[] names = _m.getOrigNames();
        String[][] domains = _m.getOrigDomainValues();
        for (int col = 0; col < _m.getOrigNumCols(); col++) {
            // Lookup kept unconditional to match prior behavior (NPE surfaces on a missing name).
            int startOffset = _columnNameToIndex.get(names[col]);
            String[] levels = domains[col];
            if (levels != null) {
                encoders.put(startOffset, new OneHotEncoder(names[col], startOffset, levels));
            }
        }
        return encoders;
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/RowData.java
|
package hex.genmodel.easy;
import java.util.HashMap;
/**
* Column name to column value mapping for a new row (aka data point, observation, sample) to predict.
*
* The purpose in life for objects of type RowData is to be passed to a predict method.
*
* RowData contains the input values for one new row.
* In this context, "row" means a new data point (aka row, observation, sample) to make a prediction for.
* Column names are mandatory (the column name is the key in the HashMap).
*
* <p></p>
* Columns of different types are handled as follows:
* <ul>
* <li>
* For numerical columns, the value Object may either be a Double or a String. If a String is passed, then
* Double.parseDouble() will be called on the String.
* </li>
* <li>
* For categorical (aka factor, enum) columns, the value Object must be a String with the same names as seen
* in the training data.
* It is not allowed to use new categorical (aka factor, enum) levels unseen during training (this will result
* in a {@link hex.genmodel.easy.exception.PredictUnknownCategoricalLevelException} when one of the predict methods
* is called).
* </li>
* </ul>
*
* <p></p>
* Incorrect use of data types will result in a {@link hex.genmodel.easy.exception.PredictUnknownTypeException}
* when one of the predict methods is called.
*
* <p></p>
* For missing columns that are in the model, NA will be used by the predict methods.
*
* <p></p>
* Extra columns that are not in the model are ignored by the predict methods.
*
* <p></p>
* See the top-of-tree master version of this file <a href="https://github.com/h2oai/h2o-3/blob/master/h2o-genmodel/src/main/java/hex/genmodel/easy/RowData.java" target="_blank">here on github</a>.
*/
public class RowData extends HashMap<String, Object> {
    // Intentionally empty: RowData is simply a column-name -> value map.
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/RowToRawDataConverter.java
|
package hex.genmodel.easy;
import hex.genmodel.GenModel;
import hex.genmodel.easy.exception.PredictException;
import hex.genmodel.easy.exception.PredictNumberFormatException;
import hex.genmodel.easy.exception.PredictUnknownCategoricalLevelException;
import hex.genmodel.easy.exception.PredictUnknownTypeException;
import java.io.Serializable;
import java.util.Map;
/**
 * Transforms a {@link RowData} instance (the row we want a prediction for) into the
 * raw double array layout expected by the model's scoring methods.
 */
public class RowToRawDataConverter implements Serializable {
    // Maps model column names to their index (offset) in the raw data array.
    private final Map<String, Integer> _modelColumnNameToIndexMap;
    // Maps a raw-data index to the categorical encoder for that column (numeric columns absent).
    private final Map<Integer, CategoricalEncoder> _domainMap;
    // Receives notifications about conversion problems.
    private final EasyPredictModelWrapper.ErrorConsumer _errorConsumer;
    private final boolean _convertUnknownCategoricalLevelsToNa;
    private final boolean _convertInvalidNumbersToNa;

    // NOTE(review): the GenModel parameter is not stored; presumably kept for API
    // compatibility of the constructor signature — confirm before removing.
    public RowToRawDataConverter(GenModel m,
                                 Map<String, Integer> modelColumnNameToIndexMap,
                                 Map<Integer, CategoricalEncoder> domainMap,
                                 EasyPredictModelWrapper.ErrorConsumer errorConsumer,
                                 EasyPredictModelWrapper.Config config) {
        _modelColumnNameToIndexMap = modelColumnNameToIndexMap;
        _domainMap = domainMap;
        _errorConsumer = errorConsumer;
        _convertUnknownCategoricalLevelsToNa = config.getConvertUnknownCategoricalLevelsToNa();
        _convertInvalidNumbersToNa = config.getConvertInvalidNumbersToNa();
    }

    /**
     * Fills {@code rawData} from the given row, column by column.
     *
     * @param data instance of RowData we want to get a prediction for
     * @param rawData array that will be filled up from the RowData instance and returned
     * @return the {@code rawData} array with data from RowData.
     * @throws PredictException if a value cannot be converted. Note: the exception name
     *                          feels out of scope for a class named `RowToRawDataConverter`,
     *                          but this conversion only exists to produce predictions.
     */
    public double[] convert(RowData data, double[] rawData) throws PredictException {
        for (String dataColumnName : data.keySet()) {
            Integer index = _modelColumnNameToIndexMap.get(dataColumnName);
            // Skip column names that are not known.
            // Skip the "response" column which should not be included in `rawData`
            if (index == null || index >= rawData.length) {
                continue;
            }
            Object o = data.get(dataColumnName);
            // A true return short-circuits the remaining columns. The base
            // convertValue below always returns false; presumably subclasses
            // override it to signal early termination — TODO confirm with callers.
            if (convertValue(dataColumnName, o, _domainMap.get(index), index, rawData)) {
                return rawData;
            }
        }
        return rawData;
    }

    /**
     * Converts a single column value into the raw array: numeric values go directly
     * into {@code rawData[targetIndex]}; categorical values are delegated to the
     * column's encoder (which may write several slots, e.g. one-hot).
     *
     * @return true to request that convert() stop processing further columns;
     *         this base implementation always returns false
     * @throws PredictException on unparseable numbers, unknown levels or unsupported types
     */
    protected boolean convertValue(String columnName, Object o, CategoricalEncoder catEncoder,
                                   int targetIndex, double[] rawData) throws PredictException {
        if (catEncoder == null) {
            // Column is either numeric or a string (for images or text)
            double value = Double.NaN;
            if (o instanceof String) {
                String s = ((String) o).trim();
                // numeric
                try {
                    value = Double.parseDouble(s);
                } catch (NumberFormatException nfe) {
                    // Unless configured to coerce bad numbers to NA, fail loudly.
                    if (!_convertInvalidNumbersToNa)
                        throw new PredictNumberFormatException("Unable to parse value: " + s + ", from column: " + columnName + ", as Double; " + nfe.getMessage());
                }
            } else if (o instanceof Double) {
                value = (Double) o;
            } else {
                throw new PredictUnknownTypeException(
                        "Unexpected object type " + o.getClass().getName() + " for numeric column " + columnName);
            }
            if (Double.isNaN(value)) {
                // If this point is reached, the original value remains NaN.
                _errorConsumer.dataTransformError(columnName, o, "Given non-categorical value is unparseable, treating as NaN.");
            }
            rawData[targetIndex] = value;
        } else {
            // Column has categorical value.
            if (o instanceof String) {
                String levelName = (String) o;
                if (! catEncoder.encodeCatValue(levelName, rawData)) {
                    // Level was not seen during training: either coerce to NA or fail.
                    if (_convertUnknownCategoricalLevelsToNa) {
                        catEncoder.encodeNA(rawData);
                        _errorConsumer.unseenCategorical(columnName, o, "Previously unseen categorical level detected, marking as NaN.");
                    } else {
                        _errorConsumer.dataTransformError(columnName, o, "Unknown categorical level detected.");
                        throw new PredictUnknownCategoricalLevelException("Unknown categorical level (" + columnName + "," + levelName + ")", columnName, levelName);
                    }
                }
            } else if (o instanceof Double && Double.isNaN((double) o)) {
                _errorConsumer.dataTransformError(columnName, o, "Missing factor value detected, setting to NaN");
                catEncoder.encodeNA(rawData); // Missing factor is the only Double value allowed
            } else {
                _errorConsumer.dataTransformError(columnName, o, "Unknown categorical variable type.");
                throw new PredictUnknownTypeException(
                        "Unexpected object type " + o.getClass().getName() + " for categorical column " + columnName);
            }
        }
        return false;
    }

    EasyPredictModelWrapper.ErrorConsumer getErrorConsumer() {
        return _errorConsumer;
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/package-info.java
|
/**
* The easy prediction API for generated POJO and MOJO models.
*
* Use as follows:
* <ol>
* <li>Instantiate an EasyPredictModelWrapper</li>
* <li>Create a new row of data</li>
* <li>Call one of the predict methods</li>
* </ol>
*
* <p></p>
* Here is an example:
*
* <pre>
* {@code
* // Step 1.
* modelClassName = "your_pojo_model_downloaded_from_h2o";
* GenModel rawModel;
* rawModel = (GenModel) Class.forName(modelClassName).newInstance();
* EasyPredictModelWrapper model = new EasyPredictModelWrapper(rawModel);
* //
* // By default, unknown categorical levels throw PredictUnknownCategoricalLevelException.
* // Optionally configure the wrapper to treat unknown categorical levels as N/A instead:
* //
* // EasyPredictModelWrapper model = new EasyPredictModelWrapper(
* // new EasyPredictModelWrapper.Config()
* // .setModel(rawModel)
* // .setConvertUnknownCategoricalLevelsToNa(true));
*
* // Step 2.
* RowData row = new RowData();
* row.put(new String("CategoricalColumnName"), new String("LevelName"));
* row.put(new String("NumericColumnName1"), new String("42.0"));
* row.put(new String("NumericColumnName2"), Double.valueOf(42.0));
*
* // Step 3.
* BinomialModelPrediction p = model.predictBinomial(row);
* }
* </pre>
*/
package hex.genmodel.easy;
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/error/CountingErrorConsumer.java
|
package hex.genmodel.easy.error;
import hex.genmodel.GenModel;
import hex.genmodel.easy.EasyPredictModelWrapper;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;
/**
 * An implementation of {@link hex.genmodel.easy.EasyPredictModelWrapper.ErrorConsumer}
 * counting the number of each kind of error ever received.
 *
 * Counters are {@link AtomicLong}s held in {@link ConcurrentHashMap}s, so the
 * consumer can be shared across prediction threads.
 */
public class CountingErrorConsumer extends EasyPredictModelWrapper.ErrorConsumer {
    // Per-column count of data transformation errors (response column excluded for supervised models).
    private Map<String, AtomicLong> dataTransformationErrorsCountPerColumn;
    // Per-categorical-column count of previously unseen levels.
    private Map<String, AtomicLong> unknownCategoricalsPerColumn;
    // Optional per-column collection of the actual unseen values and their counts.
    private Map<String, ConcurrentMap<Object, AtomicLong>> unseenCategoricalsCollector;
    private final boolean collectUnseenCategoricals;
    /**
     * @param model An instance of {@link GenModel}
     */
    public CountingErrorConsumer(GenModel model) {
        this(model, DEFAULT_CONFIG);
    }
    /**
     * @param model An instance of {@link GenModel}
     * @param config An instance of {@link Config}
     */
    public CountingErrorConsumer(GenModel model, Config config) {
        collectUnseenCategoricals = config.isCollectUnseenCategoricals();
        initializeDataTransformationErrorsCount(model);
        initializeUnknownCategoricals(model);
    }
    /**
     * Initializes the map of data transformation error counters, one per column,
     * excluding the response column of a supervised model. The map is wrapped as
     * unmodifiable and its counters are thread-safe.
     *
     * @param model {@link GenModel} the data transformation errors count map is initialized for
     */
    private void initializeDataTransformationErrorsCount(GenModel model) {
        String responseColumnName = model.isSupervised() ? model.getResponseName() : null;
        dataTransformationErrorsCountPerColumn = new ConcurrentHashMap<>();
        for (String column : model.getNames()) {
            // Do not perform check for response column if the model is unsupervised
            if (!model.isSupervised() || !column.equals(responseColumnName)) {
                dataTransformationErrorsCountPerColumn.put(column, new AtomicLong());
            }
        }
        dataTransformationErrorsCountPerColumn = Collections.unmodifiableMap(dataTransformationErrorsCountPerColumn);
    }
    /**
     * Initializes the map of unknown categoricals per column with an unmodifiable and thread-safe implementation of {@link Map}.
     *
     * @param model {@link GenModel} the unknown categoricals per column map is initialized for
     */
    private void initializeUnknownCategoricals(GenModel model) {
        unknownCategoricalsPerColumn = new ConcurrentHashMap<>();
        unseenCategoricalsCollector = new ConcurrentHashMap<>();
        for (int i = 0; i < model.getNumCols(); i++) {
            String[] domainValues = model.getDomainValues(i);
            // Only categorical columns (non-null domain) can have unseen levels.
            if (domainValues != null) {
                unknownCategoricalsPerColumn.put(model.getNames()[i], new AtomicLong());
                if (collectUnseenCategoricals)
                    unseenCategoricalsCollector.put(model.getNames()[i], new ConcurrentHashMap<Object, AtomicLong>());
            }
        }
        unknownCategoricalsPerColumn = Collections.unmodifiableMap(unknownCategoricalsPerColumn);
    }
    @Override
    public void dataTransformError(String columnName, Object value, String message) {
        dataTransformationErrorsCountPerColumn.get(columnName).incrementAndGet();
    }
    @Override
    public void unseenCategorical(String columnName, Object value, String message) {
        unknownCategoricalsPerColumn.get(columnName).incrementAndGet();
        if (collectUnseenCategoricals) {
            ConcurrentMap<Object, AtomicLong> columnCollector = unseenCategoricalsCollector.get(columnName);
            assert columnCollector != null;
            AtomicLong counter = columnCollector.get(value);
            if (counter != null) {
                counter.incrementAndGet(); // best effort to avoid creating new AtomicLongs on each invocation
            } else {
                // putIfAbsent returns the pre-existing counter if another thread won the race.
                counter = columnCollector.putIfAbsent(value, new AtomicLong(1));
                if (counter != null) {
                    counter.incrementAndGet();
                }
            }
        }
    }
    /**
     * Counts and returns all previously unseen categorical variables across all columns.
     * Results may vary when called during prediction phase.
     *
     * @return A sum of all previously unseen categoricals across all columns
     */
    public long getTotalUnknownCategoricalLevelsSeen() {
        long total = 0;
        for (AtomicLong l : unknownCategoricalsPerColumn.values()) {
            total += l.get();
        }
        return total;
    }
    /***
     * Returns a thread-safe Map with column names as keys and number of observed unknown categorical values
     * associated with each column. The map returned is a direct reference to the map backing
     * this {@link CountingErrorConsumer}. Iteration during the prediction phase may end up with
     * undefined results.
     *
     * All the columns are listed.
     * @return A thread-safe map.
     */
    public Map<String, AtomicLong> getUnknownCategoricalsPerColumn() {
        return unknownCategoricalsPerColumn;
    }
    /**
     * @return per-value counts of unseen levels for the given column
     * @throws IllegalStateException if collection of unseen values was not enabled in {@link Config}
     */
    public Map<Object, AtomicLong> getUnseenCategoricals(String column) {
        if (! collectUnseenCategoricals) {
            throw new IllegalStateException("Unseen categorical values collection was not enabled.");
        }
        return unseenCategoricalsCollector.get(column);
    }
    /**
     * An unmodifiable, thread-safe map of all columns with counts of data transformation errors observed.
     * The map returned is a direct reference to the map backing this {@link CountingErrorConsumer}.
     * Iteration during the prediction phase may end up with undefined results.
     *
     * @return A thread-safe instance of {@link Map}
     */
    public Map<String, AtomicLong> getDataTransformationErrorsCountPerColumn() {
        return dataTransformationErrorsCountPerColumn;
    }
    /**
     * @return Number of transformation errors found
     */
    public long getDataTransformationErrorsCount() {
        long total = 0;
        for (AtomicLong l : dataTransformationErrorsCountPerColumn.values()) {
            total += l.get();
        }
        return total;
    }
    private static final Config DEFAULT_CONFIG = new Config();
    /** Configuration for {@link CountingErrorConsumer}. */
    public static class Config {
        // When true, the consumer also records which unseen values occurred, not just how many.
        private boolean collectUnseenCategoricals;
        public boolean isCollectUnseenCategoricals() {
            return collectUnseenCategoricals;
        }
        public void setCollectUnseenCategoricals(boolean collectUnseenCategoricals) {
            this.collectUnseenCategoricals = collectUnseenCategoricals;
        }
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/error/VoidErrorConsumer.java
|
package hex.genmodel.easy.error;
import hex.genmodel.easy.EasyPredictModelWrapper;
/**
 * A no-op implementation of {@link hex.genmodel.easy.EasyPredictModelWrapper.ErrorConsumer}.
 * Its purpose is to avoid forcing developers to do null checks in code before each and every call.
 */
public final class VoidErrorConsumer extends EasyPredictModelWrapper.ErrorConsumer {
    @Override
    public final void dataTransformError(String columnName, Object value, String message) {
        // Do nothing on purpose to avoid the need for null checks
    }
    @Override
    public final void unseenCategorical(String columnName, Object value, String message) {
        // Do nothing on purpose to avoid the need for null checks
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/exception/PredictException.java
|
package hex.genmodel.easy.exception;
/**
 * Base class for all generated-model exceptions that can occur in the various predict methods.
 */
public class PredictException extends Exception {
    /** @param message human-readable description of the failure */
    public PredictException(String message) {
        super(message);
    }
    /** @param cause underlying cause of the failure */
    public PredictException(Throwable cause) {
        super(cause);
    }
    /**
     * @param message human-readable description of the failure
     * @param cause   underlying cause of the failure
     */
    public PredictException(String message, Throwable cause) {
        super(message, cause);
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/exception/PredictNumberFormatException.java
|
package hex.genmodel.easy.exception;
/**
 * Number format exception.
 *
 * When a RowData observation is provided to a predict method, the value types are extremely restricted.
 * This exception occurs if the value of a numeric feature fails to parse as Double, e.g. an empty string.
 */
public class PredictNumberFormatException extends PredictException {
    public PredictNumberFormatException(String message) {
        super(message);
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/exception/PredictUnknownCategoricalLevelException.java
|
package hex.genmodel.easy.exception;
/**
 * Unknown categorical level exception.
 *
 * A categorical column is equivalent to a factor column or an enum column.
 * A column in which different values can only be compared for equality with one another, but
 * not distance.
 *
 * This exception occurs when the data point to predict contains a value that was not seen
 * during model training.
 *
 * This can definitely happen as a result of the user providing bad input.
 */
public class PredictUnknownCategoricalLevelException extends PredictException {
    // Name of the column that received the unknown level.
    public final String columnName;
    // The level value that was not seen during training.
    public final String unknownLevel;
    /**
     * @param message      full exception message
     * @param columnName   column the unknown level was supplied for
     * @param unknownLevel the offending level value
     */
    public PredictUnknownCategoricalLevelException(String message, String columnName, String unknownLevel) {
        super(message);
        this.columnName = columnName;
        this.unknownLevel = unknownLevel;
    }
    /**
     * Get the column name for which the unknown level was given as input.
     * @return Column name
     */
    @SuppressWarnings("unused")
    public String getColumnName() {
        return columnName;
    }
    /**
     * Get the unknown level which was not seen during model training.
     * @return Unknown level
     */
    @SuppressWarnings("unused")
    public String getUnknownLevel() {
        return unknownLevel;
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/exception/PredictUnknownTypeException.java
|
package hex.genmodel.easy.exception;
/**
 * Unknown type exception.
 *
 * When a RowData observation is provided to a predict method, the value types are extremely restricted.
 * This exception occurs if the value of a RowData element is of the wrong data type.
 *
 * (The only supported value types are String and Double.)
 */
public class PredictUnknownTypeException extends PredictException {
    /** @param message description including the offending type and column name */
    public PredictUnknownTypeException(String message) {
        super(message);
    }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/exception/PredictWrongModelCategoryException.java
|
package hex.genmodel.easy.exception;
/**
 * Wrong model category exception.
 *
 * Each generated model is of exactly one category.
 * Only one of the different predict calls works with that category.
 *
 * For example, a model of category Binomial can only respond properly to
 * predictBinomial().
 *
 * Attempting to call the wrong predict method for a model results in this exception.
 */
public class PredictWrongModelCategoryException extends PredictException {
  /**
   * @param message Description of the category mismatch (model category vs. predict method called).
   */
  public PredictWrongModelCategoryException(String message) {
    super(message);
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/exception/package-info.java
|
/**
* Exceptions that can be raised by generated POJO and MOJO models.
*/
package hex.genmodel.easy.exception;
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/AbstractPrediction.java
|
package hex.genmodel.easy.prediction;
/**
 * Predictions from generated models for individual new data points derive from this class.
 *
 * Every model has a getModelCategory() method, and the prediction type supported by that model corresponds to the
 * model category.
 *
 * Serializable so that predictions can be passed across JVM boundaries or cached by callers.
 */
public abstract class AbstractPrediction implements java.io.Serializable {
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/AnomalyDetectionPrediction.java
|
package hex.genmodel.easy.prediction;
/**
 * Prediction of an anomaly detection model (e.g. Isolation Forest).
 */
public class AnomalyDetectionPrediction extends AbstractPrediction {
  /**
   * Only available when MojoModel has contamination parameter defined otherwise is null.
   */
  public Boolean isAnomaly;
  /**
   * The raw number that an algorithm is using to count final anomaly score.
   *
   * E.g. for Isolation Forest this number is mean path length of data in the trees. Smaller number means more anomalous point, higher number means more normal point.
   */
  public double score;
  /**
   * Higher number means more anomalous point, smaller number means more normal point.
   */
  public double normalizedScore;
  /**
   * Only valid for tree-based models, null for all other mojo models.
   */
  public String[] leafNodeAssignments;
  /**
   * Ditto, available in MOJO 1.3 and newer
   */
  public int[] leafNodeAssignmentIds;
  /**
   * Staged predictions of tree algorithms (prediction probabilities of trees per iteration).
   * The output structure is for tree Tt and class Cc:
   * Binomial models: [probability T1.C1, probability T2.C1, ..., Tt.C1] where Tt.C1 corresponds to the probability p0
   * Multinomial models: [probability T1.C1, probability T1.C2, ..., Tt.Cc]
   */
  public double[] stageProbabilities;

  @SuppressWarnings("unused")
  public AnomalyDetectionPrediction() {
  }

  /**
   * Builds the prediction from the raw preds array.
   * A 3-element array carries [isAnomaly, normalizedScore, score]; a 2-element array
   * carries only [normalizedScore, score] (no contamination parameter defined).
   */
  public AnomalyDetectionPrediction(double[] preds) {
    boolean hasLabel = preds.length == 3;
    int offset = hasLabel ? 1 : 0;
    if (hasLabel) {
      isAnomaly = preds[0] == 1;
    }
    normalizedScore = preds[offset];
    score = preds[offset + 1];
  }

  /**
   * Converts this prediction back into the raw preds array layout described on the
   * {@link #AnomalyDetectionPrediction(double[])} constructor.
   */
  public double[] toPreds() {
    return isAnomaly == null
        ? new double[]{normalizedScore, score}
        : new double[]{isAnomaly ? 1 : 0, normalizedScore, score};
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/AutoEncoderModelPrediction.java
|
package hex.genmodel.easy.prediction;
import hex.genmodel.easy.RowData;
/**
 * Data reconstructed by the AutoEncoder model based on a given input.
 */
public class AutoEncoderModelPrediction extends AbstractPrediction {
  /**
   * Representation of the original input the way AutoEncoder model sees it (1-hot encoded categorical values)
   */
  public double[] original;
  /**
   * Reconstructed data, the array has same length as the original input. The user can use the original input
   * and reconstructed output to easily calculate eg. the reconstruction error.
   */
  public double[] reconstructed;
  /**
   * Reconstructed data represented in RowData structure. The structure will copy the structure of the RowData input
   * with the exception of categorical values. Categorical fields will be represented as a map of the domain values
   * to the reconstructed values.
   * Example: input RowData([sex: "Male", ..]) will produce output RowData([sex: [Male: 0.9, Female: 0.1], ..]
   */
  public RowData reconstructedRowData;
  /**
   * Reconstruction mean squared error calculated from original and reconstructed data.
   * Uses a normalization defined for the numerical features of the trained model.
   * average reconstruction error = ||original - reconstructed||^2 / length(original)
   *
   * Initialized to -1 as a sentinel for "not computed".
   */
  public double mse = -1;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/BinomialModelPrediction.java
|
package hex.genmodel.easy.prediction;
/**
 * Binomial classification model prediction.
 *
 * GLM logistic regression (GLM family "binomial") also falls into this category.
 */
public class BinomialModelPrediction extends AbstractPrediction {
  /**
   * 0 or 1.
   */
  public int labelIndex;
  /**
   * Label of the predicted level.
   */
  public String label;
  /**
   * This array of length two has the class probability for each class (aka categorical or factor level) in the
   * response column.
   *
   * The array corresponds to the level names returned by:
   * <pre>
   * model.getDomainValues(model.getResponseIdx())
   * </pre>
   * "Domain" is the internal H2O term for level names.
   *
   * The values in this array may be Double.NaN, which means NA (this will happen with GLM, for example,
   * if one of the input values for a new data point is NA).
   * If they are valid numeric values, then they will sum up to 1.0.
   */
  public double[] classProbabilities;
  /**
   * Class probabilities calibrated by Platt Scaling or Isotonic Regression. Optional, only calculated if the model supports it.
   */
  public double[] calibratedClassProbabilities;
  /** Leaf node assignment paths; only valid for tree-based models, null for all other mojo models. */
  public String[] leafNodeAssignments;
  /** Leaf node assignment ids; ditto, available in MOJO 1.3 and newer. */
  public int[] leafNodeAssignmentIds;
  /**
   * Staged predictions of tree algorithms (prediction probabilities of trees per iteration).
   * The output structure is for tree Tt and class Cc:
   * Binomial models: [probability T1.C1, probability T2.C1, ..., Tt.C1] where Tt.C1 corresponds to the probability p0
   * Multinomial models: [probability T1.C1, probability T1.C2, ..., Tt.Cc]
   */
  public double[] stageProbabilities;
  /**
   * Per-feature prediction contributions (SHAP values).
   * Size of the returned array is #features + 1 - there is a feature contribution column for each input feature,
   * the last item is the model bias. The sum of the feature contributions and the bias term is equal to the raw
   * prediction of the model. Raw prediction of tree-based model is the sum of the predictions of the individual
   * trees before the inverse link function is applied to get the actual prediction.
   * This means the sum is not equal to the probabilities returned in classProbabilities.
   *
   * (Optional) Available only for supported models (GBM, XGBoost).
   */
  public float[] contributions;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/ClusteringModelPrediction.java
|
package hex.genmodel.easy.prediction;
/**
 * Clustering model prediction.
 */
public class ClusteringModelPrediction extends AbstractPrediction {
  /**
   * Chosen cluster for this data point.
   */
  public int cluster;
  /**
   * (Optional) Vector of squared distances to all cluster centers.
   * This field will only be included in the output if "useExtendedOutput" flag was enabled in EasyPredictModelWrapper.
   */
  public double[] distances = null;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/CoxPHModelPrediction.java
|
package hex.genmodel.easy.prediction;
/**
 * CoxPH model prediction.
 */
public class CoxPHModelPrediction extends AbstractPrediction {
  /**
   * The predicted value.
   *
   * This value may be Double.NaN, which means NA (this will happen with CoxPH, for example,
   * if one of the input values for a new data point is NA).
   */
  public double value;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/DimReductionModelPrediction.java
|
package hex.genmodel.easy.prediction;
/**
 * Dimensionality-reduction model prediction (GLRM, PCA).
 */
public class DimReductionModelPrediction extends AbstractPrediction {
  /** Contains the X factor/coefficient for GLRM or PCA. */
  public double[] dimensions;
  /**
   * This field is only used for GLRM and not for PCA. Reconstructed data, the array has same length as the
   * original input. The user can use the original input and reconstructed output to easily calculate eg. the
   * reconstruction error. Note that all values are either doubles or integers. Users need to convert
   * the enum columns from the integer columns if necessary.
   */
  public double[] reconstructed;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/KLimeModelPrediction.java
|
package hex.genmodel.easy.prediction;
/**
 * k-LIME model prediction: a regression prediction extended with the chosen cluster
 * and per-feature reason codes.
 */
public class KLimeModelPrediction extends RegressionModelPrediction {
  /**
   * Chosen cluster for this data point.
   */
  public int cluster;
  /**
   * Array of reason codes. Each element of the array corresponds to a feature used in model training.
   * Order of the codes is given by the order of columns in the model.
   */
  public double[] reasonCodes;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/MultinomialModelPrediction.java
|
package hex.genmodel.easy.prediction;
/**
 * Multinomial classification model prediction.
 */
public class MultinomialModelPrediction extends AbstractPrediction {
  /**
   * Index number of the predicted class (aka categorical or factor level) in the response column.
   */
  public int labelIndex;
  /**
   * Label of the predicted level.
   */
  public String label;
  /**
   * This array has an element for each class (aka categorical or factor level) in the response column.
   *
   * The array corresponds to the level names returned by:
   * <pre>
   * model.getDomainValues(model.getResponseIdx())
   * </pre>
   * "Domain" is the internal H2O term for level names.
   *
   * The values in this array may be Double.NaN, which means NA.
   * If they are valid numeric values, then they will sum up to 1.0.
   */
  public double[] classProbabilities;
  /** Leaf node assignment paths; only valid for tree-based models, null for all other mojo models. */
  public String[] leafNodeAssignments;
  /** Leaf node assignment ids; ditto, available in MOJO 1.3 and newer. */
  public int[] leafNodeAssignmentIds;
  /**
   * Staged predictions of tree algorithms (prediction probabilities of trees per iteration).
   * The output structure is for tree Tt and class Cc:
   * Binomial models: [probability T1.C1, probability T2.C1, ..., Tt.C1] where Tt.C1 corresponds to the probability p0
   * Multinomial models: [probability T1.C1, probability T1.C2, ..., Tt.Cc]
   */
  public double[] stageProbabilities;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/OrdinalModelPrediction.java
|
package hex.genmodel.easy.prediction;
/**
 * Ordinal classification model prediction.
 */
public class OrdinalModelPrediction extends AbstractPrediction {
  /**
   * Index number of the predicted class (aka categorical or factor level) in the response column.
   */
  public int labelIndex;
  /**
   * Label of the predicted level.
   */
  public String label;
  /**
   * This array has an element for each class (aka categorical or factor level) in the response column.
   *
   * The array corresponds to the level names returned by:
   * <pre>
   * model.getDomainValues(model.getResponseIdx())
   * </pre>
   * "Domain" is the internal H2O term for level names.
   *
   * The values in this array may be Double.NaN, which means NA.
   * If they are valid numeric values, then they will sum up to 1.0.
   */
  public double[] classProbabilities;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/RegressionModelPrediction.java
|
package hex.genmodel.easy.prediction;
/**
 * Regression model prediction.
 */
public class RegressionModelPrediction extends AbstractPrediction {
  /**
   * The predicted value.
   *
   * This value may be Double.NaN, which means NA (this will happen with GLM, for example,
   * if one of the input values for a new data point is NA).
   */
  public double value;
  /** Leaf node assignment paths; only valid for tree-based models, null for all other mojo models. */
  public String[] leafNodeAssignments;
  /** Leaf node assignment ids; ditto, available in MOJO 1.3 and newer. */
  public int[] leafNodeAssignmentIds;
  /**
   * Staged predictions of tree algorithms (prediction probabilities of trees per iteration).
   * The output structure is for tree Tt and class Cc:
   * Binomial models: [probability T1.C1, probability T2.C1, ..., Tt.C1] where Tt.C1 corresponds to the probability p0
   * Multinomial models: [probability T1.C1, probability T1.C2, ..., Tt.Cc]
   */
  public double[] stageProbabilities;
  /**
   * Per-feature prediction contributions (SHAP values).
   * Size of the returned array is #features + 1 - there is a feature contribution column for each input feature,
   * the last item is the model bias. The sum of the feature contributions and the bias term is equal to the raw
   * prediction of the model. Raw prediction of tree-based model is the sum of the predictions of the individual
   * trees before the inverse link function is applied to get the actual prediction.
   * For Gaussian distribution the sum of the contributions is equal to the model prediction.
   *
   * (Optional) Available only for supported models (GBM, XGBoost).
   */
  public float[] contributions;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/SortedClassProbability.java
|
package hex.genmodel.easy.prediction;
/**
* Class probability.
*
* Produced by method sortClassProbabilities() in class EasyPredictModelWrapper.
*/
public class SortedClassProbability implements Comparable {
/**
* Name of this class level.
*/
public String name;
/**
* Prediction value for this class level.
*/
public double probability;
/**
* Comparison implementation for this object type.
*
* @param o The other object to compare to.
* @return -1, 0, 1 if this object is less than, equal, or greather than the other object.
*/
@Override
public int compareTo(Object o) {
SortedClassProbability other = (SortedClassProbability) o;
if (this.probability < other.probability) {
return -1;
}
else if (this.probability > other.probability) {
return 1;
}
else {
return 0;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/TargetEncoderPrediction.java
|
package hex.genmodel.easy.prediction;
/**
 * Target-encoder transformation output.
 */
public class TargetEncoderPrediction {
  // One encoded value per transformed (categorical) input column.
  // NOTE(review): ordering presumably follows the model's column order - confirm against the encoder.
  public double[] transformations;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/UpliftBinomialModelPrediction.java
|
package hex.genmodel.easy.prediction;
/**
 * Uplift binomial model prediction.
 */
public class UpliftBinomialModelPrediction extends AbstractPrediction {
  // Raw prediction array as produced by the uplift model.
  // NOTE(review): exact layout (uplift estimate vs. per-group probabilities) is not visible here - confirm against the scoring code.
  public double[] predictions;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/Word2VecPrediction.java
|
package hex.genmodel.easy.prediction;
import java.util.HashMap;
/**
 * Word2Vec model prediction: the embedding vector for each requested word.
 */
public class Word2VecPrediction extends AbstractPrediction {
  // Maps each input word to its embedding vector.
  public HashMap<String, float[]> wordEmbeddings;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/easy/prediction/package-info.java
|
/**
* Prediction types that can be returned by generated POJO and MOJO models.
*
* Every model has a model category returned by getModelCategory().
* The model creates predictions of the appropriate kind.
*/
package hex.genmodel.easy.prediction;
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/tools/BuildPipeline.java
|
package hex.genmodel.tools;
import hex.genmodel.MojoPipelineBuilder;
import java.io.File;
import java.util.*;
/**
 * Command line tool that combines several MOJO models into a single MOJO pipeline.
 *
 * The main model is identified automatically: it is the single input model whose
 * columns do not receive predictions from any mapping (see {@link #findMainModel()}).
 */
public class BuildPipeline {
  private File _output;                                       // pipeline file to write
  private Map<String, File> _input;                           // model alias -> MOJO file
  private List<MojoPipelineBuilder.MappingSpec> _mappings;    // prediction -> input column mappings

  public static void main(String[] args) {
    // Parse command line arguments
    BuildPipeline main = new BuildPipeline();
    main.parseArgs(args);
    // Run the main program
    try {
      main.run();
    } catch (Exception e) {
      System.err.println("ERROR: " + e.getMessage());
      e.printStackTrace();
      System.exit(2);
    }
  }

  /** Registers all sub-models and mappings with the builder and writes the pipeline file. */
  private void run() throws Exception {
    String mainModelAlias = findMainModel();
    MojoPipelineBuilder builder = new MojoPipelineBuilder();
    for (Map.Entry<String, File> e : _input.entrySet()) {
      if (! mainModelAlias.equals(e.getKey())) {
        builder.addModel(e.getKey(), e.getValue());
      }
    }
    builder
        .addMappings(_mappings)
        .addMainModel(mainModelAlias, _input.get(mainModelAlias))
        .buildPipeline(_output);
  }

  /**
   * Identifies the alias of the main model: the only input model that has no output mappings.
   *
   * @return alias of the main model
   * @throws IllegalStateException when zero or more than one candidate exists
   */
  private String findMainModel() {
    Set<String> subModels = new HashSet<>();
    for (MojoPipelineBuilder.MappingSpec spec : _mappings) {
      subModels.add(spec._modelAlias);
    }
    Set<String> candidates = new HashSet<>();
    for (String alias : _input.keySet()) {
      if (! subModels.contains(alias)) {
        candidates.add(alias);
      }
    }
    if (candidates.size() != 1) {
      throw new IllegalStateException("Main model cannot be identified, " +
          "main should be the only model that doesn't have output mappings. Candidates: " + candidates.toString());
    }
    return candidates.iterator().next();
  }

  /** Prints the usage message and terminates the JVM with exit code 1. */
  private static void usage() {
    System.out.println("");
    System.out.println("Usage: java [...java args...] hex.genmodel.tools.BuildPipeline ");
    System.out.println(" --mapping <inputMapping1> <inputMapping2> ... --output <outputFile> --input <inputFile1> <inputFile2> ...");
    System.out.println("");
    System.out.println(" --mapping Mapping of model predictions to main model inputs.");
    System.out.println(" Example: Specify 'CLUSTER=clustering:0' to use a model defined in a MOJO file 'clustering.zip'");
    System.out.println(" and map the predicted cluster (output 0) to input column 'CLUSTER' of the main model.");
    System.out.println(" --input List of input MOJO files representing both the main model and the prerequisite models.");
    System.out.println(" --output Name of the generated MOJO pipeline file.");
    System.out.println("");
    System.out.println(" Input mappings are specified in format '<columnName>=<modelAlias>:<predictionIndex>'.");
    System.out.println("");
    System.out.println(" Model alias is based on the name of the MOJO file.");
    System.out.println(" For example, a MOJO stored in 'glm_model.zip' will have the alias 'glm_model'.");
    System.out.println("");
    System.out.println("Note: There is no need to specify which of the MOJO model represents the main model. The tool");
    System.out.println("automatically identifies the main model as the one that doesn't have any output mappings.");
    System.out.println("");
    System.exit(1);
  }

  /**
   * Parses the command line and populates _mappings, _output and _input.
   * Exits via usage() on any invalid or missing argument.
   */
  private void parseArgs(String[] args) {
    try {
      for (int i = 0; i < args.length; i++) {
        String s = args[i];
        if (s.equals("--mapping")) {
          List<String> mappingSpec = readArgValues(args, i + 1);
          _mappings = new ArrayList<>(mappingSpec.size());
          for (String spec : mappingSpec) {
            try {
              _mappings.add(MojoPipelineBuilder.MappingSpec.parse(spec));
            } catch (Exception e) {
              // Fixed unbalanced parenthesis in the original message ("('spec'.")
              throw new IllegalArgumentException("Invalid mapping specified ('" + spec + "')." +
                  " Please use format '<columnName>=<modelAlias>:<predictionIndex>'.");
            }
          }
          i += mappingSpec.size();
        } else if (s.equals("--output")) {
          List<String> outputFile = readArgValues(args, i + 1);
          if (outputFile.size() != 1) {
            throw new IllegalArgumentException("Invalid specification of the output file (" + outputFile.toString() + "). " +
                "Please specify only a single output file.");
          }
          _output = new File(outputFile.get(0));
          i += 1;
        } else if (s.equals("--input")) {
          List<String> inputFiles = readArgValues(args, i + 1);
          if (inputFiles.size() < 2) {
            throw new IllegalArgumentException("Pipeline needs at least 2 input files, only " + inputFiles.size() + " specified.");
          }
          _input = makeAliases(inputFiles);
          i += inputFiles.size();
        } else {
          System.out.println("ERROR: Unknown command line argument: " + s);
          usage();
        }
      }
    } catch (Exception e) {
      System.err.println("ERROR: " + e.getMessage());
      e.printStackTrace();
      usage();
    }
    // BUGFIX: the missing-argument messages were swapped in the original
    // (_input == null reported a missing '--output' and vice versa).
    if (_input == null) {
      System.err.println("ERROR: Missing mandatory argument '--input'");
      usage();
    }
    if (_output == null) {
      System.err.println("ERROR: Missing mandatory argument '--output'");
      usage();
    }
    if (_mappings == null) {
      System.err.println("ERROR: Missing mandatory argument '--mapping'");
      usage();
    }
  }

  /**
   * Derives a model alias for each file path: the file name with its extension stripped.
   * E.g. "models/glm_model.zip" becomes alias "glm_model".
   */
  private Map<String, File> makeAliases(List<String> paths) {
    Map<String, File> aliases = new HashMap<>(paths.size());
    for (String path : paths) {
      File f = new File(path);
      String name = f.getName();
      int extIndex = name.lastIndexOf(".");
      String alias = extIndex >= 0 ? name.substring(0, extIndex) : name;
      aliases.put(alias, f);
    }
    return aliases;
  }

  /** Collects consecutive argument values starting at startIdx, up to the next "--" flag. */
  private static List<String> readArgValues(String[] args, int startIdx) {
    List<String> params = new LinkedList<>();
    for (int i = startIdx; i < args.length; i++) {
      if (args[i].startsWith("--"))
        break;
      params.add(args[i]);
    }
    return params;
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/tools/MojoPrinter.java
|
package hex.genmodel.tools;
/**
 * Common interface of MOJO printing tools (e.g. dot/json/raw/png tree renderers).
 */
public interface MojoPrinter {
  /** Supported output formats. */
  enum Format {
    dot, json, raw, png
  }

  /** Executes the printer after parseArgs() has been called. */
  void run() throws Exception;

  /** Parses command line arguments and configures the printer. */
  void parseArgs(String[] args);

  /** @return true when this printer implementation can produce the given format. */
  boolean supportsFormat(Format format);
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/tools/MungeCsv.java
|
package hex.genmodel.tools;
import hex.genmodel.GenMunger;
import hex.genmodel.easy.RowData;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileReader;
import java.io.FileWriter;
/**
* Simple driver program for reading a CSV file and munging it.
*
* This driver program is used as a test harness by several tests in the testdir_javamunge directory.
* <p></p>
* See the top-of-tree master version of this file <a href="https://github.com/h2oai/h2o-3/blob/master/h2o-genmodel/src/main/java/hex/genmodel/tools/MungeCsv.java" target="_blank">here on github</a>.
*/
/**
 * Simple driver program for reading a CSV file and munging it.
 *
 * This driver program is used as a test harness by several tests in the testdir_javamunge directory.
 * <p></p>
 * See the top-of-tree master version of this file <a href="https://github.com/h2oai/h2o-3/blob/master/h2o-genmodel/src/main/java/hex/genmodel/tools/MungeCsv.java" target="_blank">here on github</a>.
 */
public class MungeCsv {
  private static String assemblyClassName;   // fully-qualified GenMunger subclass to instantiate
  private static String inputCSVFileName;
  private static String outputCSVFileName;
  private static int haveHeaders = -1;       // 1 when --header was given; required

  /** Prints the usage message and terminates the JVM with exit code 1. */
  private static void usage() {
    System.out.println("");
    // BUGFIX: the usage text advertised '--model' but parseArgs() only accepts '--munger'.
    System.out.println("usage: java [...java args...] hex.genmodel.tools.MungeCsv --header --munger assemblyClassName --input inputCSVFileName --output outputCSVFileName");
    System.out.println("");
    System.out.println(" assembly class name is something like AssemblyPojo_bleehbleehbleeh.");
    System.out.println("");
    System.out.println(" inputCSVFileName is the test data set.");
    System.out.println(" Specifying --header is required for h2o-3.");
    System.out.println("");
    System.out.println(" outputCSVFileName is the munged data set (one row per data set row).");
    System.out.println("");
    System.exit(1);
  }

  /** Parses command line arguments; exits via usage() when a mandatory argument is missing. */
  private static void parseArgs(String[] args) {
    for (int i = 0; i < args.length; i++) {
      String s = args[i];
      switch( s ) {
        case "--munger":
          i++;
          if (i >= args.length) usage();
          assemblyClassName = args[i];
          break;
        case "--input":
          i++;
          if (i >= args.length) usage();
          inputCSVFileName = args[i];
          break;
        case "--output":
          i++;
          if (i >= args.length) usage();
          outputCSVFileName = args[i];
          break;
        case "--header":
          haveHeaders = 1;
          break;
        default:
          // skip
          System.out.println("bad param... skipping.");
      }
    }
    if (haveHeaders != 1) {
      System.out.println("ERROR: header not specified");
      usage();
    }
    if (assemblyClassName == null) {
      // BUGFIX: message now names the actual flag ('--munger'), not '--model'.
      System.out.println("ERROR: munger not specified");
      usage();
    }
    if (inputCSVFileName == null) {
      System.out.println("ERROR: input not specified");
      usage();
    }
    if (outputCSVFileName == null) {
      System.out.println("ERROR: output not specified");
      usage();
    }
  }

  /**
   * This CSV parser is as bare bones as it gets.
   * Our test data doesn't have funny quoting, spacing, or other issues.
   * Can't handle cases where the number of data columns is less than the number of header columns.
   *
   * @return the munged row, or null for an empty line or a column-count mismatch
   */
  private static RowData parseDataRow(String line, GenMunger munger) {
    if( line.isEmpty() )  // isEmpty() already covers the redundant equals("") check
      return null;
    // Split on commas that are outside of double quotes.
    String[] inputData = line.split(",(?=([^\"]*\"[^\"]*\")*[^\"]*$)|(,)", -1);
    // NOTE: String.split never produces null elements, so no null-scrubbing loop is needed.
    if( inputData.length != munger.inNames().length )
      return null;
    return munger.fillDefault(inputData);
  }

  /**
   * CSV reader and predictor test program.
   *
   * @param args Command-line args.
   * @throws Exception on any unrecoverable failure (reflection, I/O)
   */
  public static void main(String[] args) throws Exception {
    parseArgs(args);
    GenMunger rawMunger;
    rawMunger = (hex.genmodel.GenMunger) Class.forName(assemblyClassName).newInstance();
    BufferedReader input = new BufferedReader(new FileReader(inputCSVFileName));
    BufferedWriter output = new BufferedWriter(new FileWriter(outputCSVFileName));
    // Emit outputCSV column names.
    String[] rawHeader = rawMunger.outNames();
    StringBuilder header = new StringBuilder();
    for(int i=0;i<rawHeader.length;++i) {
      header.append("\"").append(rawHeader[i]).append("\"");
      if( i < rawHeader.length - 1 ) header.append(",");
    }
    output.write(header.toString());
    output.write("\n");
    // Loop over inputCSV one row at a time.
    int lineNum = 0;
    String line;
    try {
      while ((line = input.readLine()) != null) {
        lineNum++;
        // skip the header.
        if (lineNum == 1)
          continue;
        // Parse the CSV line. Somewhat handles quoted commas. But this ain't no parser test!
        RowData row;
        try {
          row = parseDataRow(line, rawMunger);
        } catch( NumberFormatException nfe) {
          nfe.printStackTrace();
          System.out.println("Failed to parse row: " + lineNum );
          throw new RuntimeException();
        }
        RowData mungedRow = rawMunger.fit(row);
        for(int i=0; i<rawMunger.outNames().length;++i) {
          // Missing munged rows are emitted as NaN for every output column.
          Object val = mungedRow==null?Double.NaN:mungedRow.get(rawMunger.outNames()[i]);
          if( val instanceof Double ) output.write(String.valueOf(val));
          else output.write("\"" + val + "\"");
          if( i < rawMunger.outNames().length - 1) output.write(",");
        }
        output.write("\n");
      }
    }
    catch (Exception e) {
      System.out.println("Caught exception on line " + lineNum);
      System.out.println("");
      e.printStackTrace();
      System.exit(1);
    }
    finally {
      // Clean up.
      output.close();
      input.close();
    }
    // Predictions were successfully generated. Calling program can now compare them with something.
    System.exit(0);
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/tools/PredictCsv.java
|
package hex.genmodel.tools;
import au.com.bytecode.opencsv.CSVReader;
import hex.ModelCategory;
import hex.genmodel.GenModel;
import hex.genmodel.MojoModel;
import hex.genmodel.algos.glrm.GlrmMojoModel;
import hex.genmodel.algos.tree.SharedTreeMojoModel;
import hex.genmodel.easy.EasyPredictModelWrapper;
import hex.genmodel.easy.RowData;
import hex.genmodel.easy.prediction.*;
import hex.genmodel.utils.ArrayUtils;
import java.io.BufferedWriter;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
/**
* Simple driver program for reading a CSV file and making predictions. Added support for separators that are
* not commas. User needs to add the --separator separator_string to the input call. Do not escape
* the special Java characters, I will do it for you.
*
* This driver program is used as a test harness by several tests in the testdir_javapredict directory.
* <p></p>
* See the top-of-tree master version of this file <a href="https://github.com/h2oai/h2o-3/blob/master/h2o-genmodel/src/main/java/hex/genmodel/tools/PredictCsv.java" target="_blank">here on github</a>.
*/
public class PredictCsv {
private final String inputCSVFileName;          // CSV file to read rows from
private final String outputCSVFileName;         // CSV file to write predictions to
private final boolean useDecimalOutput;         // true: decimal doubles; false: hex doubles
private final char separator;                   // CSV field separator character
private final boolean setInvNumNA;              // convert invalid numbers to NA
private final boolean getTreePath;              // output leaf node assignments instead of predictions
private final boolean predictContributions;     // output SHAP contributions
private final boolean predictCalibrated;        // also output calibrated probabilities
private final boolean returnGLRMReconstruct;    // GLRM: output reconstructed data instead of factors
private final int glrmIterNumber;               // GLRM: number of iterations for scoring
private final boolean outputHeader;             // write a header row to the output CSV
// Model instance
private EasyPredictModelWrapper modelWrapper;
/**
 * Configures a predictor; all flags are stored as-is and interpreted by run().
 * Instances are created via the command line (main) or via make() in tests.
 */
private PredictCsv(
    String inputCSVFileName, String outputCSVFileName,
    boolean useDecimalOutput, char separator, boolean setInvNumNA,
    boolean getTreePath, boolean predictContributions, boolean predictCalibrated,
    boolean returnGLRMReconstruct, int glrmIterNumber,
    boolean outputHeader) {
  this.inputCSVFileName = inputCSVFileName;
  this.outputCSVFileName = outputCSVFileName;
  this.useDecimalOutput = useDecimalOutput;
  this.separator = separator;
  this.setInvNumNA = setInvNumNA;
  this.getTreePath = getTreePath;
  this.predictContributions = predictContributions;
  this.predictCalibrated = predictCalibrated;
  this.returnGLRMReconstruct = returnGLRMReconstruct;
  this.glrmIterNumber = glrmIterNumber;
  this.outputHeader = outputHeader;
}
/**
 * Entry point: runs the main predictor, then (optionally) any concurrent predictors
 * in a fixed-size thread pool. Exits 1 on any failure, 0 on success.
 */
public static void main(String[] args) {
  PredictCsvCollection predictors = buildPredictCsv(args);
  PredictCsv main = predictors.main;
  // Run the main program
  try {
    main.run();
  } catch (Exception e) {
    System.out.println("Predict error: " + e.getMessage());
    System.out.println();
    e.printStackTrace();
    System.exit(1);
  }
  if (predictors.concurrent.length > 0) {
    try {
      // One thread per concurrent predictor; invokeAll blocks until all complete.
      ExecutorService executor = Executors.newFixedThreadPool(predictors.concurrent.length);
      List<PredictCsvCallable> callables = new ArrayList<>(predictors.concurrent.length);
      for (int i = 0; i < predictors.concurrent.length; i++) {
        callables.add(new PredictCsvCallable(predictors.concurrent[i]));
      }
      // Each callable returns the exception it caught (or null); count the failures.
      int numExceptions = 0;
      for (Future<Exception> future : executor.invokeAll(callables)) {
        Exception e = future.get();
        if (e != null) {
          e.printStackTrace();
          numExceptions++;
        }
      }
      if (numExceptions > 0) {
        throw new Exception("Some predictors failed (#failed=" + numExceptions + ")");
      }
    } catch (Exception e) {
      System.out.println("Concurrent predict error: " + e.getMessage());
      System.out.println();
      e.printStackTrace();
      System.exit(1);
    }
  }
  // Predictions were successfully generated.
  System.exit(0);
}
// Only meant to be used in tests
public static PredictCsv make(String[] args, GenModel model) {
final PredictCsvCollection predictorCollection = buildPredictCsv(args);
if (predictorCollection.concurrent.length != 0) {
throw new UnsupportedOperationException("Predicting with concurrent predictors is not supported in programmatic mode.");
}
final PredictCsv predictor = predictorCollection.main;
if (model != null) {
try {
predictor.setModelWrapper(model);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
return predictor;
}
/**
 * Assembles a RowData from one split CSV line; cells whose text denotes a missing
 * value ("", "NA", "N/A", "-") are simply left out of the row. Extra cells beyond
 * the known column names (and vice versa) are ignored.
 */
private static RowData formatDataRow(String[] splitLine, String[] inputColumnNames) {
  RowData row = new RowData();
  int count = Math.min(inputColumnNames.length, splitLine.length);
  for (int col = 0; col < count; col++) {
    String cell = splitLine[col];
    boolean missing = cell.isEmpty() || cell.equals("NA") || cell.equals("N/A") || cell.equals("-");
    if (!missing) {
      row.put(inputColumnNames[col], cell);
    }
  }
  return row;
}
/**
 * Renders a double for CSV output: NaN becomes "NA"; otherwise decimal or
 * hexadecimal text depending on the useDecimalOutput setting.
 */
private String myDoubleToString(double d) {
  if (Double.isNaN(d)) {
    return "NA";
  }
  if (useDecimalOutput) {
    return Double.toString(d);
  }
  return Double.toHexString(d);
}
/** Writes the header line of decision-path column names for a tree-based model. */
private void writeTreePathNames(BufferedWriter output) throws Exception {
  String[] columnNames = ((SharedTreeMojoModel) modelWrapper.m).getDecisionPathNames();
  writeColumnNames(output, columnNames);
}
/**
 * Writes the header row for calibrated-probability mode: all regular output
 * names followed by "cal_"-prefixed copies of the class-probability columns
 * (outputNames[0] is the predicted label and gets no calibrated counterpart).
 */
private void writeCalibratedOutputNames(BufferedWriter output) throws Exception {
  String[] outputNames = modelWrapper.m.getOutputNames();
  String[] calibOutputNames = new String[outputNames.length - 1];
  for (int i = 0; i < calibOutputNames.length; i++) {
    calibOutputNames[i] = "cal_" + outputNames[i + 1];
  }
  writeColumnNames(output, ArrayUtils.append(outputNames, calibOutputNames));
}
/** Writes the header row for SHAP-contributions mode (one column per feature plus bias). */
private void writeContributionNames(BufferedWriter output) throws Exception {
  writeColumnNames(output, modelWrapper.getContributionNames());
}
/**
 * Writes the given column names as a single comma-separated header row;
 * the caller is responsible for the trailing newline.
 *
 * @param output      writer for the output CSV
 * @param columnNames names to emit; an empty array now writes nothing
 *                    (the previous implementation indexed columnNames[-1]
 *                    and threw ArrayIndexOutOfBoundsException)
 * @throws Exception if writing fails
 */
private void writeColumnNames(BufferedWriter output, String[] columnNames) throws Exception {
  // String.join places the separators and is safe for the empty-array case.
  output.write(String.join(",", columnNames));
}
/**
 * Scores every row of the input CSV with the wrapped model and writes one
 * prediction row per input row to the output CSV. The output layout depends
 * on the model category and on the mode flags (leaf assignment, SHAP
 * contributions, calibrated probabilities, GLRM reconstruction).
 *
 * NOTE(review): FileReader/FileWriter use the platform default charset
 * (pre-Java 18) - confirm inputs are expected to be in that encoding.
 *
 * @throws Exception wrapping any parse/predict failure, with the 1-based
 *                   line number of the offending input row
 */
public void run() throws Exception {
  ModelCategory category = modelWrapper.getModelCategory();
  CSVReader reader = new CSVReader(new FileReader(inputCSVFileName), separator);
  BufferedWriter output = new BufferedWriter(new FileWriter(outputCSVFileName));
  // Emit outputCSV column names.
  if (outputHeader) {
    switch (category) {
      case Binomial:
      case Multinomial:
      case Regression:
        // Mode flags are mutually exclusive in practice; checked in priority order.
        if (getTreePath) {
          writeTreePathNames(output);
        } else if (predictContributions) {
          writeContributionNames(output);
        } else if (predictCalibrated) {
          writeCalibratedOutputNames(output);
        } else
          writeHeader(modelWrapper.m.getOutputNames(), output);
        break;
      case DimReduction: // will write factor or the predicted value depending on what the user wants
        if (returnGLRMReconstruct) {
          int datawidth;
          String[] colnames = this.modelWrapper.m.getNames();
          // Width of the reconstructed row comes from the GLRM permutation.
          datawidth = ((GlrmMojoModel) modelWrapper.m)._permutation.length;
          int lastData = datawidth - 1;
          for (int index = 0; index < datawidth; index++) { // add the numerical column names
            output.write("reconstr_" + colnames[index]);
            if (index < lastData)
              output.write(',');
          }
        } else
          writeHeader(modelWrapper.m.getOutputNames(), output);
        break;
      default:
        writeHeader(modelWrapper.m.getOutputNames(), output);
    }
    output.write("\n");
  }
  // Loop over inputCSV one row at a time.
  //
  int lineNum=1; // count number of lines of input dataset file parsed
  try {
    String[] inputColumnNames;
    String[] splitLine;
    //Reader in the column names here.
    if ((splitLine = reader.readNext()) != null) {
      inputColumnNames = splitLine;
      checkMissingColumns(inputColumnNames);
    }
    else // file empty, throw an error
      throw new Exception("Input dataset file is empty!");
    while ((splitLine = reader.readNext()) != null) {
      // Parse the CSV line. Don't handle quoted commas. This isn't a parser test.
      RowData row = formatDataRow(splitLine, inputColumnNames);
      // Do the prediction.
      // Emit the result to the output file.
      // NOTE(review): if the model declares an offset column that is absent
      // (or unparseable) in a row, row.get() returns null and this throws
      // NullPointerException/NumberFormatException - confirm inputs always
      // carry the offset column for offset models.
      String offsetColumn = modelWrapper.m.getOffsetName();
      double offset = offsetColumn==null ? 0 : Double.parseDouble((String) row.get(offsetColumn));
      switch (category) {
        case AutoEncoder: { // write the expanded predictions out
          AutoEncoderModelPrediction p = modelWrapper.predictAutoEncoder(row);
          for (int i=0; i < p.reconstructed.length; i++) {
            output.write(myDoubleToString(p.reconstructed[i]));
            if (i < p.reconstructed.length-1)
              output.write(',');
          }
          break;
        }
        case Binomial: {
          BinomialModelPrediction p = modelWrapper.predictBinomial(row, offset);
          if (getTreePath) {
            writeTreePaths(p.leafNodeAssignments, output);
          } else if (predictContributions) {
            writeContributions(p.contributions, output);
          } else {
            // Label first, then one probability per class.
            output.write(p.label);
            output.write(",");
            for (int i = 0; i < p.classProbabilities.length; i++) {
              if (i > 0) {
                output.write(",");
              }
              output.write(myDoubleToString(p.classProbabilities[i]));
            }
            if (predictCalibrated) {
              // Calibrated probabilities are appended after the raw ones;
              // NA is written when the model carries no calibration.
              for (int i = 0; i < p.classProbabilities.length; i++) {
                output.write(",");
                double calibProb = p.calibratedClassProbabilities != null ?
                    p.calibratedClassProbabilities[i] : Double.NaN;
                output.write(myDoubleToString(calibProb));
              }
            }
          }
          break;
        }
        case Multinomial: {
          MultinomialModelPrediction p = modelWrapper.predictMultinomial(row);
          if (getTreePath) {
            writeTreePaths(p.leafNodeAssignments, output);
          } else {
            output.write(p.label);
            output.write(",");
            for (int i = 0; i < p.classProbabilities.length; i++) {
              if (i > 0) {
                output.write(",");
              }
              output.write(myDoubleToString(p.classProbabilities[i]));
            }
          }
          break;
        }
        case Ordinal: {
          OrdinalModelPrediction p = modelWrapper.predictOrdinal(row, offset);
          output.write(p.label);
          output.write(",");
          for (int i = 0; i < p.classProbabilities.length; i++) {
            if (i > 0) {
              output.write(",");
            }
            output.write(myDoubleToString(p.classProbabilities[i]));
          }
          break;
        }
        case Clustering: {
          ClusteringModelPrediction p = modelWrapper.predictClustering(row);
          output.write(myDoubleToString(p.cluster));
          break;
        }
        case Regression: {
          RegressionModelPrediction p = modelWrapper.predictRegression(row, offset);
          if (getTreePath) {
            writeTreePaths(p.leafNodeAssignments, output);
          } else if (predictContributions) {
            writeContributions(p.contributions, output);
          } else
            output.write(myDoubleToString(p.value));
          break;
        }
        case CoxPH: {
          CoxPHModelPrediction p = modelWrapper.predictCoxPH(row, offset);
          output.write(myDoubleToString(p.value));
          break;
        }
        case DimReduction: {
          DimReductionModelPrediction p = modelWrapper.predictDimReduction(row);
          double[] out;
          if (returnGLRMReconstruct) {
            out = p.reconstructed; // reconstructed A
          } else {
            out = p.dimensions; // x factors
          }
          int lastOne = out.length-1;
          for (int i=0; i < out.length; i++) {
            output.write(myDoubleToString(out[i]));
            if (i < lastOne)
              output.write(',');
          }
          break;
        }
        case AnomalyDetection: {
          AnomalyDetectionPrediction p = modelWrapper.predictAnomalyDetection(row);
          double[] rawPreds = p.toPreds();
          for (int i = 0; i < rawPreds.length - 1; i++) {
            output.write(myDoubleToString(rawPreds[i]));
            output.write(',');
          }
          output.write(myDoubleToString(rawPreds[rawPreds.length - 1]));
          break;
        }
        default:
          throw new Exception("Unknown model category " + category);
      }
      output.write("\n");
      lineNum++;
    }
  }
  catch (Exception e) {
    // Preserve the cause and report which input line failed.
    throw new Exception("Prediction failed on line " + lineNum, e);
  } finally {
    // Clean up.
    output.close();
    reader.close();
  }
}
/** Writes the output-column header: names comma-separated, no trailing comma or newline. */
private void writeHeader(String[] colNames, BufferedWriter output) throws Exception {
  output.write(colNames[0]);
  int idx = 1;
  while (idx < colNames.length) {
    output.write(",");
    output.write(colNames[idx]);
    idx++;
  }
}
/** Writes one decision-path string per tree, comma-separated, no trailing comma. */
private void writeTreePaths(String[] treePaths, BufferedWriter output) throws Exception {
  int last = treePaths.length - 1;
  for (int i = 0; i < last; i++) {
    output.write(treePaths[i] + ",");
  }
  output.write(treePaths[last]);
}
/** Writes SHAP contribution values comma-separated; writes nothing for an empty array. */
private void writeContributions(float[] contributions, BufferedWriter output) throws Exception {
  String sep = "";
  for (float contribution : contributions) {
    output.write(sep);
    output.write(myDoubleToString(contribution));
    sep = ",";
  }
}
/**
 * Wraps the raw model in an EasyPredictModelWrapper, translating the flags
 * captured on this instance into wrapper configuration.
 *
 * @param genModel POJO/MOJO model to wrap
 * @throws IOException if wrapper construction fails
 */
private void setModelWrapper(GenModel genModel) throws IOException {
  EasyPredictModelWrapper.Config config = new EasyPredictModelWrapper.Config()
      .setModel(genModel)
      .setConvertUnknownCategoricalLevelsToNa(true)
      .setConvertInvalidNumbersToNa(setInvNumNA);
  if (getTreePath)
    config.setEnableLeafAssignment(true);
  if (predictContributions)
    config.setEnableContributions(true);
  if (returnGLRMReconstruct)
    config.setEnableGLRMReconstrut(true); // (sic: spelling comes from the wrapper API)
  if (glrmIterNumber > 0) // set GLRM Mojo iteration number
    config.setGLRMIterNumber(glrmIterNumber);
  setModelWrapper(new EasyPredictModelWrapper(config));
}
/** Installs an already-configured wrapper (shared by concurrent predictors). */
private void setModelWrapper(EasyPredictModelWrapper modelWrapper) {
  this.modelWrapper = modelWrapper;
}
/** Prints command-line help to stdout and terminates the JVM with exit code 1; never returns. */
private static void usage() {
  System.out.println();
  System.out.println("Usage: java [...java args...] hex.genmodel.tools.PredictCsv --mojo mojoName");
  System.out.println("             --pojo pojoName --input inputFile --output outputFile --separator sepStr --decimal --setConvertInvalidNum");
  System.out.println();
  System.out.println("     --mojo    Name of the zip file containing model's MOJO.");
  System.out.println("     --pojo    Name of the java class containing the model's POJO. Either this ");
  System.out.println("               parameter or --model must be specified.");
  System.out.println("     --input   text file containing the test data set to score.");
  System.out.println("     --output  Name of the output CSV file with computed predictions.");
  System.out.println("     --separator Separator to be used in input file containing test data set.");
  System.out.println("     --decimal Use decimal numbers in the output (default is to use hexademical).");
  System.out.println("     --setConvertInvalidNum Will call .setConvertInvalidNumbersToNa(true) when loading models.");
  System.out.println("     --leafNodeAssignment will show the leaf node assignment for tree based models instead of" +
      " prediction results");
  System.out.println("     --predictContributions will output prediction contributions (Shapley values) for tree based" +
      " models instead of regular model predictions");
  System.out.println("     --glrmReconstruct will return the reconstructed dataset for GLRM mojo instead of X factor derived from the dataset.");
  System.out.println("     --glrmIterNumber integer indicating number of iterations to go through when constructing X factor derived from the dataset.");
  System.out.println("     --testConcurrent integer (for testing) number of concurrent threads that will be making predictions.");
  System.out.println();
  // Exits here so callers can treat usage() as terminal.
  System.exit(1);
}
/**
 * Compares the parsed CSV header against the model's expected columns and
 * warns on stdout about (a) model columns missing from the input (response
 * column excluded) and (b) input columns the model will not use. Never fails.
 */
private void checkMissingColumns(final String[] parsedColumnNamesArr) {
  final String[] modelColumnNames = modelWrapper.m._names;
  // Start with every parsed column marked "unused", then tick off matches.
  final Set<String> unusedColumns = new HashSet<>(parsedColumnNamesArr.length);
  Collections.addAll(unusedColumns, parsedColumnNamesArr);
  final List<String> missingColumns = new ArrayList<>();
  for (String modelColumn : modelColumnNames) {
    final boolean present = unusedColumns.remove(modelColumn);
    if (!present && !modelColumn.equals(modelWrapper.m._responseColumn)) {
      missingColumns.add(modelColumn);
    }
  }
  if (!missingColumns.isEmpty()) {
    System.out.println("There were " + missingColumns.size()
        + " missing columns found in the input data set: {"
        + String.join(",", missingColumns) + "}");
  }
  if (!unusedColumns.isEmpty()) {
    System.out.println("Detected " + unusedColumns.size()
        + " unused columns in the input data set: {"
        + String.join(",", unusedColumns) + "}");
  }
}
/**
 * Holder for the main predictor plus the (possibly empty) set of predictors
 * created for --testConcurrent; the concurrent instances share the main
 * predictor's model wrapper.
 */
private static class PredictCsvCollection {
  private final PredictCsv main;
  private final PredictCsv[] concurrent;
  private PredictCsvCollection(PredictCsv main, PredictCsv[] concurrent) {
    this.main = main;
    this.concurrent = concurrent;
  }
}
/**
 * Parses command-line arguments, loads the model (POJO/MOJO/either), and
 * builds the main predictor plus any --testConcurrent clones. On any
 * failure it prints the stack trace and exits via usage().
 */
private static PredictCsvCollection buildPredictCsv(String[] args) {
  try {
    PredictCsvBuilder builder = new PredictCsvBuilder();
    builder.parseArgs(args);
    final GenModel genModel;
    switch (builder.loadType) {
      case -1:
        // Embedded mode: the model instance is supplied later by the caller.
        genModel = null;
        break;
      case 0:
        genModel = loadPojo(builder.pojoMojoModelNames);
        break;
      case 1:
        genModel = loadMojo(builder.pojoMojoModelNames);
        break;
      case 2:
        genModel = loadModel(builder.pojoMojoModelNames);
        break;
      default:
        throw new IllegalStateException("Unexpected value of loadType = " + builder.loadType);
    }
    PredictCsv mainPredictCsv = builder.newPredictCsv();
    if (genModel != null) {
      mainPredictCsv.setModelWrapper(genModel);
    }
    PredictCsv[] concurrentPredictCsvs = new PredictCsv[builder.testConcurrent];
    for (int id = 0; id < concurrentPredictCsvs.length; id++) {
      PredictCsv concurrentPredictCsv = builder.newConcurrentPredictCsv(id);
      concurrentPredictCsv.setModelWrapper(mainPredictCsv.modelWrapper); // re-use both the wrapper and the MOJO
      concurrentPredictCsvs[id] = concurrentPredictCsv;
    }
    return new PredictCsvCollection(mainPredictCsv, concurrentPredictCsvs);
  } catch (Exception e) {
    e.printStackTrace();
    usage();
    // usage() calls System.exit(1), so this line is unreachable by design.
    throw new IllegalStateException("Should not be reachable");
  }
}
/**
 * Instantiates a generated POJO scoring class by name via its no-arg constructor.
 *
 * @param className fully qualified name of the POJO class on the classpath
 * @return a fresh GenModel instance
 * @throws Exception if the class is missing, not instantiable, or not a GenModel
 */
private static GenModel loadPojo(String className) throws Exception {
  // Class.newInstance() has been deprecated since Java 9 (it propagates
  // undeclared checked exceptions); use the explicit Constructor instead.
  return (GenModel) Class.forName(className).getDeclaredConstructor().newInstance();
}
/**
 * Loads a MOJO model from the given zip file path.
 *
 * @throws IOException if the file cannot be read or is not a valid MOJO
 */
private static GenModel loadMojo(String modelName) throws IOException {
  return MojoModel.load(modelName);
}
/**
 * Loads a model given either a MOJO file path or a POJO class name:
 * tries the MOJO first and falls back to POJO loading on IOException.
 */
private static GenModel loadModel(String modelName) throws Exception {
  try {
    return loadMojo(modelName);
  } catch (IOException e) {
    return loadPojo(modelName); // may throw an exception too
  }
}
/**
 * Mutable accumulator for command-line options; parseArgs() fills the fields
 * and newPredictCsv()/newConcurrentPredictCsv() turn them into instances.
 */
private static class PredictCsvBuilder {
  // For PredictCsv
  private String inputCSVFileName;
  private String outputCSVFileName;
  private boolean useDecimalOutput;
  private char separator = ',';           // separator used to delimite input datasets
  private boolean setInvNumNA;            // enable .setConvertInvalidNumbersToNa(true)
  private boolean getTreePath;            // enable tree models to obtain the leaf-assignment information
  private boolean predictContributions;   // enable tree models to predict contributions instead of regular predictions
  private boolean predictCalibrated;      // output also calibrated probabilities
  private boolean returnGLRMReconstruct;  // for GLRM, return x factor by default unless set this to true
  private int glrmIterNumber = -1;        // for GLRM, default to 100.
  private boolean outputHeader = true;    // should we write-out header to output files?
  // For Model Loading
  private int loadType = 0;               // 0: load pojo, 1: load mojo, 2: load model, -1: special value when PredictCsv is used embedded and instance of Model is passed directly
  private String pojoMojoModelNames = ""; // store Pojo/Mojo/Model names
  private int testConcurrent = 0;

  /** Builds the main predictor from the accumulated options. */
  private PredictCsv newPredictCsv() {
    return new PredictCsv(inputCSVFileName, outputCSVFileName, useDecimalOutput, separator, setInvNumNA,
        getTreePath, predictContributions, predictCalibrated, returnGLRMReconstruct, glrmIterNumber, outputHeader);
  }

  /** Builds a concurrent clone; its output file gets a ".id" suffix so threads don't clobber each other. */
  private PredictCsv newConcurrentPredictCsv(int id) {
    return new PredictCsv(inputCSVFileName, outputCSVFileName + "." + id, useDecimalOutput, separator, setInvNumNA,
        getTreePath, predictContributions, predictCalibrated, returnGLRMReconstruct, glrmIterNumber, outputHeader);
  }

  /**
   * Parses the CLI arguments into the builder fields. Flags take no value;
   * other options consume the following argument. Unknown options (or a
   * missing value) fall through to usage(), which exits the JVM.
   */
  private void parseArgs(String[] args) {
    for (int i = 0; i < args.length; i++) {
      String s = args[i];
      if (s.equals("--header"))
        continue; // accepted for backwards compatibility; header output is on by default
      if (s.equals("--decimal"))
        useDecimalOutput = true;
      else if (s.equals("--glrmReconstruct"))
        returnGLRMReconstruct = true;
      else if (s.equals("--setConvertInvalidNum"))
        setInvNumNA = true;
      else if (s.equals("--leafNodeAssignment"))
        getTreePath = true;
      else if (s.equals("--predictContributions")) {
        predictContributions = true;
      } else if (s.equals("--predictCalibrated")) {
        predictCalibrated = true;
      } else if (s.equals("--embedded")) {
        loadType = -1;
      } else {
        // Value-carrying option: consume the next argument.
        i++;
        if (i >= args.length) usage();
        String sarg = args[i];
        switch (s) {
          case "--model":
            pojoMojoModelNames = sarg;
            loadType = 2;
            break;
          case "--mojo":
            pojoMojoModelNames = sarg;
            loadType = 1;
            break;
          case "--pojo":
            pojoMojoModelNames = sarg;
            loadType = 0;
            break;
          case "--input":
            inputCSVFileName = sarg;
            break;
          case "--output":
            outputCSVFileName = sarg;
            break;
          case "--separator":
            // Only the last character is used, so escaped/quoted separators work.
            separator = sarg.charAt(sarg.length() - 1);
            break;
          case "--glrmIterNumber":
            glrmIterNumber = Integer.parseInt(sarg);
            break;
          case "--testConcurrent":
            testConcurrent = Integer.parseInt(sarg);
            break;
          case "--outputHeader":
            outputHeader = Boolean.parseBoolean(sarg);
            break;
          default:
            System.out.println("ERROR: Unknown command line argument: " + s);
            usage();
        }
      }
    }
  }
}
/**
 * Adapts a PredictCsv run for ExecutorService.invokeAll(). The failure (if
 * any) is RETURNED rather than thrown, so the caller's futures never raise
 * ExecutionException and it can simply count non-null results as failures.
 */
private static class PredictCsvCallable implements Callable<Exception> {
  private final PredictCsv predictCsv;
  private PredictCsvCallable(PredictCsv predictCsv) {
    this.predictCsv = predictCsv;
  }
  @Override
  public Exception call() throws Exception {
    try {
      predictCsv.run();
    } catch (Exception e) {
      return e; // null return signals success
    }
    return null;
  }
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/tools/PrintMojo.java
|
package hex.genmodel.tools;
import com.google.gson.*;
import com.google.gson.reflect.TypeToken;
import hex.genmodel.MojoModel;
import hex.genmodel.algos.tree.ConvertTreeOptions;
import hex.genmodel.algos.gbm.GbmMojoModel;
import hex.genmodel.algos.tree.SharedTreeGraph;
import hex.genmodel.algos.tree.SharedTreeGraphConverter;
import hex.genmodel.algos.tree.TreeBackedMojoModel;
import water.genmodel.AbstractBuildVersion;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.lang.reflect.Type;
import java.util.*;
import java.util.List;
import static water.util.JavaVersionUtils.JAVA_VERSION;
/**
* Print dot (graphviz) representation of one or more trees in a DRF or GBM model.
*/
public class PrintMojo implements MojoPrinter {
public static final AbstractBuildVersion ABV = AbstractBuildVersion.getBuildVersion();
protected MojoModel genModel;
protected Format format = Format.dot;
protected int treeToPrint = -1;
protected int maxLevelsToPrintPerEdge = 10;
protected boolean detail = false;
protected String outputFileName = null;
protected String optionalTitle = null;
protected PrintTreeOptions pTreeOptions;
protected boolean internal;
protected boolean floatToDouble;
protected final String tmpOutputFileName = "tmpOutputFileName.gv";
/**
 * CLI entry point: selects a MojoPrinter implementation, parses arguments,
 * prints the model, and exits (0 on success, 1 on bad printer/args, 2 on
 * runtime failure).
 */
public static void main(String[] args) {
  MojoPrinter mojoPrinter = null;
  if (JAVA_VERSION.isKnown() && JAVA_VERSION.getMajor() > 7) {
    // On Java 8+ pick a printer via ServiceLoader; note that when several
    // printers support the requested format, the LAST one discovered wins.
    ServiceLoader<MojoPrinter> mojoPrinters = ServiceLoader.load(MojoPrinter.class);
    for (MojoPrinter printer : mojoPrinters) {
      if (printer.supportsFormat(getFormat(args))) {
        mojoPrinter = printer;
      }
    }
    if (mojoPrinter == null) {
      System.out.println("No supported MojoPrinter for the format required found. Please make sure you are using h2o-genmodel.jar for executing this tool.");
      System.exit(1);
    }
  } else {
    // Java 7: ServiceLoader-provided printers may need Java 8 features, fall back to this class.
    mojoPrinter = new PrintMojo();
  }
  // Parse command line arguments
  mojoPrinter.parseArgs(args);
  // Run the main program
  try {
    mojoPrinter.run();
  } catch (Exception e) {
    e.printStackTrace();
    System.exit(2);
  }
  // Success
  System.exit(0);
}
/**
 * Reports whether this base printer can emit the given format. It handles
 * every format except {@code png}, which requires the imaging-capable
 * printer discovered via ServiceLoader on Java 8+.
 *
 * @param format requested output format (may be null for an invalid --format)
 * @return true for any format other than png (including null)
 */
@Override
public boolean supportsFormat(Format format) {
  // Collapsed from an if/else returning boolean literals; null-safe because
  // the constant is the equals() receiver.
  return !Format.png.equals(format);
}
/**
 * Pre-scans the CLI arguments for "--format" so main() can select a printer
 * before full argument parsing.
 *
 * @return the requested Format, or null when absent/invalid (invalid values
 *         are reported later by parseArgs())
 */
static Format getFormat(String[] args) {
  for (int i = 0; i < args.length; i++) {
    if (args[i].equals("--format")) {
      try {
        return Format.valueOf(args[++i]);
      }
      catch (Exception e) {
        // invalid format will be handled in parseArgs()
        return null;
      }
    }
  }
  return null;
}
/** Loads the MOJO zip at the given path into {@link #genModel}. */
private void loadMojo(String modelName) throws IOException {
  genModel = MojoModel.load(modelName);
}
/** Prints build info plus command-line help to stdout and exits with code 1; never returns. */
protected static void usage() {
  System.out.println("Build git branch: " + ABV.branchName());
  System.out.println("Build git hash: " + ABV.lastCommitHash());
  System.out.println("Build git describe: " + ABV.describe());
  System.out.println("Build project version: " + ABV.projectVersion());
  System.out.println("Built by: '" + ABV.compiledBy() + "'");
  System.out.println("Built on: '" + ABV.compiledOn() + "'");
  System.out.println();
  System.out.println("Emit a human-consumable graph of a model for use with dot (graphviz).");
  System.out.println("The currently supported model types are DRF, GBM and XGBoost.");
  System.out.println();
  System.out.println("Usage:  java [...java args...] hex.genmodel.tools.PrintMojo [--tree n] [--levels n] [--title sss] [-o outputFileName]");
  System.out.println();
  System.out.println("    --format        Output format.  For .png output at least Java 8 is required.");
  System.out.println("                    dot|json|raw|png [default dot]");
  System.out.println();
  System.out.println("    --tree          Tree number to print.");
  System.out.println("                    [default all]");
  System.out.println();
  System.out.println("    --levels        Number of levels per edge to print.");
  System.out.println("                    [default 10]");
  System.out.println();
  System.out.println("    --title         (Optional) Force title of tree graph.");
  System.out.println();
  System.out.println("    --detail        Specify to print additional detailed information like node numbers.");
  System.out.println();
  System.out.println("    --input | -i    Input mojo file.");
  System.out.println();
  System.out.println("    --output | -o   Output filename.  Taken as a directory name in case of .png format and multiple trees to visualize.");
  System.out.println("                    [default stdout]");
  System.out.println("    --decimalplaces | -d    Set decimal places of all numerical values.");
  System.out.println();
  System.out.println("    --fontsize | -f    Set font sizes of strings.");
  System.out.println();
  System.out.println("    --internal    Internal H2O representation of the decision tree (splits etc.) is used for generating the GRAPHVIZ format.");
  System.out.println();
  System.out.println();
  System.out.println("Example:");
  System.out.println();
  System.out.println("    (brew install graphviz)");
  System.out.println("    java -cp h2o.jar hex.genmodel.tools.PrintMojo --tree 0 -i model_mojo.zip -o model.gv -f 20 -d 3");
  System.out.println("    dot -Tpng model.gv -o model.png");
  System.out.println("    open model.png");
  System.out.println();
  System.exit(1);
}
/**
 * Parses CLI arguments into this printer's fields and builds
 * {@link PrintTreeOptions}. Invalid values print an error and exit; an
 * unexpected exception falls through to usage(), which also exits.
 */
public void parseArgs(String[] args) {
  // Local accumulators for the PrintTreeOptions built at the end.
  int nPlaces = -1;               // -1 means "do not round"
  int fontSize = 14;              // default size is 14
  boolean setDecimalPlaces = false;
  try {
    for (int i = 0; i < args.length; i++) {
      String s = args[i];
      switch (s) {
        case "--format":
          i++;
          if (i >= args.length) usage();
          s = args[i];
          try {
            format = Format.valueOf(s);
          }
          catch (Exception e) {
            System.out.println("ERROR: invalid --format argument (" + s + ")");
            System.exit(1);
          }
          break;
        case "--tree":
          i++;
          if (i >= args.length) usage();
          s = args[i];
          try {
            treeToPrint = Integer.parseInt(s);
          }
          catch (Exception e) {
            System.out.println("ERROR: invalid --tree argument (" + s + ")");
            System.exit(1);
          }
          break;
        case "--levels":
          i++;
          if (i >= args.length) usage();
          s = args[i];
          try {
            maxLevelsToPrintPerEdge = Integer.parseInt(s);
          }
          catch (Exception e) {
            System.out.println("ERROR: invalid --levels argument (" + s + ")");
            System.exit(1);
          }
          break;
        case "--title":
          i++;
          if (i >= args.length) usage();
          optionalTitle = args[i];
          break;
        case "--detail":
          detail = true;
          break;
        case "--input":
        case "-i":
          i++;
          if (i >= args.length) usage();
          s = args[i];
          // Loads the MOJO eagerly; an IOException here is caught below and routes to usage().
          loadMojo(s);
          break;
        case "--fontsize":
        case "-f":
          i++;
          if (i >= args.length) usage();
          s = args[i];
          fontSize = Integer.parseInt(s);
          break;
        case "--decimalplaces":
        case "-d":
          i++;
          if (i >= args.length) usage();
          setDecimalPlaces=true;
          s = args[i];
          nPlaces = Integer.parseInt(s);
          break;
        case "--raw":
          format = Format.raw;
          break;
        case "--internal":
          internal = true;
          break;
        case "--floattodouble":
          floatToDouble = true;
          break;
        case "-o":
        case "--output":
          i++;
          if (i >= args.length) usage();
          outputFileName = args[i];
          break;
        default:
          System.out.println("ERROR: Unknown command line argument: " + s);
          usage();
          break;
      }
    }
    pTreeOptions = new PrintTreeOptions(setDecimalPlaces, nPlaces, fontSize, internal);
  } catch (Exception e) {
    e.printStackTrace();
    usage();
  }
}
/** Verifies a MOJO was loaded (-i/--input); otherwise prints help and exits. */
protected void validateArgs() {
  if (genModel == null) {
    System.out.println("ERROR: Must specify -i");
    usage();
  }
}
/**
 * Converts the loaded model to a SharedTreeGraph and prints it in the
 * requested format to the output file (or stdout when no -o was given).
 *
 * @throws Exception if validation, file creation, or tree conversion fails
 */
public void run() throws Exception {
  validateArgs();
  PrintStream os;
  if (outputFileName != null) {
    os = new PrintStream(new FileOutputStream(outputFileName));
  }
  else {
    os = System.out;
  }
  try {
    if (genModel instanceof SharedTreeGraphConverter) {
      SharedTreeGraphConverter treeBackedModel = (SharedTreeGraphConverter) genModel;
      ConvertTreeOptions options = new ConvertTreeOptions().withTreeConsistencyCheckEnabled();
      final SharedTreeGraph g = treeBackedModel.convert(treeToPrint, null, options);
      switch (format) {
        case raw:
          g.print();
          break;
        case dot:
          g.printDot(os, maxLevelsToPrintPerEdge, detail, optionalTitle, pTreeOptions);
          break;
        case json:
          if (!(treeBackedModel instanceof TreeBackedMojoModel)) {
            System.out.println("ERROR: Printing XGBoost MOJO as JSON not supported");
            System.exit(1);
          }
          printJson((TreeBackedMojoModel) treeBackedModel, g, os);
          break;
      }
    }
    else {
      System.out.println("ERROR: Unsupported MOJO type");
      System.exit(1);
    }
  } finally {
    // Fix: the file-backed PrintStream was previously never closed (leaked
    // file handle / potentially unflushed output). Never close System.out.
    if (os != System.out) {
      os.close();
    }
  }
}
/**
 * Collects model metadata (versions, category, class labels, tree counts,
 * and GBM family/link when applicable) into an insertion-ordered map for
 * the "params" section of the JSON output.
 */
private Map<String, Object> getParamsAsJson(TreeBackedMojoModel tree) {
  Map<String, Object> params = new LinkedHashMap<>();
  params.put("h2o_version", genModel._h2oVersion);
  params.put("mojo_version", genModel._mojo_version);
  params.put("algo", genModel._algoName);
  params.put("model_category", genModel._category.toString());
  params.put("classifier", genModel.isClassifier());
  params.put("supervised", genModel._supervised);
  params.put("nfeatures", genModel._nfeatures);
  params.put("nclasses", genModel._nclasses);
  params.put("balance_classes", genModel._balanceClasses);
  params.put("n_tree_groups", tree.getNTreeGroups());
  params.put("n_trees_in_group", tree.getNTreesPerGroup());
  params.put("base_score", tree.getInitF());
  if (genModel.isClassifier()) {
    // Response domain doubles as the class-label list.
    String[] responseValues = genModel.getDomainValues(genModel.getResponseIdx());
    params.put("class_labels", responseValues);
  }
  if (genModel instanceof GbmMojoModel) {
    GbmMojoModel m = (GbmMojoModel) genModel;
    params.put("family", m._family.toString());
    params.put("link_function", m._link_function.toString());
  }
  return params;
}
/**
 * Builds the "domainValues" JSON section: one {colId, colName, values} entry
 * per categorical input column (numeric columns have a null domain and are
 * skipped; the response column - the last slot - is excluded).
 */
private List<Object> getDomainValuesAsJSON() {
  List<Object> domainValues = new ArrayList<>();
  String[][] allDomains = genModel.getDomainValues();
  // each col except response
  for (int col = 0; col < allDomains.length - 1; col++) {
    String[] domain = allDomains[col];
    if (domain != null) {
      Map<String, Object> entry = new LinkedHashMap<>();
      entry.put("colId", col);
      entry.put("colName", genModel._names[col]);
      entry.put("values", domain);
      domainValues.add(entry);
    }
  }
  return domainValues;
}
/**
 * Serializes the model (params, domain values, trees, optional title) as
 * pretty-printed JSON to the given stream. With --floattodouble, floats are
 * widened to double during serialization to avoid float-precision artifacts
 * in the emitted numbers.
 */
private void printJson(TreeBackedMojoModel mojo, SharedTreeGraph trees, PrintStream os) {
  Map<String, Object> json = new LinkedHashMap<>();
  json.put("params", getParamsAsJson(mojo));
  json.put("domainValues", getDomainValuesAsJSON());
  json.put("trees", trees.toJson());
  if (optionalTitle != null) {
    json.put("title", optionalTitle);
  }
  GsonBuilder gsonBuilder = new GsonBuilder().setPrettyPrinting();
  if (floatToDouble) {
    Type floatType = new TypeToken<Float>(){}.getType();
    JsonSerializer<Float> serializer = new FloatCastingSerializer();
    gsonBuilder.registerTypeAdapter(floatType, serializer);
  }
  Gson gson = gsonBuilder.create();
  os.print(gson.toJson(json));
}
/** Gson adapter that widens Float values to double before serialization (see --floattodouble). */
static class FloatCastingSerializer implements JsonSerializer<Float> {
  @Override
  public JsonElement serialize(Float src, Type typeOfSrc, JsonSerializationContext context) {
    return new JsonPrimitive(src.doubleValue());
  }
}
/** Rendering options for tree printing (rounding, font size, internal representation). */
public static class PrintTreeOptions {
  public boolean _setDecimalPlace;
  public int _nPlaces;
  public int _fontSize;
  public boolean _internal;

  /**
   * @param setdecimalplaces whether numerical values should be rounded
   * @param nplaces          decimal places to round to (used only when setdecimalplaces is true)
   * @param fontsize         font size for strings in the rendered graph
   * @param internal         use H2O's internal tree representation for GRAPHVIZ output
   */
  public PrintTreeOptions(boolean setdecimalplaces, int nplaces, int fontsize, boolean internal) {
    _setDecimalPlace = setdecimalplaces;
    // Bug fix: the previous code assigned "_nPlaces = _setDecimalPlace ? nplaces : _nPlaces",
    // which left _nPlaces at the int default 0 when rounding was NOT requested - and
    // roundNPlace() treats 0 as "round to zero decimal places". Use -1 as the explicit
    // "rounding disabled" sentinel (the same default SharedTreeSubgraph.nPlaces uses).
    _nPlaces = _setDecimalPlace ? nplaces : -1;
    _fontSize = fontsize;
    _internal = internal;
  }

  /** Rounds {@code value} to {@code _nPlaces} decimal places; returns it unchanged when rounding is disabled. */
  public float roundNPlace(float value) {
    if (_nPlaces < 0)
      return value;
    double sc = Math.pow(10, _nPlaces);
    return (float) (Math.round(value*sc)/sc);
  }
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/tools/package-info.java
|
/**
* Tools that use generated POJO and MOJO models.
*/
package hex.genmodel.tools;
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/utils/ArrayUtils.java
|
package hex.genmodel.utils;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Random;
/**
* Copied (partially) from water.util.ArrayUtils
*/
public class ArrayUtils {
/** @return a new array of the given length with every element set to NaN. */
public static double[] nanArray(int len) {
  double[] arr = new double[len];
  Arrays.fill(arr, Double.NaN);
  return arr;
}
/** @return the Euclidean (L2) norm of {@code x}. */
public static double l2norm(double[] x) {
  return Math.sqrt(l2norm2(x));
}
/** @return the squared L2 norm (sum of squares) of {@code x}. */
public static double l2norm2(double [] x){
  return l2norm2(x, false);
}
/**
 * @param skipLast when true the final element is excluded from the sum
 * @return the squared L2 norm of {@code x} (optionally without its last element)
 */
public static double l2norm2(double [] x, boolean skipLast){
  int limit = skipLast ? x.length - 1 : x.length;
  double acc = 0;
  for (int i = 0; i < limit; i++) {
    acc += x[i] * x[i];
  }
  return acc;
}
/**
 * Flattens a 2-D array into a single array by concatenating the rows in order.
 * Null rows (other than the first) are skipped.
 *
 * @return the flattened array, or null when the input is null or empty
 * NOTE(review): assumes arr[0] is non-null - a null first row would throw
 * NullPointerException at Arrays.copyOf; confirm callers guarantee this.
 */
public static double[] flat(double[][] arr) {
  if (arr == null) return null;
  if (arr.length == 0) return null;
  int tlen = 0;
  for (double[] t : arr) tlen += (t != null) ? t.length : 0;
  // Seed the result with row 0, then append the remaining rows.
  double[] result = Arrays.copyOf(arr[0], tlen);
  int j = arr[0].length;
  for (int i = 1; i < arr.length; i++) {
    if (arr[i] == null)
      continue;
    System.arraycopy(arr[i], 0, result, j, arr[i].length);
    j += arr[i].length;
  }
  return result;
}
/** @return first differences of {@code from}: result[i] = from[i+1] - from[i]. */
public static double[] eleDiff(final double[] from) {
  double[] diffs = new double[from.length - 1];
  for (int i = 0; i < diffs.length; i++) {
    diffs[i] = from[i + 1] - from[i];
  }
  return diffs;
}
/** @return a new array with {@code val} subtracted from every element of {@code from}. */
public static int[] subtract(final int[] from, int val ) {
  int[] shifted = new int[from.length];
  for (int i = 0; i < shifted.length; i++) {
    shifted[i] = from[i] - val;
  }
  return shifted;
}
/** @return a new array with element-wise differences: result[i] = from[i] - val[i]. */
public static int[] subtract(final int[] from, int[] val ) {
  int[] diffs = new int[from.length];
  for (int i = 0; i < diffs.length; i++) {
    diffs[i] = from[i] - val[i];
  }
  return diffs;
}
/**
 * Multiplies every element of {@code nums} by {@code n} IN PLACE and returns
 * the same (mutated) array; a null input is returned unchanged.
 */
public static double[] mult(double[] nums, double n) {
  assert !Double.isInfinite(n) : "Trying to multiply " + Arrays.toString(nums) + " by  " + n; // Almost surely not what you want
  if (nums != null)
    for (int i=0; i<nums.length; i++) nums[i] *= n;
  return nums;
}
/** @return an array of {@code arrayLen} consecutive ints starting at {@code startIndex}. */
public static int[] arrayInitRange(int arrayLen, int startIndex) {
  int[] range = new int[arrayLen];
  for (int offset = 0; offset < arrayLen; offset++) {
    range[offset] = startIndex + offset;
  }
  return range;
}
/**
 * Checks whether a categorical column is effectively boolean: its domain is
 * either exactly {true, false} (in any order, case-insensitive) or a single
 * level equal to "true" or "false".
 *
 * @param domains the column's domain levels (may be null)
 * @return true when the domain describes a boolean column
 */
public static boolean isBoolColumn(String[] domains) {
  if (domains == null)
    return false;
  if (domains.length == 1)
    return domains[0].equalsIgnoreCase("true") || domains[0].equalsIgnoreCase("false");
  if (domains.length == 2) {
    boolean trueFirst = domains[0].equalsIgnoreCase("true") && domains[1].equalsIgnoreCase("false");
    boolean falseFirst = domains[0].equalsIgnoreCase("false") && domains[1].equalsIgnoreCase("true");
    return trueFirst || falseFirst;
  }
  return false;
}
/**
 * Returns the index of the maximum element, breaking ties uniformly at
 * random via a size-1 reservoir sample over the tied positions.
 *
 * @param from values to scan (must be non-empty)
 * @param rand randomness source for tie-breaking (must not be null)
 */
public static int maxIndex(double[] from, Random rand) {
  assert rand != null;
  int result = 0;
  int maxCount = 0; // count of maximal element for a 1 item reservoir sample
  for( int i = 1; i < from.length; ++i ) {
    if( from[i] > from[result] ) {
      result = i;
      maxCount = 1;
    } else if( from[i] == from[result] ) {
      // Each of the k tied candidates ends up selected with probability 1/k.
      if( rand.nextInt(++maxCount) == 0 ) result = i;
    }
  }
  return result;
}
/** @return the index of the largest element; ties resolve to the earliest index. */
public static int maxIndex(double[] from) {
  int best = 0;
  for (int i = 1; i < from.length; ++i) {
    if (from[i] > from[best]) {
      best = i;
    }
  }
  return best;
}
/**
 * Sort an integer array of indices based on values
 * Updates indices in place, keeps values the same
 * Uses a default insertion-sort cutoff of 500 elements.
 * @param idxs indices
 * @param values values
 */
public static void sort(int[] idxs, double[] values) {
  sort(idxs, values, 500);
}
/**
 * Sorts {@code idxs} in place so that values[idxs[0]] <= values[idxs[1]] <= ...
 * Small arrays (below {@code cutoff}) use insertion sort; larger ones box the
 * indices and use Arrays.sort with a comparator.
 * NOTE(review): the comparator returns 0 for NaN comparisons, which violates
 * the total-order contract if values contains NaN - confirm callers never
 * pass NaN values.
 */
public static void sort(int[] idxs, final double[] values, int cutoff) {
  if (idxs.length < cutoff) {
    //hand-rolled insertion sort
    for (int i = 0; i < idxs.length; i++) {
      for (int j = i; j > 0 && values[idxs[j - 1]] > values[idxs[j]]; j--) {
        int tmp = idxs[j];
        idxs[j] = idxs[j - 1];
        idxs[j - 1] = tmp;
      }
    }
  } else {
    // Box to Integer[] so a custom comparator can be used with Arrays.sort.
    Integer[] d = new Integer[idxs.length];
    for (int i = 0; i < idxs.length; ++i) d[i] = idxs[i];
    Arrays.sort(d, new Comparator<Integer>() {
      @Override
      public int compare(Integer x, Integer y) {
        return values[x] < values[y] ? -1 : (values[x] > values[y] ? 1 : 0);
      }
    });
    for (int i = 0; i < idxs.length; ++i) idxs[i] = d[i];
  }
}
/**
 * Sort an integer array of indices based on values
 * Updates indices in place, keeps values the same
 * Uses a default insertion-sort cutoff of 500 elements.
 * @param idxs indices
 * @param values values
 */
public static void sort(final int[] idxs, final float[] values, int fromIndex, int toIndex, boolean abs, int increasing) {
  sort(idxs, values, fromIndex, toIndex, abs, increasing, 500);
}
/**
 * Sorts the [fromIndex, toIndex) slice of {@code idxs} in place by the
 * referenced float values. {@code abs} compares absolute values;
 * {@code increasing} acts as a sign multiplier (+1 ascending, -1 descending).
 * Slices below {@code cutoff} use insertion sort; larger ones box and use
 * Arrays.sort with a comparator.
 */
public static void sort(final int[] idxs, final float[] values, int fromIndex, int toIndex, final boolean abs, final int increasing, int cutoff) {
  assert toIndex > fromIndex: "toIndex must be > fromIndex";
  if ((toIndex - fromIndex) < cutoff) {
    //hand-rolled insertion sort
    for (int i = fromIndex; i < toIndex; i++) {
      // the long line means: Sorted part of the array will be compared as absolute values if necessary
      for (int j = i; j > fromIndex && (abs ? Math.abs(values[idxs[j - 1]]) : values[idxs[j - 1]])*increasing > (abs ? Math.abs(values[idxs[j]]) : values[idxs[j]])*increasing; j--) {
        int tmp = idxs[j];
        idxs[j] = idxs[j - 1];
        idxs[j - 1] = tmp;
      }
    }
  } else {
    Integer[] d = new Integer[idxs.length];
    for (int i = 0; i < idxs.length; ++i) d[i] = idxs[i];
    // Only the [fromIndex, toIndex) range of the boxed copy is sorted.
    Arrays.sort(d, fromIndex, toIndex, new Comparator<Integer>() {
      @Override
      public int compare(Integer x, Integer y) {
        return Float.compare((abs ? Math.abs(values[x]) : values[x]) * increasing, (abs ? Math.abs(values[y]) : values[y]) * increasing);
      }
    });
    for (int i = 0; i < idxs.length; ++i) idxs[i] = d[i];
  }
}
/**
 * Concatenate two String arrays into a freshly allocated array.
 * If {@code a} is null, the varargs array {@code b} itself is returned (not a copy).
 */
public static String[] append(String[] a, String... b) {
    if (a == null)
        return b;
    String[] joined = Arrays.copyOf(a, a.length + b.length);
    System.arraycopy(b, 0, joined, a.length, b.length);
    return joined;
}
/**
 * Concatenate two arrays of String arrays into a freshly allocated array.
 * If {@code a} is null, the varargs array {@code b} itself is returned (not a copy).
 */
public static String[][] append(String[][] a, String[]... b) {
    if (a == null)
        return b;
    String[][] joined = Arrays.copyOf(a, a.length + b.length);
    System.arraycopy(b, 0, joined, a.length, b.length);
    return joined;
}
/**
 * Concatenate two int arrays into a freshly allocated array.
 * If {@code a} is null, the varargs array {@code b} itself is returned (not a copy).
 */
public static int[] append(int[] a, int... b) {
    if (a == null)
        return b;
    int[] joined = Arrays.copyOf(a, a.length + b.length);
    System.arraycopy(b, 0, joined, a.length, b.length);
    return joined;
}
/**
 * Element-wise sign: 1 for positive values, -1 for negative values, 0
 * otherwise. Note that NaN, 0.0 and -0.0 all map to 0 because they fail
 * both comparisons (deliberately NOT Math.signum, which would return
 * NaN / -0.0 for those inputs).
 */
public static double[] signum(double[] array) {
    double[] signs = new double[array.length];
    for (int i = 0; i < array.length; i++) {
        signs[i] = array[i] > 0 ? 1 : (array[i] < 0 ? -1 : 0);
    }
    return signs;
}
/**
 * First-order difference: result[i] = array[i+1] - array[i].
 *
 * @param array input values
 * @return array of length max(0, array.length - 1); empty input yields an
 *         empty result (previously it threw NegativeArraySizeException from
 *         {@code new double[-1]})
 */
public static double[] difference(double[] array) {
    if (array.length == 0)
        return new double[0]; // guard: new double[-1] would throw
    double[] diff = new double[array.length - 1];
    for (int i = 0; i < diff.length; i++) {
        diff[i] = array[i + 1] - array[i];
    }
    return diff;
}
/***
 * Carry out multiplication of row vector {@code a} and matrix b, where the
 * transpose of the matrix ({@code bT}) is given, and store a * b in
 * {@code result}.
 *
 * @param a      row vector of length n
 * @param bT     transpose of matrix b; dimensions result.length x n
 * @param result output vector, fully overwritten with a * b
 */
public static void multArray(double[] a, double[][] bT, double[] result) {
    int resultDim = result.length;
    int vectorSize = a.length;
    Arrays.fill(result, 0.0); // kept so behavior is unchanged even if result aliases a
    for (int index = 0; index < resultDim; index++) {
        double[] row = bT[index]; // row of bT = column of b; hoisted out of inner loop
        double sum = 0.0;
        for (int innerIndex = 0; innerIndex < vectorSize; innerIndex++) {
            sum += a[innerIndex] * row[innerIndex];
        }
        result[index] = sum;
    }
}
/**
 * Provide array from start to end in steps of 1
 * @param start beginning value (inclusive)
 * @param end ending value (inclusive)
 * @return the integers {start, start+1, ..., end}
 */
public static int[] range(int start, int end) {
    int[] out = new int[end - start + 1];
    for (int i = 0; i < out.length; i++) {
        out[i] = start + i;
    }
    return out;
}
/**
 * Dot product of (data - p) with n, i.e. the sum over i of (data[i]-p[i])*n[i].
 * @param data vector (1 x n)
 * @param p vector (1 x n)
 * @param n vector (1 x n)
 * @return Result of matrix operation (data - p) * n
 */
public static double subAndMul(double[] data, double[] p, double[] n) {
    double acc = 0;
    for (int i = 0; i < data.length; i++) {
        acc += (data[i] - p[i]) * n[i];
    }
    return acc;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/utils/ByteBufferWrapper.java
|
package hex.genmodel.utils;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
/**
 * Simplified version and drop-in replacement of water.util.AutoBuffer.
 * Provides sequential primitive reads over a fixed byte array in native
 * byte order.
 */
public final class ByteBufferWrapper {
  // Backing buffer for sequential reads.
  // Set to null to indicate the ByteBufferWrapper is closed.
  ByteBuffer _bb;

  /** Read from a fixed byte[]; should not be closed. */
  public ByteBufferWrapper(byte[] buf) {
    assert buf != null : "null fed to ByteBuffer.wrap";
    _bb = ByteBuffer.wrap(buf, 0, buf.length).order(ByteOrder.nativeOrder());
  }

  /** Current read offset within the backing array. */
  public int position() {
    return _bb.position();
  }

  /** True while unread bytes remain. */
  public boolean hasRemaining() {
    return _bb.hasRemaining();
  }

  /** Skip over some bytes in the byte buffer. Caller is responsible for not
   * reading off end of the bytebuffer; generally this is easy for
   * array-backed autobuffers and difficult for i/o-backed bytebuffers. */
  public void skip(int skip) {
    int target = _bb.position() + skip;
    _bb.position(target);
  }

  // -----------------------------------------------
  // Unlike original getX() methods, these will not attempt to auto-widen the buffer.

  /** Read one unsigned byte (0..255). */
  public int get1U() {
    return _bb.get() & 0xFF;
  }

  /** Read 2 bytes as a char, in the buffer's (native) byte order. */
  public char get2() {
    return _bb.getChar();
  }

  /** Read 3 bytes, assembled low-byte-first regardless of buffer order. */
  public int get3() {
    int b0 = get1U();
    int b1 = get1U();
    int b2 = get1U();
    return b0 | (b1 << 8) | (b2 << 16);
  }

  /** Read 4 bytes as an int, in the buffer's (native) byte order. */
  public int get4() {
    return _bb.getInt();
  }

  /** Read 4 bytes as a float, in the buffer's (native) byte order. */
  public float get4f() {
    return _bb.getFloat();
  }

  /** Read 8 bytes as a double, in the buffer's (native) byte order. */
  public double get8d() {
    return _bb.getDouble();
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/utils/DistributionFamily.java
|
package hex.genmodel.utils;
/**
 * Distribution family used when scoring a model.
 * Used to be `hex.Distribution.Family`.
 * NOTE: The moving to hex.DistributionFamily is not possible without resolving dependencies between
 * h2o-genmodel and h2o-algos project
 * NOTE(review): do not reorder or remove constants — serialized models may
 * reference them by name or ordinal; confirm before changing.
 */
public enum DistributionFamily {
  AUTO, // model-specific behavior
  bernoulli,
  quasibinomial,
  modified_huber,
  multinomial,
  ordinal,
  gaussian,
  poisson,
  gamma,
  tweedie,
  huber,
  laplace,
  quantile,
  fractionalbinomial,
  negativebinomial,
  custom
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/utils/GenmodelBitSet.java
|
package hex.genmodel.utils;
/**
 * GenmodelBitSet - bitset that "lives" on top of an external byte array. It does not necessarily span the entire
 * byte array, and thus essentially provides a "bitset-view" on the underlying data stream.
 *
 * This is a bastardized copy of water.utils.IcedBitSet
 *
 * NOTE(review): several methods are frozen for specific MOJO format versions
 * (see "SET IN STONE" markers below) — their byte/bit layout must not change.
 */
public class GenmodelBitSet {
  private byte[] _val;  // Holder of the bits, perhaps also holding other unrelated data
  private int _byteoff; // Number of bytes skipped before starting to count bits
  private int _nbits;   // Number of bits in this bitset
  private int _bitoff;  // Number of bits discarded from beginning (inclusive min)

  /** Bitset of {@code nbits} bits with no leading-bit offset. */
  public GenmodelBitSet(int nbits) {
    this(nbits, 0);
  }

  /**
   * Bitset of {@code nbits} bits whose first tracked bit is {@code bitoff}.
   * Small sets are normalized to a fixed 32-bit, zero-offset layout.
   */
  public GenmodelBitSet(int nbits, int bitoff) {
    // For small bitsets, just use a no-offset fixed-length format
    if (bitoff + nbits <= 32) {
      bitoff = 0;
      nbits = 32;
    }
    fill(nbits <= 0 ? null : new byte[bytes(nbits)], 0, nbits, bitoff);
  }

  /** Number of bits tracked by this bitset view. */
  public int getNBits() {
    return _nbits;
  }

  // Fill in fields, with the bytes coming from some other large backing byte
  // array, which also contains other unrelated bits.
  public void fill(byte[] v, int byteoff, int nbits, int bitoff) {
    if (nbits < 0) throw new NegativeArraySizeException("nbits < 0: " + nbits);
    if (byteoff < 0) throw new IndexOutOfBoundsException("byteoff < 0: "+ byteoff);
    if (bitoff < 0) throw new IndexOutOfBoundsException("bitoff < 0: " + bitoff);
    assert v == null || byteoff + bytes(nbits) <= v.length;
    _val = v;
    _nbits = nbits;
    _bitoff = bitoff;
    _byteoff = byteoff;
  }

  /** True when bit index {@code b} falls inside [_bitoff, _bitoff + _nbits). */
  public boolean isInRange(int b) {
    b -= _bitoff;
    return b >= 0 && b < _nbits;
  }

  /** True when bit {@code idx} is set; {@code idx} must be in range (asserted). */
  public boolean contains(int idx) {
    idx -= _bitoff;
    assert (idx >= 0 && idx < _nbits): "Must have "+_bitoff+" <= idx <= " + (_bitoff+_nbits-1) + ": " + idx;
    // idx >> 3 selects the byte, idx & 7 the bit within it (LSB-first)
    return (_val[_byteoff + (idx >> 3)] & ((byte)1 << (idx & 7))) != 0;
  }

  // Reload a small (32 bits inline, zero offset) bitset from the wrapped stream.
  public void fill2(byte[] bits, ByteBufferWrapper ab) {
    fill(bits, ab.position(), 32, 0);
    ab.skip(4); // Skip inline bitset
  }

  // Reload IcedBitSet from AutoBuffer: 2-byte bit offset, then 4-byte bit count.
  public void fill3(byte[] bits, ByteBufferWrapper ab) {
    int bitoff = ab.get2();
    int nbits = ab.get4();
    fill(bits, ab.position(), nbits, bitoff);
    ab.skip(bytes(nbits)); // Skip inline bitset
  }

  // Number of bytes needed to hold nbits bits, i.e. ceil(nbits / 8) for nbits > 0.
  private static int bytes(int nbits) {
    return ((nbits-1) >> 3) + 1;
  }

  /* SET IN STONE FOR MOJO VERSION "1.00" - DO NOT CHANGE */
  // Like contains(), but indices outside the tracked range simply return false.
  public boolean contains0(int idx) {
    if (idx < 0) throw new IndexOutOfBoundsException("idx < 0: " + idx);
    idx -= _bitoff;
    return (idx >= 0) && (idx < _nbits) &&
        (_val[_byteoff + (idx >> 3)] & ((byte)1 << (idx & 7))) != 0;
  }

  /* SET IN STONE FOR MOJO VERSION "1.10" AND OLDER - DO NOT CHANGE */
  // Legacy loader: 2-byte bit offset, then 2-byte BYTE count (not bit count).
  public void fill3_1(byte[] bits, ByteBufferWrapper ab) {
    int bitoff = ab.get2();
    int nbytes = ab.get2();
    fill_1(bits, ab.position(), nbytes<<3, bitoff);
    ab.skip(nbytes); // Skip inline bitset
  }

  /* SET IN STONE FOR MOJO VERSION "1.10" AND OLDER - DO NOT CHANGE */
  // Legacy variant of fill(); kept separate so the frozen format stays intact.
  public void fill_1(byte[] v, int byteoff, int nbits, int bitoff) {
    if (nbits < 0) throw new NegativeArraySizeException("nbits < 0: " + nbits);
    if (byteoff < 0) throw new IndexOutOfBoundsException("byteoff < 0: "+ byteoff);
    if (bitoff < 0) throw new IndexOutOfBoundsException("bitoff < 0: " + bitoff);
    assert v == null || byteoff + ((nbits-1) >> 3) + 1 <= v.length;
    _val = v;
    _nbits = nbits;
    _bitoff = bitoff;
    _byteoff = byteoff;
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/utils/IOUtils.java
|
package hex.genmodel.utils;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
 * Stream copy helpers.
 */
public class IOUtils {
  /**
   * Copy all remaining bytes from {@code source} to {@code target} using an
   * 8 KiB buffer. Neither stream is closed or flushed by this method.
   *
   * @throws IOException if reading or writing fails
   */
  public static void copyStream(InputStream source, OutputStream target) throws IOException {
    byte[] buffer = new byte[8 * 1024];
    int len;
    while ((len = source.read(buffer)) != -1) {
      target.write(buffer, 0, len);
    }
  }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/utils/LinkFunctionType.java
|
package hex.genmodel.utils;
/**
 * Link Function type
 * NOTE: The moving to hex.LinkFunctionType is not possible without resolving dependencies between
 * h2o-genmodel and h2o-algos project
 * NOTE(review): do not reorder or remove constants — serialized models may
 * reference them by name or ordinal; confirm before changing.
 */
public enum LinkFunctionType {
  log,
  logit,
  identity,
  ologit,
  ologlog,
  oprobit,
  inverse,
  tweedie
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.