| index | repo_id | file_path | content |
|---|---|---|---|
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/IClusteringModel.java |
package hex.genmodel;
/**
* Clustering Model Interface
*/
public interface IClusteringModel {
double[] score0(double[] row, double[] preds);
/**
* Calculates squared distances to all cluster centers.
*
* See {@link hex.genmodel.GenModel#KMeans_distances} for the precise definition
* of the distance metric.
*
* Pass in data as a double[], the same way as to the score0 function.
* Cluster distances will be stored in the distances[] array, and the method
* returns the index of the closest cluster. The caller can thus avoid calling
* score0(..) just to retrieve the cluster the data point belongs to.
*
* Warning: this method can modify the content of the row array (same as score0).
*
* @param row input row
* @param distances vector of distances
* @return index of closest cluster
*/
int distances(double[] row, double[] distances);
/**
* Returns the number of clusters used by this model.
* @return number of clusters
*/
int getNumClusters();
}
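// Illustrative usage sketch (not part of the original source): computing the closest
// cluster and all squared distances in a single call, instead of a separate score0(..).
class ClusteringModelExample {
  static int closestCluster(IClusteringModel model, double[] row) {
    double[] distances = new double[model.getNumClusters()];
    // fills `distances` and returns the index of the closest cluster
    return model.distances(row, distances);
  }
}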
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/IGenModel.java |
package hex.genmodel;
import hex.ModelCategory;
import java.util.EnumSet;
/**
* Interface publishing methods for generated models.
*
* This interface extends the original interface from H2O.
*/
public interface IGenModel {
/**
* Returns true for supervised models.
* @return true if this class represents supervised model.
*/
boolean isSupervised();
/**
* Returns number of input features.
* @return number of input features used for training.
*/
int nfeatures();
/**
* Returns names of input features.
* @return names of input features used for training.
*/
String[] features();
/**
* Returns the number of output classes for classifiers, or 1 for regression models. For unsupervised models, returns 0.
* @return number of output classes, or 1 for regression models
*/
int nclasses();
/** Returns this model category.
*
* @return model category
* @see hex.ModelCategory
*/
ModelCategory getModelCategory();
/**
* For models with multiple categories, returns the set of all supported categories.
*/
EnumSet<ModelCategory> getModelCategories();
}
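// Illustrative usage sketch (not part of the original source): inspecting a generated
// model through the IGenModel interface before scoring.
class GenModelInspectionExample {
  static void describe(IGenModel model) {
    System.out.println("category:   " + model.getModelCategory());
    System.out.println("supervised: " + model.isSupervised());
    System.out.println("features:   " + java.util.Arrays.toString(model.features()));
    // nclasses() is 1 for regression models and 0 for unsupervised models
    System.out.println("classes:    " + model.nclasses());
  }
}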
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/InMemoryMojoReaderBackend.java |
package hex.genmodel;
import java.io.*;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
public class InMemoryMojoReaderBackend implements MojoReaderBackend, Closeable {
private static final Map<String, byte[]> CLOSED = Collections.unmodifiableMap(new HashMap<String, byte[]>());
private Map<String, byte[]> _mojoContent;
public InMemoryMojoReaderBackend(Map<String, byte[]> mojoContent) {
_mojoContent = mojoContent;
}
@Override
public BufferedReader getTextFile(String filename) throws IOException {
checkOpen();
byte[] data = _mojoContent.get(filename);
if (data == null)
throw new IOException("MOJO doesn't contain resource " + filename);
return new BufferedReader(new InputStreamReader(new ByteArrayInputStream(data)));
}
@Override
public byte[] getBinaryFile(String filename) throws IOException {
checkOpen();
return _mojoContent.get(filename);
}
@Override
public boolean exists(String filename) {
checkOpen();
return _mojoContent.containsKey(filename);
}
@Override
public void close() throws IOException {
_mojoContent = CLOSED;
}
private void checkOpen() {
if (_mojoContent == CLOSED)
throw new IllegalStateException("ReaderBackend was already closed");
}
}
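// Illustrative usage sketch (not part of the original source): building a model from
// MOJO content that is already unpacked in memory (map of entry name to raw bytes).
class InMemoryBackendExample {
  static MojoModel fromBytes(java.util.Map<String, byte[]> unpackedMojo) throws IOException {
    InMemoryMojoReaderBackend backend = new InMemoryMojoReaderBackend(unpackedMojo);
    // ModelMojoReader.readFrom closes the backend automatically because it is Closeable
    return ModelMojoReader.readFrom(backend);
  }
}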
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/ModelMojoFactory.java |
package hex.genmodel;
import hex.genmodel.algos.coxph.CoxPHMojoReader;
import hex.genmodel.algos.deeplearning.DeeplearningMojoReader;
import hex.genmodel.algos.drf.DrfMojoReader;
import hex.genmodel.algos.ensemble.StackedEnsembleMojoReader;
import hex.genmodel.algos.gam.GamMojoReader;
import hex.genmodel.algos.gbm.GbmMojoReader;
import hex.genmodel.algos.glm.GlmMojoReader;
import hex.genmodel.algos.isoforextended.ExtendedIsolationForestMojoReader;
import hex.genmodel.algos.isotonic.IsotonicRegressionMojoReader;
import hex.genmodel.algos.pca.PCAMojoReader;
import hex.genmodel.algos.glrm.GlrmMojoReader;
import hex.genmodel.algos.isofor.IsolationForestMojoReader;
import hex.genmodel.algos.kmeans.KMeansMojoReader;
import hex.genmodel.algos.pipeline.MojoPipelineReader;
import hex.genmodel.algos.rulefit.RuleFitMojoReader;
import hex.genmodel.algos.svm.SvmMojoReader;
import hex.genmodel.algos.targetencoder.TargetEncoderMojoReader;
import hex.genmodel.algos.upliftdrf.UpliftDrfMojoReader;
import hex.genmodel.algos.word2vec.Word2VecMojoReader;
import hex.genmodel.algos.klime.KLimeMojoReader;
import java.util.ServiceLoader;
/**
* Factory class for instantiating specific ModelMojoReader classes based on the algorithm name.
*/
public class ModelMojoFactory {
public final static ModelMojoFactory INSTANCE = new ModelMojoFactory();
/** Service loader for model mojo readers.
*
* Per the ServiceLoader JavaDoc, "Instances of this class are not safe for use by multiple concurrent threads",
* so all usages of the loader are protected by a synchronized block.
*/
private final ServiceLoader<ModelMojoReader> loader;
private ModelMojoFactory() {
loader = ServiceLoader.load(ModelMojoReader.class);
}
private ModelMojoReader loadMojoReader(String algo) {
assert algo != null : "Name of algorithm should be != null!";
synchronized (loader) {
loader.reload();
for (ModelMojoReader mrb : loader) {
if (algo.equals(mrb.getModelName())) {
return mrb;
}
}
}
return null;
}
public ModelMojoReader getMojoReader(String algo) {
if (algo == null)
throw new IllegalArgumentException("Algorithm not specified.");
switch (algo) {
case "Distributed Random Forest":
return new DrfMojoReader();
case "Gradient Boosting Method":
case "Gradient Boosting Machine":
return new GbmMojoReader();
case "Generalized Low Rank Modeling":
case "Generalized Low Rank Model":
return new GlrmMojoReader();
case "Generalized Linear Modeling":
case "Generalized Linear Model":
return new GlmMojoReader();
case "Generalized Additive Model":
return new GamMojoReader();
case "Word2Vec":
return new Word2VecMojoReader();
case "TargetEncoder":
return new TargetEncoderMojoReader();
case "Isolation Forest":
return new IsolationForestMojoReader();
case "Extended Isolation Forest":
return new ExtendedIsolationForestMojoReader();
case "K-means":
return new KMeansMojoReader();
case "Deep Learning":
case "deep learning":
return new DeeplearningMojoReader();
case "Support Vector Machine (*Spark*)":
return new SvmMojoReader();
case "StackedEnsemble":
case "Stacked Ensemble":
return new StackedEnsembleMojoReader();
case "k-LIME":
return new KLimeMojoReader();
case "MOJO Pipeline":
return new MojoPipelineReader();
case "Principal Components Analysis":
return new PCAMojoReader();
case "Cox Proportional Hazards":
return new CoxPHMojoReader();
case "RuleFit":
return new RuleFitMojoReader();
case "Isotonic Regression":
return new IsotonicRegressionMojoReader();
case "Uplift Distributed Random Forest":
return new UpliftDrfMojoReader();
default:
// Try to load MOJO reader via service
ModelMojoReader mmr = loadMojoReader(algo);
if (mmr != null) {
return mmr;
} else {
throw new IllegalStateException("Algorithm `" + algo + "` is not supported by this version of h2o-genmodel. " +
"If you are using an algorithm implemented in an extension, be sure to include a jar dependency of the extension (eg.: ai.h2o:h2o-genmodel-ext-" + algo.toLowerCase() + ")");
}
}
}
}
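// Illustrative note (not part of the original source): algorithms that are not
// hardwired in the switch above are discovered through the standard Java SPI, i.e.
// an extension jar lists its reader implementation in
//   META-INF/services/hex.genmodel.ModelMojoReader
// and getMojoReader falls back to that registry:
//   ModelMojoReader reader = ModelMojoFactory.INSTANCE.getMojoReader("MyExtensionAlgo");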
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/ModelMojoReader.java |
package hex.genmodel;
import com.google.gson.JsonObject;
import hex.genmodel.algos.isotonic.IsotonicCalibrator;
import hex.genmodel.attributes.ModelAttributes;
import hex.genmodel.attributes.ModelJsonReader;
import hex.genmodel.attributes.Table;
import hex.genmodel.descriptor.ModelDescriptorBuilder;
import hex.genmodel.utils.DistributionFamily;
import hex.genmodel.utils.LinkFunctionType;
import hex.genmodel.utils.ParseUtils;
import hex.genmodel.utils.StringEscapeUtils;
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
/**
* Helper class to deserialize a model from MOJO format. This is a counterpart to `ModelMojoWriter`.
*/
public abstract class ModelMojoReader<M extends MojoModel> {
protected M _model;
protected MojoReaderBackend _reader;
private Map<String, Object> _lkv;
/**
* De-serializes a {@link MojoModel}, creating an instance of {@link MojoModel} useful for scoring
* and model evaluation.
*
* @param reader An instance of {@link MojoReaderBackend} to read from an existing MOJO. After the model is de-serialized,
* the {@link MojoReaderBackend} instance is automatically closed if it implements {@link Closeable}.
* @return De-serialized {@link MojoModel}
* @throws IOException Whenever there is an error reading the {@link MojoModel}'s data.
*/
public static MojoModel readFrom(MojoReaderBackend reader) throws IOException {
return readFrom(reader, false);
}
/**
* De-serializes a {@link MojoModel}, creating an instance of {@link MojoModel} useful for scoring
* and model evaluation.
*
* @param reader An instance of {@link MojoReaderBackend} to read from an existing MOJO
* @param readModelMetadata If true, also parses model metadata (model performance metrics, see {@link ModelAttributes}).
*                          Model metadata is not required for scoring; it is advised to leave this option disabled
*                          if you want to use the MOJO for inference only.
* @return De-serialized {@link MojoModel}
* @throws IOException Whenever there is an error reading the {@link MojoModel}'s data.
*/
public static MojoModel readFrom(MojoReaderBackend reader, final boolean readModelMetadata) throws IOException {
try {
Map<String, Object> info = parseModelInfo(reader);
if (! info.containsKey("algorithm"))
throw new IllegalStateException("Unable to find information about the model's algorithm.");
String algo = String.valueOf(info.get("algorithm"));
ModelMojoReader mmr = ModelMojoFactory.INSTANCE.getMojoReader(algo);
mmr._lkv = info;
mmr._reader = reader;
mmr.readAll(readModelMetadata);
return mmr._model;
} finally {
if (reader instanceof Closeable)
((Closeable) reader).close();
}
}
public abstract String getModelName();
//--------------------------------------------------------------------------------------------------------------------
// Inheritance interface: ModelMojoWriter subclasses are expected to override these methods to provide custom behavior
//--------------------------------------------------------------------------------------------------------------------
protected abstract void readModelData() throws IOException;
protected abstract M makeModel(String[] columns, String[][] domains, String responseColumn);
protected M makeModel(String[] columns, String[][] domains, String responseColumn, String treatmentColumn){
return makeModel(columns, domains, responseColumn);
}
/**
* Maximal version of the mojo file the current model reader supports. Follows the <code>major.minor</code>
* format, where <code>minor</code> is a 2-digit number. For example "1.00",
* "2.05", "2.13". See the README in the mojoland repository for more details.
*/
public abstract String mojoVersion();
//--------------------------------------------------------------------------------------------------------------------
// Interface for subclasses
//--------------------------------------------------------------------------------------------------------------------
/**
* Retrieves a value from the model's kv store which was previously put there using `writekv(key, value)`. We will
* attempt to cast it to your expected type, but this is obviously unsafe. Note that the value is deserialized from
* the underlying string representation using {@link ParseUtils#tryParse(String, Object)}, which occasionally may get
* the answer wrong.
* If the `key` is missing from the local kv store, null will be returned. However, when assigning to a primitive type
* this would result in an NPE, so beware.
*/
@SuppressWarnings("unchecked")
protected <T> T readkv(String key) {
return (T) readkv(key, null);
}
/**
* Retrieves the value associated with a given key. If no value is set for the key, the given default value is returned
* instead. Uses the same parsing logic as {@link ModelMojoReader#readkv(String)}. If the default value is not null,
* its type is used to help the parser determine the return type.
* @param key name of the key
* @param defVal default value
* @param <T> return type
* @return parsed value
*/
@SuppressWarnings("unchecked")
protected <T> T readkv(String key, T defVal) {
Object val = _lkv.get(key);
if (! (val instanceof RawValue))
return val != null ? (T) val : defVal;
return ((RawValue) val).parse(defVal);
}
/**
* Retrieve binary data previously saved to the mojo file using `writeblob(key, blob)`.
*/
protected byte[] readblob(String name) throws IOException {
return getMojoReaderBackend().getBinaryFile(name);
}
protected boolean exists(String name) {
return getMojoReaderBackend().exists(name);
}
/**
* Retrieve text previously saved using `startWritingTextFile` + `writeln` as an array of lines. Each line is
* trimmed to remove the leading and trailing whitespace.
*/
protected Iterable<String> readtext(String name) throws IOException {
return readtext(name, false);
}
/**
* Retrieve text previously saved using `startWritingTextFile` + `writeln` as an array of lines. Each line is
* trimmed to remove the leading and trailing whitespace. Unescapes the newline characters if enabled.
*/
protected Iterable<String> readtext(String name, boolean unescapeNewlines) throws IOException {
ArrayList<String> res = new ArrayList<>(50);
BufferedReader br = getMojoReaderBackend().getTextFile(name);
try {
String line;
while (true) {
line = br.readLine();
if (line == null) break;
if (unescapeNewlines)
line = StringEscapeUtils.unescapeNewlines(line);
res.add(line.trim());
}
br.close();
} finally {
try { br.close(); } catch (IOException e) { /* ignored */ }
}
return res;
}
protected String[] readStringArray(String name, int size) throws IOException {
String[] array = new String[size];
int i = 0;
for (String line : readtext(name, true)) {
array[i++] = line;
}
return array;
}
protected IsotonicCalibrator readIsotonicCalibrator() throws IOException {
return new IsotonicCalibrator(
readkv("calib_min_x", Double.NaN),
readkv("calib_max_x", Double.NaN),
readblobDoubles("calib/thresholds_x"),
readblobDoubles("calib/thresholds_y")
);
}
private double[] readblobDoubles(String filename) throws IOException {
byte[] bytes = readblob(filename);
final ByteBuffer bb = ByteBuffer.wrap(bytes);
int size = bb.getInt();
double[] doubles = new double[size];
for (int i = 0; i < doubles.length; i++) {
doubles[i] = bb.getDouble();
}
return doubles;
}
/**
* Reads a two-dimensional array written by the {@link hex.ModelMojoWriter#writeRectangularDoubleArray} method.
*
* Dimensions of the result are explicitly given as parameters.
*
* @param title can't be null
* @param firstSize number of rows
* @param secondSize number of columns
* @return a double[][] array with dimensions firstSize and secondSize
* @throws IOException
*/
protected double[][] readRectangularDoubleArray(String title, int firstSize, int secondSize) throws IOException {
assert null != title;
final double [][] row = new double[firstSize][secondSize];
final ByteBuffer bb = ByteBuffer.wrap(readblob(title));
for (int i = 0; i < firstSize; i++) {
for (int j = 0; j < secondSize; j++)
row[i][j] = bb.getDouble();
}
return row;
}
/**
* Reads a two-dimensional array written by the {@link hex.ModelMojoWriter#writeRectangularDoubleArray} method.
*
* Dimensions of the array are read from the mojo.
*
* @param title can't be null
* @throws IOException
*/
protected double[][] readRectangularDoubleArray(String title) throws IOException {
assert null != title;
final int size1 = readkv(title + "_size1");
final int size2 = readkv(title + "_size2");
return readRectangularDoubleArray(title, size1, size2);
}
//--------------------------------------------------------------------------------------------------------------------
// Private
//--------------------------------------------------------------------------------------------------------------------
private void readAll(final boolean readModelMetadata) throws IOException {
String[] columns = (String[]) _lkv.get("[columns]");
String[][] domains = parseModelDomains(columns.length);
boolean isSupervised = readkv("supervised");
_model = makeModel(columns, domains, isSupervised ? columns[columns.length - 1] : null, (String) readkv("treatment_column"));
_model._uuid = readkv("uuid");
_model._algoName = readkv("algo");
_model._h2oVersion = readkv("h2o_version", "unknown");
_model._category = hex.ModelCategory.valueOf((String) readkv("category"));
_model._supervised = isSupervised;
_model._nfeatures = readkv("n_features");
_model._nclasses = readkv("n_classes");
_model._balanceClasses = readkv("balance_classes");
_model._defaultThreshold = readkv("default_threshold");
_model._priorClassDistrib = readkv("prior_class_distrib");
_model._modelClassDistrib = readkv("model_class_distrib");
_model._offsetColumn = readkv("offset_column");
_model._foldColumn = readkv("fold_column");
_model._mojo_version = ((Number) readkv("mojo_version")).doubleValue();
checkMaxSupportedMojoVersion();
readModelData();
if (readModelMetadata) {
final String algoFullName = readkv("algorithm"); // The key 'algo' contains the short name, 'algorithm' is the long version
_model._modelAttributes = readModelSpecificAttributes();
_model._modelDescriptor = ModelDescriptorBuilder.makeDescriptor(_model, algoFullName, _model._modelAttributes);
}
_model._reproducibilityInformation = readReproducibilityInformation();
}
protected Table[] readReproducibilityInformation() {
final JsonObject modelJson = ModelJsonReader.parseModelJson(_reader);
if (modelJson != null && modelJson.get("output") != null) {
return ModelJsonReader.readTableArray(modelJson, "output.reproducibility_information_table");
}
return null;
}
protected ModelAttributes readModelSpecificAttributes() {
final JsonObject modelJson = ModelJsonReader.parseModelJson(_reader);
if(modelJson != null) {
return new ModelAttributes(_model, modelJson);
} else {
return null;
}
}
private static Map<String, Object> parseModelInfo(MojoReaderBackend reader) throws IOException {
Map<String, Object> info = new HashMap<>();
BufferedReader br = reader.getTextFile("model.ini");
try {
String line;
int section = 0;
int ic = 0; // Index for `columns` array
String[] columns = new String[0]; // array of column names, will be initialized later
Map<Integer, String> domains = new HashMap<>(); // map of (categorical column index => name of the domain file)
while (true) {
line = br.readLine();
if (line == null) break;
line = line.trim();
if (line.startsWith("#") || line.isEmpty()) continue;
if (line.equals("[info]"))
section = 1;
else if (line.equals("[columns]")) {
section = 2; // Enter the [columns] section
if (! info.containsKey("n_columns"))
throw new IOException("`n_columns` variable is missing in the model info.");
int n_columns = Integer.parseInt(((RawValue) info.get("n_columns"))._val);
columns = new String[n_columns];
info.put("[columns]", columns);
} else if (line.equals("[domains]")) {
section = 3; // Enter the [domains] section
info.put("[domains]", domains);
} else if (section == 1) {
// [info] section: just parse key-value pairs and store them into the `info` map.
String[] res = line.split("\\s*=\\s*", 2);
info.put(res[0], res[0].equals("uuid")? res[1] : new RawValue(res[1]));
} else if (section == 2) {
// [columns] section
if (ic >= columns.length)
throw new IOException("`n_columns` variable is too small.");
columns[ic++] = line;
} else if (section == 3) {
// [domains] section
String[] res = line.split(":\\s*", 2);
int col_index = Integer.parseInt(res[0]);
domains.put(col_index, res[1]);
}
}
br.close();
} finally {
try { br.close(); } catch (IOException e) { /* ignored */ }
}
return info;
}
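// Illustrative sketch (not part of the original source; entries are made up): the
// model.ini layout parseModelInfo expects - an [info] section of "key = value" pairs,
// a [columns] section with one column name per line, and a [domains] section with
// lines of the form "<col_index>: <n_elements> <domain_file>", e.g.:
//
//   [info]
//   algorithm = Gradient Boosting Machine
//   n_columns = 3
//
//   [columns]
//   sepal_len
//   petal_len
//   class
//
//   [domains]
//   2: 3 d000.txt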
private String[][] parseModelDomains(int n_columns) throws IOException {
final boolean escapeDomainValues = Boolean.TRUE.equals(readkv("escape_domain_values")); // The key might not exist in older MOJOs
String[][] domains = new String[n_columns][];
// noinspection unchecked
Map<Integer, String> domass = (Map<Integer, String>) _lkv.get("[domains]");
for (Map.Entry<Integer, String> e : domass.entrySet()) {
int col_index = e.getKey();
// There is a file with categories of the response column, but we ignore it.
if (col_index >= n_columns) continue;
String[] info = e.getValue().split(" ", 2);
int n_elements = Integer.parseInt(info[0]);
String domfile = info[1];
String[] domain = new String[n_elements];
try (BufferedReader br = getMojoReaderBackend().getTextFile("domains/" + domfile)) {
String line;
int id = 0; // domain elements counter
while ((line = br.readLine()) != null) {
if (escapeDomainValues) {
line = StringEscapeUtils.unescapeNewlines(line);
}
domain[id++] = line;
}
if (id != n_elements)
throw new IOException("Not enough elements in the domain file");
}
domains[col_index] = domain;
}
return domains;
}
private static class RawValue {
private final String _val;
RawValue(String val) { _val = val; }
@SuppressWarnings("unchecked")
<T> T parse(T defVal) { return (T) ParseUtils.tryParse(_val, defVal); }
@Override
public String toString() { return _val; }
}
private void checkMaxSupportedMojoVersion() throws IOException {
if(_model._mojo_version > Double.parseDouble(mojoVersion())){
throw new IOException(String.format("MOJO version incompatibility - the model MOJO version (%.2f) is higher than the current H2O version (%s) supports. Please, use a newer version of H2O to load MOJO model.", _model._mojo_version, mojoVersion()));
}
}
public static LinkFunctionType readLinkFunction(String linkFunctionTypeName, DistributionFamily family) {
if (linkFunctionTypeName != null)
return LinkFunctionType.valueOf(linkFunctionTypeName);
return defaultLinkFunction(family);
}
public static LinkFunctionType defaultLinkFunction(DistributionFamily family){
switch (family) {
case bernoulli:
case fractionalbinomial:
case quasibinomial:
case modified_huber:
case ordinal:
return LinkFunctionType.logit;
case multinomial:
case poisson:
case gamma:
case tweedie:
return LinkFunctionType.log;
default:
return LinkFunctionType.identity;
}
}
protected MojoReaderBackend getMojoReaderBackend() {
return _reader;
}
public String[] readStringArrays(int aSize, String title) throws IOException {
String[] stringArrays = new String[aSize];
int counter = 0;
for (String line : readtext(title)) {
stringArrays[counter++] = line;
}
return stringArrays;
}
}
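// Illustrative usage sketch (not part of the original source): reading a MOJO through
// an explicitly constructed backend, optionally including model metadata.
class ModelMojoReaderExample {
  static MojoModel read(String mojoPath, boolean withMetadata) throws IOException {
    MojoReaderBackend backend = MojoReaderBackendFactory.createReaderBackend(mojoPath);
    // the backend is closed automatically by readFrom because it implements Closeable
    return ModelMojoReader.readFrom(backend, withMetadata);
  }
}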
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/MojoModel.java |
package hex.genmodel;
import hex.genmodel.attributes.ModelAttributes;
import hex.genmodel.attributes.Table;
import hex.genmodel.descriptor.ModelDescriptor;
import java.io.*;
/**
* Prediction model based on the persisted binary data.
*/
public abstract class MojoModel extends GenModel {
public String _algoName;
public String _h2oVersion;
public hex.ModelCategory _category;
public String _uuid;
public boolean _supervised;
public int _nfeatures;
public int _nclasses;
public boolean _balanceClasses;
public double _defaultThreshold;
public double[] _priorClassDistrib;
public double[] _modelClassDistrib;
public double _mojo_version;
public ModelDescriptor _modelDescriptor = null;
public ModelAttributes _modelAttributes = null;
public Table[] _reproducibilityInformation;
/**
* Primary factory method for constructing MojoModel instances.
*
* @param file Name of the zip file (or folder) with the model's data. This should be the data retrieved via
* the `GET /3/Models/{model_id}/mojo` endpoint.
* @return New `MojoModel` object.
* @throws IOException if `file` does not exist, or cannot be read, or does not represent a valid model.
*/
public static MojoModel load(String file) throws IOException {
return load(file, false);
}
/**
* Primary factory method for constructing MojoModel instances.
*
* @param file Name of the zip file (or folder) with the model's data. This should be the data retrieved via
* the `GET /3/Models/{model_id}/mojo` endpoint.
* @param readMetadata read additional model metadata (metrics...) if enabled, otherwise skip metadata parsing
* @return New `MojoModel` object.
* @throws IOException if `file` does not exist, or cannot be read, or does not represent a valid model.
*/
public static MojoModel load(String file, boolean readMetadata) throws IOException {
File f = new File(file);
if (!f.exists())
throw new FileNotFoundException("File " + file + " cannot be found.");
MojoReaderBackend cr = f.isDirectory()? new FolderMojoReaderBackend(file)
: new ZipfileMojoReaderBackend(file);
return ModelMojoReader.readFrom(cr, readMetadata);
}
/**
* Advanced way of constructing Mojo models by supplying a custom mojoReader.
*
* @param mojoReader a class that implements the {@link MojoReaderBackend} interface.
* @return New `MojoModel` object
* @throws IOException if the mojoReader fails to read the model's data
*/
public static MojoModel load(MojoReaderBackend mojoReader) throws IOException {
return ModelMojoReader.readFrom(mojoReader);
}
//------------------------------------------------------------------------------------------------------------------
// IGenModel interface
//------------------------------------------------------------------------------------------------------------------
@Override public boolean isSupervised() { return _supervised; }
@Override public int nfeatures() { return _nfeatures; }
@Override public int nclasses() { return _nclasses; }
@Override public hex.ModelCategory getModelCategory() { return _category; }
@Override public String getUUID() { return _uuid; }
protected MojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
protected MojoModel(String[] columns, String[][] domains, String responseColumn, String treatmentColumn) {
super(columns, domains, responseColumn, treatmentColumn);
}
}
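// Illustrative usage sketch (not part of the original source; the path is an assumption):
// the typical entry point is MojoModel.load(..) followed by scoring. Real inputs must be
// encoded the same way as the training frame; the EasyPredictModelWrapper from
// hex.genmodel.easy handles that encoding for you.
class MojoModelLoadExample {
  public static void main(String[] args) throws IOException {
    MojoModel model = MojoModel.load("/tmp/gbm_model.zip"); // zip file or unpacked folder
    double[] row = new double[model.nfeatures()];           // numerically encoded input row
    double[] preds = new double[1 + model.nclasses()];      // label + per-class probabilities for classifiers
    model.score0(row, preds);
    System.out.println(java.util.Arrays.toString(preds));
  }
}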
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/MojoPipelineBuilder.java |
package hex.genmodel;
import hex.genmodel.utils.IOUtils;
import java.io.*;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import java.util.zip.ZipOutputStream;
public class MojoPipelineBuilder {
private final Map<String, File> _files = new HashMap<>();
private final Map<String, MojoModel> _models = new HashMap<>();
private final Map<String, String> _mapping = new HashMap<>();
private String _mainModelAlias;
public MojoPipelineBuilder addModel(String modelAlias, File mojoFile) throws IOException {
MojoModel model = MojoModel.load(mojoFile.getAbsolutePath());
_files.put(modelAlias, mojoFile);
_models.put(modelAlias, model);
return this;
}
public MojoPipelineBuilder addMainModel(String modelAlias, File mojoFile) throws IOException {
addModel(modelAlias, mojoFile);
_mainModelAlias = modelAlias;
return this;
}
public MojoPipelineBuilder addMappings(List<MappingSpec> specs) {
for (MappingSpec spec : specs) {
addMapping(spec);
}
return this;
}
public MojoPipelineBuilder addMapping(MappingSpec spec) {
return addMapping(spec._columnName, spec._modelAlias, spec._predsIndex);
}
public MojoPipelineBuilder addMapping(String columnName, String sourceModelAlias, int sourceModelPredictionIndex) {
_mapping.put(columnName, sourceModelAlias + ":" + sourceModelPredictionIndex);
return this;
}
public void buildPipeline(File pipelineFile) throws IOException {
MojoPipelineWriter w = new MojoPipelineWriter(_models, _mapping, _mainModelAlias);
try (FileOutputStream fos = new FileOutputStream(pipelineFile);
ZipOutputStream zos = new ZipOutputStream(fos)) {
w.writeTo(zos);
for (Map.Entry<String, File> mojoFile : _files.entrySet()) {
try (ZipFile zf = new ZipFile(mojoFile.getValue())) {
Enumeration<? extends ZipEntry> entries = zf.entries();
while (entries.hasMoreElements()) {
ZipEntry ze = entries.nextElement();
ZipEntry copy = new ZipEntry("models/" + mojoFile.getKey() + "/" + ze.getName());
if (ze.getSize() >= 0) {
copy.setSize(ze.getSize());
}
copy.setTime(ze.getTime());
zos.putNextEntry(copy);
try (InputStream input = zf.getInputStream(zf.getEntry(ze.getName()))) {
IOUtils.copyStream(input, zos);
}
zos.closeEntry();
}
}
}
}
}
public static class MappingSpec {
public String _columnName;
public String _modelAlias;
public int _predsIndex;
public static MappingSpec parse(String spec) throws NumberFormatException, IndexOutOfBoundsException {
MappingSpec ms = new MappingSpec();
String[] parts = spec.split("=", 2);
ms._columnName = parts[0];
parts = parts[1].split(":", 2);
ms._modelAlias = parts[0];
ms._predsIndex = Integer.valueOf(parts[1]);
return ms;
}
}
}
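// Illustrative usage sketch (not part of the original source; file names and aliases
// are made up): chaining a preprocessing MOJO into a main model and writing the
// combined pipeline MOJO to disk.
class PipelineBuilderExample {
  static void build() throws IOException {
    new MojoPipelineBuilder()
        .addModel("encoder", new File("/tmp/te_mojo.zip"))
        .addMainModel("gbm", new File("/tmp/gbm_mojo.zip"))
        .addMapping(MojoPipelineBuilder.MappingSpec.parse("encoded_col=encoder:0"))
        .buildPipeline(new File("/tmp/pipeline_mojo.zip"));
  }
}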
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/MojoPipelineWriter.java |
package hex.genmodel;
import hex.ModelCategory;
import hex.genmodel.descriptor.ModelDescriptor;
import hex.genmodel.attributes.Table;
import hex.genmodel.attributes.VariableImportances;
import java.io.IOException;
import java.util.Arrays;
import java.util.Date;
import java.util.LinkedHashMap;
import java.util.Map;
public class MojoPipelineWriter extends AbstractMojoWriter {
private Map<String, MojoModel> _models;
private Map<String, String> _inputMapping;
private String _mainModelAlias;
MojoPipelineWriter(Map<String, MojoModel> models, Map<String, String> inputMapping, String mainModelAlias) {
super(makePipelineDescriptor(models, inputMapping, mainModelAlias));
_models = models;
_inputMapping = inputMapping;
_mainModelAlias = mainModelAlias;
}
@Override
public String mojoVersion() {
return "1.00";
}
@Override
protected void writeModelData() throws IOException {
writekv("submodel_count", _models.size());
int modelNum = 0;
for (Map.Entry<String, MojoModel> model : _models.entrySet()) {
writekv("submodel_key_" + modelNum, model.getKey());
writekv("submodel_dir_" + modelNum, "models/" + model.getKey() + "/");
modelNum++;
}
writekv("generated_column_count", _inputMapping.size());
int generatedColumnNum = 0;
for (Map.Entry<String, String> mapping : _inputMapping.entrySet()) {
String inputSpec = mapping.getValue();
String[] inputSpecArr = inputSpec.split(":", 2);
writekv("generated_column_name_" + generatedColumnNum, mapping.getKey());
writekv("generated_column_model_" + generatedColumnNum, inputSpecArr[0]);
writekv("generated_column_index_" + generatedColumnNum, Integer.valueOf(inputSpecArr[1]));
generatedColumnNum++;
}
writekv("main_model", _mainModelAlias);
}
private static MojoPipelineDescriptor makePipelineDescriptor(
Map<String, MojoModel> models, Map<String, String> inputMapping, String mainModelAlias) {
MojoModel finalModel = models.get(mainModelAlias);
if (finalModel == null) {
throw new IllegalArgumentException("Main model is missing. There is no model with alias '" + mainModelAlias + "'.");
}
LinkedHashMap<String, String[]> schema = deriveInputSchema(models, inputMapping, finalModel);
return new MojoPipelineDescriptor(schema, finalModel);
}
private static LinkedHashMap<String, String[]> deriveInputSchema(
Map<String, MojoModel> allModels, Map<String, String> inputMapping, MojoModel finalModel) {
LinkedHashMap<String, String[]> schema = new LinkedHashMap<>();
for (MojoModel model : allModels.values()) {
if (model == finalModel) {
continue;
}
for (int i = 0; i < model.nfeatures(); i++) {
String fName = model._names[i];
if (schema.containsKey(fName)) { // make sure the domain matches
String[] domain = schema.get(fName);
if (! Arrays.equals(domain, model._domains[i])) {
throw new IllegalStateException("Domains of column '" + fName + "' differ.");
}
} else {
schema.put(fName, model._domains[i]);
}
}
}
for (int i = 0; i < finalModel._names.length; i++) { // we include the response of the final model as well
String fName = finalModel._names[i];
if (! inputMapping.containsKey(fName)) {
schema.put(fName, finalModel._domains[i]);
}
}
return schema;
}
private static class MojoPipelineDescriptor implements ModelDescriptor {
private final MojoModel _finalModel;
private final String[] _names;
private final String[][] _domains;
private MojoPipelineDescriptor(LinkedHashMap<String, String[]> schema, MojoModel finalModel) {
_finalModel = finalModel;
_names = new String[schema.size()];
_domains = new String[schema.size()][];
int i = 0;
for (Map.Entry<String, String[]> field : schema.entrySet()) {
_names[i] = field.getKey();
_domains[i] = field.getValue();
i++;
}
}
@Override
public String[][] scoringDomains() {
return _domains;
}
@Override
public String projectVersion() {
return _finalModel._h2oVersion;
}
@Override
public String algoName() {
return "pipeline";
}
@Override
public String algoFullName() {
return "MOJO Pipeline";
}
@Override
public String offsetColumn() {
return _finalModel._offsetColumn;
}
@Override
public String weightsColumn() {
return null;
}
@Override
public String foldColumn() {
return null;
}
@Override
public String treatmentColumn() { return null; }
@Override
public ModelCategory getModelCategory() {
return _finalModel._category;
}
@Override
public boolean isSupervised() {
return _finalModel.isSupervised();
}
@Override
public int nfeatures() {
return isSupervised() ? columnNames().length - 1 : columnNames().length;
}
@Override
public String[] features() {
return Arrays.copyOf(columnNames(), nfeatures());
}
@Override
public int nclasses() {
return _finalModel.nclasses();
}
@Override
public String[] columnNames() {
return _names;
}
@Override
public boolean balanceClasses() {
return _finalModel._balanceClasses;
}
@Override
public double defaultThreshold() {
return _finalModel._defaultThreshold;
}
@Override
public double[] priorClassDist() {
return _finalModel._priorClassDistrib;
}
@Override
public double[] modelClassDist() {
return _finalModel._modelClassDistrib;
}
@Override
public String uuid() {
return _finalModel._uuid;
}
@Override
public String timestamp() {
return String.valueOf(new Date().getTime());
}
@Override
public String[] getOrigNames() {
return null;
}
@Override
public String[][] getOrigDomains() {
return null;
}
}
}
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/MojoReaderBackend.java |
package hex.genmodel;
import java.io.BufferedReader;
import java.io.IOException;
/**
 * Interface abstracting read access to the individual files packaged inside a MOJO.
 */
public interface MojoReaderBackend {
BufferedReader getTextFile(String filename) throws IOException;
byte[] getBinaryFile(String filename) throws IOException;
boolean exists(String filename);
}
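// Illustrative sketch (not part of the original source): a minimal custom backend that
// serves MOJO entries from classpath resources under an assumed prefix such as "/mojos/model1/".
class ClasspathMojoReaderBackend implements MojoReaderBackend {
  private final String _prefix;
  ClasspathMojoReaderBackend(String prefix) { _prefix = prefix; }
  @Override
  public BufferedReader getTextFile(String filename) throws IOException {
    return new BufferedReader(new java.io.InputStreamReader(open(filename)));
  }
  @Override
  public byte[] getBinaryFile(String filename) throws IOException {
    java.io.ByteArrayOutputStream os = new java.io.ByteArrayOutputStream();
    try (java.io.InputStream is = open(filename)) {
      byte[] buf = new byte[4096];
      for (int n; (n = is.read(buf)) != -1; )
        os.write(buf, 0, n);
    }
    return os.toByteArray();
  }
  @Override
  public boolean exists(String filename) {
    return getClass().getResource(_prefix + filename) != null;
  }
  private java.io.InputStream open(String filename) throws IOException {
    java.io.InputStream is = getClass().getResourceAsStream(_prefix + filename);
    if (is == null) throw new IOException("Resource not found: " + _prefix + filename);
    return is;
  }
}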
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/MojoReaderBackendFactory.java |
package hex.genmodel;
import hex.genmodel.utils.IOUtils;
import java.io.*;
import java.net.URL;
import java.util.HashMap;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
/**
* Factory class for vending MojoReaderBackend objects that can be used to load MOJOs from different data sources.
*
* <p>This class provides convenience methods for loading MOJOs from files, URLs (this includes classpath resources)
* and also from a generic InputStream source.</p>
*
* <p>The user needs to choose a caching strategy for MOJO sources that are not file based.
* Available caching strategies:</p>
* <ul>
* <li>MEMORY: (decompressed) content of the MOJO will be cached in memory, this should be suitable for most cases
* (for very large models please make sure that your application has enough memory to hold the unpacked MOJO)</li>
* <li>DISK: MOJO is cached in a temporary file on disk, recommended for very large models</li>
* </ul>
*
* <p>Example of using MojoReaderBackendFactory to read a MOJO from a classpath resource:</p>
*
* <pre>
* {@code
* public class ExampleApp {
* public static void main(String[] args) throws Exception {
* URL mojoURL = ExampleApp.class.getResource("/com/company/mojo.zip");
* MojoReaderBackend reader = MojoReaderBackendFactory.createReaderBackend(mojoURL, CachingStrategy.MEMORY);
* MojoModel model = ModelMojoReader.readFrom(reader);
* EasyPredictModelWrapper modelWrapper = new EasyPredictModelWrapper(model);
* RowData testRow = new RowData();
* for (int i = 0; i < args.length; i++)
* testRow.put("C"+i, Double.valueOf(args[i]));
* RegressionModelPrediction prediction = (RegressionModelPrediction) modelWrapper.predict(testRow);
* System.out.println("Prediction: " + prediction.value);
* }
* }
* }
* </pre>
*/
public class MojoReaderBackendFactory {
public enum CachingStrategy { MEMORY, DISK }
public static MojoReaderBackend createReaderBackend(String filename) throws IOException {
return createReaderBackend(new File(filename));
}
public static MojoReaderBackend createReaderBackend(File file) throws IOException {
if (file.isFile())
return new ZipfileMojoReaderBackend(file.getPath());
else if (file.isDirectory())
return new FolderMojoReaderBackend(file.getPath());
else
throw new IOException("Invalid file specification: " + file);
}
public static MojoReaderBackend createReaderBackend(URL url, CachingStrategy cachingStrategy) throws IOException {
try (InputStream is = url.openStream()) {
return createReaderBackend(is, cachingStrategy);
}
}
public static MojoReaderBackend createReaderBackend(InputStream inputStream, CachingStrategy cachingStrategy) throws IOException {
switch (cachingStrategy) {
case MEMORY:
return createInMemoryReaderBackend(inputStream);
case DISK:
return createTempFileReaderBackend(inputStream);
}
throw new IllegalStateException("Unexpected caching strategy: " + cachingStrategy);
}
private static MojoReaderBackend createInMemoryReaderBackend(InputStream inputStream) throws IOException {
HashMap<String, byte[]> content = new HashMap<>();
ZipInputStream zis = new ZipInputStream(inputStream);
try {
ZipEntry entry;
while ((entry = zis.getNextEntry()) != null) {
if (entry.getSize() > Integer.MAX_VALUE)
throw new IOException("File too large: " + entry.getName());
ByteArrayOutputStream os = new ByteArrayOutputStream();
IOUtils.copyStream(zis, os);
content.put(entry.getName(), os.toByteArray());
}
zis.close();
} finally {
closeQuietly(zis);
}
return new InMemoryMojoReaderBackend(content);
}
private static MojoReaderBackend createTempFileReaderBackend(InputStream inputStream) throws IOException {
File tmpFile = File.createTempFile("h2o-mojo", ".zip");
tmpFile.deleteOnExit(); // register delete on exit hook (in case tmp reader doesn't do the job)
FileOutputStream fos = new FileOutputStream(tmpFile);
try {
IOUtils.copyStream(inputStream, fos);
fos.close();
} catch (IOException e) {
closeQuietly(fos); // Windows won't let us delete an open file
if (! tmpFile.delete())
e = new IOException(e.getMessage() + " [Note: temp file " + tmpFile + " not deleted]", e);
throw e;
} finally {
closeQuietly(fos);
}
return new TmpMojoReaderBackend(tmpFile);
}
private static void closeQuietly(Closeable c) {
if (c != null)
try {
c.close();
} catch (IOException e) {
// intentionally ignore exception
}
}
}
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/MultiModelMojoReader.java |
package hex.genmodel;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
public abstract class MultiModelMojoReader<M extends MojoModel> extends ModelMojoReader<M> {
private Map<String, MojoModel> _subModels;
@Override
protected final void readModelData() throws IOException {
int subModelCount = readkv("submodel_count", 0);
HashMap<String, MojoModel> models = new HashMap<>(subModelCount);
for (int i = 0; i < subModelCount; i++) {
String key = readkv("submodel_key_" + i);
String zipDirectory = readkv("submodel_dir_" + i);
MojoModel model = ModelMojoReader.readFrom(new NestedMojoReaderBackend(zipDirectory));
models.put(key, model);
}
_subModels = Collections.unmodifiableMap(models);
readParentModelData();
}
protected MojoModel getModel(String key) {
return _subModels.get(key);
}
protected Map<String, MojoModel> getSubModels() {
return _subModels;
}
protected abstract void readParentModelData() throws IOException;
private class NestedMojoReaderBackend implements MojoReaderBackend {
private String _zipDirectory;
private NestedMojoReaderBackend(String zipDirectory) {
_zipDirectory = zipDirectory;
}
@Override
public BufferedReader getTextFile(String filename) throws IOException {
return _reader.getTextFile(_zipDirectory + filename);
}
@Override
public byte[] getBinaryFile(String filename) throws IOException {
return _reader.getBinaryFile(_zipDirectory + filename);
}
@Override
public boolean exists(String filename) {
return _reader.exists(_zipDirectory + filename);
}
}
}
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/NestedMojoReaderBackend.java |
package hex.genmodel;
import java.io.BufferedReader;
import java.io.IOException;
class NestedMojoReaderBackend implements MojoReaderBackend {
private MojoReaderBackend _reader;
private String _zipDirectory;
NestedMojoReaderBackend(MojoReaderBackend parent, String zipDirectory) {
_reader = parent;
_zipDirectory = zipDirectory;
}
@Override
public BufferedReader getTextFile(String filename) throws IOException {
return _reader.getTextFile(_zipDirectory + filename);
}
@Override
public byte[] getBinaryFile(String filename) throws IOException {
return _reader.getBinaryFile(_zipDirectory + filename);
}
@Override
public boolean exists(String filename) {
return _reader.exists(_zipDirectory + filename);
}
}
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/PredictContributions.java |
package hex.genmodel;
import hex.genmodel.attributes.parameters.FeatureContribution;
import java.io.Serializable;
public interface PredictContributions extends Serializable {
/**
* Calculate contributions (SHAP values) for a given input row.
* @param input input data
* @return per-feature contributions, last value is the model bias
*/
float[] calculateContributions(double[] input);
FeatureContribution[] calculateContributions(double[] input, int topN, int bottomN, boolean compareAbs);
String[] getContributionNames();
}
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/PredictContributionsFactory.java |
package hex.genmodel;
/**
* MOJO Models that can calculate SHAP Values (contributions) should implement this interface
*/
public interface PredictContributionsFactory {
/**
* Create an instance of PredictContributions
* The returned implementation is not guaranteed to be thread-safe and the caller is responsible for making sure
* each thread will have own copy of the instance
* @return instance of PredictContributions
*/
PredictContributions makeContributionsPredictor();
}
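// Illustrative usage sketch (not part of the original source): obtaining SHAP
// contributions from a MOJO whose model implements this factory interface
// (tree-based models such as GBM and DRF do).
class ContributionsExample {
  static float[] shap(MojoModel model, double[] encodedRow) {
    if (!(model instanceof PredictContributionsFactory))
      throw new UnsupportedOperationException("Model cannot compute contributions");
    // not guaranteed thread-safe: create one predictor per scoring thread
    PredictContributions predictor = ((PredictContributionsFactory) model).makeContributionsPredictor();
    return predictor.calculateContributions(encodedRow); // last value is the model bias
  }
}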
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/TmpMojoReaderBackend.java |
package hex.genmodel;
import java.io.File;
import java.io.IOException;
public class TmpMojoReaderBackend extends ZipfileMojoReaderBackend {
File _tempZipFile;
public TmpMojoReaderBackend(File tempZipFile) throws IOException {
super(tempZipFile.getPath());
_tempZipFile = tempZipFile;
}
@Override
public void close() throws IOException {
super.close();
if (_tempZipFile != null) {
File f = _tempZipFile;
_tempZipFile = null; // we don't want to attempt to delete the file twice (even if the first attempt fails)
if (! f.delete())
throw new IOException("Failed to delete temporary file " + f);
}
}
}
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/ZipfileMojoReaderBackend.java |
package hex.genmodel;
import java.io.*;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
/**
 * {@link MojoReaderBackend} implementation that reads MOJO content from a zip archive.
 */
class ZipfileMojoReaderBackend implements MojoReaderBackend, Closeable {
private ZipFile zf;
public ZipfileMojoReaderBackend(String archivename) throws IOException {
zf = new ZipFile(archivename);
}
@Override
public BufferedReader getTextFile(String filename) throws IOException {
InputStream input = zf.getInputStream(zf.getEntry(filename));
return new BufferedReader(new InputStreamReader(input));
}
@Override
public byte[] getBinaryFile(String filename) throws IOException {
ZipEntry za = zf.getEntry(filename);
if (za == null)
throw new IOException("Binary file " + filename + " not found");
byte[] out = new byte[(int) za.getSize()];
DataInputStream dis = new DataInputStream(zf.getInputStream(za));
dis.readFully(out);
return out;
}
@Override
public boolean exists(String filename) {
return zf.getEntry(filename) != null;
}
@Override
public void close() throws IOException {
if (zf != null) {
ZipFile f = zf;
zf = null;
f.close();
}
}
}
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/package-info.java |
/**
* Low-level information about generated POJO and MOJO models.
*/
package hex.genmodel;
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/coxph/CoxPHMojoModel.java |
package hex.genmodel.algos.coxph;
import hex.genmodel.MojoModel;
import java.io.Serializable;
import java.util.*;
public class CoxPHMojoModel extends MojoModel {
static class Strata implements Serializable {
final double[] strata;
final int strataLen;
final int hashCode;
public Strata(double[] strata, int strataLen) {
this.strata = strata;
int hash = 11;
for (int i = 0; i < strataLen; i++) {
hash *= 13;
hash += 17 * (int) strata[i];
}
hashCode = hash;
this.strataLen = strataLen;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final Strata that = (Strata) o;
if (this.hashCode != that.hashCode) return false;
if (this.strataLen != that.strataLen) return false;
for (int i = 0; i < strataLen; i++) {
if ((int) strata[i] != (int) that.strata[i]) return false;
}
return true;
}
@Override
public int hashCode() {
return hashCode;
}
}
enum InteractionTypes {ENUM_TO_ENUM, ENUM_TO_NUM, NUM_TO_NUM};
double[] _coef;
HashMap<Strata, Integer> _strata; // HashMap to make sure the implementation is Serializable
int _strata_len;
double[][] _x_mean_cat;
double[][] _x_mean_num;
int[] _cat_offsets;
int _cats;
double[] _lpBase;
boolean _useAllFactorLevels;
int _nums;
int[] _interactions_1;
int[] _interactions_2;
int[] _interaction_targets;
boolean[] _is_enum_1; // check interaction column1 column type
HashSet<Integer> _interaction_column_index;
HashMap<Integer, List<String>> _interaction_column_domains;
InteractionTypes[] _interaction_types;
int[] _num_offsets;
CoxPHMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
@Override
public double[] score0(double[] row, double[] predictions) {
return score0(row, 0, predictions);
}
@Override
public double[] score0(double[] row, double offset, double[] predictions) {
int[] enumOffset = null;
if (_interaction_targets != null) {
enumOffset = evaluateInteractions(row);
}
predictions[0] = forCategories(row) + forOtherColumns(row, enumOffset) - forStrata(row) + offset;
return predictions;
}
private double forOtherColumns(double[] row, int[] enumOffset) {
double result = 0.0;
int coefLen = _coef.length;
for(int i = 0 ; i < _nums; i++) {
if (enumOffset == null || enumOffset[i] < 0) {
if (_num_offsets[i] >= coefLen)
break;
result += _coef[_num_offsets[i]] * featureValue(row, i + _cats);
} else {
if (enumOffset[i] >= coefLen)
break;
result += _coef[enumOffset[i]] * featureValue(row, i + _cats);
}
}
return result;
}
private double forStrata(double[] row) {
final int strata = strataForRow(row);
return _lpBase[strata];
}
private double forCategories(double[] row) {
double result = 0.0;
if (!_useAllFactorLevels) {
for(int category = 0; category < _cats; ++category) {
double val = featureValue(row, category);
if (Double.isNaN(val)) {
result = Double.NaN;
} else if (val >= 0) {
if (_interaction_column_index.contains(category))
result += forOneCategory(row, category, 0); // already taken into account the useAllFactorLevels
else
result += forOneCategory(row, category, 1);
}
}
} else {
for(int category = 0; category < _cat_offsets.length - 1; ++category) {
result += forOneCategory(row, category, 0);
}
}
return result;
}
double forOneCategory(double[] row, int category, int lowestFactorValue) {
final int value = (int) featureValue(row, category) - lowestFactorValue;
if (value != featureValue(row, category) - lowestFactorValue) {
throw new IllegalArgumentException("categorical value out of range");
}
final int x = value + _cat_offsets[category]; // value will be < 0 if cat value is not within domain
if (value >= 0 && x < _cat_offsets[category + 1]) {
return _coef[x];
} else {
return 0;
}
}
double[] computeLpBase() {
final int _numStart = _x_mean_cat.length >= 1 ? _x_mean_cat[0].length : 0;
final int size = 0 < _strata.size() ? _strata.size() : 1;
double[] lpBase = new double[size];
for (int s = 0; s < size; s++) {
for (int i = 0; i < _x_mean_cat[s].length; i++)
lpBase[s] += _x_mean_cat[s][i] * _coef[i];
for (int i = 0; i < _x_mean_num[s].length; i++)
lpBase[s] += _x_mean_num[s][i] * _coef[i + _numStart];
}
return lpBase;
}
double featureValue(double[] row, int featureIdx) {
return row[featureIdx + _strata_len];
}
private int strataForRow(double[] row) {
if (0 == _strata.size()) {
return 0;
} else {
final Strata o = new Strata(row, _strata_len);
return _strata.get(o);
}
}
private int[] evaluateInteractions(double[] row) {
int[] enumOffset = new int[_nums];
Arrays.fill(enumOffset, -1);
for (int interactionIndex = 0; interactionIndex < _interaction_targets.length; interactionIndex++) {
final int target = _interaction_targets[interactionIndex]; // index into row
if (Double.isNaN(row[target])) {
if (InteractionTypes.ENUM_TO_ENUM.equals(_interaction_types[interactionIndex])) { // enum to enum interaction
row[target] = enumEnumInteractions(row, interactionIndex);
} else if (InteractionTypes.NUM_TO_NUM.equals(_interaction_types[interactionIndex])) { // num to num interaction
row[target] = row[_interactions_1[interactionIndex]] * row[_interactions_2[interactionIndex]];
} else { // enum to num interaction
enumNumInteractions(row, enumOffset, interactionIndex, target);
}
}
}
return enumOffset;
}
/**
* Again, this method is similar to the extractDenseRow method of DataInfo.java. It stores the interaction offset
* (as catLevel here) in enumOffset and stores the numerical value back into the row at the correct rowIndex. If the
* catLevel is not valid, a value of 0.0 will be stored in the row at the rowIndex.
*/
private void enumNumInteractions(double[] row, int[] enumOffset, int interactionIndex, int rowIndex) {
int enumPredIndex = _is_enum_1[interactionIndex] ? _interactions_1[interactionIndex] : _interactions_2[interactionIndex];
int numPredIndex = _is_enum_1[interactionIndex] ? _interactions_2[interactionIndex] : _interactions_1[interactionIndex];
int offset = _num_offsets[rowIndex - _cats];
int catLevel = (int) row[enumPredIndex]-(_useAllFactorLevels?0:1);
row[rowIndex] = catLevel < 0 ? 0 : row[numPredIndex];
enumOffset[rowIndex-_cats] = catLevel+offset;
}
/**
* This method is similar to the extractDenseRow method of DataInfo.java. Basically, it takes the domain of column 1
* and the domain of column 2 to form the new combined domain, as domain1_domain2. Then it looks up the index
* of this new combination in the combinedDomains. If it is found, it returns the index; if not, it returns -1.
*/
private int enumEnumInteractions(double[] row, int interactionIndex) {
List<String> combinedDomains = _interaction_column_domains.get(_interaction_targets[interactionIndex]);
int predictor1Index = _interactions_1[interactionIndex]; // original column index into row
int predictor2Index = _interactions_2[interactionIndex];
String[] predictor1Domains = _domains[predictor1Index];
String[] predictor2Domains = _domains[predictor2Index];
String predictor1Domain = predictor1Domains[(int) row[predictor1Index]];
String predictor2Domain = predictor2Domains[(int) row[predictor2Index]];
String combinedEnumDomains = predictor1Domain+"_"+predictor2Domain;
return combinedDomains.indexOf(combinedEnumDomains); // indexOf already returns -1 when the combination is absent
}
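// Illustrative worked example (not part of the original source; domains are made up):
// for an interaction of col1 with domain {"A","B"} and col2 with domain {"X","Y"},
// the combined domain would be {"A_X","A_Y","B_X","B_Y"}; a row with col1 = "B" and
// col2 = "X" yields the lookup key "B_X", so enumEnumInteractions returns its index
// (2 here), or -1 if that combination never occurred during training.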
}
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/coxph/CoxPHMojoReader.java |
package hex.genmodel.algos.coxph;
import hex.genmodel.ModelMojoReader;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
public class CoxPHMojoReader extends ModelMojoReader<CoxPHMojoModel> {
@Override
public String getModelName() {
return "CoxPH";
}
@Override
protected void readModelData() throws IOException {
_model._x_mean_cat = readRectangularDoubleArray("x_mean_cat");
_model._x_mean_num = readRectangularDoubleArray("x_mean_num");
_model._coef = readkv("coef");
_model._strata = readStrata();
_model._strata_len = readStrataLen();
_model._cat_offsets = readkv("cat_offsets");
_model._cats = readkv("cats");
_model._useAllFactorLevels = readkv("use_all_factor_levels");
_model._lpBase = _model.computeLpBase();
_model._interaction_targets = readkv("interaction_targets");
_model._interaction_column_index = new HashSet<>();
_model._interaction_column_domains = new HashMap<>();
_model._nums = readkv("num_numerical_columns");
_model._num_offsets = readkv("num_offsets");
if (_model._interaction_targets != null) {
_model._interactions_1 = readkv("interactions_1");
_model._interactions_2 = readkv("interactions_2");
for (int index : _model._interaction_targets) {
_model._interaction_column_index.add(index);
if (_model._domains[index] != null)
_model._interaction_column_domains.put(index, Arrays.asList(_model._domains[index]));
}
createInteractionTypes();
}
}
private void createInteractionTypes() {
int numInteractions = _model._interaction_targets.length;
_model._interaction_types = new CoxPHMojoModel.InteractionTypes[numInteractions];
_model._is_enum_1 = new boolean[numInteractions];
for (int index=0; index<numInteractions; index++) {
if (_model._domains[_model._interactions_1[index]] != null && _model._domains[_model._interactions_2[index]] != null) {
_model._interaction_types[index] = CoxPHMojoModel.InteractionTypes.ENUM_TO_ENUM;
_model._is_enum_1[index] = true;
} else if ((_model._domains[_model._interactions_1[index]] == null && _model._domains[_model._interactions_2[index]] == null)) {
_model._interaction_types[index] = CoxPHMojoModel.InteractionTypes.NUM_TO_NUM;
} else {
_model._interaction_types[index] = CoxPHMojoModel.InteractionTypes.ENUM_TO_NUM;
if (_model._domains[_model._interactions_1[index]] != null)
_model._is_enum_1[index] = true;
}
}
}
private HashMap<CoxPHMojoModel.Strata, Integer> readStrata() {
final int count = readkv("strata_count");
final HashMap<CoxPHMojoModel.Strata, Integer> result = new HashMap<>(count);
for (int i = 0; i < count; i++) {
final double[] strata = readkv("strata_" + i);
result.put(new CoxPHMojoModel.Strata(strata, strata.length), i);
}
return result;
}
private int readStrataLen() {
final int count = readkv("strata_count");
if (0 == count) {
return 0;
} else {
final double[] strata = readkv("strata_0");
return strata.length;
}
}
@Override
protected CoxPHMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
return new CoxPHMojoModel(columns, domains, responseColumn);
}
@Override public String mojoVersion() { return "1.00"; }
}
| 0 | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos | java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/deeplearning/ActivationUtils.java |
package hex.genmodel.algos.deeplearning;
import java.io.Serializable;
public class ActivationUtils {
// derived from GLMMojoModel
public interface ActivationFunctions extends Serializable {
double[] eval(double[] x, double drop_out_ratio, int maxOutk); // for MaxoutDropout
}
public static class LinearOut implements ActivationFunctions {
public double[] eval(double[] input, double drop_out_ratio, int maxOutk) { // do nothing
return input;
}
}
public static class SoftmaxOut implements ActivationFunctions {
public double[] eval(double[] input, double drop_out_ratio, int maxOutk) {
int nodeSize = input.length;
double[] output = new double[nodeSize];
double scaling = 0;
double max = maxArray(input);
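// subtracting the row maximum before exponentiation is the standard softmax
// stabilization: it prevents Math.exp from overflowing and does not change the result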
for (int index = 0; index < nodeSize; index++) {
output[index] = Math.exp(input[index]-max);
scaling += output[index];
}
for (int index = 0; index < nodeSize; index++)
output[index] /= scaling;
return output;
}
}
public static double maxArray(double[] input) {
assert ((input != null) && (input.length > 0)) : "Your array is empty.";
double temp = input[0];
for (int index = 0; index < input.length; index++)
temp = temp<input[index]?input[index]:temp;
return temp;
}
public static class ExpRectifierDropoutOut extends ExpRectifierOut {
public double[] eval(double[] input, double drop_out_ratio, int maxOutk) {
double[] output = super.eval(input, drop_out_ratio, maxOutk);
applyDropout(output, drop_out_ratio, input.length);
return output;
}
}
public static double[] applyDropout(double[] input, double drop_out_ratio, int nodeSize) {
if (drop_out_ratio > 0) {
double multFact = 1.0 - drop_out_ratio;
for (int index = 0; index < nodeSize; index++)
input[index] *= multFact;
}
return input;
}
public static class ExpRectifierOut implements ActivationFunctions {
public double[] eval(double[] input, double drop_out_ratio, int maxOutk) {
int nodeSize = input.length;
double[] output = new double[nodeSize];
for (int index = 0; index < nodeSize; index++) {
output[index] = input[index] >= 0 ? input[index] : Math.exp(input[index]) - 1;
}
return output;
}
}
public static class RectifierOut implements ActivationFunctions {
public double[] eval(double[] input, double drop_out_ratio, int maxOutk) {
int nodeSize = input.length;
double[] output = new double[nodeSize];
for (int index = 0; index < nodeSize; index++)
output[index] = 0.5f * (input[index] + Math.abs(input[index])); // clever. Copied from Neurons.java
return output;
}
}
public static class RectifierDropoutOut extends RectifierOut {
public double[] eval(double[] input, double drop_out_ratio, int maxOutk) {
double[] output = super.eval(input, drop_out_ratio, maxOutk);
applyDropout(output, drop_out_ratio, input.length);
return output;
}
}
public static class MaxoutDropoutOut extends MaxoutOut {
public double[] eval(double[] input, double drop_out_ratio, int maxOutk) {
double[] output = super.eval(input, drop_out_ratio, maxOutk);
applyDropout(output, drop_out_ratio, output.length);
return output;
}
}
public static class MaxoutOut implements ActivationFunctions {
public double[] eval(double[] input, double drop_out_ratio, int maxOutk) {
int nodeSize = input.length/maxOutk; // weight matrix is maxOutk times the size of other act functions
double[] output = new double[nodeSize];
for (int index=0; index < nodeSize; index++) {
int base = index*maxOutk;
double temp = input[base];
for (int k = 1; k < maxOutk; k++) { // take the max over the maxOutk candidate units
temp = temp > input[base+k] ? temp : input[base+k];
}
output[index] = temp;
}
return output;
}
}
public static class TanhDropoutOut extends TanhOut {
public double[] eval(double[] input, double drop_out_ratio, int maxOutk) {
int nodeSize = input.length;
double[] output = super.eval(input, drop_out_ratio, maxOutk);
applyDropout(output, drop_out_ratio, input.length);
return output;
}
}
public static class TanhOut implements ActivationFunctions {
public double[] eval(double[] input, double drop_out_ratio, int maxOutk) {
int nodeSize = input.length;
double[] output = new double[nodeSize];
for (int index=0; index < nodeSize; index++)
output[index] = 1.-2./(1.+Math.exp(2.*input[index]));
return output;
}
}
}
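// Illustrative sketch (not part of the original H2O sources): exercises two of the activation
// classes above. The class name and input values are hypothetical.
class ActivationUtilsExample {
public static void main(String[] args) {
double[] logits = {0.5, -1.2, 2.0};
// Softmax output sums to 1; maxOutk is 1 for non-Maxout activations.
double[] probs = new ActivationUtils.SoftmaxOut().eval(logits, 0.0, 1);
// Rectifier zeroes out negative entries.
double[] relu = new ActivationUtils.RectifierOut().eval(logits, 0.0, 1);
System.out.println(java.util.Arrays.toString(probs));
System.out.println(java.util.Arrays.toString(relu));
}
}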
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/deeplearning/DeeplearningMojoModel.java
|
package hex.genmodel.algos.deeplearning;
import hex.ModelCategory;
import hex.genmodel.CategoricalEncoding;
import hex.genmodel.GenModel;
import hex.genmodel.MojoModel;
import hex.genmodel.utils.DistributionFamily;
import java.io.Serializable;
public class DeeplearningMojoModel extends MojoModel {
public int _mini_batch_size;
public int _nums; // number of numerical columns
public int _cats; // number of categorical columns
public int[] _catoffsets;
public double[] _normmul;
public double[] _normsub;
public double[] _normrespmul;
public double[] _normrespsub;
public boolean _use_all_factor_levels;
public String _activation;
public String[] _allActivations; // store activation function of all layers
public boolean _imputeMeans;
public int[] _units; // size of neural network, input, hidden layers and output layer
public double[] _all_drop_out_ratios; // input layer and hidden layers
public StoreWeightsBias[] _weightsAndBias; // stores weights of different layers
public int[] _catNAFill; // if mean imputation is true, mode imputation for categorical columns
public int _numLayers; // number of neural network layers.
public DistributionFamily _family;
protected String _genmodel_encoding;
protected String[] _orig_names;
protected String[][] _orig_domain_values;
protected double[] _orig_projection_array;
/***
* Sets up the neural network framework here.
* @param columns
* @param domains
*/
DeeplearningMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
public void init() {
_numLayers = _units.length-1;
_allActivations = new String[_numLayers];
int inputLayers = _numLayers-1;
for (int index=0; index < (inputLayers); index++)
_allActivations[index]=_activation;
_allActivations[inputLayers] = this.isAutoEncoder()?_activation:(this.isClassifier()?"Softmax":"Linear");
}
/***
* This method is derived from the scoring/prediction function of the deep learning model itself. However,
* it closely follows what is done in the deepwater MOJO. The variable offset is not used.
* @param dataRow
* @param offset
* @param preds
* @return
*/
@Override
public final double[] score0(double[] dataRow, double offset, double[] preds) {
assert(dataRow != null) : "doubles are null"; // check to make sure data is not null
double[] neuronsInput = new double[_units[0]]; // store inputs into the neural network
double[] neuronsOutput; // save output from a neural network layer
double[] _numsA = new double[_nums];
int[] _catsA = new int[_cats];
// transform inputs: NAs in categoricals are always set to new extra level.
setInput(dataRow, neuronsInput, _numsA, _catsA, _nums, _cats, _catoffsets, _normmul, _normsub, _use_all_factor_levels, true);
// propagate inputs through the neural network
for (int layer=0; layer < _numLayers; layer++) {
NeuralNetwork oneLayer = new NeuralNetwork(_allActivations[layer], _all_drop_out_ratios[layer],
_weightsAndBias[layer], neuronsInput, _units[layer + 1]);
neuronsOutput = oneLayer.fprop1Layer();
neuronsInput = neuronsOutput;
}
if (!this.isAutoEncoder())
assert (_nclasses == neuronsInput.length) : "nclasses " + _nclasses + " final layer output length " + neuronsInput.length;
// Correction for classification or standardize outputs
return modifyOutputs(neuronsInput, preds, dataRow);
}
public double[] modifyOutputs(double[] out, double[] preds, double[] dataRow) {
if (this.isAutoEncoder()) { // only un-scale numerical values if needed
if (_normmul != null && _normmul.length > 0) { // undo the standardization on output
int nodeSize = out.length - _nums;
for (int k = 0; k < nodeSize; k++) {
preds[k] = out[k];
}
for (int k = 0; k < _nums; k++) {
int offset = nodeSize + k;
preds[offset] = out[offset] / _normmul[k] + _normsub[k];
}
} else {
for (int k = 0; k < out.length; k++) {
preds[k] = out[k];
}
}
} else {
if (_family == DistributionFamily.modified_huber) {
preds[0] = -1;
preds[2] = linkInv(_family, preds[0]);
preds[1] = 1 - preds[2];
} else if (this.isClassifier()) {
assert (preds.length == out.length + 1);
for (int i = 0; i < preds.length - 1; ++i) {
preds[i + 1] = out[i];
if (Double.isNaN(preds[i + 1])) throw new RuntimeException("Predicted class probability NaN!");
}
if (_balanceClasses)
GenModel.correctProbabilities(preds, _priorClassDistrib, _modelClassDistrib);
preds[0] = GenModel.getPrediction(preds, _priorClassDistrib, dataRow, _defaultThreshold);
} else {
if (_normrespmul != null) //either both are null or none
preds[0] = (out[0] / _normrespmul[0] + _normrespsub[0]);
else
preds[0] = out[0];
// transform prediction to response space
preds[0] = linkInv(_family, preds[0]);
if (Double.isNaN(preds[0]))
throw new RuntimeException("Predicted regression target NaN!");
}
}
return preds;
}
/**
* Calculate inverse link depends on distribution type
* Be careful if you are changing code here - you have to change it in hex.LinkFunction too
* @param distribution
* @param f raw prediction
* @return calculated inverse link value
*/
private double linkInv(DistributionFamily distribution, double f){
switch (distribution) {
case bernoulli:
case quasibinomial:
case modified_huber:
case ordinal:
return 1/(1+Math.min(1e19, Math.exp(-f)));
case multinomial:
case poisson:
case gamma:
case tweedie:
return Math.min(1e19, Math.exp(f));
default:
return f;
}
}
@Override
public double[] score0(double[] row, double[] preds) {
return score0(row, 0.0, preds);
}
public int getPredsSize(ModelCategory mc) {
return (mc == ModelCategory.AutoEncoder)? _units[0]: (isClassifier()?nclasses()+1 :2);
}
/**
* Calculates average reconstruction error (MSE).
* Uses a normalization defined for the numerical features of the trained model.
* @return average reconstruction error = ||original - reconstructed||^2 / length(original)
*/
public double calculateReconstructionErrorPerRowData(double [] original, double [] reconstructed){
assert (original != null && original.length > 0) && (reconstructed != null && reconstructed.length > 0);
assert original.length == reconstructed.length;
int numStartIndex = original.length - this._nums;
double norm;
double l2 = 0;
for (int i = 0; i < original.length; i++) {
norm = (this._normmul != null && this._normmul.length > 0 && this._nums > 0 && i >= numStartIndex) ? this._normmul[i-numStartIndex] : 1;
l2 += Math.pow((reconstructed[i] - original[i]) * norm, 2);
}
return l2 / original.length;
}
// class to store weight or bias for one neuron layer
public static class StoreWeightsBias implements Serializable {
float[] _wValues; // store weight or bias arrays
double[] _bValues;
StoreWeightsBias(float[] wvalues, double[] bvalues) {
_wValues = wvalues;
_bValues = bvalues;
}
}
@Override
public CategoricalEncoding getCategoricalEncoding() {
switch (_genmodel_encoding) {
case "AUTO":
case "SortByResponse":
case "OneHotInternal":
return CategoricalEncoding.AUTO;
case "Binary":
return CategoricalEncoding.Binary;
case "Eigen":
return CategoricalEncoding.Eigen;
case "LabelEncoder":
return CategoricalEncoding.LabelEncoder;
default:
return null;
}
}
@Override
public String[] getOrigNames() {
return _orig_names;
}
@Override
public double[] getOrigProjectionArray() {
return _orig_projection_array;
}
@Override
public String[][] getOrigDomainValues() {
return _orig_domain_values;
}
}
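// Illustrative sketch (not part of the original H2O sources): mirrors the per-row
// reconstruction error formula of calculateReconstructionErrorPerRowData() above for the
// simple case without numeric standardization (_normmul == null). Names and values are hypothetical.
class ReconstructionErrorExample {
static double mse(double[] original, double[] reconstructed) {
double l2 = 0;
for (int i = 0; i < original.length; i++) {
double diff = reconstructed[i] - original[i];
l2 += diff * diff;
}
return l2 / original.length; // ||original - reconstructed||^2 / length(original)
}
public static void main(String[] args) {
System.out.println(mse(new double[]{1, 2, 3}, new double[]{1.1, 1.9, 3.2})); // ~0.02
}
}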
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/deeplearning/DeeplearningMojoReader.java
|
package hex.genmodel.algos.deeplearning;
import com.google.gson.JsonObject;
import hex.genmodel.ModelMojoReader;
import hex.genmodel.attributes.DeepLearningModelAttributes;
import hex.genmodel.attributes.ModelAttributes;
import hex.genmodel.attributes.ModelJsonReader;
import hex.genmodel.utils.DistributionFamily;
import java.io.IOException;
import static hex.genmodel.GenModel.convertDouble2Float;
public class DeeplearningMojoReader extends ModelMojoReader<DeeplearningMojoModel> {
@Override
public String getModelName() {
return "Deep Learning";
}
@Override
protected void readModelData() throws IOException {
/* if (_model.isAutoEncoder()) {
throw new UnsupportedOperationException("AutoEncoder mojo is not ready for deployment. Stay tuned...");
}*/
_model._mini_batch_size=readkv("mini_batch_size");
_model._nums = readkv("nums");
_model._cats = readkv("cats");
_model._catoffsets = readkv("cat_offsets", new int[0]);
_model._normmul = readkv("norm_mul", new double[0]);
_model._normsub = readkv("norm_sub", new double[0]);
_model._normrespmul = readkv("norm_resp_mul");
_model._normrespsub = readkv("norm_resp_sub");
_model._use_all_factor_levels = readkv("use_all_factor_levels");
_model._activation = readkv("activation");
_model._imputeMeans = readkv("mean_imputation");
_model._family = DistributionFamily.valueOf((String)readkv("distribution"));
if (_model._imputeMeans && (_model._cats > 0)) {
_model._catNAFill = readkv("cat_modes", new int[0]);
}
_model._units = readkv("neural_network_sizes", new int[0]);
_model._all_drop_out_ratios = readkv("hidden_dropout_ratios", new double[0]);
// read in biases and weights for each layer
int numLayers = _model._units.length-1; // exclude the output nodes.
_model._weightsAndBias = new DeeplearningMojoModel.StoreWeightsBias[numLayers];
for (int layerIndex = 0; layerIndex < numLayers; layerIndex++) {
double[] tempB = readkv("bias_layer" + layerIndex, new double[0]);
double[] tempWD = readkv("weight_layer" + layerIndex, new double[0]);
float[] tempW;
if (tempWD.length==0)
tempW = new float[0];
else
tempW = convertDouble2Float(tempWD);
_model._weightsAndBias[layerIndex] = new DeeplearningMojoModel.StoreWeightsBias(tempW, tempB);
}
if (_model._mojo_version < 1.10) {
_model._genmodel_encoding = "AUTO";
} else {
_model._genmodel_encoding = readkv("_genmodel_encoding").toString();
_model._orig_projection_array = readkv("_orig_projection_array", new double[0]);
Integer n = readkv("_n_orig_names");
if (n != null) {
_model._orig_names = readStringArray("_orig_names", n);
}
n = readkv("_n_orig_domain_values");
if (n != null) {
_model._orig_domain_values = new String[n][];
for (int i = 0; i < n; i++) {
int m = readkv("_m_orig_domain_values_" + i);
if (m > 0) {
_model._orig_domain_values[i] = readStringArray("_orig_domain_values_" + i, m);
}
}
}
}
_model.init();
}
@Override
protected DeeplearningMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
return new DeeplearningMojoModel(columns, domains, responseColumn);
}
@Override
public String mojoVersion() {
return "1.10";
}
@Override
protected ModelAttributes readModelSpecificAttributes() {
final JsonObject modelJson = ModelJsonReader.parseModelJson(_reader);
if (modelJson != null) {
return new DeepLearningModelAttributes(_model, modelJson);
} else {
return null;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/deeplearning/NeuralNetwork.java
|
package hex.genmodel.algos.deeplearning;
import java.util.Arrays;
import java.util.List;
import static hex.genmodel.algos.deeplearning.ActivationUtils.*;
public class NeuralNetwork { // represent one layer of neural network
public String _activation; // string that describe the activation function
double _drop_out_ratio; // drop_out_ratio for that layer
public DeeplearningMojoModel.StoreWeightsBias _weightsAndBias; // store layer weight
public double[] _inputs; // store input to layer
public double[] _outputs; // layer output
public int _outSize; // number of nodes in this layer
public int _inSize; // number of inputs to this layer
public int _maxK=1;
List<String> _validActivation = Arrays.asList("Linear", "Softmax", "ExpRectifierWithDropout", "ExpRectifier",
"Rectifier", "RectifierWithDropout", "MaxoutWithDropout", "Maxout", "TanhWithDropout", "Tanh");
public NeuralNetwork(String activation, double drop_out_ratio, DeeplearningMojoModel.StoreWeightsBias weightsAndBias,
double[] inputs, int outSize) {
validateInputs(activation, drop_out_ratio, weightsAndBias._wValues.length, weightsAndBias._bValues.length,
inputs.length, outSize);
_activation=activation;
_drop_out_ratio=drop_out_ratio;
_weightsAndBias=weightsAndBias;
_inputs=inputs;
_outSize=outSize;
_inSize=_inputs.length;
_outputs = new double[_outSize];
if ("Maxout".equals(_activation) || "MaxoutWithDropout".equals(_activation)) {
_maxK = weightsAndBias._bValues.length/outSize;
}
}
public double[] fprop1Layer() {
double[] input2ActFun = _maxK==1?formNNInputs():formNNInputsMaxOut();
ActivationFunctions createActivations = createActFuns(_activation); // choose activation function
return createActivations.eval(input2ActFun, _drop_out_ratio, _maxK); // apply activation function to form NN outputs
}
/*
This method matches the exact operation of gemv_row_optimized in order to reproduce its result bit-for-bit.
*/
public double[] formNNInputs() {
double[] input2ActFun = new double[_outSize];
int cols = _inputs.length;
int rows = input2ActFun.length;
int extra=cols-cols%8;
int multiple = (cols/8)*8-1;
int idx = 0;
for (int row = 0; row < rows; row++) {
double psum0 = 0, psum1 = 0, psum2 = 0, psum3 = 0, psum4 = 0, psum5 = 0, psum6 = 0, psum7 = 0;
for (int col=0; col < multiple; col+=8) {
int off=idx+col;
psum0 += _weightsAndBias._wValues[off ] * _inputs[col ];
psum1 += _weightsAndBias._wValues[off + 1] * _inputs[col + 1];
psum2 += _weightsAndBias._wValues[off + 2] * _inputs[col + 2];
psum3 += _weightsAndBias._wValues[off + 3] * _inputs[col + 3];
psum4 += _weightsAndBias._wValues[off + 4] * _inputs[col + 4];
psum5 += _weightsAndBias._wValues[off + 5] * _inputs[col + 5];
psum6 += _weightsAndBias._wValues[off + 6] * _inputs[col + 6];
psum7 += _weightsAndBias._wValues[off + 7] * _inputs[col + 7];
}
input2ActFun[row] += psum0+psum1+psum2+psum3;
input2ActFun[row] += psum4+psum5+psum6+psum7;
for (int col = extra; col<cols;col++) {
input2ActFun[row] += _weightsAndBias._wValues[idx+col]*_inputs[col];
}
input2ActFun[row] += _weightsAndBias._bValues[row];
idx += cols;
}
return input2ActFun;
}
public double[] formNNInputsMaxOut() {
double[] input2ActFun = new double[_outSize*_maxK];
for (int k = 0; k < _maxK; k++) {
for (int row = 0; row < _outSize; row++) {
int countInd = _maxK*row+k;
for (int col = 0; col < _inSize; col++) {
input2ActFun[countInd] += _inputs[col] * _weightsAndBias._wValues[_maxK*(row*_inSize+col)+k];
}
input2ActFun[countInd] += _weightsAndBias._bValues[countInd];
}
}
return input2ActFun;
}
public void validateInputs(String activation, double drop_out_ratio, int weightLen, int biasLen, int inSize,
int outSize) {
assert (_validActivation.contains(activation)) : "activation must be one of \"Linear\", \"Softmax\", " +
"\"ExpRectifierWithDropout\", \"ExpRectifier\", \"Rectifier\", \"RectifierWithDropout\", \"MaxoutWithDropout\", " +
"\"Maxout\", \"TanhWithDropout\", \"Tanh\"";
// use mod to take care of Maxout networks
assert (weightLen % (inSize * outSize) == 0) : "The length of your weight vector should be a multiple " +
"of (number of inputs * number of outputs) of your neural network layer.";
assert ((biasLen % outSize) == 0) : "The number of biases should be a multiple of the number of nodes " +
"in your neural network layer.";
assert (drop_out_ratio >= 0 && drop_out_ratio < 1) : "drop_out_ratio must be >=0 and < 1.";
assert (outSize > 0) : "number of nodes in neural network must exceed 0.";
}
public ActivationFunctions createActFuns(String activation) {
switch (activation) {
case "Linear":
return new LinearOut();
case "Softmax":
return new SoftmaxOut();
case "ExpRectifierWithDropout":
return new ExpRectifierDropoutOut();
case "ExpRectifier":
return new ExpRectifierOut();
case "Rectifier":
return new RectifierOut();
case "RectifierWithDropout":
return new RectifierDropoutOut();
case "MaxoutWithDropout":
return new MaxoutDropoutOut();
case "Maxout":
return new MaxoutOut();
case "TanhWithDropout":
return new TanhDropoutOut();
case "Tanh":
return new TanhOut();
default:
throw new UnsupportedOperationException("Unexpected activation function: " + activation);
}
}
}
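// Illustrative sketch (not part of the original H2O sources): builds a single 2-input,
// 3-output Rectifier layer and runs one forward pass. Weight layout is row-major
// (one row of 2 weights per output node); all values are hypothetical.
class NeuralNetworkExample {
public static void main(String[] args) {
float[] weights = {0.1f, -0.2f, 0.3f, 0.4f, -0.5f, 0.6f};
double[] biases = {0.0, 0.1, -0.1};
DeeplearningMojoModel.StoreWeightsBias wb =
new DeeplearningMojoModel.StoreWeightsBias(weights, biases);
NeuralNetwork layer = new NeuralNetwork("Rectifier", 0.0, wb, new double[]{1.0, 2.0}, 3);
double[] out = layer.fprop1Layer(); // weights * inputs + bias, then ReLU
System.out.println(java.util.Arrays.toString(out));
}
}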
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/drf/DrfMojoModel.java
|
package hex.genmodel.algos.drf;
import hex.ModelCategory;
import hex.genmodel.GenModel;
import hex.genmodel.PredictContributions;
import hex.genmodel.algos.tree.*;
import hex.genmodel.attributes.VariableImportances;
import hex.genmodel.attributes.parameters.VariableImportancesHolder;
/**
* "Distributed Random Forest" MojoModel
*/
public final class DrfMojoModel extends SharedTreeMojoModelWithContributions implements SharedTreeGraphConverter {
protected boolean _binomial_double_trees;
public DrfMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
@Override
protected PredictContributions getContributionsPredictor(TreeSHAPPredictor<double[]> treeSHAPPredictor) {
return new ContributionsPredictorDRF(this, treeSHAPPredictor);
}
/**
* Corresponds to `hex.tree.drf.DrfMojoModel.score0()`
*/
@Override
public final double[] score0(double[] row, double offset, double[] preds) {
super.scoreAllTrees(row, preds);
return unifyPreds(row, offset, preds);
}
@Override
public final double[] unifyPreds(double[] row, double offset, double[] preds) {
// Correct the predictions -- see `DRFModel.toJavaUnifyPreds`
if (_nclasses == 1) {
// Regression
preds[0] /= _ntree_groups;
} else {
// Classification
if (_nclasses == 2 && !_binomial_double_trees) {
// Binomial model
preds[1] /= _ntree_groups;
preds[2] = 1.0 - preds[1];
} else {
// Multinomial
double sum = 0;
for (int i = 1; i <= _nclasses; i++) { sum += preds[i]; }
if (sum > 0)
for (int i = 1; i <= _nclasses; i++) { preds[i] /= sum; }
}
if (_balanceClasses)
GenModel.correctProbabilities(preds, _priorClassDistrib, _modelClassDistrib);
preds[0] = GenModel.getPrediction(preds, _priorClassDistrib, row, _defaultThreshold);
}
return preds;
}
@Override
public double[] score0(double[] row, double[] preds) {
return score0(row, 0.0, preds);
}
static class ContributionsPredictorDRF extends SharedTreeContributionsPredictor {
private final float _featurePlusBiasRatio;
private final int _normalizer;
private ContributionsPredictorDRF(DrfMojoModel model, TreeSHAPPredictor<double[]> treeSHAPPredictor) {
super(model, treeSHAPPredictor);
if (model._binomial_double_trees) {
throw new UnsupportedOperationException(
"Calculating contributions is currently not supported for model with binomial_double_trees parameter set.");
}
int numberOfUsedVariables = ((VariableImportancesHolder) model._modelAttributes).getVariableImportances().numberOfUsedVariables();
if (ModelCategory.Regression.equals(model._category)) {
_featurePlusBiasRatio = 0;
_normalizer = model._ntree_groups;
} else if (ModelCategory.Binomial.equals(model._category)) {
_featurePlusBiasRatio = 1f / (numberOfUsedVariables + 1);
_normalizer = -model._ntree_groups;
} else
throw new UnsupportedOperationException(
"Model category " + model._category + " cannot be used to calculate feature contributions.");
}
@Override
public float[] getContribs(float[] contribs) {
for (int i = 0; i < contribs.length; i++) {
if (contribs[i] != 0)
contribs[i] = _featurePlusBiasRatio + (contribs[i] / _normalizer);
}
return contribs;
}
}
public boolean isBinomialDoubleTrees() {
return _binomial_double_trees;
}
}
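// Illustrative sketch (not part of the original H2O sources): mirrors the binomial branch of
// unifyPreds() above. The per-tree class-1 probabilities accumulated during scoring are
// averaged over the tree groups, and the complement fills the other class. Values are hypothetical.
class DrfBinomialUnifyExample {
public static void main(String[] args) {
double[] preds = {0, 37.0, 0}; // preds[1] = sum of per-tree class-1 probabilities
int ntreeGroups = 50;
preds[1] /= ntreeGroups;
preds[2] = 1.0 - preds[1];
System.out.println(java.util.Arrays.toString(preds)); // ~[0.0, 0.74, 0.26]
}
}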
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/drf/DrfMojoReader.java
|
package hex.genmodel.algos.drf;
import hex.genmodel.algos.tree.SharedTreeMojoReader;
import java.io.IOException;
/**
*/
public class DrfMojoReader extends SharedTreeMojoReader<DrfMojoModel> {
@Override
public String getModelName() {
return "Distributed Random Forest";
}
@Override
protected void readModelData() throws IOException {
super.readModelData();
_model._binomial_double_trees = readkv("binomial_double_trees");
// _model._effective_n_classes = _model._nclasses == 2 && !_model._binomial_double_trees ? 1 : _model._nclasses;
}
@Override
protected DrfMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
return new DrfMojoModel(columns, domains, responseColumn);
}
@Override public String mojoVersion() {
return "1.40";
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/ensemble/StackedEnsembleMojoModel.java
|
package hex.genmodel.algos.ensemble;
import hex.genmodel.MojoModel;
import java.io.Serializable;
import java.util.Arrays;
public class StackedEnsembleMojoModel extends MojoModel {
MojoModel _metaLearner; //Currently only a GLM. May change to be DRF, GBM, XGBoost, or DL in the future
boolean _useLogitMetaLearnerTransform;
StackedEnsembleMojoSubModel[] _baseModels; //An array of base models
int _baseModelNum; //Number of base models
public StackedEnsembleMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
private static double logit(double p) {
final double x = p / (1 - p);
return x == 0 ? -19 : Math.max(-19, Math.log(x));
}
private static void logitTransformRow(double[] basePreds){
for (int i = 0; i < basePreds.length; i ++)
basePreds[i] = logit(Math.min(1 - 1e-9, Math.max(basePreds[i], 1e-9)));
}
@Override
public double[] score0(double[] row, double[] preds) {
double[] basePreds = new double[_baseModelNum]; //Proper allocation for binomial and regression ensemble (one prediction per base model)
double[] basePredsRow = new double[preds.length];
if(_nclasses > 2) { //Multinomial
basePreds = new double[_baseModelNum * _nclasses]; //Proper allocation for multinomial ensemble (class probabilities per base model)
for(int i = 0; i < _baseModelNum; ++i){
if (_baseModels[i] == null) continue; // skip unused model
for(int j = 0; j < _nclasses; ++j){
basePreds[i * _nclasses + j] = _baseModels[i]._mojoModel.score0(_baseModels[i].remapRow(row), basePredsRow)[j + 1];
}
}
if (_useLogitMetaLearnerTransform)
logitTransformRow(basePreds);
}else if(_nclasses == 2){ //Binomial
for(int i = 0; i < _baseModelNum; ++i) {
if (_baseModels[i] == null) continue; // skip unused model
_baseModels[i]._mojoModel.score0(_baseModels[i].remapRow(row), basePredsRow);
basePreds[i] = basePredsRow[2];
}
if (_useLogitMetaLearnerTransform)
logitTransformRow(basePreds);
}else{ //Regression
for(int i = 0; i < _baseModelNum; ++i) { //Regression
if (_baseModels[i] == null) continue; // skip unused model
_baseModels[i]._mojoModel.score0(_baseModels[i].remapRow(row), basePredsRow);
basePreds[i] = basePredsRow[0];
}
}
_metaLearner.score0(basePreds, preds);
return preds;
}
/**
* In stacked ensembles, multiple base models may appear, and each may use a different internal
* ordering of features. Therefore, the scored row's values must be re-mapped to the internal order of each
* model.
*/
static class StackedEnsembleMojoSubModel implements Serializable {
final MojoModel _mojoModel;
final int[] _mapping;
public StackedEnsembleMojoSubModel(MojoModel mojoModel, int[] mapping) {
_mojoModel = mojoModel;
_mapping = mapping;
}
/**
* Returns a new array representing row values re-mapped to the order given by the underlying submodel.
* Even when the order of columns in the row remains the same, a new instance of double[] is returned every time.
*
* @param row Row to re-map
* @return A new instance of double[] with values re-mapped to order given by the underlying submodel.
*/
public double[] remapRow(final double[] row) {
double[] remappedRow = new double[_mapping.length];
for (int i = 0; i < _mapping.length; i++) {
remappedRow[i] = row[_mapping[i]];
}
return remappedRow;
}
}
}
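// Illustrative sketch (not part of the original H2O sources): re-implements the private
// clamp-and-logit used by logitTransformRow() above, since private members are not visible
// across top-level classes. Values are hypothetical.
class LogitTransformExample {
static double logit(double p) {
double x = p / (1 - p);
return x == 0 ? -19 : Math.max(-19, Math.log(x));
}
public static void main(String[] args) {
double[] basePreds = {0.0, 0.25, 0.5, 0.999};
for (int i = 0; i < basePreds.length; i++) {
// clamp to [1e-9, 1 - 1e-9] so the logit stays finite
double clamped = Math.min(1 - 1e-9, Math.max(basePreds[i], 1e-9));
basePreds[i] = logit(clamped);
}
System.out.println(java.util.Arrays.toString(basePreds)); // [-19.0, -1.098..., 0.0, 6.906...]
}
}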
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/ensemble/StackedEnsembleMojoReader.java
|
package hex.genmodel.algos.ensemble;
import hex.genmodel.MojoModel;
import hex.genmodel.MultiModelMojoReader;
public class StackedEnsembleMojoReader extends MultiModelMojoReader<StackedEnsembleMojoModel> {
@Override
public String getModelName() {
return "StackedEnsemble";
}
@Override
protected void readParentModelData() {
int baseModelNum = readkv("base_models_num", 0);
_model._baseModelNum = baseModelNum;
_model._metaLearner = getModel((String) readkv("metalearner"));
final String metaLearnerTransform = readkv("metalearner_transform", "NONE");
if (!metaLearnerTransform.equals("NONE") && !metaLearnerTransform.equals("Logit"))
throw new UnsupportedOperationException("Metalearner Transform \"" + metaLearnerTransform + "\" is not supported!");
_model._useLogitMetaLearnerTransform = metaLearnerTransform.equals("Logit");
_model._baseModels = new StackedEnsembleMojoModel.StackedEnsembleMojoSubModel[baseModelNum];
final String[] columnNames = readkv("[columns]");
for (int i = 0; i < baseModelNum; i++) {
String modelKey = readkv("base_model" + i);
if (modelKey == null)
continue;
final MojoModel model = getModel(modelKey);
_model._baseModels[i] = new StackedEnsembleMojoModel.StackedEnsembleMojoSubModel(model,
createMapping(model, columnNames, modelKey));
}
}
/**
* Creates an array of integers mapping the reference column name space into the model-specific column name space.
*
* @param model Model to create column mapping for
* @param reference Column mapping serving as a reference
* @param modelName Name of the model for various error reports
* @return An array of integers representing the mapping.
*/
private static int[] createMapping(final MojoModel model, final String[] reference, final String modelName) {
String[] features = model.features();
int[] mapping = new int[features.length];
for (int i = 0; i < mapping.length; i++) {
String feature = features[i];
mapping[i] = findColumnIndex(reference, feature);
if (mapping[i] < 0) {
throw new IllegalStateException(String.format("Model '%s' does not have input column '%s'",
modelName, feature));
}
}
return mapping;
}
private static int findColumnIndex(String[] arr, String searchedColname) {
for (int i = 0; i < arr.length; i++) {
if (arr[i].equals(searchedColname)) return i;
}
return -1;
}
@Override
protected StackedEnsembleMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
return new StackedEnsembleMojoModel(columns, domains, responseColumn);
}
@Override public String mojoVersion() {
return "1.01";
}
}
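// Illustrative sketch (not part of the original H2O sources): mirrors createMapping()/
// findColumnIndex() above. Column names are hypothetical.
class ColumnMappingExample {
public static void main(String[] args) {
String[] reference = {"age", "income", "zip"}; // ensemble-level column order
String[] modelFeatures = {"zip", "age"};       // a base model's own feature order
int[] mapping = new int[modelFeatures.length];
for (int i = 0; i < modelFeatures.length; i++) {
mapping[i] = java.util.Arrays.asList(reference).indexOf(modelFeatures[i]);
}
System.out.println(java.util.Arrays.toString(mapping)); // [2, 0]
// remapRow() would then score this base model on {row[2], row[0]}.
}
}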
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/gam/GamMojoModel.java
|
package hex.genmodel.algos.gam;
import static hex.genmodel.utils.DistributionFamily.*;
public class GamMojoModel extends GamMojoModelBase {
public static final int CS_SPLINE_TYPE = 0;
public static final int IS_SPLINE_TYPE = 2;
public static final int MS_SPLINE_TYPE = 3;
public static final int TP_SPLINE_TYPE = 1;
private boolean _classifier;
GamMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
void init() {
super.init();
_classifier = _family.equals(bernoulli) || _family.equals(fractionalbinomial) || _family.equals(quasibinomial);
}
// generate prediction for binomial/fractional binomial/negative binomial, poisson, tweedie families
@Override
double[] gamScore0(double[] data, double[] preds) {
double eta = generateEta(_beta_center, data); // generate eta, inner product of beta and data
double mu = evalLink(eta);
if (_classifier) {
preds[0] = (mu >= _defaultThreshold) ? 1 : 0; // threshold given by ROC
preds[1] = 1.0 - mu; // class 0
preds[2] = mu; // class 1
} else {
preds[0] = mu;
}
return preds;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/gam/GamMojoModelBase.java
|
package hex.genmodel.algos.gam;
import hex.genmodel.ConverterFactoryProvidingModel;
import hex.genmodel.GenModel;
import hex.genmodel.MojoModel;
import hex.genmodel.easy.CategoricalEncoder;
import hex.genmodel.easy.EasyPredictModelWrapper;
import hex.genmodel.easy.RowData;
import hex.genmodel.easy.RowToRawDataConverter;
import hex.genmodel.utils.ArrayUtils;
import hex.genmodel.utils.DistributionFamily;
import hex.genmodel.utils.LinkFunctionType;
import java.util.Map;
import static hex.genmodel.algos.gam.GamMojoModel.*;
import static hex.genmodel.algos.gam.GamUtilsThinPlateRegression.*;
import static hex.genmodel.utils.ArrayUtils.multArray;
import static hex.genmodel.utils.ArrayUtils.nanArray;
public abstract class GamMojoModelBase extends MojoModel implements ConverterFactoryProvidingModel, Cloneable {
public LinkFunctionType _link_function;
boolean _useAllFactorLevels;
int _cats;
int[] _catNAFills;
int[] _catOffsets;
int _nums;
int _numsCenter;
double[] _numNAFillsCenter;
boolean _meanImputation;
double[] _beta_no_center;
double[] _beta_center;
double[][] _beta_multinomial;
double[][] _beta_multinomial_no_center; // coefficients not centered for multinomial/ordinal
double[][] _beta_multinomial_center; // coefficients centered for multinomial/ordinal
int[] _spline_orders;
int[] _spline_orders_sorted;
DistributionFamily _family;
String[][] _gam_columns;
String[][] _gam_columns_sorted;
int[] _d;
int[] _m;
int[] _M;
int[] _gamPredSize;
int _num_gam_columns;
int[] _bs;
int[] _bs_sorted;
int[] _num_knots;
int[] _num_knots_sorted;
int[] _num_knots_sorted_minus1;
int[] _numBasisSize; // number of basis function sizes for I-spline
int[] _numMSBasisSize; // number of basis function sizes for M-spline
int[] _num_knots_TP;
double[][][] _knots;
double[][][] _binvD;
double[][][] _zTranspose;
double[][][] _zTransposeCS;
String[][] _gamColNames; // expanded gam column names
String[][] _gamColNamesCenter;
String[] _names_no_centering; // column names of features with no centering
int _totFeatureSize; // total GAM predictor count: original predictors plus expanded gam columns, not centered
int _betaSizePerClass;
int _betaCenterSizePerClass;
double _tweedieLinkPower;
double[][] _hj; // difference between knot values
int _numExpandedGamCols; // number of expanded gam columns
int _numExpandedGamColsCenter; // number of expanded gam columns centered
int _lastClass;
int[][][] _allPolyBasisList;
int _numTPCol;
int _numCSCol;
int _numISCol;
int _numMSCol;
// following arrays are pre-allocated to avoid repeated memory allocation per row of scoring
int[] _tpDistzCSSize;
boolean[] _dEven;
double[] _constantTerms;
double[][] _gamColMeansRaw;
double[][] _oneOGamColStd;
boolean _standardize;
ISplines[] _iSplineBasis;
MSplines[] _mSplineBasis;
GamMojoModelBase(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
@Override
public double[] score0(double[] row, double[] preds) {
if (_meanImputation) {
imputeMissingWithMeans(row); // perform imputation for each row
}
return gamScore0(row, preds);
}
void init() {
_num_knots_sorted_minus1 = new int[_num_knots_sorted.length];
for (int index = 0; index < _num_knots_sorted.length; index++)
_num_knots_sorted_minus1[index] = _num_knots_sorted[index]-1;
if (_numCSCol > 0) {
_hj = new double[_numCSCol][];
for (int ind = 0; ind < _numCSCol; ind++)
_hj[ind] = ArrayUtils.eleDiff(_knots[ind][0]);
}
int offset = _numCSCol;
if (_numISCol > 0) {
_numBasisSize = new int[_numISCol];
_iSplineBasis = new ISplines[_numISCol];
for (int ind=0; ind<_numISCol;ind++) {
int absIndex = ind + offset;
_numBasisSize[ind] = _num_knots_sorted[absIndex]+_spline_orders_sorted[absIndex]-2;
_iSplineBasis[ind] = new ISplines(_spline_orders_sorted[absIndex], _knots[absIndex][0]);
}
}
offset += _numISCol;
if (_numMSCol > 0) {
_numMSBasisSize = new int[_numMSCol];
_mSplineBasis = new MSplines[_numMSCol];
for (int ind=0; ind<_numMSCol; ind++) {
int absIndex = ind + offset;
_numMSBasisSize[ind] = _num_knots_sorted[absIndex]+_spline_orders_sorted[absIndex]-2;
_mSplineBasis[ind] = new MSplines(_spline_orders_sorted[absIndex], _knots[absIndex][0]);
}
}
offset += _numMSCol;
if (_numTPCol > 0) {
_tpDistzCSSize = new int[_numTPCol];
_dEven = new boolean[_numTPCol];
_constantTerms = new double[_numTPCol];
for (int index = 0; index < _numTPCol; index++) {
int absIndex = index+ offset;
_tpDistzCSSize[index] = _num_knots_sorted[absIndex]-_M[index];
_dEven[index] = (_d[absIndex] % 2) == 0;
_constantTerms[index] = calTPConstantTerm(_m[index], _d[absIndex], _dEven[index]);
}
}
_lastClass = _nclasses - 1;
}
@Override
public GenModel internal_threadSafeInstance() {
try {
GamMojoModelBase clonedMojo = (GamMojoModelBase) clone();
clonedMojo.init();
return clonedMojo;
} catch (CloneNotSupportedException e) {
throw new RuntimeException(e);
}
}
abstract double[] gamScore0(double[] row, double[] preds);
private void imputeMissingWithMeans(double[] data) {
for (int ind=0; ind < _cats; ind++)
if (Double.isNaN(data[ind])) data[ind] = _catNAFills[ind];
for (int ind = 0; ind < _numsCenter; ind++)
if (Double.isNaN(data[ind + _cats])) data[ind + _cats] = _numNAFillsCenter[ind];
}
double evalLink(double val) {
switch (_link_function) {
case identity: return GenModel.GLM_identityInv(val);
case logit: return GenModel.GLM_logitInv(val);
case log: return GenModel.GLM_logInv(val);
case inverse: return GenModel.GLM_inverseInv(val);
case tweedie: return GenModel.GLM_tweedieInv(val, _tweedieLinkPower);
default: throw new UnsupportedOperationException("Unexpected link function "+_link_function);
}
}
// Reads a categorical value and adjusts for useAllFactorLevels being true or false
int readCatVal(double data, int dataIndex) {
int ival = _useAllFactorLevels ? ((int) data) : ((int) data - 1);
if (ival < 0)
return -1;
ival += _catOffsets[dataIndex];
return ival;
}
// this method will generate the beta*data+intercept
double generateEta(double[] beta, double[] data) {
double eta = 0.0;
int catOffsetLength = _catOffsets.length - 1;
for (int i = 0; i < catOffsetLength; ++i) { // take care of contribution from categorical columns
int ival = readCatVal(data[i], i);
if ((ival < _catOffsets[i + 1]) && (ival >= 0))
eta += beta[ival];
}
int noff = _catOffsets[_cats] - _cats;
int numColLen = beta.length - 1 - noff;
for (int i = _cats; i < numColLen; ++i)
eta += beta[noff + i] * data[i];
eta += beta[beta.length - 1]; // add intercept
return eta;
}
// check if gamification is needed. If all gamified column values are NaN, we need to add gamification. Otherwise,
// gamification is already done.
private boolean gamificationNeeded(double[] rawData, int gamColStart) {
for (int cind = gamColStart; cind < rawData.length; cind++)
if (!Double.isNaN(rawData[cind])) {
return false;
}
return true;
}
int addCSGamification(final RowData rowData, int cind, int dataIndEnd, double[] dataWithGamifiedColumns) {
Object dataObject = rowData.get(_gam_columns_sorted[cind][0]); // read predictor column
double gamColData = Double.NaN;
if (dataObject == null) { // NaN, skip column gamification
return dataIndEnd;
} else { // can only test this with Python/R client
gamColData = (dataObject instanceof String) ? Double.parseDouble((String) dataObject) : (double) dataObject;
}
double[] basisVals = new double[_num_knots_sorted[cind]];
double[] basisValsCenter = new double[_num_knots_sorted_minus1[cind]];
GamUtilsCubicRegression.expandOneGamCol(gamColData, _binvD[cind], basisVals, _hj[cind], _knots[cind][0]);
multArray(basisVals, _zTranspose[cind], basisValsCenter);
System.arraycopy(basisValsCenter, 0, dataWithGamifiedColumns, dataIndEnd, _num_knots_sorted_minus1[cind]); // copy expanded gam to rawData
return dataIndEnd;
}
int addISGamification(final RowData rowData, int cind, int csCounter, int dataIndEnd, double[] dataWithGamifiedColumns) {
Object dataObject = rowData.get(_gam_columns_sorted[cind][0]); // read predictor column
double gamColData = Double.NaN;
if (dataObject == null) // NaN, skip column gamification
return dataIndEnd;
else // can only test this with Python/R client
gamColData = (dataObject instanceof String) ? Double.parseDouble((String) dataObject) : (double) dataObject;
double[] basisVals = new double[_numBasisSize[csCounter]];
_iSplineBasis[csCounter].gamifyVal(basisVals, gamColData);
System.arraycopy(basisVals, 0, dataWithGamifiedColumns, dataIndEnd, _numBasisSize[csCounter]); // copy expanded gam to rawData
return dataIndEnd;
}
int addMSGamification(final RowData rowData, int cind, int csCounter, int dataIndEnd, double[] dataWithGamifiedColumns) {
Object dataObject = rowData.get(_gam_columns_sorted[cind][0]); // read predictor column
double gamColData = Double.NaN;
if (dataObject == null) // NaN, skip column gamification
return dataIndEnd;
else // can only test this with Python/R client
gamColData = (dataObject instanceof String) ? Double.parseDouble((String) dataObject) : (double) dataObject;
double[] basisVals = new double[_numMSBasisSize[csCounter]];
_mSplineBasis[csCounter].gamifyVal(basisVals, gamColData);
int centerBasisLen = basisVals.length-1;
double[] basisValsCenter = new double[centerBasisLen];
multArray(basisVals, _zTranspose[csCounter], basisValsCenter);
System.arraycopy(basisValsCenter, 0, dataWithGamifiedColumns, dataIndEnd, centerBasisLen); // copy expanded gam to rawData
return dataIndEnd;
}
// this method will add to each data row the expanded gam columns with centering
double[] addExpandGamCols(double[] rawData, final RowData rowData) { // add all expanded gam columns here
int dataIndEnd = _nfeatures - _numExpandedGamColsCenter; // starting index to fill out the rawData
if (!gamificationNeeded(rawData, dataIndEnd))
return rawData; // already contain gamified columns. Nothing needs to be done.
// add expanded gam columns to rowData
double[] dataWithGamifiedColumns = nanArray(_nfeatures); // store gamified columns
System.arraycopy(rawData, 0, dataWithGamifiedColumns, 0, dataIndEnd);
int tpCounter = 0;
int isCounter = 0;
int msCounter = 0;
for (int cind = 0; cind < _num_gam_columns; cind++) { // go through all gam_columns, CS and TP
if (_bs_sorted[cind] == CS_SPLINE_TYPE) { // to generate basis function values for cubic regression spline
dataIndEnd = addCSGamification(rowData, cind, dataIndEnd, dataWithGamifiedColumns);
} else if (_bs_sorted[cind] == TP_SPLINE_TYPE) { // tp regression
addTPGamification(rowData, cind, tpCounter, dataIndEnd, dataWithGamifiedColumns);
tpCounter++;
} else if (_bs_sorted[cind] == IS_SPLINE_TYPE) { // perform I-spline gamification
addISGamification(rowData, cind, isCounter, dataIndEnd, dataWithGamifiedColumns);
isCounter++;
} else if (_bs_sorted[cind] == MS_SPLINE_TYPE) {
addMSGamification(rowData, cind, msCounter, dataIndEnd, dataWithGamifiedColumns);
msCounter++;
} else {
throw new IllegalArgumentException("spline type not implemented!");
}
dataIndEnd += _num_knots_sorted_minus1[cind];
}
return dataWithGamifiedColumns;
}
int addTPGamification(final RowData rowData, int cind, int tpCounter, int dataIndEnd, double[] dataWithGamifiedColumns) {
String[] gamCols = _gam_columns_sorted[cind];
double[] gamPred = grabPredictorVals(gamCols, rowData); // grabbing multiple predictors
if (gamPred == null)
return dataIndEnd;
double[] tpDistance = new double[_num_knots_sorted[cind]];
calculateDistance(tpDistance, gamPred, _num_knots_sorted[cind], _knots[cind],
_d[cind], _m[tpCounter], _dEven[tpCounter], _constantTerms[tpCounter], _oneOGamColStd[tpCounter],
_standardize); // calculate distance between row and knots, result in rowValues
double[] tpDistzCS = new double[_tpDistzCSSize[tpCounter]];
multArray(tpDistance, _zTransposeCS[tpCounter], tpDistzCS); // distance * zCS
double[] tpPoly = new double[_M[tpCounter]];
calculatePolynomialBasis(tpPoly, gamPred, _d[cind], _M[tpCounter],
_allPolyBasisList[tpCounter], _gamColMeansRaw[tpCounter], _oneOGamColStd[tpCounter], _standardize); // generate polynomial basis
// concatenate distance zCS and poly basis.
double[] tpDistzCSPoly = new double[_num_knots_sorted[cind]];
double[] tpDistzCSPolyzT = new double[_num_knots_sorted_minus1[cind]];
System.arraycopy(tpDistzCS, 0, tpDistzCSPoly, 0, tpDistzCS.length);
System.arraycopy(tpPoly, 0, tpDistzCSPoly, tpDistzCS.length, _M[tpCounter]);
multArray(tpDistzCSPoly, _zTranspose[cind], tpDistzCSPolyzT);
System.arraycopy(tpDistzCSPolyzT, 0, dataWithGamifiedColumns, dataIndEnd,
tpDistzCSPolyzT.length);
return dataIndEnd;
}
double[] grabPredictorVals(String[] gamCols, final RowData rowData) {
int numCol = gamCols.length;
double[] predVals = new double[numCol];
for (int index = 0; index < numCol; index++) {
Object data = rowData.get(gamCols[index]);
if (data == null)
return null;
predVals[index] = (data instanceof String) ? Double.parseDouble((String) data) : (double) data;
}
return predVals;
}
@Override
public RowToRawDataConverter makeConverterFactory(Map<String, Integer> modelColumnNameToIndexMap,
Map<Integer, CategoricalEncoder> domainMap,
EasyPredictModelWrapper.ErrorConsumer errorConsumer,
EasyPredictModelWrapper.Config config) {
return new GamRowToRawDataConverter(this, modelColumnNameToIndexMap, domainMap, errorConsumer, config);
}
}
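// Illustrative sketch (not part of the original H2O sources): mirrors generateEta() above for
// the simple all-numeric case (no categorical offsets): eta = dot(beta, data) + intercept,
// with the intercept stored as the last beta entry. Values are hypothetical.
class EtaExample {
public static void main(String[] args) {
double[] beta = {0.5, -1.0, 2.0}; // two coefficients plus intercept
double[] data = {1.0, 3.0};
double eta = beta[beta.length - 1]; // start with the intercept
for (int i = 0; i < data.length; i++)
eta += beta[i] * data[i];
System.out.println(eta); // 0.5*1 - 1*3 + 2 = -0.5
}
}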
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/gam/GamMojoMultinomialModel.java
|
package hex.genmodel.algos.gam;
import static hex.genmodel.utils.DistributionFamily.multinomial;
public class GamMojoMultinomialModel extends GamMojoModelBase {
private boolean _trueMultinomial;
GamMojoMultinomialModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
void init() {
super.init();
_trueMultinomial = _family.equals(multinomial);
}
@Override
double[] gamScore0(double[] row, double[] preds) {
if (row.length == nfeatures())
_beta_multinomial = _beta_multinomial_center;
else
_beta_multinomial = _beta_multinomial_no_center;
for (int c=0; c<_nclasses; ++c)
preds[c+1] = generateEta(_beta_multinomial[c], row); // generate eta for each class
if (_trueMultinomial)
return postPredMultinomial(preds);
else // post process predict for ordinal family
return postPredOrdinal(preds);
}
double[] postPredMultinomial(double[] preds) {
double max_row = 0;
double sum_exp = 0;
for (int c = 1; c < preds.length; ++c) if (preds[c] > max_row) max_row = preds[c];
for (int c = 1; c < preds.length; ++c) { sum_exp += (preds[c] = Math.exp(preds[c]-max_row));}
sum_exp = 1/sum_exp;
double max_p = 0;
for (int c = 1; c < preds.length; ++c) if ((preds[c] *= sum_exp) > max_p) { max_p = preds[c]; preds[0] = c-1; }
return preds;
}
double[] postPredOrdinal(double[] preds) {
double previousCDF = 0.0;
preds[0] = _lastClass;
for (int cInd = 0; cInd < _lastClass; cInd++) { // classify row and calculate PDF of each class
double eta = preds[cInd + 1];
double currCDF = 1.0 / (1 + Math.exp(-eta));
preds[cInd + 1] = currCDF - previousCDF;
previousCDF = currCDF;
if (eta > 0) { // found the correct class
preds[0] = cInd;
break;
}
}
for (int cInd = (int) preds[0] + 1; cInd < _lastClass; cInd++) { // continue PDF calculation
double currCDF = 1.0 / (1 + Math.exp(-preds[cInd + 1]));
preds[cInd + 1] = currCDF - previousCDF;
previousCDF = currCDF;
}
preds[_nclasses] = 1-previousCDF;
return preds;
}
}
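// Illustrative sketch (not part of the original H2O sources): mirrors postPredMultinomial()
// above — a numerically stable softmax (max subtracted before exponentiation) with the argmax
// class written to preds[0]. Values are hypothetical.
class GamSoftmaxExample {
public static void main(String[] args) {
double[] preds = {0, 1.0, 2.0, 0.5}; // preds[0] reserved for the predicted label
double maxRow = 0;
for (int c = 1; c < preds.length; ++c) if (preds[c] > maxRow) maxRow = preds[c];
double sumExp = 0;
for (int c = 1; c < preds.length; ++c) sumExp += (preds[c] = Math.exp(preds[c] - maxRow));
double maxP = 0;
for (int c = 1; c < preds.length; ++c)
if ((preds[c] /= sumExp) > maxP) { maxP = preds[c]; preds[0] = c - 1; }
System.out.println(java.util.Arrays.toString(preds)); // label 1 with probability ~0.63
}
}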
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/gam/GamMojoReader.java
|
package hex.genmodel.algos.gam;
import hex.genmodel.ModelMojoReader;
import hex.genmodel.utils.DistributionFamily;
import java.io.IOException;
import java.nio.ByteBuffer;
import static hex.genmodel.algos.gam.GamMojoModel.IS_SPLINE_TYPE;
import static hex.genmodel.algos.gam.GamMojoModel.MS_SPLINE_TYPE;
import static hex.genmodel.utils.ArrayUtils.subtract;
import static hex.genmodel.utils.DistributionFamily.ordinal;
public class GamMojoReader extends ModelMojoReader<GamMojoModelBase> {
@Override
public String getModelName() {
return "Generalized Additive Model";
}
@Override
protected void readModelData() throws IOException {
_model._useAllFactorLevels = readkv("use_all_factor_levels", false);
_model._numExpandedGamCols = readkv("num_expanded_gam_columns",0);
_model._numExpandedGamColsCenter = readkv("num_expanded_gam_columns_center",0);
_model._family = DistributionFamily.valueOf((String)readkv("family"));
_model._cats = readkv("cats", -1);
_model._nums = readkv("num");
_model._numsCenter = readkv("numsCenter");
_model._catNAFills = readkv("catNAFills", new int[0]);
_model._numNAFillsCenter = readkv("numNAFillsCenter", new double[0]);
_model._meanImputation = readkv("mean_imputation", false);
_model._betaSizePerClass = readkv("beta length per class",0);
_model._catOffsets = readkv("cat_offsets", new int[0]);
if (!_model._family.equals(DistributionFamily.multinomial)) // multinomial or ordinal have specific link functions not included in general link functions
_model._link_function = readLinkFunction((String) readkv("link"), _model._family);
_model._tweedieLinkPower = readkv("tweedie_link_power", 0.0);
_model._betaCenterSizePerClass = readkv("beta center length per class", 0);
if (_model._family.equals(DistributionFamily.multinomial) || _model._family.equals(ordinal)) {
_model._beta_multinomial_no_center = readRectangularDoubleArray("beta_multinomial", _model._nclasses, _model._betaSizePerClass);
_model._beta_multinomial_center = readRectangularDoubleArray("beta_multinomial_centering", _model._nclasses,
_model._betaCenterSizePerClass);
} else {
_model._beta_no_center = readkv("beta");
_model._beta_center = readkv("beta_center");
}
// read in GAM specific parameters
_model._num_knots = readkv("num_knots");
_model._num_knots_sorted = readkv("num_knots_sorted");
int[] gamColumnDim = readkv("gam_column_dim");
_model._gam_columns = read2DStringArrays(gamColumnDim,"gam_columns");
int[] gamColumnDimSorted = readkv("gam_column_dim_sorted");
_model._gam_columns_sorted = read2DStringArrays(gamColumnDimSorted,"gam_columns_sorted");
_model._num_gam_columns = _model._gam_columns.length;
_model._numTPCol = readkv("num_TP_col");
_model._numCSCol = readkv("num_CS_col");
_model._numISCol = readkv("num_IS_col");
_model._numMSCol = readkv("num_MS_col");
if (_model._numISCol > 0 || _model._numMSCol > 0) {
_model._spline_orders = readkv("spline_orders");
_model._spline_orders_sorted = readkv("spline_orders_sorted");
if (_model._numISCol > 0) {
_model._numBasisSize = new int[_model._numISCol];
int isCounter = 0;
int offset = _model._numCSCol;
for (int index = 0; index < _model._numISCol; index++) {
int trueIndex = index+offset;
_model._numBasisSize[isCounter++] = _model._num_knots_sorted[trueIndex] +
_model._spline_orders_sorted[trueIndex] - 2;
}
}
if (_model._numMSCol > 0) {
_model._numMSBasisSize = new int[_model._numMSCol];
int msCounter = 0;
int offset = _model._numISCol+_model._numCSCol;
for (int index=0; index<_model._numMSCol; index++) {
int trueIndex = offset + index;
_model._numMSBasisSize[msCounter++] = _model._num_knots_sorted[trueIndex]+
_model._spline_orders_sorted[trueIndex]-2;
}
}
}
_model._totFeatureSize = readkv("total feature size");
_model._names_no_centering = readStringArrays(_model._totFeatureSize, "_names_no_centering");
_model._bs = readkv("bs");
_model._bs_sorted = readkv("bs_sorted");
_model._zTranspose = new double[_model._num_gam_columns][][];
int[] gamColName_dim = readkv("gamColName_dim");
_model._gamColNames = read2DStringArrays(gamColName_dim, "gamColNames");
//_model._gamColNames = new String[_model._num_gam_columns][];
//_model._gamColNamesCenter = new String[_model._num_gam_columns][];
_model._gamPredSize = readkv("_d");
if (_model._numTPCol > 0) {
_model._standardize = readkv("standardize");
_model._zTransposeCS = new double[_model._numTPCol][][];
_model._num_knots_TP = readkv("num_knots_TP");
_model._d = readkv("_d");
_model._m = readkv("_m");
_model._M = readkv("_M");
int[] predSize = new int[_model._numTPCol];
System.arraycopy(_model._d, _model._d.length-_model._numTPCol, predSize, 0, _model._numTPCol); // predictor dimensions of the thin-plate columns (assumed to be the trailing entries of _d)
_model._gamColMeansRaw = read2DDoubleArrays(predSize, "gamColMeansRaw");
_model._oneOGamColStd = read2DDoubleArrays(predSize, "gamColStdRaw");
int[] numKnotsMM = subtract(_model._num_knots_TP, _model._M);
_model._zTransposeCS = read3DArray("zTransposeCS", _model._numTPCol, numKnotsMM, _model._num_knots_TP);
int[] predNum = new int[_model._numTPCol];
System.arraycopy(_model._d, _model._numCSCol, predNum, 0, _model._numTPCol);
_model._allPolyBasisList = read3DIntArray("polynomialBasisList", _model._numTPCol, _model._M, predNum);
}
int[] numKnotsM1 = subtract(_model._num_knots_sorted, 1);
int numKnotsLen = numKnotsM1.length;
int isCounter=0;
int msCounter = 0;
int[] zSecondDim = new int[numKnotsLen];
int[] zThirdDim = new int[numKnotsLen];
for (int index=0; index<numKnotsLen; index++) {
if (_model._bs_sorted[index] == IS_SPLINE_TYPE) {
numKnotsM1[index] = _model._numBasisSize[isCounter++];
zSecondDim[index] = 0;
zThirdDim[index] = 0;
} else if (_model._bs_sorted[index] == MS_SPLINE_TYPE) {
numKnotsM1[index] = _model._numMSBasisSize[msCounter++]-1;
zSecondDim[index] = numKnotsM1[index];
zThirdDim[index] = numKnotsM1[index]+1;
} else {
zSecondDim[index] = numKnotsM1[index];
zThirdDim[index] = _model._num_knots_sorted[index];
}
}
_model._gamColNamesCenter = read2DStringArrays(numKnotsM1, "gamColNamesCenter");
_model._zTranspose = read3DArray("zTranspose", _model._num_gam_columns, zSecondDim, zThirdDim);
_model._knots = read3DArray("knots", _model._num_gam_columns, _model._gamPredSize, _model._num_knots_sorted);
if (_model._numCSCol > 0) {
int[] numKnotsM2 = subtract(_model._num_knots_sorted, 2);
_model._binvD = read3DArray("_binvD", _model._numCSCol, numKnotsM2, _model._num_knots_sorted);
}
_model.init();
}
String[][] read2DStringArrays(int[] arrayDim, String title) throws IOException {
int firstDim = arrayDim.length;
String[][] stringArrays = new String[firstDim][];
int indexDim1 = 0;
int indexDim2 = 0;
for (int index = 0; index < firstDim; index++)
stringArrays[index] = new String[arrayDim[index]];
for (String line : readtext(title)) {
if (indexDim2 >= stringArrays[indexDim1].length) { // go to next dim
indexDim1++;
indexDim2 = 0;
}
stringArrays[indexDim1][indexDim2] = line;
indexDim2++;
}
return stringArrays;
}
double[][] read2DDoubleArrays(int[] arrayDim, String title) throws IOException {
int firstDim = arrayDim.length;
double[][] doubleArrays = new double[firstDim][];
ByteBuffer bb = ByteBuffer.wrap(readblob(title));
for (int index = 0; index < firstDim; index++) {
doubleArrays[index] = new double[arrayDim[index]];
for (int index2nd = 0; index2nd < arrayDim[index]; index2nd++) {
doubleArrays[index][index2nd] = bb.getDouble();
}
}
return doubleArrays;
}
double[][] read2DArray(String title, int firstDSize, int secondDSize) throws IOException {
double [][] row = new double[firstDSize][secondDSize];
ByteBuffer bb = ByteBuffer.wrap(readblob(title));
for (int i = 0; i < firstDSize; i++) {
for (int j = 0; j < secondDSize; j++)
row[i][j] = bb.getDouble();
}
return row;
}
int[][][] read3DIntArray(String title, int firstDimSize, int[] secondDim, int[] thirdDim) throws IOException {
int [][][] row = new int[firstDimSize][][];
ByteBuffer bb = ByteBuffer.wrap(readblob(title));
for (int i = 0; i < firstDimSize; i++) {
row[i] = new int[secondDim[i]][thirdDim[i]];
for (int j = 0; j < secondDim[i]; j++) {
for (int k = 0; k < thirdDim[i]; k++)
row[i][j][k] = bb.getInt();
}
}
return row;
}
double[][][] read3DArray(String title, int firstDimSize, int[] secondDim, int[] thirdDim) throws IOException {
double [][][] row = new double[firstDimSize][][];
ByteBuffer bb = ByteBuffer.wrap(readblob(title));
for (int i = 0; i < firstDimSize; i++) {
row[i] = new double[secondDim[i]][thirdDim[i]];
for (int j = 0; j < secondDim[i]; j++) {
for (int k = 0; k < thirdDim[i]; k++)
row[i][j][k] = bb.getDouble();
}
}
return row;
}
double[][] read2DArrayDiffLength(String title, double[][] row, int[] num_knots) throws IOException {
int numGamColumns = num_knots.length;
ByteBuffer bb = ByteBuffer.wrap(readblob(title));
for (int i = 0; i < numGamColumns; i++) {
row[i] = new double[num_knots[i]];
for (int j = 0; j < row[i].length; j++)
row[i][j] = bb.getDouble();
}
return row;
}
@Override
protected GamMojoModelBase makeModel(String[] columns, String[][] domains, String responseColumn) {
String family = readkv("family");
if ("multinomial".equals(family) || "ordinal".equals(family))
return new GamMojoMultinomialModel(columns, domains, responseColumn);
else
return new GamMojoModel(columns, domains, responseColumn);
}
@Override
public String mojoVersion() {
return "1.00";
}
}
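// Illustrative sketch (not part of the original H2O sources): shows the ByteBuffer convention
// used by read2DDoubleArrays() above — ragged rows stored back-to-back as doubles, with the
// row lengths supplied separately. Values are hypothetical.
class RaggedDoubleArrayReadExample {
public static void main(String[] args) {
int[] dims = {2, 3}; // row lengths
java.nio.ByteBuffer bb = java.nio.ByteBuffer.allocate(8 * 5);
for (double v : new double[]{1, 2, 10, 20, 30}) bb.putDouble(v);
bb.flip(); // switch from writing to reading
double[][] out = new double[dims.length][];
for (int i = 0; i < dims.length; i++) {
out[i] = new double[dims[i]];
for (int j = 0; j < dims[i]; j++) out[i][j] = bb.getDouble();
}
System.out.println(java.util.Arrays.deepToString(out)); // [[1.0, 2.0], [10.0, 20.0, 30.0]]
}
}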
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/gam/GamRowToRawDataConverter.java
|
package hex.genmodel.algos.gam;
import hex.genmodel.GenModel;
import hex.genmodel.easy.CategoricalEncoder;
import hex.genmodel.easy.EasyPredictModelWrapper;
import hex.genmodel.easy.RowData;
import hex.genmodel.easy.RowToRawDataConverter;
import hex.genmodel.easy.exception.PredictException;
import java.util.Map;
public class GamRowToRawDataConverter extends RowToRawDataConverter {
GamMojoModelBase _m;
public GamRowToRawDataConverter(GenModel m, Map<String, Integer> modelColumnNameToIndexMap, Map<Integer, CategoricalEncoder> domainMap, EasyPredictModelWrapper.ErrorConsumer errorConsumer, EasyPredictModelWrapper.Config config) {
super(m, modelColumnNameToIndexMap, domainMap, errorConsumer, config);
_m = (GamMojoModelBase) m;
}
@Override
public double[] convert(RowData data, double[] rawData) throws PredictException {
rawData = super.convert(data, rawData);
return _m.addExpandGamCols(rawData, data);
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/gam/GamUtilsCubicRegression.java
|
package hex.genmodel.algos.gam;
import java.util.Arrays;
public class GamUtilsCubicRegression {
public static double gen_a_m_j(double xjp1, double x, double hj) {
return (xjp1-x)/hj;
}
public static double gen_a_p_j(double xj, double x, double hj) {
return (x-xj)/hj;
}
public static double gen_c_m_j(double xjp1, double x, double hj) {
double t = (xjp1-x);
double t3 = t*t*t;
return ((t3/hj-t*hj)/6.0);
}
public static double gen_c_p_j(double xj, double x, double hj) {
double t=(x-xj);
double t3 = t*t*t;
return ((t3/hj-t*hj)/6.0);
}
public static int locateBin(double xval, double[] knots) {
if (xval <= knots[0]) //small short cut
return 0;
int highIndex = knots.length-1;
if (xval >= knots[highIndex]) // small short cut
return (highIndex-1);
int tryBin = -1;
int count = 0;
int numBins = knots.length;
int lowIndex = 0;
while (count < numBins) {
tryBin = (int) Math.floor((highIndex+lowIndex)*0.5);
if ((xval >= knots[tryBin]) && (xval < knots[tryBin+1]))
return tryBin;
else if (xval > knots[tryBin])
lowIndex = tryBin;
else if (xval < knots[tryBin])
highIndex = tryBin;
count++;
}
return tryBin;
}
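// Illustrative note (not part of the original source): locateBin returns the index of the
// half-open interval [knots[i], knots[i+1]) containing xval, clamped to the boundary bins.
// For knots = {0, 1, 2, 3}:
//   locateBin(-0.5, knots) == 0  // below the range -> first bin
//   locateBin( 1.5, knots) == 1  // falls inside [1, 2)
//   locateBin( 9.0, knots) == 2  // above the range -> last bin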
public static void updateAFunc(double[] basisVals, double xval, int binIndex, double[] knots, double[] hj) {
int jp1 = binIndex+1;
basisVals[binIndex] += gen_a_m_j(knots[jp1], xval, hj[binIndex]);
basisVals[jp1] += gen_a_p_j(knots[binIndex], xval, hj[binIndex]);
}
public static void updateFMatrixCFunc(double[] basisVals, double xval, int binIndex, double[] knots, double[] hj,
double[][] binvD) {
int numKnots = basisVals.length;
int matSize = binvD.length;
int jp1 = binIndex+1;
double cmj = gen_c_m_j(knots[jp1], xval, hj[binIndex]);
double cpj = gen_c_p_j(knots[binIndex], xval, hj[binIndex]);
int binIndexM1 = binIndex-1;
for (int index=0; index < numKnots; index++) {
if (binIndex == 0) { // only one part
basisVals[index] = binvD[binIndex][index] * cpj;
} else if (binIndex >= matSize) { // update only one part
basisVals[index] = binvD[binIndexM1][index] * cmj;
} else { // binIndex > 0 and binIndex < matSize
basisVals[index] = binvD[binIndexM1][index] * cmj+binvD[binIndex][index] * cpj;
}
}
}
public static void expandOneGamCol(double xval, double[][] binvD, double[] basisVals, double[] hj, double[] knots) {
if (!Double.isNaN(xval)) {
int binIndex = locateBin(xval, knots);
updateFMatrixCFunc(basisVals, xval, binIndex, knots, hj, binvD);
updateAFunc(basisVals, xval, binIndex, knots, hj);
} else {
Arrays.fill(basisVals, Double.NaN);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/gam/GamUtilsISplines.java
|
package hex.genmodel.algos.gam;
import hex.genmodel.utils.ArrayUtils;
import java.util.ArrayList;
import java.util.List;
import static hex.genmodel.utils.ArrayUtils.arrayInitRange;
public class GamUtilsISplines {
public static double[] fillKnots(double[] knots, int m) {
int numKnotsDup = knots.length+2*m-2;
double[] knotsNew = new double[numKnotsDup];
int upperBound = m > 0?m-1:0;
for (int index=0; index < upperBound; index++) // m lower knots, all equal value
knotsNew[index]=knots[0];
int knotLen = knots.length;
for (int index=0; index < knotLen; index++) // N-2 interior knots
knotsNew[index+upperBound]=knots[index];
double upperVal = knots[knots.length-1];
for (int index=knotLen+upperBound; index < numKnotsDup; index++)
knotsNew[index]=upperVal;
return knotsNew;
}
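// Illustrative note (not part of the original source): fillKnots duplicates each boundary
// knot m-1 extra times. For knots = {1, 2, 3} and order m = 2 it returns {1, 1, 2, 3, 3},
// so every basis function near a boundary still sees a full set of knots.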
/**
* This method is used to extract the knots over which a basis function is supposed to be non-zero.
*/
public static double[] extractKnots(int index, int order, double[] knots) {
double[] newKnots = new double[order+1];
int upperIndex = Math.min(index+order, knots.length-1);
int startIndex = 0;
for (int counter = index; counter <= upperIndex; counter++)
newKnots[startIndex++]=knots[counter];
return newKnots;
}
static double[] formDenominatorNSpline(int order, double[] knots) {
double[] oneOverDenominator = new double[2];
if (order == 1) {
oneOverDenominator[0] = 1;
oneOverDenominator[1] = 0;
} else {
double tempDenom = knots[order-1]-knots[0];
oneOverDenominator[0] = tempDenom==0
? 0 : 1.0/tempDenom;
tempDenom = knots[order]-knots[1];
oneOverDenominator[1] = tempDenom==0
? 0 : 1.0/tempDenom;
}
return oneOverDenominator;
}
static double[] formNumerator(int order, double[] knots) {
double[] numerator = new double[2];
if (order == 1) {
numerator[0] = 1;
numerator[1] = 0;
} else {
numerator[0] = knots[0];
numerator[1] = knots[order];
}
return numerator;
}
static double[] formDenominatorMSpline(int order, double[] knots) {
double[] oneOverDenominator = new double[2];
if (order == 1) {
oneOverDenominator[0] = 1;
oneOverDenominator[1] = 0;
} else {
double tempDenom = knots[order]-knots[0];
oneOverDenominator[0] = tempDenom==0
? 0 : 1.0/tempDenom;
tempDenom = knots[order]-knots[0];
oneOverDenominator[1] = tempDenom==0
? 0 : 1.0/tempDenom;
}
return oneOverDenominator;
}
/**
* This method performs the multiplication of two polynomials where the polynomials are given as a double
* array. This will result in another array which contains the multiplication of the two polynomials.
*/
public static double[] polynomialProduct(double[] coeff1, double[] coeff2) {
int firstLen = coeff1.length;
int secondLen = coeff2.length;
int combinedLen = firstLen*secondLen;
int[] firstOrder = arrayInitRange(firstLen, 0);
int[] secondOrder = arrayInitRange(secondLen, 0);
int highestOrder = firstLen+secondLen-2;
double[] combinedCoefficients = new double[highestOrder+1]; // start with order 0
List<Double> combinedC = new ArrayList<>();
List<Integer> combinedOrder = new ArrayList<>();
for (int firstIndex=0; firstIndex < firstLen; firstIndex++) {
for (int secondIndex=0; secondIndex < secondLen; secondIndex++) {
double tempValue = coeff1[firstIndex]*coeff2[secondIndex];
combinedC.add(tempValue);
int tempOrder = firstOrder[firstIndex]+secondOrder[secondIndex];
combinedOrder.add(tempOrder);
}
}
for (int index = 0; index < combinedLen; index++) {
combinedCoefficients[combinedOrder.get(index)] += combinedC.get(index);
}
return combinedCoefficients;
}
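// Illustrative note (not part of the original source): coefficients are indexed by power,
// i.e. coeff[k] multiplies t^k. For coeff1 = {1, 2} (1 + 2t) and coeff2 = {3, 4} (3 + 4t),
// polynomialProduct returns {3, 10, 8}, i.e. 3 + 10t + 8t^2.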
/**
* Extract coefficients for a node as in equation 5, 11 or 16 by combining the constants, additional
* polynomials with polynomials from nodes of lower orders.
*/
public static void combineParentCoef(double[] parentCoeff, double parentConst, double[][] currCoeff) {
int numBasis = currCoeff.length;
double[] copyParentCoef = parentCoeff.clone();
ArrayUtils.mult(copyParentCoef, parentConst);
for (int index = 0; index < numBasis; index++) {
if (currCoeff[index] != null) {
currCoeff[index] = polynomialProduct(copyParentCoef, currCoeff[index]);
}
}
}
/**
* Perform sum of two polynomials resulting in a double[] representing the result of the summation.
*/
public static void sumCoeffs(double[][] leftCoeffs, double[][] riteCoeffs, double[][] currCoeffs) {
int knotInt = leftCoeffs.length;
for (int index=0; index < knotInt; index++) {
double[] leftCoef1 = leftCoeffs[index];
double[] riteCoef1 = riteCoeffs[index];
if (leftCoef1 != null || riteCoef1 != null) {
if (leftCoef1 != null && riteCoef1 != null) {
currCoeffs[index] = addCoeffs(leftCoef1, riteCoef1);
} else if (leftCoef1 != null) {
currCoeffs[index] = leftCoef1.clone();
} else { // only riteCoef1 is not null
currCoeffs[index] = riteCoef1.clone();
}
}
}
}
/***
* Perform summation of coefficients from the left and rite splines with lower order.
*/
public static double[] addCoeffs(double[] leftCoef, double[] riteCoef) {
int leftLen = leftCoef.length;
int riteLen = riteCoef.length;
int coeffLen = Math.max(leftLen, riteLen);
double[] sumCoeffs = new double[coeffLen];
for (int index=0; index<coeffLen; index++) {
double val = 0;
if (index < leftLen)
val += leftCoef[index];
if (index < riteLen)
val += riteCoef[index];
sumCoeffs[index] = val;
}
return sumCoeffs;
}
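// Illustrative note (not part of the original source): addCoeffs pads the shorter
// polynomial with zeros, so addCoeffs({1, 2, 3}, {4, 5}) == {5, 7, 3}.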
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/gam/GamUtilsThinPlateRegression.java
|
package hex.genmodel.algos.gam;
public class GamUtilsThinPlateRegression {
public static double calTPConstantTerm(int m, int d, boolean dEven) {
if (dEven)
return (Math.pow(-1, m + 1 + d / 2.0) / (Math.pow(2, 2*m - 1) * Math.pow(Math.PI, d / 2.0) *
factorial(m-1)*factorial(m - d / 2)));
else
return (Math.pow(-1, m) * m / (factorial(2 * m) * Math.pow(Math.PI, (d - 1) / 2.0)));
}
public static int factorial(int m) {
if (m <= 1) {
return 1;
} else {
int prod = 1;
for (int index = 1; index <= m; index++)
prod *= index;
return prod;
}
}
public static void calculateDistance(double[] rowValues, double[] chk, int knotNum, double[][] knots, int d, int m,
boolean dEven, double constantTerms, double[] oneOGamColStd, boolean standardizeGAM) { // see 3.1
for (int knotInd = 0; knotInd < knotNum; knotInd++) { // calculate distance between data and knots
double sumSq = 0;
for (int predInd = 0; predInd < d; predInd++) {
double temp = standardizeGAM?(chk[predInd] - knots[predInd][knotInd])*oneOGamColStd[predInd]:
(chk[predInd] - knots[predInd][knotInd]); // standardized
sumSq += temp*temp;
}
double distance = Math.pow(Math.sqrt(sumSq), 2*m-d);
rowValues[knotInd] = constantTerms*distance;
if (dEven && (distance != 0))
rowValues[knotInd] *= Math.log(distance);
}
}
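// Illustrative note (not part of the original source): with r the (optionally standardized)
// Euclidean distance between the data row and a knot, the loop above fills
//   rowValues[k] = constantTerms * r^(2m-d)                    (d odd)
//   rowValues[k] = constantTerms * r^(2m-d) * log(r^(2m-d))    (d even, r != 0)
// which matches the classical thin-plate kernel r^(2m-d) * log(r) up to the factor
// (2m-d) absorbed into the constant.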
public static void calculatePolynomialBasis(double[] onePolyRow, double[] oneDataRow, int d, int M,
int[][] polyBasisList, double[] gamColMean, double[] oneOGamStd,
boolean standardizeGAM) {
for (int colIndex = 0; colIndex < M; colIndex++) {
int[] oneBasis = polyBasisList[colIndex];
double val = 1.0;
for (int predIndex = 0; predIndex < d; predIndex++) {
val *= standardizeGAM?
Math.pow((oneDataRow[predIndex]-gamColMean[predIndex]*oneOGamStd[predIndex]), oneBasis[predIndex]):
Math.pow(oneDataRow[predIndex], oneBasis[predIndex]);
}
onePolyRow[colIndex] = val;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/gam/ISplines.java
|
package hex.genmodel.algos.gam;
import java.io.Serializable;
import static hex.genmodel.algos.gam.GamUtilsISplines.extractKnots;
import static hex.genmodel.algos.gam.GamUtilsISplines.fillKnots;
public class ISplines implements Serializable {
private final double[] _knotsWDuplicates; // expanded knots with duplicates
private final int _order; // order of ISplines, starts from 1, 2, ...
public int _numIBasis; // number of I splines over knot sequence
NBSplinesTypeII _bSplines; // point to BSpline of order _order+1 over the same knot sequence
private final ISplineBasis[] _iSplines;
public ISplines(int order, double[] knots) {
_knotsWDuplicates = fillKnots(knots, order);
_order = order;
_bSplines = new NBSplinesTypeII(order + 1, knots);
_numIBasis = knots.length + order - 2;
_iSplines = new ISplineBasis[_numIBasis];
for (int index = 0; index < _numIBasis; index++)
_iSplines[index] = new ISplineBasis(index, _order, _knotsWDuplicates);
}
public void gamifyVal(double[] gamifiedResults, double val) {
if (gamifiedResults == null)
gamifiedResults = new double[_numIBasis];
for (int basisInd = 0; basisInd < _numIBasis; basisInd++) {
if (val < _iSplines[basisInd]._knots[0])
gamifiedResults[basisInd] = 0;
else if (val >= _iSplines[basisInd]._knots[_order])
gamifiedResults[basisInd] = 1;
else
gamifiedResults[basisInd] = sumNBSpline(basisInd + 1, val); // NB-spline start index is I-spline index plus one
}
}
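// Illustrative note (not part of the original source): each I-spline basis function is
// monotone non-decreasing from 0 to 1 over its support. Below the support gamifyVal
// writes 0, above it writes 1, and inside it sums the order+1 B-splines starting at
// basis index basisInd + 1, per the standard I-spline / B-spline relationship.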
public double sumNBSpline(int startIndex, double val) {
double gamifiedVal = 0;
// int maxBasisInd = Math.min(startIndex+_order, _bSplines._basisFuncs.length);
int maxBasisInd = _bSplines._basisFuncs.length;
for (int basisInd = startIndex; basisInd < maxBasisInd; basisInd++) {
if (val < _bSplines._basisFuncs[basisInd]._knots[0]) {
break; // no more basis function to be activated
} else if (val >= _bSplines._basisFuncs[basisInd]._knots[_bSplines._order]) {
gamifiedVal += 1;
} else {
gamifiedVal += NBSplinesTypeII.BSplineBasis.evaluate(val, _bSplines._basisFuncs[basisInd]);
}
}
return gamifiedVal;
}
private static class ISplineBasis implements Serializable {
private double[] _knots; // knots over which function is non-zero
private int _NSplineBasisStartIndex; // start index of NB spline function of interest
private int _order;
public ISplineBasis(int basisInd, int order, double[] knots) {
_NSplineBasisStartIndex = basisInd;
_order = order;
_knots = extractKnots(basisInd, order, knots);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/gam/MSplines.java
|
package hex.genmodel.algos.gam;
import java.io.Serializable;
import static hex.genmodel.algos.gam.GamUtilsISplines.fillKnots;
public class MSplines implements Serializable {
private final double[] _knotsWDuplicates; // expanded knots with duplicates
private final int _order; // order of MSplines, starts from 1, 2, ...
public int _numMBasis; // number of M splines over knot sequence
private final NBSplinesTypeI.MSplineBasis[] _mSplines;
/***
*
* @param order: order of the spline. However, polynomial order is order - 1
* @param knots: all knots (boundary and interior) without duplication
*/
public MSplines(int order, double[] knots) {
_knotsWDuplicates = fillKnots(knots, order);
_order = order;
_numMBasis = knots.length + order - 2;
_mSplines = NBSplinesTypeI.genBasisFunctions(_numMBasis, order, _knotsWDuplicates);
}
public void gamifyVal(double[] gamifiedResults, double val) {
if (gamifiedResults == null)
gamifiedResults = new double[_numMBasis];
for (int basisInd = 0; basisInd < _numMBasis; basisInd++) {
if (val < _mSplines[basisInd]._knots[0] || val >= _mSplines[basisInd]._knots[_order]) {
gamifiedResults[basisInd] = 0; // outside the non-zero support of this basis function
} else {
gamifiedResults[basisInd] = NBSplinesTypeI.MSplineBasis.evaluate(val, _mSplines[basisInd]);
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/gam/NBSplinesTypeI.java
|
package hex.genmodel.algos.gam;
import java.io.Serializable;
import static hex.genmodel.algos.gam.GamUtilsISplines.*;
/**
* Implements the spline described in Section III of the doc in GitHub issue https://github.com/h2oai/h2o-3/issues/7261.
* Any reference to doc I in the code refers to that document.
*
* The recursive formula in equation 5 is used. It is implemented as a binary tree whose current node has order m
* and whose two child nodes have order m-1.
*/
public class NBSplinesTypeI implements Serializable {
public final int _order;
public double[][] _nodeCoeffs; // expanded polynomial coefficients at current node, section VI of doc
private double[] _coeffLeft; // represent (t-ti) of equation 5
private double[] _coeffRight; // represent (ti+k-t) of equation 5
double[] _knots; // knot sequence with duplication
public double _commonConst; // k/((k-1)*(t{i+k}-t{i})) of equation 5
private NBSplinesTypeI _left; // lower order spline Mi,k-1(t) of equation 5
private NBSplinesTypeI _right; // lower order spline Mi+1,k-1(t) of equation 5
public final int _totBasisFuncs;
public final int _nKnots;
/**
* Generate NBSpline Type I in Section III of doc I
*
* @param knots : knots that span the whole input range of interest with no duplication
* @param order : order of spline
* @param basisIndex : offset added to basis function index, recall lower order splines have index i and i+1 equation 5 of doc
* @param numKnotInt : integer denoting knot intervals over which polynomial coefficients are defined
*/
public NBSplinesTypeI(double[] knots, int order, int basisIndex, int numKnotInt) {
_order= order;
_knots = extractKnots(basisIndex, order, knots); // extract knots over which the basis function is non-zero
_nodeCoeffs = new double[numKnotInt][];
setConstNChildCoeffs(knots, basisIndex);
_left = null;
_right = null;
_nKnots = knots.length;
_totBasisFuncs = _order+_nKnots-2;
}
protected static MSplineBasis[] genBasisFunctions(int totBasisFuncs, int order, double[] knots) {
MSplineBasis[] basisFuncs = new MSplineBasis[totBasisFuncs];
for (int index=0; index<totBasisFuncs; index++)
basisFuncs[index] = formOneBasisFunc(index, order, knots);
return basisFuncs;
}
private static MSplineBasis formOneBasisFunc(int basisIndex, int order, double[] knots) {
if (order == 1) {
return new MSplineBasis(basisIndex, order, knots);
} else {
MSplineBasis oneBasis = new MSplineBasis(basisIndex, order, knots);
oneBasis._first = formOneBasisFunc(basisIndex, order-1, knots);
oneBasis._second = formOneBasisFunc(basisIndex+1, order-1, knots);
return oneBasis;
}
}
public void setConstNChildCoeffs(double[] knots, int basisIndex) {
double temp;
if (_order <= 1) {
_commonConst = 0.0;
temp = _knots[1] - _knots[0];
_coeffLeft = temp == 0 ? new double[]{0} : new double[]{1.0 / temp};
_coeffRight = new double[]{0.0};
} else {
temp = knots[basisIndex] - knots[basisIndex + _order];
_commonConst = temp == 0 ? 0 :
_order / (temp * (_order - 1));
_coeffLeft = new double[]{-knots[basisIndex], 1};
_coeffRight = new double[]{knots[_order + basisIndex], -1};
}
}
/**
* Given the root spline, this method extracts the coefficients of the spline as described in Section VI in order to
* generate the penalty matrix using recursion. This actually follows the recursion of Type II.
*/
public static void extractNBSplineCoeffs(NBSplinesTypeI root, int order, double[] coeffParent, double constParent,
int basisIndex) {
if (order == 1) { // reach the bottom of recursion tree
double temp = root._knots[1] - root._knots[0];
if (temp != 0) {
root._nodeCoeffs[basisIndex] = polynomialProduct(
new double[]{constParent / temp}, coeffParent);
}
} else {
extractNBSplineCoeffs(root._left, order-1, root._coeffLeft, root._commonConst, basisIndex);
extractNBSplineCoeffs(root._right, order-1, root._coeffRight, root._commonConst, basisIndex+1);
sumCoeffs(root._left._nodeCoeffs, root._right._nodeCoeffs, root._nodeCoeffs);
combineParentCoef(coeffParent, constParent, root._nodeCoeffs);
}
}
/**
* extract coefficients of NBSplineType I in the process of generating penalty matrix in Section VI of doc.
*/
public static double[][] extractCoeffs(NBSplinesTypeI root, int basisIndex, double parentConst) {
double temp, temp2;
if (root._order == 1) { // short cut for tree of order 1
temp = root._knots[1]-root._knots[0];
if (temp != 0) {
temp2 = parentConst / temp;
root._nodeCoeffs[basisIndex] = new double[]{temp2};
}
return root._nodeCoeffs;
} else {
extractNBSplineCoeffs(root, root._order, new double[]{1.0}, parentConst, basisIndex);
return root._nodeCoeffs;
}
}
/**
* Given a basis function index, order and knot sequence with duplication over the input range of interest,
* we build the whole binary tree for NBSplineType I for Mi,k(t) down to the lowest level with splines of order
* 1 as in Mi,1(t). This is done using recursion following equation 5.
*
* @param knots : knot sequence with duplication
* @param order : order (k) of spline to build
* @param basisIndex : offset added in order to address splines of lower order
* @param numKnotInt : length of knots with duplication over the whole range of input of interest.
* @return NBSplinesTypeI for spline Mi,k(t)
*/
public static NBSplinesTypeI formBasisDeriv(double[] knots, int order, int basisIndex, int numKnotInt) {
if (order == 1) {
return new NBSplinesTypeI(knots, order, basisIndex, numKnotInt);
} else {
NBSplinesTypeI nbsplines = new NBSplinesTypeI(knots, order, basisIndex, numKnotInt);
nbsplines._left = formBasisDeriv(nbsplines._knots, order-1,0, numKnotInt);
nbsplines._right = formBasisDeriv(nbsplines._knots, order-1,1, numKnotInt);
return nbsplines;
}
}
/**
* This class describes a M spline using the recursive formula in equation 5 of the doc.
*/
public static class MSplineBasis implements Serializable {
double[] _knots; // knots over which basis function is non-zero, may include duplicate
private double[] _numerator;
private double[] _oneOverDenominator;
private MSplineBasis _first;
private MSplineBasis _second;
private double _constant;
/**
*
* @param index: basis function number
* @param order: order of M-spline
* @param knots: full knots with duplications already performed.
*/
public MSplineBasis(int index, int order, double[] knots) {
_first = null;
_second = null;
_knots = extractKnots(index, order, knots);
_constant = order > 1 ? (order/(order-1.0)) : 1;
_numerator = formNumerator(order, _knots);
_oneOverDenominator = formDenominatorMSpline(order, _knots);
}
public static double evaluate(double value, MSplineBasis root) {
if (value < root._knots[0] || value >= root._knots[root._knots.length - 1])
return 0; // value outside current basis function non-zero range
if (root._first != null) {
return root._constant*((value - root._numerator[0]) * root._oneOverDenominator[0] * evaluate(value, root._first)
+ (root._numerator[1] - value) * root._oneOverDenominator[1] * evaluate(value, root._second));
} else { // arrive at order==1 with null children
double temp = root._knots[1]-root._knots[0];
if (temp != 0)
return 1.0/temp;
else
return 0.0;
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/gam/NBSplinesTypeII.java
|
package hex.genmodel.algos.gam;
import java.io.Serializable;
import static hex.genmodel.algos.gam.GamUtilsISplines.*;
public class NBSplinesTypeII implements Serializable {
public final int _order; // order of splines
private final int _nKnots; // number of knots of multiplicity 1
private final double[] _knots; // whole knots with duplication
public final int _totBasisFuncs;
public final BSplineBasis[] _basisFuncs;
public NBSplinesTypeII(int m, double[] knots) {
_order = m;
_nKnots = knots.length;
_totBasisFuncs = _nKnots + _order - 2;
_knots = fillKnots(knots, m);
_basisFuncs = genBasisFunctions(_totBasisFuncs, _order, _knots);
}
private static BSplineBasis[] genBasisFunctions(int totBasisFuncs, int order, double[] knots) {
BSplineBasis[] basisFuncs = new BSplineBasis[totBasisFuncs];
for (int index = 0; index < totBasisFuncs; index++) {
basisFuncs[index] = formOneBasisFunc(index, order, knots);
}
return basisFuncs;
}
private static BSplineBasis formOneBasisFunc(int knotIndex, int order, double[] knots) {
if (order == 1) {
return new BSplineBasis(knotIndex, order, knots);
} else {
BSplineBasis oneBasis = new BSplineBasis(knotIndex, order, knots);
oneBasis._first = formOneBasisFunc(knotIndex, order - 1, knots);
oneBasis._second = formOneBasisFunc(knotIndex + 1, order - 1, knots);
return oneBasis;
}
}
public void gamify(double[] gamifiedValues, double value) {
if (gamifiedValues == null)
gamifiedValues = new double[_totBasisFuncs];
for (int index = 0; index < _totBasisFuncs; index++)
gamifiedValues[index] = BSplineBasis.evaluate(value, _basisFuncs[index]);
}
public static class BSplineBasis implements Serializable {
public double[] _knots; // knots over which basis function is non-zero, include possible duplicates
private double[] _numerator;
private double[] _oneOverDenominator;
private BSplineBasis _first; // first part of basis function
private BSplineBasis _second;
public BSplineBasis(int index, int order, double[] knots) {
_first = null;
_second = null;
_knots = extractKnots(index, order, knots);
int knotsizeDiff = order + 1 - _knots.length;
if (knotsizeDiff > 0) {
double[] extendKnots = new double[knots.length + knotsizeDiff];
System.arraycopy(_knots, 0, extendKnots, 0, _knots.length);
double lastKnot = _knots[_knots.length - 1];
for (int kIndex = _knots.length; kIndex < extendKnots.length; kIndex++)
extendKnots[kIndex] = lastKnot; // extend last index
_knots = extendKnots;
}
_numerator = formNumerator(order, _knots);
_oneOverDenominator = formDenominatorNSpline(order, _knots);
}
public static double evaluate(double value, BSplineBasis root) {
if (value < root._knots[0] || value >= root._knots[root._knots.length - 1])
return 0; // value outside current basis function non-zero range
if (root._first != null) {
return (value - root._numerator[0]) * root._oneOverDenominator[0] * evaluate(value, root._first)
+ (root._numerator[1] - value) * root._oneOverDenominator[1] * evaluate(value, root._second);
} else { // arrive at order==1 with null children
return 1;
}
}
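// Illustrative note (not part of the original source): evaluate implements the
// Cox-de Boor recursion
//   N{i,k}(t) = (t - t_i)/(t{i+k-1} - t_i) * N{i,k-1}(t)
//             + (t{i+k} - t)/(t{i+k} - t{i+1}) * N{i+1,k-1}(t)
// with the order-1 case returning the indicator of [t_i, t{i+1}).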
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/gbm/GbmMojoModel.java
|
package hex.genmodel.algos.gbm;
import hex.genmodel.GenModel;
import hex.genmodel.PredictContributions;
import hex.genmodel.algos.tree.*;
import hex.genmodel.utils.DistributionFamily;
import hex.genmodel.utils.LinkFunctionType;
import static hex.genmodel.utils.DistributionFamily.*;
/**
* "Gradient Boosting Machine" MojoModel
*/
public final class GbmMojoModel extends SharedTreeMojoModelWithContributions implements SharedTreeGraphConverter {
public DistributionFamily _family;
public LinkFunctionType _link_function;
public double _init_f;
public GbmMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
@Override
protected PredictContributions getContributionsPredictor(TreeSHAPPredictor<double[]> treeSHAPPredictor) {
return new SharedTreeContributionsPredictor(this, treeSHAPPredictor);
}
@Override
public double getInitF() {
return _init_f;
}
/**
* Corresponds to `hex.tree.gbm.GbmMojoModel.score0()`
*/
@Override
public final double[] score0(double[] row, double offset, double[] preds) {
super.scoreAllTrees(row, preds);
return unifyPreds(row, offset, preds);
}
@Override
public final double[] unifyPreds(double[] row, double offset, double[] preds) {
if (_family == bernoulli || _family == quasibinomial || _family == modified_huber) {
double f = preds[1] + _init_f + offset;
preds[2] = linkInv(_link_function, f);
preds[1] = 1.0 - preds[2];
} else if (_family == multinomial) {
if (_nclasses == 2) { // 1-tree optimization for binomial
preds[1] += _init_f + offset; //offset is not yet allowed, but added here to be future-proof
preds[2] = -preds[1];
}
GenModel.GBM_rescale(preds);
} else { // Regression
double f = preds[0] + _init_f + offset;
preds[0] = linkInv(_link_function, f);
return preds;
}
if (_balanceClasses)
GenModel.correctProbabilities(preds, _priorClassDistrib, _modelClassDistrib);
preds[0] = GenModel.getPrediction(preds, _priorClassDistrib, row, _defaultThreshold);
return preds;
}
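// Illustrative note (not part of the original source): for bernoulli/quasibinomial/
// modified_huber the array returned above is laid out as
//   preds[0] = predicted label, preds[1] = P(class 0), preds[2] = P(class 1)
// where P(class 1) = linkInv(tree sum + init_f + offset).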
/**
* Calculates the inverse link, which depends on the distribution type - every distribution has its own link function.
* Be careful if you are changing code here - you have to change it in hex.LinkFunction too
* @param linkFunction link function to compute link inversion
* @param f raw prediction
* @return calculated inverse link value
*/
private double linkInv(LinkFunctionType linkFunction, double f){
switch (linkFunction) {
case log:
return exp(f);
case logit:
case ologit:
return 1 / (1 + exp(-f));
case ologlog:
return 1 - exp(-1 * exp(f));
case oprobit:
return 0;
case inverse:
double xx = f < 0 ? Math.min(-1e-5, f) : Math.max(-1e-5, f);
return 1.0/xx;
case identity:
default:
return f;
}
}
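// Illustrative note (not part of the original source): a few spot checks of linkInv --
//   logit:   f = 0 -> 1/(1 + e^0)  = 0.5
//   log:     f = 0 -> e^0          = 1.0
//   ologlog: f = 0 -> 1 - e^(-1)  ~= 0.632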
/**
* Sanitized exponential function - helper function.
* Be careful if you are changing code here - you have to change it in hex.LogExpUtils too
*
* @param x value to be transformed
* @return result of exp function
*/
public static double exp(double x) { return Math.min(1e19, Math.exp(x)); }
/**
* Sanitized log function - helper function
* Be careful if you are changing code here - you have to change it in hex.LogExpUtils too
*
* @param x value to be transformed
* @return result of log function
*/
public static double log(double x) {
x = Math.max(0, x);
return x == 0 ? -19 : Math.max(-19, Math.log(x));
}
@Override
public double[] score0(double[] row, double[] preds) {
return score0(row, 0.0, preds);
}
public String[] leaf_node_assignment(double[] row) {
return getDecisionPath(row);
}
@Override
public String[] getOutputNames() {
if (_family == quasibinomial && getDomainValues(getResponseIdx()) == null) {
return new String[]{"predict", "pVal0", "pVal1"};
}
return super.getOutputNames();
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/gbm/GbmMojoReader.java
|
package hex.genmodel.algos.gbm;
import hex.genmodel.algos.tree.SharedTreeMojoReader;
import hex.genmodel.utils.DistributionFamily;
import java.io.IOException;
/**
*/
public class GbmMojoReader extends SharedTreeMojoReader<GbmMojoModel> {
@Override
public String getModelName() {
return "Gradient Boosting Machine";
}
@Override
protected void readModelData() throws IOException {
super.readModelData();
_model._family = DistributionFamily.valueOf((String)readkv("distribution"));
_model._init_f = readkv("init_f");
_model._link_function = readLinkFunction((String) readkv("link_function"), _model._family);
}
@Override
protected GbmMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
return new GbmMojoModel(columns, domains, responseColumn);
}
@Override public String mojoVersion() {
return "1.40";
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/glm/GlmMojoModel.java
|
package hex.genmodel.algos.glm;
import hex.genmodel.GenModel;
import java.io.Serializable;
public class GlmMojoModel extends GlmMojoModelBase {
String _link;
double _tweedieLinkPower;
// set by init()
private Function1 _linkFn;
private boolean _binomial;
GlmMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
@Override
void init() {
_linkFn = createLinkFunction();
_binomial = "binomial".equals(_family) || "fractionalbinomial".equals(_family) || "quasibinomial".equals(_family);
}
public final double[] score0(double[] data, double offset, double[] preds) {
if (_meanImputation)
super.imputeMissingWithMeans(data);
return glmScore0(data, offset, preds);
}
double[] glmScore0(double[] data, double offset, double[] preds) {
double eta = 0.0;
if (!_useAllFactorLevels) { // skip level 0 of all factors
for(int i = 0; i < _catOffsets.length-1; ++i) {
if(data[i] != 0) {
int ival = (int) data[i] - 1;
if (ival != data[i] - 1) {
throw new IllegalArgumentException("categorical value out of range");
}
ival += _catOffsets[i];
if (ival < _catOffsets[i + 1]) {
eta += _beta[ival];
}
}
}
} else { // do not skip any levels
for(int i = 0; i < _catOffsets.length-1; ++i) {
int ival = (int) data[i];
if (ival != data[i]) {
throw new IllegalArgumentException("categorical value out of range");
}
ival += _catOffsets[i];
if (ival < _catOffsets[i + 1]) {
eta += _beta[ival];
}
}
}
int noff = _catOffsets[_cats] - _cats;
for(int i = _cats; i < _beta.length - 1 - noff; ++i)
eta += _beta[noff + i] * data[i];
eta += _beta[_beta.length - 1]; // add the intercept
eta += offset;
double mu = _linkFn.eval(eta);
if (_binomial) {
preds[0] = (mu >= _defaultThreshold) ? 1 : 0; // threshold given by ROC
preds[1] = 1.0 - mu; // class 0
preds[2] = mu; // class 1
} else {
preds[0] = mu;
}
return preds;
}
/**
* Applies GLM coefficients to a given row of data to calculate
* feature contributions.
*
* Note: for internal purposes only (k-LIME)
*
* @param data input row of data (same input as to glmScore0)
* @param output target output array
* @param destPos index to the output array where the result should start
* @return feature contributions, prediction = linkFunction(sum(output) + intercept)
*/
public double[] applyCoefficients(double[] data, double[] output, int destPos) {
final int offset = _useAllFactorLevels ? 0 : 1; // when factor level 0 is skipped, shift categorical indices down by one
for (int i = 0; i < _catOffsets.length - 1; i++) {
int ival = (int) data[i] - offset;
if (ival < 0) continue;
ival += _catOffsets[i];
if (ival < _catOffsets[i + 1])
output[i + destPos] = _beta[ival];
}
int p = destPos + _catOffsets.length - 1;
int noff = _catOffsets[_cats] - _cats;
for (int i = _cats; i < _beta.length - 1 - noff; i++)
output[p++] = _beta[noff + i] * data[i];
return output;
}
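// Illustrative sketch (not part of the original source; assumes output has one slot per
// input feature): recovering the linear predictor from the contributions --
//   double[] contrib = new double[model.nfeatures()];
//   model.applyCoefficients(data, contrib, 0);
//   double eta = model.getIntercept();
//   for (double c : contrib) eta += c; // eta is the GLM linear predictor for this row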
public double getIntercept() {
return _beta[_beta.length - 1];
}
private interface Function1 extends Serializable {
double eval(double x);
}
private Function1 createLinkFunction() {
if ("identity".equals(_link))
return new GLM_identityInv();
else if ("logit".equals(_link))
return new GLM_logitInv();
else if ("log".equals(_link))
return new GLM_logInv();
else if ("inverse".equals(_link))
return new GLM_inverseInv();
else if ("tweedie".equals(_link))
return new GLM_tweedieInv(_tweedieLinkPower);
else
throw new UnsupportedOperationException("Unexpected link function " + _link);
}
private static class GLM_identityInv implements Function1 {
@Override public double eval(double x) { return GenModel.GLM_identityInv(x); }
}
private static class GLM_logitInv implements Function1 {
@Override public double eval(double x) { return GenModel.GLM_logitInv(x); }
}
private static class GLM_logInv implements Function1 {
@Override public double eval(double x) { return GenModel.GLM_logInv(x); }
}
private static class GLM_inverseInv implements Function1 {
@Override
public double eval(double x) {
return GenModel.GLM_inverseInv(x);
}
}
private static class GLM_ologitInv implements Function1 {
@Override public double eval(double x) { return GenModel.GLM_ologitInv(x); }
}
private static class GLM_tweedieInv implements Function1 {
private final double _tweedie_link_power;
GLM_tweedieInv(double tweedie_link_power) { this._tweedie_link_power = tweedie_link_power; }
@Override public double eval(double x) { return GenModel.GLM_tweedieInv(x, _tweedie_link_power); }
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/glm/GlmMojoModelBase.java
|
package hex.genmodel.algos.glm;
import hex.genmodel.MojoModel;
public abstract class GlmMojoModelBase extends MojoModel {
boolean _useAllFactorLevels;
int _cats;
int[] _catModes;
int[] _catOffsets;
int _nums;
double[] _numMeans;
boolean _meanImputation;
double[] _beta;
String _family;
boolean _versionSupportOffset;
double _dispersion_estimated;
GlmMojoModelBase(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
public double[] getBeta() {
return _beta;
}
public double getDispersionEstimated() {
return _dispersion_estimated;
}
void init() {
_versionSupportOffset = _mojo_version >= 1.1;
}
@Override
public final double[] score0(double[] data, double[] preds) {
return score0(data, 0, preds);
}
void imputeMissingWithMeans(double[] data) {
for (int i = 0; i < _cats; ++i)
if (Double.isNaN(data[i])) data[i] = _catModes[i];
for (int i = 0; i < _nums; ++i)
if (Double.isNaN(data[i + _cats])) data[i + _cats] = _numMeans[i];
}
@Override
public String[] getOutputNames() {
// special handling of binomial case where response domain is not represented
if (nclasses() == 2 && getDomainValues(getResponseIdx()) == null) {
return new String[]{"predict", "0", "1"};
}
return super.getOutputNames();
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/glm/GlmMojoReader.java
|
package hex.genmodel.algos.glm;
import com.google.gson.JsonObject;
import hex.genmodel.ModelMojoReader;
import hex.genmodel.attributes.ModelAttributes;
import hex.genmodel.attributes.ModelAttributesGLM;
import hex.genmodel.attributes.ModelJsonReader;
import java.io.IOException;
public class GlmMojoReader extends ModelMojoReader<GlmMojoModelBase> {
@Override
public String getModelName() {
return "Generalized Linear Model";
}
@Override
protected void readModelData() throws IOException {
_model._useAllFactorLevels = readkv("use_all_factor_levels", false);
_model._cats = readkv("cats", -1);
_model._catModes = readkv("cat_modes", new int[0]);
_model._catOffsets = readkv("cat_offsets", new int[0]);
_model._nums = readkv("nums", -1);
_model._numMeans = readkv("num_means", new double[0]);
_model._meanImputation = readkv("mean_imputation", false);
_model._beta = readkv("beta");
_model._family = readkv("family");
_model._dispersion_estimated = readkv("dispersion_estimated", 1.0);
if (_model instanceof GlmMojoModel) {
GlmMojoModel m = (GlmMojoModel) _model;
m._link = readkv("link");
m._tweedieLinkPower = readkv("tweedie_link_power", 0.0);
}
_model.init();
}
@Override
protected ModelAttributes readModelSpecificAttributes() {
final JsonObject modelJson = ModelJsonReader.parseModelJson(_reader);
if(modelJson != null) {
return new ModelAttributesGLM(_model, modelJson);
} else {
return null;
}
}
@Override
protected GlmMojoModelBase makeModel(String[] columns, String[][] domains, String responseColumn) {
String family = readkv("family");
if ("multinomial".equals(family))
return new GlmMultinomialMojoModel(columns, domains, responseColumn);
else if ("ordinal".equals(family))
return new GlmOrdinalMojoModel(columns, domains, responseColumn);
else
return new GlmMojoModel(columns, domains, responseColumn);
}
@Override public String mojoVersion() {
return "1.00";
} // offset support is tied to the mojo version (see GlmMojoModelBase._versionSupportOffset)
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/glm/GlmMultinomialMojoModel.java
|
package hex.genmodel.algos.glm;
public class GlmMultinomialMojoModel extends GlmMojoModelBase {
private int P;
private int noff;
GlmMultinomialMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
@Override
void init() {
P = _beta.length / _nclasses;
if (P * _nclasses != _beta.length)
throw new IllegalStateException("Incorrect coding of Beta.");
noff = _catOffsets[_cats];
}
public final double[] score0(double[] data, double offset, double[] preds) {
if (_meanImputation)
super.imputeMissingWithMeans(data);
return glmScore0(data, offset, preds);
}
double[] glmScore0(double[] data, double offset, double[] preds) {
preds[0] = 0;
for (int c = 0; c < _nclasses; ++c) {
preds[c + 1] = 0;
if (_cats > 0) {
if (! _useAllFactorLevels) { // skip level 0 of all factors
for (int i = 0; i < _catOffsets.length-1; ++i) if(data[i] != 0) {
int ival = (int) data[i] - 1;
if (ival != data[i] - 1) throw new IllegalArgumentException("categorical value out of range");
ival += _catOffsets[i];
if (ival < _catOffsets[i + 1])
preds[c + 1] += _beta[ival + c*P];
}
} else { // do not skip any levels
for(int i = 0; i < _catOffsets.length-1; ++i) {
int ival = (int) data[i];
if (ival != data[i]) throw new IllegalArgumentException("categorical value out of range");
ival += _catOffsets[i];
if(ival < _catOffsets[i + 1])
preds[c + 1] += _beta[ival + c*P];
}
}
}
for (int i = 0; i < _nums; ++i)
preds[c+1] += _beta[noff+i + c*P]*data[i+_cats];
preds[c+1] += _beta[(P-1) + c*P]; // add the intercept
}
double max_row = 0;
for (int c = 1; c < preds.length; ++c) if (preds[c] > max_row) max_row = preds[c];
double sum_exp = 0;
for (int c = 1; c < preds.length; ++c) { sum_exp += (preds[c] = Math.exp(preds[c]-max_row));}
sum_exp = 1/sum_exp;
double max_p = 0;
for (int c = 1; c < preds.length; ++c) if ((preds[c] *= sum_exp) > max_p) { max_p = preds[c]; preds[0] = c-1; }
return preds;
}
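// Illustrative note (not part of the original source): the loop above is a numerically
// stable softmax -- subtracting max_row before exponentiating avoids overflow. E.g. for
// per-class etas {1, 2, 3}: exp({-2, -1, 0}) = {0.135, 0.368, 1.0}, which normalizes to
// probabilities {0.090, 0.245, 0.665}.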
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/glm/GlmOrdinalMojoModel.java
|
package hex.genmodel.algos.glm;
import hex.genmodel.utils.ArrayUtils;
import java.util.Arrays;
public class GlmOrdinalMojoModel extends GlmMojoModelBase {
private int P;
private int noff;
private int lastClass;
private int[] icptIndices;
GlmOrdinalMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
@Override
void init() {
P = _beta.length / _nclasses;
lastClass = _nclasses-1;
icptIndices = new int[lastClass];
for (int c = 0; c < lastClass; c++) {
icptIndices[c] = P-1+c*P;
}
if (P * _nclasses != _beta.length)
throw new IllegalStateException("Incorrect coding of Beta.");
noff = _catOffsets[_cats];
}
public final double[] score0(double[] data, double offset, double[] preds) {
if (_meanImputation)
super.imputeMissingWithMeans(data);
return glmScore0(data, offset, preds);
}
double[] glmScore0(double[] data, double offset, double[] preds) {
Arrays.fill(preds, 0);
preds[0]=lastClass;
for (int c = 0; c < lastClass; ++c) { // preds contains the etas for each class
if (_cats > 0) {
if (! _useAllFactorLevels) { // skip level 0 of all factors
for (int i = 0; i < _catOffsets.length-1; ++i) if(data[i] != 0) {
int ival = (int) data[i] - 1;
if (ival != data[i] - 1) throw new IllegalArgumentException("categorical value out of range");
ival += _catOffsets[i];
if (ival < _catOffsets[i + 1])
preds[c + 1] += _beta[ival + c*P];
}
} else { // do not skip any levels
for(int i = 0; i < _catOffsets.length-1; ++i) {
int ival = (int) data[i];
if (ival != data[i]) throw new IllegalArgumentException("categorical value out of range");
ival += _catOffsets[i];
if(ival < _catOffsets[i + 1])
preds[c + 1] += _beta[ival + c*P];
}
}
}
for (int i = 0; i < _nums; ++i) {
preds[c + 1] += _beta[i+noff + c * P] * data[i+_cats];
}
preds[c+1] += _beta[icptIndices[c]];
}
double previousCDF = 0.0;
for (int cInd = 0; cInd < lastClass; cInd++) { // classify row and calculate PDF of each class
double eta = preds[cInd + 1]+offset;
double currCDF = 1.0 / (1 + Math.exp(-eta));
preds[cInd + 1] = currCDF - previousCDF;
previousCDF = currCDF;
}
preds[_nclasses] = 1-previousCDF;
preds[0] = 0;
preds[0] = ArrayUtils.maxIndex(preds)-1;
return preds;
}
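// Illustrative note (not part of the original source): ordinal regression scores via
// cumulative logits -- P(y <= c) = sigmoid(eta_c), so each class probability is the
// difference of consecutive CDFs, P(y = c) = CDF(c) - CDF(c-1), and the last class
// receives the remaining mass 1 - CDF(lastClass - 1).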
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/glrm/GlrmInitialization.java
|
package hex.genmodel.algos.glrm;
/**
* Initialization strategy for matrices X and Y in the GLRM algorithm.
*/
public enum GlrmInitialization {
Random, SVD, PlusPlus, User, Power
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/glrm/GlrmLoss.java
|
package hex.genmodel.algos.glrm;
import hex.genmodel.utils.ArrayUtils;
/**
* Loss function for the GLRM algorithm.
*/
public enum GlrmLoss {
//--------------------------------------------------------------------------------------------------------------------
// Loss functions for numeric features
//--------------------------------------------------------------------------------------------------------------------
Quadratic {
@Override public boolean isForNumeric() { return true; }
@Override public boolean isForCategorical() { return false; }
@Override public boolean isForBinary() { return false; }
@Override public double loss(double u, double a) {
return (u - a)*(u - a);
}
@Override public double lgrad(double u, double a) {
return 2*(u - a);
}
@Override public double impute(double u) {
return u;
}
},
Absolute {
@Override public boolean isForNumeric() { return true; }
@Override public boolean isForCategorical() { return false; }
@Override public boolean isForBinary() { return false; }
@Override public double loss(double u, double a) {
return Math.abs(u - a);
}
@Override public double lgrad(double u, double a) {
return Math.signum(u - a);
}
@Override public double impute(double u) {
return u;
}
},
Huber {
@Override public boolean isForNumeric() { return true; }
@Override public boolean isForCategorical() { return false; }
@Override public boolean isForBinary() { return false; }
@Override public double loss(double u, double a) {
double x = u - a;
return x > 1? x - 0.5 : x < -1 ? -x - 0.5 : 0.5*x*x;
}
@Override public double lgrad(double u, double a) {
double x = u - a;
return x > 1? 1 : x < -1 ? -1 : x;
}
@Override public double impute(double u) {
return u;
}
},
Poisson {
@Override public boolean isForNumeric() { return true; }
@Override public boolean isForCategorical() { return false; }
@Override public boolean isForBinary() { return false; }
@Override public double loss(double u, double a) {
assert a >= 0 : "Poisson loss L(u,a) requires variable a >= 0";
return Math.exp(u) + (a == 0 ? 0 : -a*u + a*Math.log(a) - a); // Since \lim_{a->0} a*log(a) = 0
}
@Override public double lgrad(double u, double a) {
assert a >= 0 : "Poisson loss L(u,a) requires variable a >= 0";
return Math.exp(u) - a;
}
@Override public double impute(double u) {
return Math.exp(u);
}
},
Periodic {
@Override public boolean isForNumeric() { return true; }
@Override public boolean isForCategorical() { return false; }
@Override public boolean isForBinary() { return false; }
private double f;
private int period;
@Override public double loss(double u, double a) {
return 1 - Math.cos((u - a)*f);
}
@Override public double lgrad(double u, double a) {
return f * Math.sin((u - a)*f);
}
@Override public double impute(double u) {
return u;
}
@Override public void setParameters(int period) {
this.period = period;
f = 2 * Math.PI / period;
}
@Override public String toString() { return "Periodic(" + period + ")"; }
},
//--------------------------------------------------------------------------------------------------------------------
// Loss functions for binary features
//--------------------------------------------------------------------------------------------------------------------
Logistic {
@Override public boolean isForNumeric() { return false; }
@Override public boolean isForCategorical() { return false; }
@Override public boolean isForBinary() { return true; }
@Override public double loss(double u, double a) {
assert a == 0 || a == 1 : "Logistic loss should be applied to binary features only";
return Math.log(1 + Math.exp((1 - 2*a)*u));
}
@Override public double lgrad(double u, double a) {
double s = 1 - 2*a;
return s/(1 + Math.exp(-s*u));
}
@Override public double impute(double u) {
return u > 0? 1 : 0;
}
},
Hinge {
@Override public boolean isForNumeric() { return false; }
@Override public boolean isForCategorical() { return false; }
@Override public boolean isForBinary() { return true; }
@Override public double loss(double u, double a) {
assert a == 0 || a == 1 : "Hinge loss should be applied to binary variables only";
return Math.max(1 + (1 - 2*a)*u, 0);
}
@Override public double lgrad(double u, double a) {
double s = 1 - 2*a;
return 1 + s*u > 0? s : 0;
}
@Override public double impute(double u) {
return u > 0? 1 : 0;
}
},
//--------------------------------------------------------------------------------------------------------------------
// Loss functions for multinomial features
//--------------------------------------------------------------------------------------------------------------------
Categorical {
@Override public boolean isForNumeric() { return false; }
@Override public boolean isForCategorical() { return true; }
@Override public boolean isForBinary() { return false; }
@Override public double mloss(double[] u, int a) {
return mloss(u, a, u.length);
}
// this method computes the same loss as the one above but is memory-optimized for the original
// GLRM.java code. See GLRM.java for details
@Override public double mloss(double[] u, int a, int u_len) {
if (!(a >= 0 && a < u_len))
throw new IndexOutOfBoundsException("a must be between 0 and " + (u_len - 1));
double sum = 0;
for (int ind=0; ind < u_len; ind++)
sum += Math.max(1 + u[ind], 0);
sum += Math.max(1 - u[a], 0) - Math.max(1 + u[a], 0);
return sum;
}
@Override public double[] mlgrad(double[] u, int a) {
double[] grad = new double[u.length];
return mlgrad(u, a, grad, u.length);
}
@Override public double[] mlgrad(double[] u, int a, double[] grad, int u_len) {
if (!(a >= 0 && a < u_len)) throw new IndexOutOfBoundsException("a must be between 0 and " + (u_len - 1));
for (int i = 0; i < u_len; i++)
grad[i] = (1 + u[i] > 0) ? 1 : 0;
grad[a] = (1 - u[a] > 0) ? -1 : 0;
return grad;
}
@Override public int mimpute(double[] u) {
return ArrayUtils.maxIndex(u);
}
},
Ordinal {
@Override public boolean isForNumeric() { return false; }
@Override public boolean isForCategorical() { return true; }
@Override public boolean isForBinary() { return false; }
@Override public double mloss(double[] u, int a) { return mloss(u, a, u.length); }
@Override public double mloss(double[] u, int a, int u_len) {
if (!(a >= 0 && a < u_len)) throw new IndexOutOfBoundsException("a must be between 0 and " + (u_len - 1));
double sum = 0;
for (int i = 0; i < u_len - 1; i++)
sum += a > i ? Math.max(1 - u[i], 0) : 1;
return sum;
}
@Override public double[] mlgrad(double[] u, int a) {
double[] grad = new double[u.length];
return mlgrad(u, a, grad, u.length);
}
@Override public double[] mlgrad(double[] u, int a, double[] grad, int u_len) {
if (!(a >= 0 && a < u_len)) throw new IndexOutOfBoundsException("a must be between 0 and " + (u_len - 1));
for (int i = 0; i < u_len - 1; i++)
grad[i] = (a > i && 1 - u[i] > 0) ? -1 : 0;
return grad;
}
@Override public int mimpute(double[] u) {
double sum = u.length - 1;
double best_loss = sum;
int best_a = 0;
for (int a = 1; a < u.length; a++) {
sum -= Math.min(1, u[a - 1]);
if (sum < best_loss) {
best_loss = sum;
best_a = a;
}
}
return best_a;
}
};
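// Illustrative note (not part of the original source): a spot check of Categorical.mloss --
// for u = {0.5, -2, 1} and a = 2: the sum of max(1 + u[i], 0) over all i is 1.5 + 0 + 2 = 3.5,
// then adding max(1 - u[2], 0) - max(1 + u[2], 0) = 0 - 2 gives mloss(u, 2) == 1.5.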
//--------------------------------------------------------------------------------------------------------------------
// Public interface
//--------------------------------------------------------------------------------------------------------------------
public abstract boolean isForNumeric();
public abstract boolean isForCategorical();
public abstract boolean isForBinary();
/** Loss function for numeric variables */
public double loss(double u, double a) { throw new UnsupportedOperationException(); }
/** \grad_u L(u,a): Derivative of the numeric loss function with respect to u */
public double lgrad(double u, double a) { throw new UnsupportedOperationException(); }
/** \argmin_a L(u, a): Data imputation for real numeric values */
public double impute(double u) { throw new UnsupportedOperationException(); }
/** Loss function for categorical variables where the size of u represents the true column length. */
public double mloss(double[] u, int a) { throw new UnsupportedOperationException(); }
/** Loss function for categorical variables performing the same function as mloss above. However, in this case,
* the size of u can be much bigger than what is needed. The actual length of u is now specified in u_len. */
public double mloss(double[] u, int a, int u_len) { throw new UnsupportedOperationException(); }
/** \grad_u L(u,a): Gradient of multidimensional loss function with respect to u */
public double[] mlgrad(double[] u, int a) { throw new UnsupportedOperationException(); }
/** \grad_u L(u,a): Gradient of multidimensional loss function with respect to u. This method avoids the
* memory allocation of the method above by passing in an array prod which can be longer
* than the actual column length. The actual column length for prod is now specified by u_len. */
public double[] mlgrad(double[] u, int a, double[] prod, int u_len) { throw new UnsupportedOperationException(); }
/** \argmin_a L(u, a): Data imputation for categorical values {0, 1, 2, ...} */
public int mimpute(double[] u) { throw new UnsupportedOperationException(); }
/** Initialize additional parameters on the loss function. Currently used by Periodic class only. */
public void setParameters(int p) { throw new UnsupportedOperationException(); }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/glrm/GlrmMojoModel.java
|
package hex.genmodel.algos.glrm;
import hex.ModelCategory;
import hex.genmodel.MojoModel;
import hex.genmodel.algos.pca.PCAMojoModel;
import java.util.EnumSet;
import java.util.Random;
/**
*/
public class GlrmMojoModel extends MojoModel {
public int _ncolA;
public int _ncolX;
public int _ncolY;
public int _nrowY;
public double[][] _archetypes;
public double[][] _archetypes_raw;
public int[] _numLevels;
public int [] _catOffsets;
public int[] _permutation;
public GlrmLoss[] _losses;
public GlrmRegularizer _regx;
public double _gammax;
public GlrmInitialization _init;
public int _ncats;
public int _nnums;
public double[] _normSub; // used to perform dataset transformation. When no transform is needed, will be 0
public double[] _normMul; // used to perform dataset transformation. When no transform is needed, will be 1
public long _seed; // added to ensure reproducibility
public boolean _transposed;
public boolean _reverse_transform;
public double _accuracyEps = 1e-10; // reconstruction accuracy A=X*Y
public int _iterNumber = 100; // maximum number of iterations to perform X update. Default is 100
// We don't really care about regularization of Y since it is changed during scoring
/**
* This is the "learning rate" in the gradient descent method. More specifically, at each iteration step we update
* x according to x_new = x_old - alpha * grad_x(obj)(x_old). If the objective evaluated at x_new is smaller than
* the objective at x_old, then we proceed with the update, increasing alpha slightly (in case we learn too slowly);
* however if the objective at x_new is bigger than the original objective, then we "overshot" and therefore reduce
* alpha in half.
* When reusing the alpha between multiple computations of the gradient, we find that alpha eventually "stabilizes"
* in a certain range; moreover that range is roughly the same when scoring different rows. This is why alpha was
* made static -- so that its value from previous scoring round can be reused to achieve faster convergence.
* This approach is not thread-safe! If we ever make GenModel capable of scoring multiple rows in parallel, this
* will have to be changed to make updates to alpha synchronized.
*/
// private double alpha = 1.0; // Do not share across class instances.
private static final double DOWN_FACTOR = 0.5;
private static final double UP_FACTOR = Math.pow(1.0/DOWN_FACTOR, 1.0/4);
public long _rcnt = 0; // incremented per row; can be set to different values to ensure reproducibility
public int _numAlphaFactors = 10;
public double[] _allAlphas;
static {
//noinspection ConstantAssertCondition,ConstantConditions
assert DOWN_FACTOR < 1 && DOWN_FACTOR > 0;
assert UP_FACTOR > 1;
}
private static EnumSet<ModelCategory> CATEGORIES = EnumSet.of(ModelCategory.AutoEncoder, ModelCategory.DimReduction);
@Override public EnumSet<ModelCategory> getModelCategories() {
return CATEGORIES;
}
public GlrmMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
@Override public int getPredsSize(ModelCategory mc) {
return _ncolX;
}
public static double[] initializeAlphas(int numAlpha) {
double[] alphas = new double[numAlpha];
double alpha = 1.0;
for (int index=0; index < numAlpha; index++) {
alpha *= DOWN_FACTOR;
alphas[index] = alpha;
}
return alphas;
}
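// Illustrative note (not part of the original source): initializeAlphas(4) returns
// {0.5, 0.25, 0.125, 0.0625} -- a geometric ladder of step sizes, each DOWN_FACTOR
// times the previous one, which applyBestAlpha scales and tries in turn.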
public double[] score0(double[] row, double[] preds, long seedValue) {
assert row.length == _ncolA;
assert preds.length == _ncolX;
assert _nrowY == _ncolX;
assert _archetypes.length == _nrowY;
assert _archetypes[0].length == _ncolY;
// Step 0: prepare the data row
double[] a = getRowData(row);
// Step 1: initialize X (for now do Random initialization only)
double[] x = new double[_ncolX];
double[] u = new double[_ncolX];
Random random = new Random(seedValue); // change the random seed every time it is used
for (int i = 0; i < _ncolX; i++) // randomly generate initial x coefficients
x[i] = random.nextGaussian();
x = _regx.project(x, random);
// Step 2: update X based on prox-prox algorithm, iterate until convergence
double obj = objective(x, a);
double oldObj = obj; // store original obj value
boolean done = false;
int iters = 0;
while (!done && iters++ < _iterNumber) {
// Compute the gradient of the loss function
double[] grad = gradientL(x, a);
// Try to make a step of size alpha, until we can achieve improvement in the objective.
obj = applyBestAlpha(u, x, grad, a, oldObj, random);
double obj_improvement = 1 - obj/oldObj;
if ((obj_improvement < 0) || (obj_improvement < _accuracyEps))
done = true; // not getting improvement or significant improvement, quit
oldObj = obj;
}
System.arraycopy(x, 0, preds, 0, _ncolX);
return preds;
}
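// Illustrative note (not part of the original source): the loop above is a proximal
// gradient method on X with a randomly initialized, regularizer-projected starting
// point -- each iteration takes the step x <- prox(x - alpha * grad, alpha * gammax)
// for the best alpha found, and stops once the relative objective improvement drops
// below _accuracyEps or _iterNumber iterations are reached.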
public double[] getRowData(double[] row) {
double[] a = new double[_ncolA];
for (int i=0; i < _ncats; i++) {
double temp = row[_permutation[i]];
a[i] = (temp>=_numLevels[i])?Double.NaN:temp; // set unseen levels to NaN
}
for (int i = _ncats; i < _ncolA; i++)
a[i] = row[_permutation[i]];
return a;
}
/***
* This method tries a set of arbitrary alpha values and picks the one that yields the best objective
* improvement.
*
* @param u scratch buffer holding the tentative gradient step
* @param x current X coefficients, updated in place when a better objective is found
* @param grad gradient of the loss function evaluated at x
* @param a prepared input data row
* @param oldObj objective value at the current x
* @param random random number generator used by the regularizer
* @return the lowest objective value found
*/
public double applyBestAlpha(double[] u, double[] x, double[] grad, double[] a, double oldObj, Random random) {
double[] bestX = new double[x.length];
double lowestObj = Double.MAX_VALUE;
if (oldObj == 0) { // optimization is done; the loss is already zero.
return 0;
}
double alphaScale = oldObj > 10?(1.0/oldObj):1.0;
for (int index=0; index < _numAlphaFactors; index++) {
double alpha = _allAlphas[index]*alphaScale; // scale according to the objective function size
// Compute the tentative new x (using the prox algorithm)
for (int k = 0; k < _ncolX; k++) {
u[k] = x[k] - alpha * grad[k];
}
double[] xnew = _regx.rproxgrad(u, alpha * _gammax, random);
double newobj = objective(xnew, a);
if (lowestObj > newobj) {
System.arraycopy(xnew, 0, bestX, 0, xnew.length);
lowestObj = newobj;
}
if (newobj == 0)
break;
}
if (lowestObj < oldObj) // only update x if the best candidate improves the objective
System.arraycopy(bestX, 0, x, 0, x.length);
return lowestObj;
}
/**
* This function corresponds to the DimReduction model category
*/
@Override
public double[] score0(double[] row, double[] preds) {
return score0(row, preds, _seed+_rcnt++);
}
// impute data from x and archetypes
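// Descriptive note (inferred from the code below): each numeric column j is reconstructed as
// losses[j].impute(x * y_j), optionally de-standardized via pred/normMul + normSub, while each
// categorical column is decoded from the block product x * Y_j with losses[j].mimpute(..).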
public static double[] impute_data(double[] xfactor, double[] preds, int nnums, int ncats, int[] permutation,
boolean reverse_transform, double[] normMul, double[] normSub, GlrmLoss[] losses,
boolean transposed, double[][] archetypes_raw, int[] catOffsets, int[] numLevels) {
assert preds.length == nnums + ncats;
// Categorical columns
for (int d = 0; d <ncats; d++) {
double[] xyblock = lmulCatBlock(xfactor,d, numLevels, transposed, archetypes_raw, catOffsets);
preds[permutation[d]] = losses[d].mimpute(xyblock);
}
// Numeric columns
for (int d = ncats; d < preds.length; d++) {
int ds = d - ncats;
double xy = lmulNumCol(xfactor, ds, transposed, archetypes_raw, catOffsets);
preds[permutation[d]] = losses[d].impute(xy);
if (reverse_transform)
preds[permutation[d]] = preds[permutation[d]] / normMul[ds] + normSub[ds];
}
return preds;
}
// For j = 0 to number of numeric columns - 1
public static int getNumCidx(int j, int[] catOffsets) {
return catOffsets[catOffsets.length-1]+j;
}
// Inner product x * y_j where y_j is numeric column j of Y
public static double lmulNumCol(double[] x, int j, boolean transposed, double[][] archetypes_raw, int[] catOffsets) {
assert x != null && x.length == rank(transposed, archetypes_raw) : "x must be of length " + rank(transposed, archetypes_raw);
int cidx = getNumCidx(j, catOffsets);
double prod = 0;
if (transposed) {
for (int k = 0; k < rank(transposed, archetypes_raw); k++)
prod += x[k] * archetypes_raw[cidx][k];
} else {
for (int k = 0; k < rank(transposed, archetypes_raw); k++)
prod += x[k] * archetypes_raw[k][cidx];
}
return prod;
}
// For j = 0 to number of categorical columns - 1, and level = 0 to number of levels in categorical column - 1
public static int getCatCidx(int j, int level, int[] numLevels, int[] catOffsets) {
int catColJLevel = numLevels[j];
assert catColJLevel != 0 : "Number of levels in categorical column cannot be zero";
assert !Double.isNaN(level) && level >= 0 && level < catColJLevel : "Got level = " + level +
" when expected integer in [0," + catColJLevel + ")";
return catOffsets[j]+level;
}
// Vector-matrix product x * Y_j where Y_j is block of Y corresponding to categorical column j
public static double[] lmulCatBlock(double[] x, int j, int[] numLevels, boolean transposed, double[][] archetypes_raw, int[] catOffsets) {
int catColJLevel = numLevels[j];
assert catColJLevel != 0 : "Number of levels in categorical column cannot be zero";
assert x != null && x.length == rank(transposed, archetypes_raw) : "x must be of length " +
rank(transposed, archetypes_raw);
double[] prod = new double[catColJLevel];
if (transposed) {
for (int level = 0; level < catColJLevel; level++) {
int cidx = getCatCidx(j,level, numLevels, catOffsets);
for (int k = 0; k < rank(transposed, archetypes_raw); k++)
prod[level] += x[k] * archetypes_raw[cidx][k];
}
} else {
for (int level = 0; level < catColJLevel; level++) {
int cidx = getCatCidx(j,level, numLevels, catOffsets);
for (int k = 0; k < rank(transposed, archetypes_raw); k++)
prod[level] += x[k] * archetypes_raw[k][cidx];
}
}
return prod;
}
public static int rank(boolean transposed, double[][] archetypes_raw) {
return transposed ? archetypes_raw[0].length : archetypes_raw.length;
}
/**
 * Compute gradient of the objective function with respect to x, i.e. d/dx Sum_j[L_j(xY_j, a)]
 * @param x current x row
 * @param a the adapted data row
 */
private double[] gradientL(double[] x, double[] a) {
// Prepare output row
double[] grad = new double[_ncolX];
// Categorical columns
int cat_offset = 0;
for (int j = 0; j < _ncats; j++) {
if (Double.isNaN(a[j])) continue; // Skip missing observations in row
int n_levels = _numLevels[j];
// Calculate xy = x * Y_j where Y_j is sub-matrix corresponding to categorical col j
double[] xy = new double[n_levels];
for (int level = 0; level < n_levels; level++) {
for (int k = 0; k < _ncolX; k++) {
xy[level] += x[k] * _archetypes[k][level + cat_offset];
}
}
// Gradient wrt x is matrix product \grad L_j(x * Y_j, A_j) * Y_j'
double[] gradL = _losses[j].mlgrad(xy, (int) a[j]);
for (int k = 0; k < _ncolX; k++) {
for (int c = 0; c < n_levels; c++)
grad[k] += gradL[c] * _archetypes[k][c + cat_offset];
}
cat_offset += n_levels;
}
// Numeric columns
for (int j = _ncats; j < _ncolA; j++) {
int js = j - _ncats;
if (Double.isNaN(a[j])) continue; // Skip missing observations in row
// Inner product x * y_j
double xy = 0;
for (int k = 0; k < _ncolX; k++)
xy += x[k] * _archetypes[k][js + cat_offset];
// Sum over y_j weighted by gradient of loss \grad L_j(x * y_j, A_j)
double gradL = _losses[j].lgrad(xy, (a[j] - _normSub[js]) * _normMul[js]);
for (int k = 0; k < _ncolX; k++)
grad[k] += gradL * _archetypes[k][js + cat_offset];
}
return grad;
}
private double objective(double[] x, double[] a) {
double res = 0;
// Loss: Categorical columns
int cat_offset = 0;
for (int j = 0; j < _ncats; j++) {
if (Double.isNaN(a[j])) continue; // Skip missing observations in row
int n_levels = _numLevels[j];
double[] xy = new double[n_levels];
for (int level = 0; level < n_levels; level++) {
for (int k = 0; k < _ncolX; k++) {
xy[level] += x[k] * _archetypes[k][level + cat_offset];
}
}
res += _losses[j].mloss(xy, (int) a[j]);
cat_offset += n_levels;
}
// Loss: Numeric columns
for (int j = _ncats; j < _ncolA; j++) {
int js = j - _ncats;
if (Double.isNaN(a[j])) continue; // Skip missing observations in row
double xy = 0;
for (int k = 0; k < _ncolX; k++)
xy += x[k] * _archetypes[k][js + cat_offset];
res += _losses[j].loss(xy, (a[j] - _normSub[js]) * _normMul[js]);
}
res += _gammax * _regx.regularize(x);
return res;
}
@Override
public String[] getOutputNames() {
String[] names = new String[_ncolX];
for (int i = 0; i < names.length; i++) {
names[i] = "Arch" + (i + 1);
}
return names;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/glrm/GlrmMojoReader.java
|
package hex.genmodel.algos.glrm;
import hex.genmodel.ModelMojoReader;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
 * Reader for Generalized Low Rank Model MOJOs.
 */
public class GlrmMojoReader extends ModelMojoReader<GlrmMojoModel> {
@Override
public String getModelName() {
return "Generalized Low Rank Model";
}
@Override
protected void readModelData() throws IOException {
_model._ncolA = readkv("ncolA");
_model._ncolY = readkv("ncolY");
_model._nrowY = readkv("nrowY");
_model._ncolX = readkv("ncolX");
_model._regx = GlrmRegularizer.valueOf((String) readkv("regularizationX"));
_model._gammax = readkv("gammaX");
_model._init = GlrmInitialization.valueOf((String) readkv("initialization"));
_model._ncats = readkv("num_categories");
_model._nnums = readkv("num_numeric");
_model._normSub = readkv("norm_sub");
_model._normMul = readkv("norm_mul");
_model._permutation = readkv("cols_permutation");
// loss functions
_model._losses = new GlrmLoss[_model._ncolA];
int li = 0;
for (String line : readtext("losses")) {
_model._losses[li++] = GlrmLoss.valueOf(line);
}
// archetypes
_model._numLevels = readkv("num_levels_per_category");
_model._archetypes = new double[_model._nrowY][];
ByteBuffer bb = ByteBuffer.wrap(readblob("archetypes"));
for (int i = 0; i < _model._nrowY; i++) {
double[] row = new double[_model._ncolY];
_model._archetypes[i] = row;
for (int j = 0; j < _model._ncolY; j++)
row[j] = bb.getDouble();
}
// new fields added after version 1.00
try {
_model._seed = readkv("seed", 0L);
_model._reverse_transform = readkv("reverse_transform");
_model._transposed = readkv("transposed");
_model._catOffsets = readkv("catOffsets");
// load in archetypes raw
if (_model._transposed) {
_model._archetypes_raw = new double[_model._archetypes[0].length][_model._archetypes.length];
for (int row = 0; row < _model._archetypes.length; row++) {
for (int col = 0; col < _model._archetypes[0].length; col++) {
_model._archetypes_raw[col][row] = _model._archetypes[row][col];
}
}
} else
_model._archetypes_raw = _model._archetypes;
} catch (NullPointerException re) { // keys absent in older (1.00) MOJOs; only a NullPointerException is expected here.
_model._seed = System.currentTimeMillis(); // randomly initialize seed
_model._reverse_transform = true;
_model._transposed = true;
_model._catOffsets = null;
_model._archetypes_raw = null;
}
}
@Override
protected GlrmMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
GlrmMojoModel glrmModel = new GlrmMojoModel(columns, domains, responseColumn);
glrmModel._allAlphas = GlrmMojoModel.initializeAlphas(glrmModel._numAlphaFactors); // set _allAlphas array
return glrmModel;
}
@Override public String mojoVersion() {
return "1.10";
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/glrm/GlrmRegularizer.java
|
package hex.genmodel.algos.glrm;
import hex.genmodel.utils.ArrayUtils;
import hex.genmodel.utils.MathUtils;
import java.util.Random;
/**
* Regularization method for matrices X and Y in the GLRM algorithm.
*
* Examples:
* + Non-negative matrix factorization (NNMF): r_x = r_y = NonNegative
* + Orthogonal NNMF: r_x = OneSparse, r_y = NonNegative
* + K-means clustering: r_x = UnitOneSparse, r_y = 0 (\gamma_y = 0)
* + Quadratic mixture: r_x = Simplex, r_y = 0 (\gamma_y = 0)
*/
public enum GlrmRegularizer {
None {
@Override public double regularize(double[] u) {
return 0;
}
@Override public double[] rproxgrad(double[] u, double delta, Random rand) {
return u;
}
@Override public double[] project(double[] u, Random rand) {
return u;
}
},
Quadratic {
@Override public double regularize(double[] u) {
if (u == null) return 0;
double ureg = 0;
for (double ui : u) ureg += ui * ui;
return ureg;
}
@Override public double[] rproxgrad(double[] u, double delta, Random rand) {
if (u == null || delta == 0) return u;
double[] v = new double[u.length];
double f = 1/(1 + 2*delta);
for (int i = 0; i < u.length; i++)
v[i] = u[i] * f;
return v;
}
@Override public double[] project(double[] u, Random rand) {
return u;
}
},
L2 {
@Override public double regularize(double[] u) {
if (u == null) return 0;
double ureg = 0;
for (double ui : u) ureg += ui * ui;
return Math.sqrt(ureg);
}
@Override public double[] rproxgrad(double[] u, double delta, Random rand) {
if (u == null || delta == 0) return u;
double[] v = new double[u.length];
// Proof uses Moreau decomposition;
// see section 6.5.1 of Parikh and Boyd https://web.stanford.edu/~boyd/papers/pdf/prox_algs.pdf
double weight = 1 - delta/ArrayUtils.l2norm(u);
if (weight < 0) return v; // Zero vector
for (int i = 0; i < u.length; i++)
v[i] = weight * u[i];
return v;
}
@Override public double[] project(double[] u, Random rand) {
return u;
}
},
L1 {
@Override public double regularize(double[] u) {
if (u == null) return 0;
double ureg = 0;
for (double ui : u) ureg += Math.abs(ui);
return ureg;
}
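// The L1 proximal operator below is soft-thresholding: prox_{delta*||.||_1}(u)_i =
// sign(u_i) * max(|u_i| - delta, 0), written as max(u_i - delta, 0) + min(u_i + delta, 0).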
@Override public double[] rproxgrad(double[] u, double delta, Random rand) {
if (u == null || delta == 0) return u;
double[] v = new double[u.length];
for (int i = 0; i < u.length; i++)
v[i] = Math.max(u[i] - delta, 0) + Math.min(u[i] + delta, 0);
return v;
}
@Override public double[] project(double[] u, Random rand) {
return u;
}
},
NonNegative {
@Override public double regularize(double[] u) {
if (u == null) return 0;
for (double ui : u)
if (ui < 0)
return Double.POSITIVE_INFINITY;
return 0;
}
@Override public double[] rproxgrad(double[] u, double delta, Random rand) {
if (u == null || delta == 0) return u;
double[] v = new double[u.length];
for (int i = 0; i < u.length; i++)
v[i] = Math.max(u[i], 0);
return v;
}
// Proximal operator of indicator function for a set C is (Euclidean) projection onto C
@Override public double[] project(double[] u, Random rand) {
return u == null? null : rproxgrad(u, 1, rand);
}
},
OneSparse {
@Override public double regularize(double[] u) {
if (u == null) return 0;
int card = 0;
for (double ui : u) {
if (ui < 0) return Double.POSITIVE_INFINITY;
else if (ui > 0) card++;
}
return card == 1 ? 0 : Double.POSITIVE_INFINITY;
}
@Override public double[] rproxgrad(double[] u, double delta, Random rand) {
if (u == null || delta == 0) return u;
double[] v = new double[u.length];
int idx = ArrayUtils.maxIndex(u, rand);
v[idx] = u[idx] > 0 ? u[idx] : 1e-6;
return v;
}
@Override public double[] project(double[] u, Random rand) {
return u == null? null : rproxgrad(u, 1, rand);
}
},
UnitOneSparse {
@Override public double regularize(double[] u) {
if (u == null) return 0;
int ones = 0, zeros = 0;
for (double ui : u) {
if (ui == 1) ones++;
else if (ui == 0) zeros++;
else return Double.POSITIVE_INFINITY;
}
return ones == 1 && zeros == u.length-1 ? 0 : Double.POSITIVE_INFINITY;
}
@Override public double[] rproxgrad(double[] u, double delta, Random rand) {
if (u == null || delta == 0) return u;
double[] v = new double[u.length];
int idx = ArrayUtils.maxIndex(u, rand);
v[idx] = 1;
return v;
}
@Override public double[] project(double[] u, Random rand) {
return u == null? null : rproxgrad(u, 1, rand);
}
},
Simplex {
@Override public double regularize(double[] u) {
if (u == null) return 0;
double sum = 0, absum = 0;
for (double ui : u) {
if (ui < 0) return Double.POSITIVE_INFINITY;
else {
sum += ui;
absum += Math.abs(ui);
}
}
return MathUtils.equalsWithinRecSumErr(sum, 1.0, u.length, absum) ? 0 : Double.POSITIVE_INFINITY;
}
@Override public double[] rproxgrad(double[] u, double delta, Random rand) {
if (u == null || delta == 0) return u;
// Proximal gradient algorithm by Chen and Ye in http://arxiv.org/pdf/1101.6081v2.pdf
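// Worked example (illustrative): projecting u = {0.5, 0.5, 0.5} takes the default
// t* = (1.5 - 1)/3 = 1/6 and returns x = {1/3, 1/3, 1/3}, which sums to 1 as required.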
// 1) Sort input vector u in ascending order: u[1] <= ... <= u[n]
int n = u.length;
int[] idxs = new int[n];
for (int i = 0; i < n; i++) idxs[i] = i;
ArrayUtils.sort(idxs, u);
// 2) Calculate cumulative sum of u in descending order
// cumsum(u) = (..., u[n-2]+u[n-1]+u[n], u[n-1]+u[n], u[n])
double[] ucsum = new double[n];
ucsum[n-1] = u[idxs[n-1]];
for (int i = n-2; i >= 0; i--)
ucsum[i] = ucsum[i+1] + u[idxs[i]];
// 3) Let t_i = (\sum_{j=i+1}^n u[j] - 1)/(n - i)
// For i = n-1,...,1, set optimal t* to first t_i >= u[i]
double t = (ucsum[0] - 1)/n; // Default t* = (\sum_{j=1}^n u[j] - 1)/n
for (int i = n-1; i >= 1; i--) {
double tmp = (ucsum[i] - 1)/(n - i);
if (tmp >= u[idxs[i-1]]) {
t = tmp;
break;
}
}
// 4) Return max(u - t*, 0) as projection of u onto simplex
double[] x = new double[u.length];
for (int i = 0; i < u.length; i++)
x[i] = Math.max(u[i] - t, 0);
return x;
}
@Override public double[] project(double[] u, Random rand) {
double reg = regularize(u); // Check if inside simplex before projecting since algo is complicated
if (reg == 0) return u;
return rproxgrad(u, 1, rand);
}
};
/** Regularization function applied to a single row x_i or column y_j */
public abstract double regularize(double[] u);
/** Regularization applied to an entire matrix (sum over rows) */
public final double regularize(double[][] u) {
if (u == null || this == None) return 0;
double ureg = 0;
for (double[] uarr : u) {
ureg += regularize(uarr);
if (Double.isInfinite(ureg)) break;
}
return ureg;
}
/** \prox_{\alpha_k*r}(u): Proximal gradient of (step size) * (regularization function) evaluated at vector u */
public abstract double[] rproxgrad(double[] u, double delta, Random rand);
/** Project X,Y matrices into appropriate subspace so regularizer is finite. Used during initialization. */
public abstract double[] project(double[] u, Random rand);
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/isofor/IsolationForestMojoModel.java
|
package hex.genmodel.algos.isofor;
import hex.genmodel.algos.tree.SharedTreeMojoModel;
public final class IsolationForestMojoModel extends SharedTreeMojoModel {
int _min_path_length;
int _max_path_length;
boolean _outputAnomalyFlag;
public IsolationForestMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
@Override
public double[] score0(double[] row, double[] preds) {
return score0(row, 0.0, preds);
}
@Override
public double[] score0(double[] row, double offset, double[] preds) {
super.scoreAllTrees(row, preds);
return unifyPreds(row, offset, preds);
}
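// Descriptive note: unifyPreds normalizes the accumulated path length into [0, 1]; rows with
// shorter average paths (easier to isolate) score closer to 1, and mpLength is the mean path
// length across the trees.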
@Override
public double[] unifyPreds(double[] row, double offset, double[] preds) {
double mpLength = 0;
if (_ntree_groups >= 1 && preds.length > 1) {
mpLength = preds[0] / _ntree_groups;
}
double score = _max_path_length > _min_path_length ?
(_max_path_length - preds[0]) / (_max_path_length - _min_path_length) : 1;
if (_outputAnomalyFlag) {
preds[0] = score > _defaultThreshold ? 1 : 0;
preds[1] = score;
preds[2] = mpLength;
} else {
preds[0] = score;
preds[1] = mpLength;
}
return preds;
}
@Override
public double getInitF() {
return 0;
}
@Override
public int getPredsSize() {
return _outputAnomalyFlag ? 3 : 2;
}
@Override
public String[] getOutputNames() {
if (_outputAnomalyFlag) {
return new String[]{"predict", "score", "mean_length"};
} else {
return new String[]{"predict", "mean_length"};
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/isofor/IsolationForestMojoReader.java
|
package hex.genmodel.algos.isofor;
import hex.genmodel.algos.tree.SharedTreeMojoReader;
import java.io.IOException;
public class IsolationForestMojoReader extends SharedTreeMojoReader<IsolationForestMojoModel> {
@Override
public String getModelName() {
return "Isolation Forest";
}
@Override
protected void readModelData() throws IOException {
super.readModelData();
_model._min_path_length = readkv("min_path_length", 0);
_model._max_path_length = readkv("max_path_length", 0);
_model._outputAnomalyFlag = readkv("output_anomaly_flag", false);
}
@Override
protected IsolationForestMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
return new IsolationForestMojoModel(columns, domains, responseColumn);
}
@Override public String mojoVersion() {
return "1.40";
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/isoforextended/ExtendedIsolationForestMojoModel.java
|
package hex.genmodel.algos.isoforextended;
import hex.genmodel.MojoModel;
import hex.genmodel.algos.tree.ScoreIsolationTree;
import hex.genmodel.algos.tree.ScoreIsolationTree0;
import hex.genmodel.utils.ArrayUtils;
import hex.genmodel.utils.ByteBufferWrapper;
import hex.genmodel.utils.MathUtils;
public final class ExtendedIsolationForestMojoModel extends MojoModel {
public static final int NODE = 'N';
public static final int LEAF = 'L';
int _ntrees;
long _sample_size;
byte[][] _compressedTrees;
private ScoreIsolationTree _scoreIsolationTree;
public ExtendedIsolationForestMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
public void postInit() {
_scoreIsolationTree = new ScoreIsolationTree0();
}
@Override
public double[] score0(double[] row, double[] preds) {
return score0(row, 0.0, preds);
}
@Override
public double[] score0(double[] row, double offset, double[] preds) {
double pathLength = 0;
for(int treeId = 0; treeId < _ntrees; treeId++) {
double iTreeScore = _scoreIsolationTree.scoreTree(_compressedTrees[treeId], row);
pathLength += iTreeScore;
}
pathLength = pathLength / _ntrees;
double anomalyScore = anomalyScore(pathLength, _sample_size);
preds[0] = anomalyScore;
preds[1] = pathLength;
return preds;
}
@Override
public int getPredsSize() {
return 2;
}
@Override
public String[] getOutputNames() {
return new String[]{"anomaly_score", "mean_length"};
}
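// Descriptive note: scoreTree0 walks one serialized isolation tree. At each inner node the row
// is compared against a random hyperplane via (row - p) . n (as the subAndMul helper suggests),
// going left when the product is <= 0; a leaf contributes its depth plus the expected remaining
// path length for the number of rows it holds.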
public static double scoreTree0(byte[] isolationTree, double[] row) {
ByteBufferWrapper ab = new ByteBufferWrapper(isolationTree);
int sizeOfBranchingArrays = ab.get4();
double[] tempN = new double[sizeOfBranchingArrays];
double[] tempP = new double[sizeOfBranchingArrays];
int tempNodeNumber, tempNodeType, tempNumRows, height = 0, findNodeNumber = 0;
final int SIZE_OF_NODE = 2*sizeOfBranchingArrays*8;
final int SIZE_OF_LEAF = 4;
double pathLength = -1;
while(ab.hasRemaining()) {
tempNodeNumber = ab.get4();
tempNodeType = ab.get1U();
if (tempNodeNumber != findNodeNumber) {
if (tempNodeType == NODE) {
ab.skip(SIZE_OF_NODE);
} else if (tempNodeType == LEAF) {
ab.skip(SIZE_OF_LEAF);
} else {
throw new UnsupportedOperationException("Unknown node type: " + tempNodeType);
}
continue;
}
if (tempNodeType == NODE) {
loadNode(ab, tempN, tempP);
double mul = ArrayUtils.subAndMul(row, tempP, tempN);
if (mul <= 0) {
// go left
height++;
findNodeNumber = leftChildIndex(tempNodeNumber);
} else {
// go right
height++;
findNodeNumber = rightChildIndex(tempNodeNumber);
}
} else if (tempNodeType == LEAF) {
tempNumRows = ab.get4();
pathLength = height + averagePathLengthOfUnsuccessfulSearch(tempNumRows);
break;
} else {
throw new UnsupportedOperationException("Unknown node type: " + tempNodeType);
}
}
return pathLength;
}
private static void loadNode(ByteBufferWrapper ab, double[] n, double[] p) {
for (int i = 0; i < n.length; i++) {
n[i] = ab.get8d();
}
for (int i = 0; i < n.length; i++) {
p[i] = ab.get8d();
}
}
public static int leftChildIndex(int i) {
return 2 * i + 1;
}
public static int rightChildIndex(int i) {
return 2 * i + 2;
}
/**
 * Anomaly score computation; comes from Equation 1 in the paper.
 *
 * @param pathLength path length from root to leaf (score0 passes the mean over all trees)
 * @param sample_size number of rows used to build each tree
 * @return anomaly score in range [0, 1]
 */
public static double anomalyScore(double pathLength, long sample_size) {
return Math.pow(2, -1 * (pathLength /
averagePathLengthOfUnsuccessfulSearch(sample_size)));
}
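// Illustrative check (added note): a row whose mean path length equals the expected
// unsuccessful-search length c(n) scores 2^(-1) = 0.5; shorter paths push the score toward 1.
// For example, with sample_size = 256, c(n) is roughly 10.2.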
/**
 * Gives the average path length of an unsuccessful search in a BST.
 * Comes from Algorithm 3 (pathLength) and Equation 2 in the paper.
 *
 * @param n number of elements
 */
public static double averagePathLengthOfUnsuccessfulSearch(long n) {
if (n < 2)
return 0;
if (n == 2)
return 1;
return 2 * MathUtils.harmonicNumberEstimation(n - 1) - (2.0 * (n - 1.0)) / n;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/isoforextended/ExtendedIsolationForestMojoReader.java
|
package hex.genmodel.algos.isoforextended;
import hex.genmodel.ModelMojoReader;
import hex.genmodel.algos.tree.ScoreTree0;
import hex.genmodel.algos.tree.ScoreTree1;
import hex.genmodel.algos.tree.ScoreTree2;
import java.io.IOException;
public class ExtendedIsolationForestMojoReader extends ModelMojoReader<ExtendedIsolationForestMojoModel> {
@Override
public String getModelName() {
return "Extended Isolation Forest";
}
@Override
protected void readModelData() throws IOException {
_model._ntrees = readkv("ntrees", 0);
_model._sample_size = readkv("sample_size", 0);
_model._compressedTrees = new byte[_model._ntrees][];
for (int treeId = 0; treeId < _model._ntrees; treeId++) {
String blobName = String.format("trees/t%02d.bin", treeId);
_model._compressedTrees[treeId] = readblob(blobName);
}
_model.postInit();
}
@Override
protected ExtendedIsolationForestMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
return new ExtendedIsolationForestMojoModel(columns, domains, responseColumn);
}
@Override public String mojoVersion() {
return "1.00";
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/isotonic/IsotonicCalibrator.java
|
package hex.genmodel.algos.isotonic;
import java.io.Serializable;
public class IsotonicCalibrator implements Serializable {
public final double _min_x;
public final double _max_x;
public final double[] _thresholds_x;
public final double[] _thresholds_y;
public IsotonicCalibrator(double minX, double maxX, double[] thresholdsX, double[] thresholdsY) {
_min_x = minX;
_max_x = maxX;
_thresholds_x = thresholdsX;
_thresholds_y = thresholdsY;
}
public double calibrateP1(double p1) {
final double x = IsotonicRegressionUtils.clip(p1, _min_x, _max_x);
return IsotonicRegressionUtils.score(x, _min_x, _max_x, _thresholds_x, _thresholds_y);
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/isotonic/IsotonicRegressionMojoModel.java
|
package hex.genmodel.algos.isotonic;
import hex.genmodel.MojoModel;
public class IsotonicRegressionMojoModel extends MojoModel {
protected IsotonicCalibrator _isotonic_calibrator;
public IsotonicRegressionMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
@Override
public double[] score0(double[] row, double[] preds) {
preds[0] = _isotonic_calibrator.calibrateP1(row[0]);
return preds;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/isotonic/IsotonicRegressionMojoReader.java
|
package hex.genmodel.algos.isotonic;
import hex.genmodel.ModelMojoReader;
import java.io.IOException;
public class IsotonicRegressionMojoReader extends ModelMojoReader<IsotonicRegressionMojoModel> {
@Override
public String getModelName() {
return "Isotonic Regression";
}
@Override
public String mojoVersion() {
return "1.00";
}
@Override
protected void readModelData() throws IOException {
_model._isotonic_calibrator = readIsotonicCalibrator();
}
@Override
protected IsotonicRegressionMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
return new IsotonicRegressionMojoModel(columns, domains, responseColumn);
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/isotonic/IsotonicRegressionUtils.java
|
package hex.genmodel.algos.isotonic;
import java.util.Arrays;
public class IsotonicRegressionUtils {
public static double score(double x, double minX, double maxX,
double[] thresholdsX, double[] thresholdsY) {
if (Double.isNaN(x) || x < minX || x > maxX) {
return Double.NaN;
}
final int pos = Arrays.binarySearch(thresholdsX, x);
final double y;
if (pos >= 0) {
y = thresholdsY[pos];
} else {
final int lo = -pos - 2;
final int hi = lo + 1;
assert lo >= 0;
assert hi < thresholdsX.length;
assert x > thresholdsX[lo];
assert x < thresholdsX[hi];
y = interpolate(x, thresholdsX[lo], thresholdsX[hi],
thresholdsY[lo], thresholdsY[hi]);
}
return y;
}
public static double clip(double x, double min, double max) {
final double clipped;
if (Double.isNaN(x))
clipped = Double.NaN;
else if (x < min)
clipped = min;
else
clipped = Math.min(x, max);
return clipped;
}
static double interpolate(double x, double xLo, double xHi, double yLo, double yHi) {
final double slope = (yHi - yLo) / (xHi - xLo);
return yLo + slope * (x - xLo);
}
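// Illustrative usage (added sketch, not part of the original API): score(..) binary-searches
// the x-thresholds and linearly interpolates between the two bracketing points.
public static void main(String[] args) {
double[] thresholdsX = {0.0, 0.5, 1.0};
double[] thresholdsY = {0.1, 0.4, 0.9};
// 0.75 falls between 0.5 and 1.0: 0.4 + (0.9 - 0.4) / (1.0 - 0.5) * (0.75 - 0.5) = 0.65
System.out.println(score(0.75, 0.0, 1.0, thresholdsX, thresholdsY)); // prints 0.65
}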
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/klime/KLimeMojoModel.java
|
package hex.genmodel.algos.klime;
import hex.ModelCategory;
import hex.genmodel.MojoModel;
import hex.genmodel.algos.glm.GlmMojoModel;
import java.util.EnumSet;
public class KLimeMojoModel extends MojoModel {
MojoModel _clusteringModel;
MojoModel _globalRegressionModel;
MojoModel[] _clusterRegressionModels;
int[] _rowSubsetMap;
@Override public EnumSet<ModelCategory> getModelCategories() {
return EnumSet.of(ModelCategory.Regression, ModelCategory.KLime);
}
KLimeMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
@Override
public double[] score0(double[] row, double[] preds) {
assert preds.length == row.length + 2;
//K-Means scoring
double[] predsSubset = new double[_clusteringModel.nfeatures()+2];
double[] rowSubset = new double[_clusteringModel.nfeatures()];
for(int j = 0; j < _clusteringModel._names.length; j++) {
rowSubset[j] = row[_rowSubsetMap[j]];
}
_clusteringModel.score0(rowSubset, predsSubset);
final int cluster = (int) predsSubset[0];
//GLM scoring
GlmMojoModel regressionModel = getRegressionModel(cluster);
regressionModel.score0(row, preds);
preds[1] = cluster;
for (int i = 2; i < preds.length; i++)
preds[i] = Double.NaN;
// preds = {prediction, cluster, NaN, ..., NaN}
regressionModel.applyCoefficients(row, preds, 2);
// preds = {prediction, cluster, reason code 1, ..., reason code N}
return preds;
}
public GlmMojoModel getRegressionModel(int cluster) {
return (GlmMojoModel) (_clusterRegressionModels[cluster] != null ?
_clusterRegressionModels[cluster] : _globalRegressionModel);
}
@Override
public int getPredsSize() {
return nfeatures() + 2;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/klime/KLimeMojoReader.java
|
package hex.genmodel.algos.klime;
import hex.genmodel.MojoModel;
import hex.genmodel.MultiModelMojoReader;
import java.io.IOException;
public class KLimeMojoReader extends MultiModelMojoReader<KLimeMojoModel> {
@Override
protected void readParentModelData() throws IOException {
int clusterNum = readkv("cluster_num", 0);
_model._clusteringModel = getModel((String) readkv("clustering_model"));
_model._globalRegressionModel = getModel((String) readkv("global_regression_model"));
_model._clusterRegressionModels = new MojoModel[clusterNum];
_model._rowSubsetMap = new int[_model._clusteringModel._names.length];
for (int i = 0; i < clusterNum; i++) {
String modelKey = readkv("cluster_regression_model_" + i);
if (modelKey != null)
_model._clusterRegressionModels[i] = getModel(modelKey);
}
//Subset row to columns used for kmeans (can be less than number of columns passed to k-LIME)
//Placed here as it only needs to be done once
for(int i = 0; i < _model._globalRegressionModel._names.length; i++){
for(int j = 0; j < _model._clusteringModel._names.length; j++) {
if (_model._globalRegressionModel._names[i].equals(_model._clusteringModel._names[j])) {
_model._rowSubsetMap[j] = i;
}
}
}
}
@Override
public String getModelName() {
return "k-LIME";
}
@Override
protected KLimeMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
return new KLimeMojoModel(columns, domains, responseColumn);
}
@Override
public String mojoVersion() {
return "1.00";
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/kmeans/KMeansMojoModel.java
|
package hex.genmodel.algos.kmeans;
import hex.genmodel.IClusteringModel;
import hex.genmodel.MojoModel;
public class KMeansMojoModel extends MojoModel implements IClusteringModel {
boolean _standardize;
double[][] _centers;
double[] _means;
double[] _mults;
int[] _modes;
KMeansMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
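// Descriptive note: when the model was trained with standardization, rows are first transformed
// with the stored means/mults (modes are assumed to impute categorical NAs) before squared
// distances to the cluster centers are computed.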
@Override
public double[] score0(double[] row, double[] preds) {
if (_standardize)
Kmeans_preprocessData(row, _means, _mults, _modes);
preds[0] = KMeans_closest(_centers, row, _domains);
return preds;
}
@Override
public int distances(double[] row, double[] distances) {
if (_standardize)
Kmeans_preprocessData(row, _means, _mults, _modes);
return KMeans_distances(_centers, row, _domains, distances);
}
@Override
public int getNumClusters() {
return _centers.length;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/kmeans/KMeansMojoReader.java
|
package hex.genmodel.algos.kmeans;
import hex.genmodel.ModelMojoReader;
import java.io.IOException;
public class KMeansMojoReader extends ModelMojoReader<KMeansMojoModel> {
@Override
public String getModelName() {
return "K-means";
}
@Override
protected void readModelData() throws IOException {
_model._standardize = readkv("standardize");
if (_model._standardize) {
_model._means = readkv("standardize_means");
_model._mults = readkv("standardize_mults");
_model._modes = readkv("standardize_modes");
}
final int centerNum = readkv("center_num");
_model._centers = new double[centerNum][];
for (int i = 0; i < centerNum; i++)
_model._centers[i] = readkv("center_" + i);
}
@Override
protected KMeansMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
return new KMeansMojoModel(columns, domains, responseColumn);
}
@Override public String mojoVersion() { return "1.00"; }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/pca/PCAMojoModel.java
|
package hex.genmodel.algos.pca;
import hex.genmodel.MojoModel;
public class PCAMojoModel extends MojoModel {
double[][] _eigenvectors_raw;
public int [] _catOffsets;
public int[] _permutation;
public int _ncats;
public int _nnums;
public double[] _normSub; // used to perform dataset transformation. When no transform is needed, will be 0
public double[] _normMul; // used to perform dataset transformation. When no transform is needed, will be 1
public boolean _use_all_factor_levels;
public String _pca_method;
public String _pca_impl;
public int _k;
public int _eigenVectorSize;
public PCAMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
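// Descriptive note: score0 projects a row onto the first _k principal components. Each
// categorical column contributes the eigenvector entry of its one-hot level (shifted by one
// when the first factor level was dropped in training); each numeric column is standardized
// by normSub/normMul before the dot product with the eigenvector.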
@Override
public double[] score0(double[] row, double[] preds) {
assert(row!=null):"input data row is null";
double[] tpred = preds==null?new double[_k]:preds; // allocate tpred in case it is null
int numStart = _catOffsets[_ncats];
assert(row.length == _nnums + _ncats):"input row length does not equal the expected size (_nnums + _ncats)";
for(int i = 0; i < _k; i++) {
tpred[i] = 0;
for (int j = 0; j < _ncats; j++) {
double tmp = row[_permutation[j]];
if (Double.isNaN(tmp)) continue; // Missing categorical values are skipped
int last_cat = _catOffsets[j+1]-_catOffsets[j]-1;
int level = (int)tmp - (_use_all_factor_levels ? 0:1); // Reduce index by 1 if first factor level dropped during training
if (level < 0 || level > last_cat) continue; // Skip categorical level in test set but not in train
tpred[i] += _eigenvectors_raw[_catOffsets[j]+level][i];
}
int dcol = _ncats;
int vcol = numStart;
for (int j = 0; j < _nnums; j++) {
tpred[i] += (row[_permutation[dcol]] - _normSub[j]) * _normMul[j] * _eigenvectors_raw[vcol][i];
dcol++; vcol++;
}
}
return tpred;
}
@Override public int getPredsSize() {
return _k;
}
@Override public int nclasses() {
return _k;
}
@Override
public String[] getOutputNames() {
String[] names = new String[_k];
for (int i = 0; i < names.length; i++) {
names[i] = "PC" + (i + 1);
}
return names;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/pca/PCAMojoReader.java
|
package hex.genmodel.algos.pca;
import hex.genmodel.ModelMojoReader;
import java.io.IOException;
import java.nio.ByteBuffer;
public class PCAMojoReader extends ModelMojoReader<PCAMojoModel>{
@Override
public String getModelName() {
return "Principal Component Analysis";
}
@Override
protected void readModelData() throws IOException {
_model._use_all_factor_levels = readkv("use_all_factor_levels");
_model._pca_method = readkv("pca_methods");
_model._pca_impl = readkv("pca_impl");
_model._k = readkv("k");
_model._permutation = readkv("permutation");
_model._ncats = readkv("ncats");
_model._nnums = readkv("nnums");
if (_model._nnums==0) {
_model._normMul = new double[0];
_model._normSub = new double[0];
} else {
_model._normSub = readkv("normSub");
_model._normMul = readkv("normMul");
}
_model._catOffsets = readkv("catOffsets");
_model._eigenVectorSize = readkv("eigenvector_size");
_model._eigenvectors_raw = new double[_model._eigenVectorSize][];
ByteBuffer bb = ByteBuffer.wrap(readblob("eigenvectors_raw"));
for (int i = 0; i < _model._eigenVectorSize; i++) {
double[] row = new double[_model._k];
_model._eigenvectors_raw[i] = row;
for (int j = 0; j < _model._k; j++)
row[j] = bb.getDouble();
}
}
@Override
protected PCAMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
return new PCAMojoModel(columns, domains, responseColumn);
}
@Override public String mojoVersion() {
return "1.00";
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/pipeline/MojoPipeline.java
|
package hex.genmodel.algos.pipeline;
import hex.genmodel.MojoModel;
import java.io.Serializable;
public class MojoPipeline extends MojoModel {
MojoModel _mainModel;
int[] _sourceRowIndices;
int[] _targetMainModelRowIndices;
int _generatedColumnCount;
PipelineSubModel[] _models;
public MojoPipeline(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
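// Descriptive note: scoring copies the pass-through input columns into the main model's row,
// runs every sub-model on its own column subset, writes the selected sub-model predictions
// into the generated columns, and finally scores the main model.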
@Override
public double[] score0(double[] row, double[] preds) {
double[] mainModelRow = new double[_targetMainModelRowIndices.length + _generatedColumnCount];
for (int i = 0; i < _targetMainModelRowIndices.length; i++) {
mainModelRow[_targetMainModelRowIndices[i]] = row[_sourceRowIndices[i]];
}
// score sub-models and populate generated fields of the main-model input row
for (PipelineSubModel psm : _models) {
double[] subModelRow = new double[psm._inputMapping.length];
for (int i = 0; i < psm._inputMapping.length; i++) {
subModelRow[i] = row[psm._inputMapping[i]];
}
double[] subModelPreds = new double[psm._predsSize];
subModelPreds = psm._mojoModel.score0(subModelRow, subModelPreds);
for (int j = 0; j < psm._sourcePredsIndices.length; j++) {
mainModelRow[psm._targetRowIndices[j]] = subModelPreds[psm._sourcePredsIndices[j]];
}
}
return _mainModel.score0(mainModelRow, preds);
}
static class PipelineSubModel implements Serializable {
int[] _inputMapping;
int _predsSize;
int[] _sourcePredsIndices;
int[] _targetRowIndices;
MojoModel _mojoModel;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/pipeline/MojoPipelineReader.java
|
package hex.genmodel.algos.pipeline;
import hex.genmodel.MojoModel;
import hex.genmodel.MultiModelMojoReader;
import java.util.LinkedList;
import java.util.Map;
import java.util.List;
import java.util.HashMap;
public class MojoPipelineReader extends MultiModelMojoReader<MojoPipeline> {
@Override
public String getModelName() {
return "MOJO Pipeline";
}
@Override
protected void readParentModelData() {
String mainModelAlias = readkv("main_model");
String[] generatedColumns = readGeneratedColumns();
_model._mainModel = getModel(mainModelAlias);
_model._generatedColumnCount = generatedColumns.length;
_model._targetMainModelRowIndices = new int[_model._mainModel._nfeatures - generatedColumns.length];
_model._sourceRowIndices = findIndices(_model._names, _model._mainModel._names, _model._mainModel._nfeatures,
_model._targetMainModelRowIndices, generatedColumns);
Map<String, List<Integer>> m2idxs = readModel2GeneratedColumnIndex();
_model._models = new MojoPipeline.PipelineSubModel[getSubModels().size() - 1];
int modelsCnt = 0;
int genColsCnt = 0;
for (Map.Entry<String, MojoModel> subModel : getSubModels().entrySet()) {
if (mainModelAlias.equals(subModel.getKey())) {
continue;
}
final MojoModel m = subModel.getValue();
final List<Integer> generatedColsIdxs = m2idxs.get(subModel.getKey());
MojoPipeline.PipelineSubModel psm = _model._models[modelsCnt++] = new MojoPipeline.PipelineSubModel();
psm._mojoModel = m;
psm._inputMapping = mapModelColumns(m);
psm._predsSize = m.getPredsSize(m.getModelCategory());
psm._sourcePredsIndices = new int[generatedColsIdxs.size()];
String[] targetColNames = new String[generatedColsIdxs.size()];
int t = 0;
for (int i : generatedColsIdxs) {
psm._sourcePredsIndices[t] = readkv("generated_column_index_" + i, 0);
targetColNames[t] = readkv("generated_column_name_" + i, "");
t++;
}
psm._targetRowIndices = findIndices(_model._mainModel._names, targetColNames);
genColsCnt += t;
}
assert modelsCnt == _model._models.length;
assert genColsCnt == _model._generatedColumnCount;
}
private Map<String, List<Integer>> readModel2GeneratedColumnIndex() {
final int cnt = readkv("generated_column_count", 0);
Map<String, List<Integer>> map = new HashMap<>(cnt);
for (int i = 0; i < cnt; i++) {
String alias = readkv("generated_column_model_" + i);
if (! map.containsKey(alias)) {
map.put(alias, new LinkedList<Integer>());
}
List<Integer> indices = map.get(alias);
indices.add(i);
}
return map;
}
private String[] readGeneratedColumns() {
final int cnt = readkv("generated_column_count", 0);
final String[] names = new String[cnt];
for (int i = 0; i < names.length; i++) {
names[i] = readkv("generated_column_name_" + i, "");
}
return names;
}
@Override
protected MojoPipeline makeModel(String[] columns, String[][] domains, String responseColumn) {
return new MojoPipeline(columns, domains, responseColumn);
}
private int[] mapModelColumns(MojoModel subModel) {
return findIndices(_model._names, subModel._names, subModel._nfeatures, null, new String[0]);
}
private static int[] findIndices(String[] strings, String[] subset) {
return findIndices(strings, subset, subset.length, null, new String[0]);
}
private static int[] findIndices(String[] strings, String[] subset, int firstN, int[] outSubsetIdxs, String[] ignored) {
final int[] idx = new int[firstN - ignored.length];
assert outSubsetIdxs == null || outSubsetIdxs.length == idx.length;
int cnt = 0;
outer: for (int i = 0; i < firstN; i++) {
final String s = subset[i];
assert s != null;
for (String si : ignored) {
if (s.equals(si)) {
continue outer;
}
}
for (int j = 0; j < strings.length; j++) {
if (s.equals(strings[j])) {
if (outSubsetIdxs != null) {
outSubsetIdxs[cnt] = i;
}
idx[cnt++] = j;
continue outer;
}
}
throw new IllegalStateException("Pipeline doesn't have input column '" + subset[i] + "'.");
}
assert cnt == idx.length;
return idx;
}
@Override public String mojoVersion() {
return "1.00";
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/psvm/KernelParameters.java
|
package hex.genmodel.algos.psvm;
import java.io.Serializable;
public class KernelParameters implements Serializable {
public double _gamma = Double.NaN;
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/psvm/KernelType.java
|
package hex.genmodel.algos.psvm;
public enum KernelType {
gaussian
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/psvm/ScorerFactory.java
|
package hex.genmodel.algos.psvm;
public class ScorerFactory {
public static SupportVectorScorer makeScorer(KernelType kt, KernelParameters parms, byte[] svs) {
switch (kt) {
case gaussian:
return new GaussianScorer(parms, svs);
default:
throw new UnsupportedOperationException("Scoring for kernel " + kt + " is not yet implemented");
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/psvm/SupportVectorScorer.java
|
package hex.genmodel.algos.psvm;
import hex.genmodel.utils.ByteBufferWrapper;
import java.io.Serializable;
public interface SupportVectorScorer extends Serializable {
double score0(double[] data);
}
class GaussianScorer implements SupportVectorScorer {
private final double _gamma;
private final byte[] _svs;
GaussianScorer(KernelParameters parms, byte[] svs) {
this(parms._gamma, svs);
}
GaussianScorer(double gamma, byte[] svs) {
_gamma = gamma;
_svs = svs;
}
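// Gaussian (RBF) kernel sum over the support vectors: sum_i alpha_i * exp(-gamma * ||x - sv_i||^2);
// a mismatched categorical level adds 2 to the squared distance (two one-hot bits differ).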
public double score0(double[] row) {
double result = 0;
ByteBufferWrapper bb = new ByteBufferWrapper(_svs);
while (bb.hasRemaining()) {
final double alpha = bb.get8d();
double norm = 0;
final int cats = bb.get4();
for (int i = 0; i < cats; i++) {
norm += (int) row[i] == bb.get4() ? 0 : 2;
}
final int nums = bb.get4();
for (int i = 0; i < nums; i++) {
double v = row[i + cats] - bb.get8d();
norm += v * v;
}
result += alpha * Math.exp(-_gamma * norm);
}
return result;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/rulefit/MojoCondition.java
|
package hex.genmodel.algos.rulefit;
import java.io.Serializable;
public class MojoCondition implements Serializable {
public enum Type {Categorical, Numerical}
public enum Operator {LessThan, GreaterThanOrEqual, In}
int _featureIndex;
Type _type;
public Operator _operator;
public String _featureName;
public boolean _NAsIncluded;
public String _languageCondition;
public double _numThreshold;
public String[] _languageCatThreshold;
public int[] _catThreshold;
void map(double[] cs, byte[] out) {
double col = cs[_featureIndex];
byte newVal = 0;
if (out[0] == (byte)0) {
return;
}
boolean isNA = Double.isNaN(col);
// check whether condition is fulfilled:
if (_NAsIncluded && isNA) {
newVal = 1;
} else if (!isNA) {
if (Type.Numerical.equals(_type)) {
if (Operator.LessThan.equals(_operator)) {
if (col < _numThreshold) {
newVal = 1;
}
} else if (Operator.GreaterThanOrEqual.equals(_operator)) {
if (col >= _numThreshold) {
newVal = 1;
}
}
} else if (Type.Categorical.equals(_type)) {
for (int threshold : _catThreshold) {
if (threshold == col) {
newVal = 1;
}
}
}
}
out[0] = newVal;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/rulefit/MojoRule.java
|
package hex.genmodel.algos.rulefit;
import java.io.Serializable;
public class MojoRule implements Serializable {
MojoCondition[] _conditions;
double _predictionValue;
String _languageRule;
double _coefficient;
String _varName;
double _support;
public void map(double[] cs, byte[] out) {
for (MojoCondition c : _conditions) {
c.map(cs, out);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/rulefit/MojoRuleEnsemble.java
|
package hex.genmodel.algos.rulefit;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class MojoRuleEnsemble implements Serializable {
MojoRule[][][] _orderedRules;
public MojoRuleEnsemble(MojoRule[][][] orderedRules) {
this._orderedRules = orderedRules;
}
public double[] transformRow(double[] row, int depth, int ntrees, String[] linearModelNames, String[][] linearModelDomains, String[] classes) {
boolean isMultinomial = classes != null && classes.length > 2;
double[] transformedRow = isMultinomial ? new double[depth * ntrees * classes.length] : new double[depth * ntrees];
for (int i = 0; i < depth; i++) {
for (int j = 0; j < ntrees; j++) {
MojoRule[] filteredOrderedRules = _orderedRules[i][j];
if (isMultinomial) {
List<MojoRule>[] classRules = new ArrayList[classes.length];
for (int k = 0; k < filteredOrderedRules.length; k++) {
for (int l = 0; l < classes.length; l++) {
if (filteredOrderedRules[k]._varName.endsWith(classes[l])) {
if (classRules[l] == null) {
classRules[l] = new ArrayList<>();
}
classRules[l].add(filteredOrderedRules[k]);
}
}
}
for (int k = 0; k < classes.length; k++) {
transformedRow[i * ntrees * classes.length + j * classes.length + k] = decode(transform(row, classRules[k].toArray(new MojoRule[0])), classRules[k].toArray(new MojoRule[0]), linearModelNames, linearModelDomains, k);
}
} else {
transformedRow[i * ntrees + j] = decode(transform(row, _orderedRules[i][j]), filteredOrderedRules, linearModelNames, linearModelDomains, -1);
}
}
}
return transformedRow;
}
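// Descriptive note: decode(..) turns the 0/1 rule-activation vector into the categorical level
// index expected by the linear model: the domain index of the fired rule's variable (the last
// one, if several fire), or NaN when no rule fired.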
static double decode(double[] cs, MojoRule[] rules, String[] linearModelNames, String[][] linearModelDomains, int classId) {
int newValue = -1;
for (int iCol = 0; iCol < cs.length; iCol++) {
if (cs[iCol] == 1) {
newValue = getValueByVarName(rules[iCol]._varName, linearModelNames, linearModelDomains, classId);
}
}
if (newValue >= 0)
return newValue;
else
return Double.NaN;
}
static int getValueByVarName(String varname, String[] linearModelNames, String[][] linearModelDomains, int classId) {
String var = varname.substring(0,varname.indexOf('N'));
if (classId != -1) {
var += "C" + classId;
}
int i = Arrays.asList(linearModelNames).indexOf(var);
return Arrays.asList(linearModelDomains[i]).indexOf(varname);
}
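// Descriptive note: transform(..) evaluates each rule on the row; out[0] starts at 1 and the
// rule's conditions AND together by zeroing it, yielding a 0/1 activation per rule.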
static double[] transform(double[] row, MojoRule[] rules) {
double[] transformedRow = new double[rules.length];
byte[] out = new byte[] {1};
for (int i = 0; i < rules.length; i++) {
out[0] = 1;
rules[i].map(row, out);
transformedRow[i] = out[0];
}
return transformedRow;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/rulefit/RuleFitMojoModel.java
|
package hex.genmodel.algos.rulefit;
import hex.genmodel.MojoModel;
import java.util.Arrays;
import java.util.List;
public class RuleFitMojoModel extends MojoModel {
public enum ModelType {LINEAR, RULES_AND_LINEAR, RULES}
MojoModel _linearModel;
MojoRuleEnsemble _ruleEnsemble;
int _depth;
int _ntrees;
ModelType _modelType;
String[] _dataFromRulesCodes;
String _weightsColumn;
String[] _linearNames;
RuleFitMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
@Override
public double[] score0(double[] row, double[] preds) {
double[] linearFromRules = null;
int testsize = 0;
if (!_modelType.equals(ModelType.LINEAR)) {
linearFromRules = _ruleEnsemble.transformRow(row, _depth, _ntrees, _linearModel._names, _linearModel._domains, this._domains[ Arrays.asList(this._names).indexOf(this._responseColumn)]);
testsize += linearFromRules.length;
if (_modelType.equals(ModelType.RULES_AND_LINEAR)) {
testsize += row.length;
}
}
double[] test = new double[testsize];
if (_modelType.equals(ModelType.RULES_AND_LINEAR) || _modelType.equals(ModelType.RULES)) {
System.arraycopy(linearFromRules, 0, test, 0, linearFromRules.length);
}
if (_modelType.equals(ModelType.RULES_AND_LINEAR)) {
System.arraycopy(row, 0, test, linearFromRules.length, row.length);
}
if (_modelType.equals(ModelType.LINEAR)) {
test = row;
}
double[] linearModelInput = map(test);
_linearModel.score0(linearModelInput, preds);
return preds;
}
double[] map(double[] test) {
double[] newtest = new double[_linearModel.nfeatures()];
List<String> list = Arrays.asList(_linearModel._names);
for (int i = 0; i < _linearModel.nfeatures(); i++) {
int id = list.indexOf(_linearNames[i]);
newtest[id] = test[i];
}
return newtest;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/rulefit/RuleFitMojoReader.java
|
package hex.genmodel.algos.rulefit;
import hex.genmodel.MultiModelMojoReader;
import java.io.IOException;
public class RuleFitMojoReader extends MultiModelMojoReader<RuleFitMojoModel> {
@Override
protected void readParentModelData() throws IOException {
_model._linearModel = getModel((String) readkv("linear_model"));
int modelType = readkv("model_type");
if (modelType == 0) {
_model._modelType = RuleFitMojoModel.ModelType.LINEAR;
} else if (modelType == 1) {
_model._modelType = RuleFitMojoModel.ModelType.RULES_AND_LINEAR;
} else {
_model._modelType = RuleFitMojoModel.ModelType.RULES;
}
_model._depth = readkv("depth");
_model._ntrees = readkv("ntrees");
if (!_model._modelType.equals(RuleFitMojoModel.ModelType.LINEAR)) {
_model._ruleEnsemble = readRuleEnsemble();
}
int len = readkv("data_from_rules_codes_len");
_model._dataFromRulesCodes = new String[len];
for (int i = 0; i < len; i++) {
_model._dataFromRulesCodes[i] = readkv("data_from_rules_codes_" + i);
}
_model._weightsColumn = readkv("weights_column");
len = readkv("linear_names_len");
_model._linearNames = new String[len];
for (int i = 0; i < len; i++) {
_model._linearNames[i] = readkv("linear_names_" + i);
}
}
@Override
public String getModelName() {
return "rulefit";
}
@Override
protected RuleFitMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
return new RuleFitMojoModel(columns, domains, responseColumn);
}
@Override
public String mojoVersion() {
return "1.00";
}
MojoRuleEnsemble readRuleEnsemble() throws IOException {
return new MojoRuleEnsemble(readOrderedRuleEnsemble());
}
MojoRule[][][] readOrderedRuleEnsemble() throws IOException {
MojoRule[][][] orderedRules = new MojoRule[_model._depth][_model._ntrees][];
for (int i = 0; i < _model._depth; i++) {
for (int j = 0; j < _model._ntrees; j++) {
int currNumRules = readkv("num_rules_M".concat(String.valueOf(i)).concat("T").concat(String.valueOf(j)));
MojoRule[] currRules = new MojoRule[currNumRules];
String currIdPrefix = i + "_" + j + "_";
for (int k = 0; k < currNumRules; k++) {
currRules[k] = readRule(currIdPrefix + k);
}
orderedRules[i][j] = currRules;
}
}
return orderedRules;
}
MojoRule readRule(String ruleId) throws IOException {
MojoRule rule = new MojoRule();
int numConditions = readkv("num_conditions_rule_id_" + ruleId);
MojoCondition[] conditions = new MojoCondition[numConditions];
for (int i = 0; i < numConditions; i++) {
conditions[i] = readCondition(i, ruleId);
}
rule._conditions = conditions;
rule._predictionValue = readkv("prediction_value_rule_id_" + ruleId);
rule._languageRule = readkv("language_rule_rule_id_" + ruleId);
rule._coefficient = readkv("coefficient_rule_id_" + ruleId);
rule._varName = readkv("var_name_rule_id_" + ruleId);
if (readkv("support_rule_id_" + ruleId) != null)
rule._support = readkv("support_rule_id_" + ruleId);
else
rule._support = Double.NaN;
return rule;
}
MojoCondition readCondition(int conditionId, String ruleId) {
MojoCondition condition = new MojoCondition();
String conditionIdentifier = conditionId + "_" + ruleId;
condition._featureIndex = readkv("feature_index_" + conditionIdentifier);
int type = readkv("type_" + conditionIdentifier);
if (type == 0) {
condition._type = MojoCondition.Type.Categorical;
int languageCatThresholdLength = readkv("language_cat_treshold_length_" + conditionIdentifier);
String[] languageCatThreshold = new String[languageCatThresholdLength];
for (int i = 0; i < languageCatThresholdLength; i++) {
languageCatThreshold[i] = readkv("language_cat_treshold_" + i + "_" + conditionIdentifier).toString();
}
condition._languageCatThreshold = languageCatThreshold;
int catThresholdLength = readkv("cat_treshold_length_" + conditionIdentifier);
int[] catThreshold = new int[catThresholdLength];
for (int i = 0; i < catThresholdLength; i++) {
catThreshold[i] = readkv("cat_treshold_length_" + i + "_" + conditionIdentifier); // key name kept verbatim from the MOJO format
}
condition._catThreshold = catThreshold;
} else {
condition._type = MojoCondition.Type.Numerical;
condition._numThreshold = readkv("num_treshold" + conditionIdentifier);
}
int operator = readkv("operator_" + conditionIdentifier);
if (operator == 0) {
condition._operator = MojoCondition.Operator.LessThan;
} else if (operator == 1) {
condition._operator = MojoCondition.Operator.GreaterThanOrEqual;
} else {
condition._operator = MojoCondition.Operator.In;
}
condition._featureName = readkv("feature_name_" + conditionIdentifier);
condition._NAsIncluded = readkv("nas_included_" + conditionIdentifier);
condition._languageCondition = readkv("language_condition" + conditionIdentifier);
return condition;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/svm/SvmMojoModel.java
|
package hex.genmodel.algos.svm;
import hex.genmodel.MojoModel;
public class SvmMojoModel extends MojoModel {
boolean meanImputation;
double[] weights;
double[] means;
double interceptor;
double defaultThreshold;
double threshold;
SvmMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
@Override
public double[] score0(double[] row, double[] preds) {
java.util.Arrays.fill(preds, 0);
double pred = interceptor;
for (int i = 0; i < row.length; i++) {
if (Double.isNaN(row[i]) && meanImputation) {
pred += (means[i] * weights[i]);
} else {
pred += (row[i] * weights[i]);
}
}
if (_nclasses == 1) {
preds[0] = pred;
} else {
if (pred > threshold) {
preds[2] = pred < defaultThreshold ? defaultThreshold : pred;
preds[1] = preds[2] - 1;
preds[0] = 1;
} else {
preds[2] = pred >= defaultThreshold ? defaultThreshold - 1 : pred;
preds[1] = preds[2] + 1;
preds[0] = 0;
}
}
return preds;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/svm/SvmMojoReader.java
|
package hex.genmodel.algos.svm;
import hex.genmodel.ModelMojoReader;
import java.io.IOException;
public class SvmMojoReader extends ModelMojoReader<SvmMojoModel> {
@Override
public String getModelName() {
return "SVM";
}
@Override
protected void readModelData() throws IOException {
_model.meanImputation = readkv("meanImputation");
if(_model.meanImputation) {
_model.means = readkv("means");
}
_model.weights = readkv("weights");
_model.interceptor = readkv("interceptor");
_model.defaultThreshold = readkv("defaultThreshold");
_model.threshold = readkv("threshold");
}
@Override
protected SvmMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
return new SvmMojoModel(columns, domains, responseColumn);
}
@Override public String mojoVersion() {
return "1.00";
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/targetencoder/ColumnsMapping.java
|
package hex.genmodel.algos.targetencoder;
import java.io.Serializable;
public class ColumnsMapping implements Serializable {
private String[] _from;
private String[] _to;
public ColumnsMapping(String[] from, String[] to) {
_from = from;
_to = to;
}
public String[] from() {
return _from;
}
public String[] to() {
return _to;
}
}
class ColumnsToSingleMapping extends ColumnsMapping {
private String[] _toDomain;
private long[] _toDomainAsNum;
public ColumnsToSingleMapping(String[] from, String to, String[] toDomain) {
super(from, new String[]{to});
_toDomain = toDomain;
_toDomainAsNum = stringArrayToLong(toDomain);
}
public String toSingle() {
return to()[0];
}
public String[] toDomain() {
return _toDomain;
}
public long[] toDomainAsNum() {
return _toDomainAsNum;
}
private static long[] stringArrayToLong(String[] arr) {
if (arr == null) return null;
try {
long[] res = new long[arr.length];
for (int i=0; i < arr.length; i++) {
res[i] = Long.parseLong(arr[i]);
}
return res;
} catch (NumberFormatException nfe) {
return null;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/targetencoder/EncodingMap.java
|
package hex.genmodel.algos.targetencoder;
import java.io.Serializable;
import java.util.*;
public class EncodingMap implements Serializable {
private static final Integer NO_TARGET_CLASS = -1;
/**
* Represents mapping from categorical level index to:
* - a 2-element array of `numerator` and `denominator` for regression and binary problems.
* - a 3-element array of `numerator`, `denominator` and `targetclass` for multiclass problems.
* Those are then used to calculate the target frequencies.
* Note that the last index (or group of indices) is reserved for the NA level, and we rely on this fact.
*
* Example:
* a binary mapping (regression is similar, with the numerator accepting any numerical value):
* Map (
* 0 = "A" -> [ 4, 7 ],
* 1 = "B" -> [ 2, 8 ],
* 2 = "C" -> [ 7, 12 ],
* 3 = "COL_NAME_NA" -> [ 5, 6 ],
* )
*
* a multiclass ('y' = 0, 'n' = 1, 'maybe' = 2, 'NA' = 3) mapping:
* Map (
* 0 = "A" -> Map (
* "y" = 0 -> [ 4, 7 ],
* "n" = 1 -> [ 2, 7 ],
* "maybe" = 2 -> [ 1, 7 ]
* "NA" = 3 -> [ 0, 7 ]
* ),
* 1 = "B" -> Map (
* "y" = 0 -> [ 2, 8 ],
* "n" = 1 -> [ 3, 8 ],
* "maybe" = 2 -> [ 3, 8 ]
* "NA" = 3 -> [ 0, 8 ]
* ),
* ...
* )
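*
* A minimal usage sketch (hypothetical values, mirroring the binary example above):
* <pre>{@code
* EncodingMap em = new EncodingMap(2);       // binary problem
* em.add(0, new double[]{4, 7});             // level "A": numerator 4, denominator 7
* em.add(1, new double[]{2, 8});             // level "B": numerator 2, denominator 8
* double[] numDen = em.getNumDen(0);         // -> [4, 7]
* double posterior = numDen[0] / numDen[1];  // observed target frequency for level "A"
* }</pre>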
*/
private Map<Integer, Map<Integer, double[]>> _encodingMap = new HashMap<>();
private Map<Integer, Double> priors = new HashMap<>();
private int _nclasses; // 1: regression, 2: binary, 2+: multiclass
public EncodingMap(int nclasses) {
_nclasses = nclasses;
}
public double[] getNumDen(int category) {
Map<Integer, double[]> targetMap = _encodingMap.get(category);
assert _nclasses == 1 || _nclasses == 2;
assert targetMap.size() == 1;
return targetMap.get(NO_TARGET_CLASS);
}
public double[] getNumDen(int category, int targetClass) {
Map<Integer, double[]> targetMap = _encodingMap.get(category);
assert _nclasses > 2;
assert targetMap.size() > 1;
return targetMap.get(targetClass);
}
public int getNACategory() {
return _encodingMap.size() - 1;
}
public void add(int categorical, double[] encodingComponents) {
if (_nclasses <= 2) { // regression + binary
assert encodingComponents.length == 2;
_encodingMap.put(categorical, Collections.singletonMap(NO_TARGET_CLASS, encodingComponents));
} else { // multiclass
assert encodingComponents.length == 3;
if (!_encodingMap.containsKey(categorical))
_encodingMap.put(categorical, new HashMap<Integer, double[]>());
Integer targetClass = (int)encodingComponents[encodingComponents.length-1];
double[] numDen = Arrays.copyOf(encodingComponents, 2);
_encodingMap.get(categorical).put(targetClass, numDen);
}
}
public double getPriorMean() {
assert _nclasses == 1 || _nclasses == 2;
if (!priors.containsKey(NO_TARGET_CLASS)) {
priors.put(NO_TARGET_CLASS, doComputePriorMean(NO_TARGET_CLASS));
}
return priors.get(NO_TARGET_CLASS);
}
public double getPriorMean(int targetClass) {
assert _nclasses > 2;
assert targetClass >= 0 && targetClass < _nclasses;
if (!priors.containsKey(targetClass)) {
priors.put(targetClass, doComputePriorMean(targetClass));
}
return priors.get(targetClass);
}
private double doComputePriorMean(int targetClass) {
double num = 0;
double den = 0;
for (Map<Integer, double[]> targetMapping : _encodingMap.values()) {
double[] numDen = targetMapping.get(targetClass);
num += numDen[0];
den += numDen[1];
}
return num/den;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/targetencoder/EncodingMaps.java
|
package hex.genmodel.algos.targetencoder;
import java.io.Serializable;
import java.util.*;
import java.util.Map.Entry;
public class EncodingMaps implements Iterable<Entry<String, EncodingMap>>, Serializable {
/**
* Outer Map stores encoding maps for each categorical column.
*
* Example:
* Map(
*   "categorical_col_name_1" -> EncodingMap ( ... ),
*   "categorical_col_name_2" -> EncodingMap ( ... )
* )
*/
private Map<String, EncodingMap> _encodingMaps;
public EncodingMaps(Map<String, EncodingMap> encodingMaps) {
_encodingMaps = encodingMaps;
}
public EncodingMaps() {
_encodingMaps = new HashMap<>();
}
public EncodingMap get(String categoricalCol) {
return _encodingMaps.get(categoricalCol);
}
public EncodingMap put(String categoricalCol, EncodingMap encodingMap) {
return _encodingMaps.put(categoricalCol, encodingMap);
}
public Map<String, EncodingMap> encodingMap() {
return _encodingMaps;
}
public Set<String> getColumns() {
return Collections.unmodifiableSet(_encodingMaps.keySet());
}
@Override
public Iterator<Entry<String, EncodingMap>> iterator() {
return _encodingMaps.entrySet().iterator();
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/targetencoder/TargetEncoderMojoModel.java
|
package hex.genmodel.algos.targetencoder;
import hex.genmodel.MojoModel;
import java.io.Serializable;
import java.util.*;
public class TargetEncoderMojoModel extends MojoModel {
public static double computeLambda(long nrows, double inflectionPoint, double smoothing) {
return 1.0 / (1 + Math.exp((inflectionPoint - nrows) / smoothing));
}
public static double computeBlendedEncoding(double lambda, double posteriorMean, double priorMean) {
return lambda * posteriorMean + (1 - lambda) * priorMean;
}
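// Worked example (illustrative numbers only): with inflectionPoint = 10, smoothing = 20
// and nrows = 30, lambda = 1 / (1 + exp((10 - 30) / 20)) = 1 / (1 + exp(-1)) ~= 0.731,
// so the blended encoding is ~73% posterior mean and ~27% prior mean.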
static Map<String, Integer> name2Idx(String[] columns) {
Map<String, Integer> nameToIdx = new HashMap<>(columns.length);
for (int i = 0; i < columns.length; i++) {
nameToIdx.put(columns[i], i);
}
return nameToIdx;
}
public final Map<String, Integer> _columnNameToIdx;
public Map<String, Boolean> _teColumn2HasNAs; // tells if a given encoded column has NAs
public boolean _withBlending;
public double _inflectionPoint;
public double _smoothing;
public List<ColumnsToSingleMapping> _inencMapping;
public List<ColumnsMapping> _inoutMapping;
List<String> _nonPredictors;
Map<String, EncodingMap> _encodingsByCol;
boolean _keepOriginalCategoricalColumns;
/**
* Whether during training of the model unknown categorical level was imputed with NA level.
* It determines whether we use the posterior probability of the NA level, or the prior probability when no substitution took place.
* TODO Default value is hardcoded to `true` as we need to investigate PUBDEV-6704
*/
private final boolean _imputeUnknownLevels = true;
public TargetEncoderMojoModel(String[] columns, String[][] domains, String responseName) {
super(columns, domains, responseName);
_columnNameToIdx = name2Idx(columns);
}
protected void init() {
if (_encodingsByCol == null) return;
if (_inencMapping == null) _inencMapping = new ArrayList<>();
if (_inoutMapping == null) _inoutMapping = new ArrayList<>();
if (_inencMapping.isEmpty() && _inoutMapping.isEmpty()) { // backwards compatibility for old mojos
for (String col : _encodingsByCol.keySet()) {
String[] in = new String[]{col};
// String[] domain = getDomainValues(col);
_inencMapping.add(new ColumnsToSingleMapping(in, col, null));
String[] out = new String[getNumEncColsPerPredictor()];
if (out.length > 1) {
for (int i = 0; i < out.length; i++) {
out[i] = col+"_"+(i+1)+"_te"; // better than nothing: (i+1) is the categorical value of the matching target
}
} else {
out[0] = col+"_te";
}
_inoutMapping.add(new ColumnsMapping(in, out));
}
}
}
protected void setEncodings(EncodingMaps encodingMaps) {
_encodingsByCol = encodingMaps.encodingMap();
}
@Override
public int getPredsSize() {
return _encodingsByCol == null ? 0 : _encodingsByCol.size() * getNumEncColsPerPredictor();
}
int getNumEncColsPerPredictor() {
return nclasses() > 1
? (nclasses()-1) // for classification we need to encode only n-1 classes
: 1; // for regression
}
@Override
public double[] score0(double[] row, double[] preds) {
if (_encodingsByCol == null) throw new IllegalStateException("Encoding map is missing.");
int predsIdx = 0;
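// preds is filled contiguously: for each encoded column (in _inencMapping order) we
// write getNumEncColsPerPredictor() values -- one for regression/binary, nclasses()-1
// for multiclass.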
for (ColumnsToSingleMapping colMap : _inencMapping) {
String[] colGroup = colMap.from();
String teColumn = colMap.toSingle();
EncodingMap encodings = _encodingsByCol.get(teColumn);
int[] colsIdx = columnsIndices(colGroup);
double category;
if (colsIdx.length == 1) {
category = row[colsIdx[0]];
} else {
assert colMap.toDomainAsNum() != null : "Missing domain for interaction between columns "+Arrays.toString(colGroup);
category = interactionValue(row, colsIdx, colMap.toDomainAsNum());
}
int filled;
if (Double.isNaN(category)) {
filled = encodeNA(preds, predsIdx, encodings, teColumn);
} else {
//It is assumed that categorical levels are only represented with int values
filled = encodeCategory(preds, predsIdx, encodings, (int)category);
}
predsIdx += filled;
}
return preds;
}
public EncodingMap getEncodings(String column) {
return _encodingsByCol.get(column);
}
private int[] columnsIndices(String[] names) {
int[] indices = new int[names.length];
for (int i=0; i < indices.length; i++) {
indices[i] = _columnNameToIdx.get(names[i]);
}
return indices;
}
/**
* a condensed version of the encoding logic as implemented for the training phase in {@link ai.h2o.targetencoding.interaction.InteractionsEncoder}
*/
private double interactionValue(double[] row, int[] colsIdx, long[] interactionDomain) {
// computing interaction value (see InteractionsEncoder)
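// Worked example (illustrative): for two columns with domain sizes 3 and 4 and row
// values (1, 2), interaction = 1*1 + 2*(3+1) = 9; NaN or out-of-domain values are
// mapped to the domain size itself before being folded in.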
long interaction = 0;
long multiplier = 1;
for (int colIdx : colsIdx) {
double val = row[colIdx];
int domainCard = getDomainValues(colIdx).length;
if (Double.isNaN(val) || val >= domainCard) val = domainCard;
interaction += multiplier * val;
multiplier *= (domainCard + 1);
}
int catVal = Arrays.binarySearch(interactionDomain, interaction);
return catVal < 0 ? Double.NaN : catVal;
}
private double computeEncodedValue(double[] numDen, double priorMean) {
double posteriorMean = numDen[0] / numDen[1];
if (_withBlending) {
long nrows = (long)numDen[1];
double lambda = computeLambda(nrows, _inflectionPoint, _smoothing);
return computeBlendedEncoding(lambda, posteriorMean, priorMean);
} else {
return posteriorMean;
}
}
int encodeCategory(double[] result, int startIdx, EncodingMap encodings, int category) {
if (nclasses() > 2) {
for (int i=0; i<nclasses()-1; i++) {
int targetClass = i+1; //for symmetry with binary, ignoring class 0
double[] numDen = encodings.getNumDen(category, targetClass);
double priorMean = encodings.getPriorMean(targetClass);
result[startIdx+i] = computeEncodedValue(numDen, priorMean);
}
return nclasses()-1;
} else {
double[] numDen = encodings.getNumDen(category);
double priorMean = encodings.getPriorMean();
result[startIdx] = computeEncodedValue(numDen, priorMean);
return 1;
}
}
int encodeNA(double[] result, int startIdx, EncodingMap encodings, String column) {
int filled = 0;
if (_imputeUnknownLevels) {
if (_teColumn2HasNAs.get(column)) {
filled = encodeCategory(result, startIdx, encodings, encodings.getNACategory());
} else { // imputation was enabled, but no missing values were seen in training data, so fall back to the prior mean
filled = encodeWithPriorMean(result, startIdx, encodings);
}
} else {
filled = encodeWithPriorMean(result, startIdx, encodings);
}
return filled;
}
private int encodeWithPriorMean(double[] preds, int startIdx, EncodingMap encodings) {
if (_nclasses > 2) {
for (int i=0; i<_nclasses-1; i++) {
preds[startIdx+i] = encodings.getPriorMean(i+1); //for symmetry with binary, ignoring class 0
}
return _nclasses-1;
} else {
preds[startIdx] = encodings.getPriorMean();
return 1;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/targetencoder/TargetEncoderMojoReader.java
|
package hex.genmodel.algos.targetencoder;
import hex.genmodel.ModelMojoReader;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class TargetEncoderMojoReader extends ModelMojoReader<TargetEncoderMojoModel> {
public static final String ENCODING_MAP_PATH = "feature_engineering/target_encoding/encoding_map.ini";
public static final String MISSING_VALUES_PRESENCE_MAP_PATH = "feature_engineering/target_encoding/te_column_name_to_missing_values_presence.ini";
public static final String INPUT_ENCODING_COLUMNS_MAPPING_PATH = "feature_engineering/target_encoding/input_encoding_columns_map.ini";
public static final String INPUT_OUTPUT_COLUMNS_MAPPING_PATH = "feature_engineering/target_encoding/input_output_columns_map.ini";
@Override
public String getModelName() {
return "TargetEncoder";
}
@Override
protected void readModelData() throws IOException {
_model._keepOriginalCategoricalColumns = readkv("keep_original_categorical_columns", false); // defaults to false for legacy TE Mojos
_model._withBlending = readkv("with_blending");
if(_model._withBlending) {
_model._inflectionPoint = readkv("inflection_point");
_model._smoothing = readkv("smoothing");
}
_model._nonPredictors = Arrays.asList((readkv("non_predictors", "")).split(";"));
_model.setEncodings(parseEncodingMap());
_model._teColumn2HasNAs = parseTEColumnsToHasNAs();
_model._inencMapping = parseInEncColumnsMapping(INPUT_ENCODING_COLUMNS_MAPPING_PATH);
_model._inoutMapping = parseInOutColumnsMapping(INPUT_OUTPUT_COLUMNS_MAPPING_PATH);
_model.init();
}
@Override
protected TargetEncoderMojoModel makeModel(String[] columns, String[][] domains, String responseColumn) {
return new TargetEncoderMojoModel(columns, domains, responseColumn);
}
private Map<String, Boolean> parseTEColumnsToHasNAs() throws IOException {
Map<String, Boolean> cols2HasNAs = new HashMap<>();
if (exists(MISSING_VALUES_PRESENCE_MAP_PATH)) {
Iterable<String> parsedFile = readtext(MISSING_VALUES_PRESENCE_MAP_PATH);
for (String line : parsedFile) {
String[] indexAndPresence = line.split("\\s*=\\s*", 2);
cols2HasNAs.put(indexAndPresence[0], Integer.parseInt(indexAndPresence[1]) == 1);
}
}
return cols2HasNAs;
}
protected EncodingMaps parseEncodingMap() throws IOException {
if (!exists(ENCODING_MAP_PATH)) {
return null;
}
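// Expected file layout (a sketch inferred from this parser, not an authoritative spec):
//   [encoded_column_name]
//   0 = 4 7
//   1 = 2 8
//   ...
// Each section holds one encoded column; each line maps a categorical level index to
// space-separated encoding components (numerator, denominator, and for multiclass the
// target class as a trailing entry).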
Map<String, EncodingMap> encodingMaps = new HashMap<>();
try (BufferedReader source = getMojoReaderBackend().getTextFile(ENCODING_MAP_PATH)) {
EncodingMap colEncodingMap = new EncodingMap(_model.nclasses());
String sectionName = null;
String line;
while (true) {
line = source.readLine();
if (line == null) { // EOF
encodingMaps.put(sectionName, colEncodingMap);
break;
}
line = line.trim();
String matchSection = matchNewSection(line);
if (sectionName == null || matchSection != null) {
if (sectionName != null) encodingMaps.put(sectionName, colEncodingMap); // section completed
sectionName = matchSection;
colEncodingMap = new EncodingMap(_model.nclasses());
} else {
String[] res = line.split("\\s*=\\s*", 2);
double[] components = processEncodingsComponents(res[1].split(" "));
colEncodingMap.add(Integer.parseInt(res[0]), components);
}
}
}
return new EncodingMaps(encodingMaps);
}
private List<ColumnsMapping> parseInOutColumnsMapping(String fileName) throws IOException {
List<ColumnsMapping> mapping = new ArrayList<>();
for (List<String>[] entry : parseColumnsMapping(fileName)) {
mapping.add(new ColumnsMapping(
entry[0].toArray(new String[0]),
entry[1].toArray(new String[0])
));
}
return mapping;
}
private List<ColumnsToSingleMapping> parseInEncColumnsMapping(String fileName) throws IOException {
List<ColumnsToSingleMapping> mapping = new ArrayList<>();
for (List<String>[] entry : parseColumnsMapping(fileName)) {
mapping.add(new ColumnsToSingleMapping(
entry[0].toArray(new String[0]),
entry[1].get(0),
entry[2] == null ? null : entry[2].toArray(new String[0])
));
}
return mapping;
}
private List<List<String>[]> parseColumnsMapping(String fileName) throws IOException {
List<List<String>[]> mapping = new ArrayList<>();
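// Expected file layout (a sketch inferred from this parser; names are illustrative):
//   [from]
//   col_a
//   col_b
//   [to]
//   col_a:col_b
//   [to_domain]    <- optional section
//   0
//   1
// A repeated [from] header starts a new mapping entry.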
if (exists(fileName)) {
List<String> from = null;
List<String> to = null;
List<String> toDomain = null;
for (String line : readtext(fileName)) {
if ("[from]".equals(line)) {
if (from != null && to != null) mapping.add(new List[]{from, to, toDomain}); // add previous from-to entry
from = new ArrayList<>();
to = null;
toDomain = null;
} else if ("[to]".equals(line)) {
to = new ArrayList<>();
} else if ("[to_domain]".equals(line)) {
toDomain = new ArrayList<>();
} else {
if (toDomain != null)
toDomain.add(line);
else if (to != null)
to.add(line);
else
from.add(line);
}
}
if (from != null && to != null) mapping.add(new List[]{from, to, toDomain}); // add trailing from-to entry
}
return mapping;
}
private String matchNewSection(String line) {
Pattern pattern = Pattern.compile("\\[(.*?)\\]");
Matcher matcher = pattern.matcher(line);
if (matcher.find()) {
return matcher.group(1);
} else return null;
}
private double[] processEncodingsComponents(String[] componentsStr) {
// note that there may be additional entries in those arrays outside the numerator and denominator.
// for multiclass problems, the last entry corresponds to the target class associated with the num/den values.
double[] numDen = new double[componentsStr.length];
int i = 0;
for (String str : componentsStr) {
numDen[i] = Double.parseDouble(str);
i++;
}
return numDen;
}
@Override public String mojoVersion() {
return "1.00";
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/CalibrationMojoHelper.java
|
package hex.genmodel.algos.tree;
import hex.genmodel.algos.isotonic.IsotonicCalibrator;
import static hex.genmodel.GenModel.GLM_logitInv;
public class CalibrationMojoHelper {
public interface MojoModelWithCalibration {
double[] getCalibGlmBeta();
IsotonicCalibrator getIsotonicCalibrator();
}
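// Calibration sketch (derived from the code below; numbers are illustrative, and we
// assume GLM_logitInv is the standard inverse logit): with a Platt-scaling beta of
// [b0, b1], the calibrated positive-class probability is
// p = 1 / (1 + exp(-(b0 * preds[1] + b1))); e.g. b0 = 2, b1 = -1, preds[1] = 0.75
// gives p = 1 / (1 + exp(-0.5)) ~= 0.62.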
public static boolean calibrateClassProbabilities(MojoModelWithCalibration model, double[] preds) {
if (model.getCalibGlmBeta() != null) {
double p = GLM_logitInv((preds[1] * model.getCalibGlmBeta()[0]) + model.getCalibGlmBeta()[1]);
preds[1] = 1 - p;
preds[2] = p;
return true;
} else if (model.getIsotonicCalibrator() != null) {
double p = model.getIsotonicCalibrator().calibrateP1(preds[2]);
preds[1] = 1 - p;
preds[2] = p;
return true;
}
return false;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/ContributionComposer.java
|
package hex.genmodel.algos.tree;
import hex.genmodel.utils.ArrayUtils;
import java.util.Arrays;
public class ContributionComposer {
/**
* Sort #contribNameIds according to #contribs values and compose the desired output with the correct #topN and #bottomN fields
*
* @param contribNameIds Contribution corresponding feature ids
* @param contribs Raw contribution values
* @param topN Return only #topN highest #contribNameIds + bias.
* If topN<0 then sort all SHAP values in descending order
* If topN<0 && bottomN<0 then sort all SHAP values in descending order
* @param bottomN Return only #bottomN lowest #contribNameIds + bias
* If topN and bottomN are defined together then return array of #topN + #bottomN + bias
* If bottomN<0 then sort all SHAP values in ascending order
* If topN<0 && bottomN<0 then sort all SHAP values in descending order
* @param compareAbs True to compare absolute values of #contribs
* @return Sorted array of #contribNameIds for the corresponding contribution features.
* The size of the returned array is #topN + #bottomN + bias
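*
* Usage sketch (hypothetical values): with contribs = [0.5f, -1.0f, 0.2f, 0.1f], where the
* last entry is the bias term, topN = 1 and bottomN = 1 yield three ids: the feature with
* the highest contribution, then the one with the lowest, then the bias id.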
*/
public final int[] composeContributions(final int[] contribNameIds, final float[] contribs, int topN, int bottomN, boolean compareAbs) {
assert contribNameIds.length == contribs.length : "contribNameIds must have the same length as contribs";
if (returnOnlyTopN(topN, bottomN)) {
return composeSortedContributions(contribNameIds, contribs, topN, compareAbs, -1);
} else if (returnOnlyBottomN(topN, bottomN)) {
return composeSortedContributions(contribNameIds, contribs, bottomN, compareAbs,1);
} else if (returnAllTopN(topN, bottomN, contribs.length)) {
return composeSortedContributions(contribNameIds, contribs, contribs.length, compareAbs, -1);
}
composeSortedContributions(contribNameIds, contribs, contribNameIds.length, compareAbs,-1);
int[] bottomSorted = Arrays.copyOfRange(contribNameIds, contribNameIds.length - 1 - bottomN, contribNameIds.length);
reverse(bottomSorted, contribs, bottomSorted.length - 1);
int[] contribNameIdsTmp = Arrays.copyOf(contribNameIds, topN);
return ArrayUtils.append(contribNameIdsTmp, bottomSorted);
}
private boolean returnOnlyTopN(int topN, int bottomN) {
return topN != 0 && bottomN == 0;
}
private boolean returnOnlyBottomN(int topN, int bottomN) {
return topN == 0 && bottomN != 0;
}
private boolean returnAllTopN(int topN, int bottomN, int len) {
return (topN + bottomN) >= len || topN < 0 || bottomN < 0;
}
public int checkAndAdjustInput(int n, int len) {
if (n < 0 || n > len) {
return len;
}
return n;
}
private int[] composeSortedContributions(final int[] contribNameIds, final float[] contribs, int n, boolean compareAbs, int increasing) {
int nAdjusted = checkAndAdjustInput(n, contribs.length);
sortContributions(contribNameIds, contribs, compareAbs, increasing);
if (nAdjusted < contribs.length) {
int bias = contribNameIds[contribs.length-1];
int[] contribNameIdsSorted = Arrays.copyOfRange(contribNameIds, 0, nAdjusted + 1);
contribNameIdsSorted[nAdjusted] = bias;
return contribNameIdsSorted;
}
return contribNameIds;
}
private void sortContributions(final int[] contribNameIds, final float[] contribs, final boolean compareAbs, final int increasing) {
ArrayUtils.sort(contribNameIds, contribs, 0, contribs.length -1, compareAbs, increasing);
}
private void reverse(int[] contribNameIds, float[] contribs, int len) {
for (int i = 0; i < len/2; i++) {
if (contribs[contribNameIds[i]] != contribs[contribNameIds[len - i - 1]]) {
int tmp = contribNameIds[i];
contribNameIds[i] = contribNameIds[len - i - 1];
contribNameIds[len - i - 1] = tmp;
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/ContributionsPredictor.java
|
package hex.genmodel.algos.tree;
import hex.genmodel.PredictContributions;
import hex.genmodel.attributes.parameters.FeatureContribution;
import hex.genmodel.utils.ArrayUtils;
import java.util.Arrays;
public abstract class ContributionsPredictor<E> implements PredictContributions {
private final int _ncontribs;
private final String[] _contribution_names;
private final TreeSHAPPredictor<E> _treeSHAPPredictor;
private final int _workspaceSize;
private static final ThreadLocal<TreeSHAPPredictor.Workspace> _workspace = new ThreadLocal<>();
public ContributionsPredictor(int ncontribs, String[] featureContributionNames, TreeSHAPPredictor<E> treeSHAPPredictor) {
_ncontribs = ncontribs;
_contribution_names = ArrayUtils.append(featureContributionNames, "BiasTerm");
_treeSHAPPredictor = treeSHAPPredictor;
_workspaceSize = _treeSHAPPredictor.getWorkspaceSize();
}
@Override
public final String[] getContributionNames() {
return _contribution_names;
}
public final float[] calculateContributions(double[] input) {
float[] contribs = new float[_ncontribs];
_treeSHAPPredictor.calculateContributions(toInputRow(input), contribs, 0, -1, getWorkspace());
return getContribs(contribs);
}
protected abstract E toInputRow(double[] input);
public float[] getContribs(float[] contribs) {
return contribs;
}
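// The workspace is cached in a static ThreadLocal, so it may have been created by a
// different predictor instance on this thread; it is lazily replaced whenever its size
// does not match this predictor's requirement.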
private TreeSHAPPredictor.Workspace getWorkspace() {
TreeSHAPPredictor.Workspace workspace = _workspace.get();
if (workspace == null || workspace.getSize() != _workspaceSize) {
workspace = _treeSHAPPredictor.makeWorkspace();
assert workspace.getSize() == _workspaceSize;
_workspace.set(workspace);
}
return workspace;
}
@Override
public FeatureContribution[] calculateContributions(double[] input, int topN, int bottomN, boolean compareAbs) {
float[] contributions = calculateContributions(input);
int[] contributionNameIds = ArrayUtils.range(0, _contribution_names.length -1);
int[] sorted = (new ContributionComposer()).composeContributions(contributionNameIds, contributions, topN, bottomN, compareAbs);
FeatureContribution[] out = new FeatureContribution[sorted.length];
for (int i = 0; i < sorted.length; i++) {
out[i] = new FeatureContribution(_contribution_names[sorted[i]], contributions[sorted[i]]);
}
return out;
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/ConvertTreeOptions.java
|
package hex.genmodel.algos.tree;
public class ConvertTreeOptions {
static final ConvertTreeOptions DEFAULT = new ConvertTreeOptions();
final boolean _checkTreeConsistency;
public ConvertTreeOptions() {
this(false);
}
private ConvertTreeOptions(boolean checkTreeConsistency) {
_checkTreeConsistency = checkTreeConsistency;
}
/**
* Performs a self-check on each converted tree. Inconsistencies are reported in the log.
* @return a new instance of the options object with consistency-check flag enabled
*/
public ConvertTreeOptions withTreeConsistencyCheckEnabled() {
return new ConvertTreeOptions(true);
}
@Override
public String toString() {
return "ConvertTreeOptions{" +
"_checkTreeConsistency=" + _checkTreeConsistency +
'}';
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/NaSplitDir.java
|
package hex.genmodel.algos.tree;
/**
* Copy of `hex.tree.DHistogram.NASplitDir` in package `h2o-algos`.
*/
public enum NaSplitDir {
//never saw NAs in training
None(0), //initial state - should not be present in a trained model
// saw NAs in training
NAvsREST(1), //split off non-NA (left) vs NA (right)
NALeft(2), //NA goes left
NARight(3), //NA goes right
// never NAs in training, but have a way to deal with them in scoring
Left(4), //test time NA should go left
Right(5); //test time NA should go right
private int value;
NaSplitDir(int v) { this.value = v; }
public int value() { return value; }
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/ScoreIsolationTree.java
|
package hex.genmodel.algos.tree;
import java.io.Serializable;
public interface ScoreIsolationTree extends Serializable {
double scoreTree(byte[] tree, double[] row);
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/ScoreIsolationTree0.java
|
package hex.genmodel.algos.tree;
import hex.genmodel.algos.isoforextended.ExtendedIsolationForestMojoModel;
public final class ScoreIsolationTree0 implements ScoreIsolationTree {
@Override
public double scoreTree(byte[] tree, double[] row) {
return ExtendedIsolationForestMojoModel.scoreTree0(tree, row);
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/ScoreTree.java
|
package hex.genmodel.algos.tree;
import java.io.Serializable;
public interface ScoreTree extends Serializable {
double scoreTree(byte[] tree, double[] row, boolean computeLeafAssignment, String[][] domains);
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/ScoreTree0.java
|
package hex.genmodel.algos.tree;
public final class ScoreTree0 implements ScoreTree {
@Override
public final double scoreTree(byte[] tree, double[] row, boolean computeLeafAssignment, String[][] domains) {
return SharedTreeMojoModel.scoreTree0(tree, row, computeLeafAssignment);
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/ScoreTree1.java
|
package hex.genmodel.algos.tree;
public final class ScoreTree1 implements ScoreTree {
@Override
public final double scoreTree(byte[] tree, double[] row, boolean computeLeafAssignment, String[][] domains) {
return SharedTreeMojoModel.scoreTree1(tree, row, computeLeafAssignment);
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/ScoreTree2.java
|
package hex.genmodel.algos.tree;
public final class ScoreTree2 implements ScoreTree {
@Override
public final double scoreTree(byte[] tree, double[] row, boolean computeLeafAssignment, String[][] domains) {
return SharedTreeMojoModel.scoreTree(tree, row, computeLeafAssignment, domains);
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/SharedTreeGraph.java
|
package hex.genmodel.algos.tree;
import hex.genmodel.tools.PrintMojo;
import java.io.PrintStream;
import java.util.*;
/**
* Graph for representing a GBM or DRF forest.
* A graph contains subgraphs (trees).
*/
public class SharedTreeGraph {
public final ArrayList<SharedTreeSubgraph> subgraphArray = new ArrayList<>();
/**
* Make a new forest.
*/
public SharedTreeGraph() {
}
/**
* Make a new tree.
* @param name Tree name.
* @return The new tree.
*/
public SharedTreeSubgraph makeSubgraph(String name) {
SharedTreeSubgraph sg = new SharedTreeSubgraph(subgraphArray.size(), name);
subgraphArray.add(sg);
return sg;
}
/**
* Debug printout of graph structure.
* For developer use only.
*/
public void print() {
System.out.println("------------------------------------------------------------");
System.out.println("Graph");
for (SharedTreeSubgraph sg : subgraphArray) {
sg.print();
}
}
public SharedTreeNode walkNodes(int subgraphId, String path) {
return subgraphArray.get(subgraphId).walkNodes(path);
}
/**
* Print graph output in a format readable by dot (graphviz).
* @param os Stream to write the output to
* @param maxLevelsToPrintPerEdge Limit the number of individual categorical level names printed per edge
* @param detail include additional node detail information
* @param optionalTitle Optional title to override the default
* @param treeOptions a PrintTreeOptions instance controlling font size and the number of decimal places used for numerical values
*
*/
public void printDot(PrintStream os, int maxLevelsToPrintPerEdge, boolean detail, String optionalTitle, PrintMojo.PrintTreeOptions treeOptions) {
os.println("/*");
os.println("Generated by:");
os.println(" http://https://github.com/h2oai/h2o-3/tree/master/h2o-genmodel/src/main/java/hex/genmodel/tools/PrintMojo.java");
os.println("*/");
os.println("");
os.println("/*");
os.println("On a mac:");
os.println("");
os.println("$ brew install graphviz");
os.println("$ dot -Tpng file.gv -o file.png");
os.println("$ open file.png");
os.println("*/");
os.println("");
os.println("digraph G {");
for (SharedTreeSubgraph sg : subgraphArray) {
sg.printDot(os, maxLevelsToPrintPerEdge, detail, optionalTitle, treeOptions);
}
os.println("");
os.println("}");
os.println("");
}
public List<Map<String, Object>> toJson() {
List<Map<String, Object>> trees = new ArrayList<>();
for (SharedTreeSubgraph sg : subgraphArray) {
trees.add(sg.toJson());
}
return trees;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
SharedTreeGraph that = (SharedTreeGraph) o;
return Objects.equals(subgraphArray, that.subgraphArray);
}
@Override
public int hashCode() {
return Objects.hash(subgraphArray);
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/SharedTreeGraphConverter.java
|
package hex.genmodel.algos.tree;
/**
* Implementors of this interface are able to convert internal tree representation to a shared representation.
*/
public interface SharedTreeGraphConverter {
/**
* Converts internal tree representation to a shared representation.
*
* @param treeNumber Number of the tree in the model to convert
* @param treeClass Tree's class. If not specified, all the classes form a forest in the resulting {@link SharedTreeGraph}
* @param options Allows fine-tuning of the conversion process (e.g. disabling some internal consistency self-checks)
* @return An instance of {@link SharedTreeGraph} containing a single tree or a forest of multiple trees.
*/
SharedTreeGraph convert(final int treeNumber, final String treeClass, final ConvertTreeOptions options);
SharedTreeGraph convert(final int treeNumber, final String treeClass);
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/SharedTreeMojoModel.java
|
package hex.genmodel.algos.tree;
import hex.genmodel.CategoricalEncoding;
import hex.genmodel.MojoModel;
import hex.genmodel.algos.drf.DrfMojoModel;
import hex.genmodel.algos.gbm.GbmMojoModel;
import hex.genmodel.algos.isotonic.IsotonicCalibrator;
import hex.genmodel.utils.ByteBufferWrapper;
import hex.genmodel.utils.GenmodelBitSet;
import water.logging.Logger;
import water.logging.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
/**
* Common ancestor for {@link DrfMojoModel} and {@link GbmMojoModel}.
* See also: `hex.tree.SharedTreeModel` and `hex.tree.TreeVisitor` classes.
*/
public abstract class SharedTreeMojoModel extends MojoModel implements TreeBackedMojoModel, CalibrationMojoHelper.MojoModelWithCalibration {
private static final int NsdNaVsRest = NaSplitDir.NAvsREST.value();
private static final int NsdNaLeft = NaSplitDir.NALeft.value();
private static final int NsdLeft = NaSplitDir.Left.value();
private ScoreTree _scoreTree;
private static Logger logger = LoggerFactory.getLogger(SharedTreeMojoModel.class);
/**
* {@code _ntree_groups} is the number of trees requested by the user. For
* the binomial case or regression this is also the total number of trees
* trained; however, in the multinomial case each requested "tree" is actually
* represented as a group of trees, with {@code _ntrees_per_group} trees
* in each group. Each of these individual trees assesses the likelihood
* that a given observation belongs to class A, B, C, etc. of a
* multiclass response.
*/
protected int _ntree_groups;
protected int _ntrees_per_group;
/**
* Array of binary tree data, each tree being a {@code byte[]} array. The
* trees are logically grouped into a rectangular grid of dimensions
* {@link #_ntree_groups} x {@link #_ntrees_per_group}, however physically
* they are stored as 1-dimensional list, and an {@code [i, j]} logical
* tree is mapped to the index {@link #treeIndex(int, int)}.
*/
protected byte[][] _compressed_trees;
/**
* Array of auxiliary binary tree data, each being a {@code byte[]} array.
*/
protected byte[][] _compressed_trees_aux;
/**
* GLM's beta used for calibrating output probabilities using Platt Scaling.
*/
protected double[] _calib_glm_beta;
/**
* For calibrating using Isotonic Regression
*/
protected IsotonicCalibrator _isotonic_calibrator;
protected String _genmodel_encoding;
protected String[] _orig_names;
protected String[][] _orig_domain_values;
protected double[] _orig_projection_array;
protected void postInit() {
if (_mojo_version == 1.0) {
_scoreTree = new ScoreTree0(); // First version
} else if (_mojo_version == 1.1) {
_scoreTree = new ScoreTree1(); // Second version
} else
_scoreTree = new ScoreTree2(); // Current version
}
@Override
public final int getNTreeGroups() {
return _ntree_groups;
}
@Override
public final int getNTreesPerGroup() {
return _ntrees_per_group;
}
/**
* @deprecated use {@link #scoreTree0(byte[], double[], boolean)} instead.
*/
@Deprecated
public static double scoreTree0(byte[] tree, double[] row, int nclasses, boolean computeLeafAssignment) {
// note that nclasses is ignored (and in fact, always was)
return scoreTree0(tree, row, computeLeafAssignment);
}
/**
* @deprecated use {@link #scoreTree1(byte[], double[], boolean)} instead.
*/
@Deprecated
public static double scoreTree1(byte[] tree, double[] row, int nclasses, boolean computeLeafAssignment) {
// note that nclasses is ignored (and in fact, always was)
return scoreTree1(tree, row, computeLeafAssignment);
}
/**
* @deprecated use {@link #scoreTree(byte[], double[], boolean, String[][])} instead.
*/
@Deprecated
public static double scoreTree(byte[] tree, double[] row, int nclasses, boolean computeLeafAssignment, String[][] domains) {
// note that {@link nclasses} is ignored (and in fact, always was)
return scoreTree(tree, row, computeLeafAssignment, domains);
}
public static final int __INTERNAL_MAX_TREE_DEPTH = 64;
/**
* Highly efficient (critical path) tree scoring
*
* Given a tree (in the form of a byte array) and the row of input data, compute either this tree's
* predicted value when `computeLeafAssignment` is false, or the decision path within the tree (but no more
* than 64 levels) when `computeLeafAssignment` is true. If the path has 64 levels or more, Double.NaN is returned.
*
* Note: this function is also used from the `hex.tree.CompressedTree` class in `h2o-algos` project.
*/
@SuppressWarnings("ConstantConditions") // Complains that the code is too complex. Well duh!
public static double scoreTree(byte[] tree, double[] row, boolean computeLeafAssignment, String[][] domains) {
ByteBufferWrapper ab = new ByteBufferWrapper(tree);
GenmodelBitSet bs = null;
long bitsRight = 0;
int level = 0;
while (true) {
int nodeType = ab.get1U();
int colId = ab.get2();
if (colId == 65535) {
if (computeLeafAssignment) {
if (level >= __INTERNAL_MAX_TREE_DEPTH)
return Double.NaN;
bitsRight |= 1L << level; // mark the end of the tree
return Double.longBitsToDouble(bitsRight);
} else {
return ab.get4f();
}
}
int naSplitDir = ab.get1U();
boolean naVsRest = naSplitDir == NsdNaVsRest;
boolean leftward = naSplitDir == NsdNaLeft || naSplitDir == NsdLeft;
int lmask = (nodeType & 51);
int equal = (nodeType & 12); // Can be one of 0, 8, 12
assert equal != 4; // no longer supported
float splitVal = -1;
if (!naVsRest) {
// Extract value or group to split on
if (equal == 0) {
// Standard float-compare test (either < or ==)
splitVal = ab.get4f(); // Get the float to compare
} else {
// Bitset test
if (bs == null) bs = new GenmodelBitSet(0);
if (equal == 8)
bs.fill2(tree, ab);
else
bs.fill3(tree, ab);
}
}
// This logic:
//
// double d = row[colId];
// if (Double.isNaN(d) || ( equal != 0 && bs != null && !bs.isInRange((int)d) ) || (domains != null && domains[colId] != null && domains[colId].length <= (int)d)
// ? !leftward : !naVsRest && (equal == 0? d >= splitVal : bs.contains((int)d))) {
// Really does this:
//
// if (value is NaN or value is not in the range of the bitset or is outside the domain map length (but an integer) ) {
// if (leftward) {
// go left
// }
// else {
// go right
// }
// }
// else {
// if (naVsRest) {
// go left
// }
// else {
// if (numeric) {
// if (value < split value) {
// go left
// }
// else {
// go right
// }
// }
// else {
// if (value not in bitset) {
// go left
// }
// else {
// go right
// }
// }
// }
// }
double d = row[colId];
if (Double.isNaN(d) || ( equal != 0 && bs != null && !bs.isInRange((int)d) ) || (domains != null && domains[colId] != null && domains[colId].length <= (int)d)
? !leftward : !naVsRest && (equal == 0? d >= splitVal : bs.contains((int)d))) {
// go RIGHT
switch (lmask) {
case 0: ab.skip(ab.get1U()); break;
case 1: ab.skip(ab.get2()); break;
case 2: ab.skip(ab.get3()); break;
case 3: ab.skip(ab.get4()); break;
case 48: ab.skip(4); break; // skip the prediction
default:
assert false : "illegal lmask value " + lmask + " in tree " + Arrays.toString(tree);
}
if (computeLeafAssignment) {
if (level >= __INTERNAL_MAX_TREE_DEPTH)
return Double.NaN;
bitsRight |= 1L << level;
}
lmask = (nodeType & 0xC0) >> 2; // Replace leftmask with the rightmask
} else {
// go LEFT
if (lmask <= 3)
ab.skip(lmask + 1);
}
level++;
if ((lmask & 16) != 0) {
if (computeLeafAssignment) {
if (level >= __INTERNAL_MAX_TREE_DEPTH)
return Double.NaN;
bitsRight |= 1L << level; // mark the end of the tree
return Double.longBitsToDouble(bitsRight);
} else {
return ab.get4f();
}
}
}
}
@Override
public CategoricalEncoding getCategoricalEncoding() {
switch (_genmodel_encoding) {
case "AUTO":
case "Enum":
case "SortByResponse":
return CategoricalEncoding.AUTO;
case "OneHotExplicit":
return CategoricalEncoding.OneHotExplicit;
case "Binary":
return CategoricalEncoding.Binary;
case "EnumLimited":
return CategoricalEncoding.EnumLimited;
case "Eigen":
return CategoricalEncoding.Eigen;
case "LabelEncoder":
return CategoricalEncoding.LabelEncoder;
default:
return null;
}
}
@Override
public String[] getOrigNames() {
return _orig_names;
}
@Override
public double[] getOrigProjectionArray() {
return _orig_projection_array;
}
@Override
public String[][] getOrigDomainValues() {
return _orig_domain_values;
}
public interface DecisionPathTracker<T> {
boolean go(int depth, boolean right);
T terminate();
T invalidPath();
}
public static class StringDecisionPathTracker implements DecisionPathTracker<String> {
private final char[] _sb = new char[64];
private int _pos = 0;
@Override
public boolean go(int depth, boolean right) {
_sb[depth] = right ? 'R' : 'L';
if (right) _pos = depth;
return true;
}
@Override
public String terminate() {
String path = new String(_sb, 0, _pos);
_pos = 0;
return path;
}
@Override
public String invalidPath() {
return null;
}
}
public static class LeafDecisionPathTracker implements DecisionPathTracker<LeafDecisionPathTracker> {
private final AuxInfoLightReader _auxInfo;
private boolean _wentRight = false; // Was the last step _right_?
// OUT
private int _nodeId = 0; // Returned when the tree is empty (consistent with SharedTreeNode of an empty tree)
private LeafDecisionPathTracker(byte[] auxTree) {
_auxInfo = new AuxInfoLightReader(new ByteBufferWrapper(auxTree));
}
@Override
public boolean go(int depth, boolean right) {
if (!_auxInfo.hasNext()) {
assert _wentRight || depth == 0; // this can only happen if the previous step was _right_ or the tree has no nodes
return false;
}
_auxInfo.readNext();
if (right) {
if (_wentRight && _nodeId != _auxInfo._nid)
return false;
_nodeId = _auxInfo.getRightNodeIdAndSkipNode();
_auxInfo.skipNodes(_auxInfo._numLeftChildren);
_wentRight = true;
} else { // left
_wentRight = false;
if (_auxInfo._numLeftChildren == 0) {
_nodeId = _auxInfo.getLeftNodeIdAndSkipNode();
return false;
} else {
_auxInfo.skipNode(); // proceed to next _left_ node
}
}
return true;
}
@Override
public LeafDecisionPathTracker terminate() {
return this;
}
final int getLeafNodeId() {
return _nodeId;
}
@Override
public LeafDecisionPathTracker invalidPath() {
_nodeId = -1;
return this;
}
}
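// Decoding sketch (mirrors the leaf-assignment encoding in scoreTree; illustrative):
// each level contributes one bit of the double's raw bits (0 = left, 1 = right) and the
// terminating level's bit is always set as an end marker; e.g. raw bits 0b100 decode to
// the path "LL" (two left turns, then the leaf marker at level 2).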
public static <T> T getDecisionPath(double leafAssignment, DecisionPathTracker<T> tr) {
if (Double.isNaN(leafAssignment)) {
return tr.invalidPath();
}
long l = Double.doubleToRawLongBits(leafAssignment);
for (int i = 0; i < 64; ++i) {
boolean right = ((l>>i) & 0x1L) == 1;
if (! tr.go(i, right)) break;
}
return tr.terminate();
}
public static String getDecisionPath(double leafAssignment) {
return getDecisionPath(leafAssignment, new StringDecisionPathTracker());
}
public static int getLeafNodeId(double leafAssignment, byte[] auxTree) {
LeafDecisionPathTracker tr = new LeafDecisionPathTracker(auxTree);
return getDecisionPath(leafAssignment, tr).getLeafNodeId();
}
//------------------------------------------------------------------------------------------------------------------
// Computing a Tree Graph
//------------------------------------------------------------------------------------------------------------------
private static void computeTreeGraph(SharedTreeSubgraph sg, SharedTreeNode node, byte[] tree, ByteBufferWrapper ab, HashMap<Integer, AuxInfo> auxMap,
String names[], String[][] domains, ConvertTreeOptions options) {
int nodeType = ab.get1U();
int colId = ab.get2();
if (colId == 65535) {
float leafValue = ab.get4f();
node.setPredValue(leafValue);
return;
}
String colName = names[colId];
node.setCol(colId, colName);
int naSplitDir = ab.get1U();
boolean naVsRest = naSplitDir == NsdNaVsRest;
boolean leftward = naSplitDir == NsdNaLeft || naSplitDir == NsdLeft;
node.setLeftward(leftward);
node.setNaVsRest(naVsRest);
int lmask = (nodeType & 51);
int equal = (nodeType & 12); // Can be one of 0, 8, 12
assert equal != 4; // no longer supported
if (!naVsRest) {
// Extract value or group to split on
if (equal == 0) {
float splitVal = ab.get4f();
if (domains[colId] != null) {
node.setDomainValues(domains[colId]);
}
// Standard float-compare test (either < or ==)
node.setSplitValue(splitVal);
} else {
// Bitset test
GenmodelBitSet bs = new GenmodelBitSet(0);
if (equal == 8)
bs.fill2(tree, ab);
else
bs.fill3(tree, ab);
node.setBitset(domains[colId], bs);
}
}
AuxInfo auxInfo = auxMap.get(node.getNodeNumber());
// go RIGHT
{
ByteBufferWrapper ab2 = new ByteBufferWrapper(tree);
ab2.skip(ab.position());
switch (lmask) {
case 0:
ab2.skip(ab2.get1U());
break;
case 1:
ab2.skip(ab2.get2());
break;
case 2:
ab2.skip(ab2.get3());
break;
case 3:
ab2.skip(ab2.get4());
break;
case 48:
ab2.skip(4);
break; // skip the prediction
default:
assert false : "illegal lmask value " + lmask + " in tree " + Arrays.toString(tree);
}
int lmask2 = (nodeType & 0xC0) >> 2; // Replace leftmask with the rightmask
SharedTreeNode newNode = sg.makeRightChildNode(node);
newNode.setWeight(auxInfo.weightR);
newNode.setNodeNumber(auxInfo.nidR);
newNode.setPredValue(auxInfo.predR);
newNode.setSquaredError(auxInfo.sqErrR);
if ((lmask2 & 16) != 0) {
float leafValue = ab2.get4f();
newNode.setPredValue(leafValue);
auxInfo.predR = leafValue;
}
else {
computeTreeGraph(sg, newNode, tree, ab2, auxMap, names, domains, options);
}
}
// go LEFT
{
ByteBufferWrapper ab2 = new ByteBufferWrapper(tree);
ab2.skip(ab.position());
if (lmask <= 3)
ab2.skip(lmask + 1);
SharedTreeNode newNode = sg.makeLeftChildNode(node);
newNode.setWeight(auxInfo.weightL);
newNode.setNodeNumber(auxInfo.nidL);
newNode.setPredValue(auxInfo.predL);
newNode.setSquaredError(auxInfo.sqErrL);
if ((lmask & 16) != 0) {
float leafValue = ab2.get4f();
newNode.setPredValue(leafValue);
auxInfo.predL = leafValue;
}
else {
computeTreeGraph(sg, newNode, tree, ab2, auxMap, names, domains, options);
}
}
if (node.getNodeNumber() == 0) {
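// The root node has no stored prediction in the auxiliary data; reconstruct it as the
// weight-averaged prediction of its children, snapping tiny values to zero to absorb
// floating-point noise.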
float p = (float)(((double)auxInfo.predL*(double)auxInfo.weightL + (double)auxInfo.predR*(double)auxInfo.weightR)/((double)auxInfo.weightL + (double)auxInfo.weightR));
if (Math.abs(p) < 1e-7) p = 0;
node.setPredValue(p);
node.setSquaredError(auxInfo.sqErrR + auxInfo.sqErrL);
node.setWeight(auxInfo.weightL + auxInfo.weightR);
}
if (options._checkTreeConsistency) {
checkConsistency(auxInfo, node);
}
}
/**
* Compute a graph of the forest.
*
* @return A graph of the forest.
*/
public SharedTreeGraph computeGraph(int treeToPrint, ConvertTreeOptions options) {
SharedTreeGraph g = new SharedTreeGraph();
if (treeToPrint >= _ntree_groups) {
throw new IllegalArgumentException("Tree " + treeToPrint + " does not exist (max " + _ntree_groups + ")");
}
int j;
if (treeToPrint >= 0) {
j = treeToPrint;
}
else {
j = 0;
}
for (; j < _ntree_groups; j++) {
for (int i = 0; i < _ntrees_per_group; i++) {
int itree = treeIndex(j, i);
String[] domainValues = isSupervised() ? getDomainValues(getResponseIdx()) : null;
String treeName = treeName(j, i, domainValues);
SharedTreeSubgraph sg = g.makeSubgraph(treeName);
computeTreeGraph(sg, _compressed_trees[itree], _compressed_trees_aux[itree],
getNames(), getDomainValues(), options);
}
if (treeToPrint >= 0) {
break;
}
}
return g;
}
public SharedTreeGraph computeGraph(int treeId) {
return computeGraph(treeId, ConvertTreeOptions.DEFAULT);
}
@Deprecated
@SuppressWarnings("unused")
public SharedTreeGraph _computeGraph(int treeId) {
return computeGraph(treeId);
}
public static SharedTreeSubgraph computeTreeGraph(int treeNum, String treeName, byte[] tree, byte[] auxTreeInfo,
String names[], String[][] domains) {
return computeTreeGraph(treeNum, treeName, tree, auxTreeInfo, names, domains, ConvertTreeOptions.DEFAULT);
}
public static SharedTreeSubgraph computeTreeGraph(int treeNum, String treeName, byte[] tree, byte[] auxTreeInfo,
String names[], String[][] domains, ConvertTreeOptions options) {
SharedTreeSubgraph sg = new SharedTreeSubgraph(treeNum, treeName);
computeTreeGraph(sg, tree, auxTreeInfo, names, domains, options);
return sg;
}
private static void computeTreeGraph(SharedTreeSubgraph sg, byte[] tree, byte[] auxTreeInfo,
String names[], String[][] domains, ConvertTreeOptions options) {
SharedTreeNode node = sg.makeRootNode();
node.setSquaredError(Float.NaN);
node.setPredValue(Float.NaN);
ByteBufferWrapper ab = new ByteBufferWrapper(tree);
ByteBufferWrapper abAux = new ByteBufferWrapper(auxTreeInfo);
HashMap<Integer, AuxInfo> auxMap = readAuxInfos(abAux);
computeTreeGraph(sg, node, tree, ab, auxMap, names, domains, options);
}
public static Map<Integer, AuxInfo> readAuxInfos(byte[] auxTreeInfo) {
ByteBufferWrapper abAux = new ByteBufferWrapper(auxTreeInfo);
return readAuxInfos(abAux);
}
public static int findMaxNodeId(byte[] auxTreeInfo) {
int maxNodeId = 0;
AuxInfoLightReader reader = new AuxInfoLightReader(auxTreeInfo);
while (reader.hasNext()) {
int nodeId = reader.readMaxChildNodeIdAndSkip();
if (maxNodeId < nodeId)
maxNodeId = nodeId;
}
return maxNodeId;
}
private static HashMap<Integer, AuxInfo> readAuxInfos(ByteBufferWrapper abAux) {
HashMap<Integer, AuxInfo> auxMap = new HashMap<>();
Map<Integer, AuxInfo> nodeIdToParent = new HashMap<>();
nodeIdToParent.put(0, new AuxInfo());
boolean reservedFieldIsParentId = false; // In older H2O versions `reserved` field was used for parent id
while (abAux.hasRemaining()) {
AuxInfo auxInfo = new AuxInfo(abAux);
if (auxMap.size() == 0) {
reservedFieldIsParentId = auxInfo.reserved < 0; // `-1` indicates No Parent, reserved >= 0 indicates reserved is not used for parent ids!
}
AuxInfo parent = nodeIdToParent.get(auxInfo.nid);
if (parent == null)
throw new IllegalStateException("Parent for nodeId=" + auxInfo.nid + " not found.");
assert !reservedFieldIsParentId || parent.nid == auxInfo.reserved : "Corrupted Tree Info: parent nodes do not correspond (pid: " +
parent.nid + ", reserved: " + auxInfo.reserved + ")";
auxInfo.setPid(parent.nid);
nodeIdToParent.put(auxInfo.nidL, auxInfo);
nodeIdToParent.put(auxInfo.nidR, auxInfo);
auxMap.put(auxInfo.nid, auxInfo);
}
return auxMap;
}
public static void writeUpdatedAuxInfos(byte[] origAux, Map<Integer, AuxInfo> updatedAuxInfos, ByteBuffer bb) {
AuxInfoLightReader reader = new AuxInfoLightReader(origAux);
int count = 0;
while (reader.hasNext()) {
count++;
int nid = reader.readNodeIdAndSkip();
AuxInfo auxInfo = updatedAuxInfos.get(nid);
if (auxInfo == null)
throw new IllegalStateException("Updated AuxInfo for nodeId=" + nid + " doesn't exist. " +
"All AuxInfos need to be represented in the updated structure.");
auxInfo.writeTo(bb);
}
assert count == updatedAuxInfos.size();
}
public static String treeName(int groupIndex, int classIndex, String[] domainValues) {
String className = "";
{
if (domainValues != null) {
className = ", Class " + domainValues[classIndex];
}
}
return "Tree " + groupIndex + className;
}
// Please see AuxInfo for details of the serialized format
private static class AuxInfoLightReader {
private final ByteBufferWrapper _abAux;
int _nid;
int _numLeftChildren;
private AuxInfoLightReader(byte[] auxInfo) {
this(new ByteBufferWrapper(auxInfo));
}
private AuxInfoLightReader(ByteBufferWrapper abAux) {
_abAux = abAux;
}
private void readNext() {
_nid = _abAux.get4();
_numLeftChildren = _abAux.get4();
}
private boolean hasNext() {
return _abAux.hasRemaining();
}
private int readMaxChildNodeIdAndSkip() {
_abAux.skip(AuxInfo.SIZE - 8);
int leftId = _abAux.get4();
int rightId = _abAux.get4();
return Math.max(leftId, rightId);
}
private int readNodeIdAndSkip() {
readNext();
skipNode();
return _nid;
}
private int getLeftNodeIdAndSkipNode() {
_abAux.skip(4 * 6);
int n = _abAux.get4();
_abAux.skip(4);
return n;
}
private int getRightNodeIdAndSkipNode() {
_abAux.skip(4 * 7);
return _abAux.get4();
}
private void skipNode() {
_abAux.skip(AuxInfo.SIZE - 8);
}
private void skipNodes(int num) {
_abAux.skip(AuxInfo.SIZE * num);
}
}
public static class AuxInfo {
private static final int SIZE = 10 * 4;
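// Serialized layout (40 bytes, derived from the reads in the constructor below):
//   int nid | int reserved | float weightL | float weightR | float predL | float predR
//   | float sqErrL | float sqErrR | int nidL | int nidR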
private AuxInfo() {
nid = -1;
reserved = -1;
}
// Warning: any changes in this structure need to be reflected also in AuxInfoLightReader!!!
AuxInfo(ByteBufferWrapper abAux) {
// node ID
nid = abAux.get4();
// ignored - can contain either parent id or number of children (depending on a MOJO version)
reserved = abAux.get4();
//sum of observation weights (typically, that's just the count of observations)
weightL = abAux.get4f();
weightR = abAux.get4f();
//predicted values
predL = abAux.get4f();
predR = abAux.get4f();
//squared error
sqErrL = abAux.get4f();
sqErrR = abAux.get4f();
//node IDs (consistent with tree construction)
nidL = abAux.get4();
nidR = abAux.get4();
}
void writeTo(ByteBuffer bb) {
// node ID
bb.putInt(nid);
// reserved
bb.putInt(reserved);
// sum of observation weights
bb.putFloat(weightL);
bb.putFloat(weightR);
// predicted values
bb.putFloat(predL);
bb.putFloat(predR);
// squared error
bb.putFloat(sqErrL);
bb.putFloat(sqErrR);
// node IDs
bb.putInt(nidL);
bb.putInt(nidR);
}
final void setPid(int parentId) {
pid = parentId;
}
@Override public String toString() {
return "nid: " + nid + "\n" +
"pid: " + pid + "\n" +
"nidL: " + nidL + "\n" +
"nidR: " + nidR + "\n" +
"weightL: " + weightL + "\n" +
"weightR: " + weightR + "\n" +
"predL: " + predL + "\n" +
"predR: " + predR + "\n" +
"sqErrL: " + sqErrL + "\n" +
"sqErrR: " + sqErrR + "\n" +
"reserved: " + reserved + "\n";
}
public int nid, pid, nidL, nidR;
private final int reserved;
public float weightL, weightR, predL, predR, sqErrL, sqErrR;
}
static void checkConsistency(AuxInfo auxInfo, SharedTreeNode node) {
boolean ok = true;
boolean weight_ok = true;
ok &= (auxInfo.nid == node.getNodeNumber());
double sum = 0;
if (node.leftChild!=null) {
ok &= (auxInfo.nidL == node.leftChild.getNodeNumber());
ok &= (auxInfo.weightL == node.leftChild.getWeight());
ok &= (auxInfo.predL == node.leftChild.predValue);
ok &= (auxInfo.sqErrL == node.leftChild.squaredError);
sum += node.leftChild.getWeight();
}
if (node.rightChild!=null) {
ok &= (auxInfo.nidR == node.rightChild.getNodeNumber());
ok &= (auxInfo.weightR == node.rightChild.getWeight());
ok &= (auxInfo.predR == node.rightChild.predValue);
ok &= (auxInfo.sqErrR == node.rightChild.squaredError);
sum += node.rightChild.getWeight();
}
if (node.parent!=null) {
ok &= (auxInfo.pid == node.parent.getNodeNumber());
weight_ok = (Math.abs(node.getWeight() - sum) < 1e-5 * (node.getWeight() + sum));
ok &= weight_ok;
}
if (!ok && logger.isErrorEnabled()) {
logger.error("\nTree inconsistency found:");
if (node.depth == 1 && !weight_ok) {
logger.error("Note: this is a known issue for DRF and Isolation Forest models, " +
"please refer to https://github.com/h2oai/h2o-3/issues/12971");
}
logger.error(node.getPrintString("parent"));
logger.error(node.leftChild.getPrintString("left child"));
logger.error(node.rightChild.getPrintString("right child"));
logger.error("Auxiliary tree info:");
logger.error(auxInfo.toString());
}
}
//------------------------------------------------------------------------------------------------------------------
// Private
//------------------------------------------------------------------------------------------------------------------
protected SharedTreeMojoModel(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
protected SharedTreeMojoModel(String[] columns, String[][] domains, String responseColumn, String treatmentColumn) {
super(columns, domains, responseColumn, treatmentColumn);
}
/**
* Score all trees and fill in the `preds` array.
*/
protected void scoreAllTrees(double[] row, double[] preds) {
java.util.Arrays.fill(preds, 0);
scoreTreeRange(row, 0, _ntree_groups, preds);
}
/**
* Transforms tree predictions into the final model predictions.
* For classification: converts tree preds into probability distribution and picks predicted class.
* For regression: projects tree prediction from link-space into the original space.
* @param row input row.
* @param offset prediction offset (applied in link space).
* @param preds final output, same structure as in {@link SharedTreeMojoModel#score0}.
* @return preds array.
*/
public abstract double[] unifyPreds(double[] row, double offset, double[] preds);
/**
* Generates a (per-class) prediction using the trees of a single tree group.
* @param row input row
* @param index index of the tree group (0 to number of tree groups - 1)
* @param preds array of partial predictions.
*/
public final void scoreSingleTree(double[] row, int index, double[] preds) {
scoreTreeRange(row, index, index + 1, preds);
}
/**
* Generates (partial, per-class) predictions using only trees from a given range.
* @param row input row
* @param fromIndex low endpoint (inclusive) of the tree range
* @param toIndex high endpoint (exclusive) of the tree range
* @param preds array of partial predictions.
* To get final predictions pass the result to {@link SharedTreeMojoModel#unifyPreds}.
*/
public final void scoreTreeRange(double[] row, int fromIndex, int toIndex, double[] preds) {
final int clOffset = _nclasses == 1 ? 0 : 1;
for (int classIndex = 0; classIndex < _ntrees_per_group; classIndex++) {
int k = clOffset + classIndex;
int itree = treeIndex(fromIndex, classIndex);
for (int groupIndex = fromIndex; groupIndex < toIndex; groupIndex++) {
if (_compressed_trees[itree] != null) { // Skip all empty trees
preds[k] += _scoreTree.scoreTree(_compressed_trees[itree], row, false, _domains);
}
itree++;
}
}
}
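// Hypothetical usage sketch (the MOJO path and row contents are illustrative only,
// and GenModel#getPredsSize() is assumed for sizing the output array):
//   SharedTreeMojoModel model = (SharedTreeMojoModel) MojoModel.load("model.zip");
//   double[] preds = new double[model.getPredsSize()];
//   model.scoreTreeRange(row, 0, 10, preds); // partial per-class sums of the first 10 groups
//   model.unifyPreds(row, 0, preds);         // turn tree sums into final predictions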
// Note: in the H2O runtime model, _ntree_groups corresponds to _treeKeys.length
// and _ntrees_per_group to _treeKeys[0].length.
public String[] getDecisionPathNames() {
int classTrees = 0;
for (int i = 0; i < _ntrees_per_group; ++i) {
int itree = treeIndex(0, i);
if (_compressed_trees[itree] != null) classTrees++;
}
final int outputcols = _ntree_groups * classTrees;
final String[] names = new String[outputcols];
for (int c = 0; c < _ntrees_per_group; c++) {
for (int tidx = 0; tidx < _ntree_groups; tidx++) {
int itree = treeIndex(tidx, c);
if (_compressed_trees[itree] != null) {
names[itree] = "T" + (tidx + 1) + ".C" + (c + 1);
}
}
}
return names;
}
public static class LeafNodeAssignments {
public String[] _paths;
public int[] _nodeIds;
}
public LeafNodeAssignments getLeafNodeAssignments(final double[] row) {
LeafNodeAssignments assignments = new LeafNodeAssignments();
assignments._paths = new String[_compressed_trees.length];
if (_mojo_version >= 1.3 && _compressed_trees_aux != null) { // enable only for compatible MOJOs
assignments._nodeIds = new int[_compressed_trees_aux.length];
}
traceDecisions(row, assignments._paths, assignments._nodeIds);
return assignments;
}
public String[] getDecisionPath(final double[] row) {
String[] paths = new String[_compressed_trees.length];
traceDecisions(row, paths, null);
return paths;
}
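// Hypothetical usage sketch (row contents illustrative): per-tree decision paths,
// and - for MOJO versions 1.3+ - the ids of the visited leaf nodes.
//   String[] paths = model.getDecisionPath(row);               // e.g. "RLLR" per tree
//   LeafNodeAssignments a = model.getLeafNodeAssignments(row); // paths plus node ids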
private void traceDecisions(final double[] row, String[] paths, int[] nodeIds) {
if (_mojo_version < 1.2) {
throw new IllegalArgumentException("You can only obtain decision tree path with mojo versions 1.2 or higher");
}
for (int j = 0; j < _ntree_groups; j++) {
for (int i = 0; i < _ntrees_per_group; i++) {
int itree = treeIndex(j, i);
double d = scoreTree(_compressed_trees[itree], row, true, _domains);
if (paths != null)
paths[itree] = SharedTreeMojoModel.getDecisionPath(d);
if (nodeIds != null) {
assert _mojo_version >= 1.3;
nodeIds[itree] = SharedTreeMojoModel.getLeafNodeId(d, _compressed_trees_aux[itree]);
}
}
}
}
/**
* Locates a tree in the array of compressed trees.
* @param groupIndex index of the tree group (iteration)
* @param classIndex index of the class within the group
* @return index of the tree in _compressed_trees.
*/
final int treeIndex(int groupIndex, int classIndex) {
return classIndex * _ntree_groups + groupIndex;
}
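// Example: with _ntree_groups = 2 and _ntrees_per_group = 3 (a 3-class model),
// _compressed_trees is laid out class-major:
//   [g0c0, g1c0, g0c1, g1c1, g0c2, g1c2]
// so treeIndex(1, 2) == 2 * 2 + 1 == 5 (group 1, class 2).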
public final byte[] treeBytes(int groupIndex, int classIndex) {
return _compressed_trees[treeIndex(groupIndex, classIndex)];
}
// DO NOT CHANGE THE CODE BELOW THIS LINE
// DO NOT CHANGE THE CODE BELOW THIS LINE
// DO NOT CHANGE THE CODE BELOW THIS LINE
// DO NOT CHANGE THE CODE BELOW THIS LINE
// DO NOT CHANGE THE CODE BELOW THIS LINE
// DO NOT CHANGE THE CODE BELOW THIS LINE
// DO NOT CHANGE THE CODE BELOW THIS LINE
/////////////////////////////////////////////////////
/**
* SET IN STONE FOR MOJO VERSION "1.00" - DO NOT CHANGE
* @param tree serialized (compressed) tree bytes
* @param row input row
* @param computeLeafAssignment when true, return the encoded decision path instead of the prediction
* @return tree prediction, or (if computeLeafAssignment is set) the decision path packed into the bits of a double
*/
@SuppressWarnings("ConstantConditions") // Complains that the code is too complex. Well duh!
public static double scoreTree0(byte[] tree, double[] row, boolean computeLeafAssignment) {
ByteBufferWrapper ab = new ByteBufferWrapper(tree);
GenmodelBitSet bs = null; // Lazily set on hitting first group test
long bitsRight = 0;
int level = 0;
while (true) {
int nodeType = ab.get1U();
int colId = ab.get2();
if (colId == 65535) return ab.get4f();
int naSplitDir = ab.get1U();
boolean naVsRest = naSplitDir == NsdNaVsRest;
boolean leftward = naSplitDir == NsdNaLeft || naSplitDir == NsdLeft;
int lmask = (nodeType & 51);
int equal = (nodeType & 12); // Can be one of 0, 8, 12
assert equal != 4; // no longer supported
float splitVal = -1;
if (!naVsRest) {
// Extract value or group to split on
if (equal == 0) {
// Standard float-compare test (either < or ==)
splitVal = ab.get4f(); // Get the float to compare
} else {
// Bitset test
if (bs == null) bs = new GenmodelBitSet(0);
if (equal == 8)
bs.fill2(tree, ab);
else
bs.fill3_1(tree, ab);
}
}
double d = row[colId];
if (Double.isNaN(d)? !leftward : !naVsRest && (equal == 0? d >= splitVal : bs.contains0((int)d))) {
// go RIGHT
switch (lmask) {
case 0: ab.skip(ab.get1U()); break;
case 1: ab.skip(ab.get2()); break;
case 2: ab.skip(ab.get3()); break;
case 3: ab.skip(ab.get4()); break;
case 48: ab.skip(4); break; // skip the prediction
default:
assert false : "illegal lmask value " + lmask + " in tree " + Arrays.toString(tree);
}
if (computeLeafAssignment && level < 64) bitsRight |= 1 << level; // note: int shift here and below (scoreTree1 uses 1L) - preserved as-is for MOJO "1.00" compatibility
lmask = (nodeType & 0xC0) >> 2; // Replace leftmask with the rightmask
} else {
// go LEFT
if (lmask <= 3)
ab.skip(lmask + 1);
}
level++;
if ((lmask & 16) != 0) {
if (computeLeafAssignment) {
bitsRight |= 1 << level; // mark the end of the tree
return Double.longBitsToDouble(bitsRight);
} else {
return ab.get4f();
}
}
}
}
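// When computeLeafAssignment is set, the decision path is packed into the bits of the
// returned double: bit i is set when the walk went RIGHT at depth i, and the bit just
// past the final depth marks the end of the path. getDecisionPath(double) and
// getLeafNodeId(double, byte[]) decode this representation.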
/**
* SET IN STONE FOR MOJO VERSION "1.10" - DO NOT CHANGE
* @param tree serialized (compressed) tree bytes
* @param row input row
* @param computeLeafAssignment when true, return the encoded decision path instead of the prediction
* @return tree prediction, or (if computeLeafAssignment is set) the decision path packed into the bits of a double
*/
@SuppressWarnings("ConstantConditions") // Complains that the code is too complex. Well duh!
public static double scoreTree1(byte[] tree, double[] row, boolean computeLeafAssignment) {
ByteBufferWrapper ab = new ByteBufferWrapper(tree);
GenmodelBitSet bs = null;
long bitsRight = 0;
int level = 0;
while (true) {
int nodeType = ab.get1U();
int colId = ab.get2();
if (colId == 65535) return ab.get4f();
int naSplitDir = ab.get1U();
boolean naVsRest = naSplitDir == NsdNaVsRest;
boolean leftward = naSplitDir == NsdNaLeft || naSplitDir == NsdLeft;
int lmask = (nodeType & 51);
int equal = (nodeType & 12); // Can be one of 0, 8, 12
assert equal != 4; // no longer supported
float splitVal = -1;
if (!naVsRest) {
// Extract value or group to split on
if (equal == 0) {
// Standard float-compare test (either < or ==)
splitVal = ab.get4f(); // Get the float to compare
} else {
// Bitset test
if (bs == null) bs = new GenmodelBitSet(0);
if (equal == 8)
bs.fill2(tree, ab);
else
bs.fill3_1(tree, ab);
}
}
double d = row[colId];
if (Double.isNaN(d) || ( equal != 0 && bs != null && !bs.isInRange((int)d) )
? !leftward : !naVsRest && (equal == 0? d >= splitVal : bs.contains((int)d))) {
// go RIGHT
switch (lmask) {
case 0: ab.skip(ab.get1U()); break;
case 1: ab.skip(ab.get2()); break;
case 2: ab.skip(ab.get3()); break;
case 3: ab.skip(ab.get4()); break;
case 48: ab.skip(4); break; // skip the prediction
default:
assert false : "illegal lmask value " + lmask + " in tree " + Arrays.toString(tree);
}
if (computeLeafAssignment && level < 64) bitsRight |= 1L << level;
lmask = (nodeType & 0xC0) >> 2; // Replace leftmask with the rightmask
} else {
// go LEFT
if (lmask <= 3)
ab.skip(lmask + 1);
}
level++;
if ((lmask & 16) != 0) {
if (computeLeafAssignment) {
bitsRight |= 1L << level; // mark the end of the tree
return Double.longBitsToDouble(bitsRight);
} else {
return ab.get4f();
}
}
}
}
@Override
public boolean calibrateClassProbabilities(double[] preds) {
return CalibrationMojoHelper.calibrateClassProbabilities(this, preds);
}
@Override
public double[] getCalibGlmBeta() {
return _calib_glm_beta;
}
@Override
public IsotonicCalibrator getIsotonicCalibrator() {
return _isotonic_calibrator;
}
@Override
public SharedTreeGraph convert(final int treeNumber, final String treeClass) {
return computeGraph(treeNumber);
}
@Override
public SharedTreeGraph convert(final int treeNumber, final String treeClass, ConvertTreeOptions options) {
return computeGraph(treeNumber, options);
}
/**
* Returns staged predictions of tree algorithms (prediction probabilities of trees per iteration).
* The output structure, for tree Tt and class Cc, is:
* Binomial models: [probability T1.C1, probability T2.C1, ..., Tt.C1], where Tt.C1 corresponds to the probability p0.
* Multinomial models: [probability T1.C1, probability T1.C2, ..., Tt.Cc].
* @param row Input row.
* @param predsLength Length of prediction result.
* @return array of staged prediction probabilities
*/
public double[] scoreStagedPredictions(double[] row, int predsLength) {
int contribOffset = nclasses() == 1 ? 0 : 1;
double[] trees_result = new double[_ntree_groups * _ntrees_per_group];
for (int groupIndex = 0; groupIndex < _ntree_groups; groupIndex++) {
double[] tmpPreds = new double[predsLength];
scoreTreeRange(row, 0, groupIndex+1, tmpPreds);
unifyPreds(row, 0, tmpPreds);
for (int classIndex = 0; classIndex < _ntrees_per_group; classIndex++) {
int tree_index = groupIndex * _ntrees_per_group + classIndex;
trees_result[tree_index] = tmpPreds[contribOffset+classIndex];
}
}
return trees_result;
}
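// Hypothetical usage sketch (binomial model assumed; row contents illustrative):
//   double[] staged = model.scoreStagedPredictions(row, model.getPredsSize());
//   // staged[t] is the class-0 probability (p0) after tree groups 0..t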
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/SharedTreeMojoModelWithContributions.java
|
package hex.genmodel.algos.tree;
import hex.genmodel.PredictContributions;
import hex.genmodel.PredictContributionsFactory;
import java.util.ArrayList;
import java.util.List;
public abstract class SharedTreeMojoModelWithContributions extends SharedTreeMojoModel implements PredictContributionsFactory {
protected SharedTreeMojoModelWithContributions(String[] columns, String[][] domains, String responseColumn) {
super(columns, domains, responseColumn);
}
public PredictContributions makeContributionsPredictor() {
if (_nclasses > 2) {
throw new UnsupportedOperationException("Predicting contributions for multinomial classification problems is not yet supported.");
}
SharedTreeGraph graph = computeGraph(-1);
List<TreeSHAPPredictor<double[]>> treeSHAPs = new ArrayList<>(graph.subgraphArray.size());
for (SharedTreeSubgraph tree : graph.subgraphArray) {
SharedTreeNode[] nodes = tree.getNodes();
treeSHAPs.add(new TreeSHAP<>(nodes));
}
TreeSHAPPredictor<double[]> predictor = new TreeSHAPEnsemble<>(treeSHAPs, (float) getInitF());
return getContributionsPredictor(predictor);
}
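// Hypothetical usage sketch (assumes PredictContributions#calculateContributions(double[])
// as exposed by h2o-genmodel; row contents illustrative):
//   PredictContributions pc = model.makeContributionsPredictor();
//   float[] contribs = pc.calculateContributions(row); // per-feature SHAP values + bias term (last element)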
public double getInitF() {
return 0; // Zero by default, which is correct for DRF; GBMMojoModel overrides this with its init_f.
}
protected abstract PredictContributions getContributionsPredictor(TreeSHAPPredictor<double[]> treeSHAPPredictor);
protected static class SharedTreeContributionsPredictor extends ContributionsPredictor<double[]> {
public SharedTreeContributionsPredictor(SharedTreeMojoModel model, TreeSHAPPredictor<double[]> treeSHAPPredictor) {
super(model._nfeatures + 1, model.features(), treeSHAPPredictor);
}
@Override
protected final double[] toInputRow(double[] input) {
return input;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/SharedTreeMojoReader.java
|
package hex.genmodel.algos.tree;
import com.google.gson.JsonObject;
import hex.genmodel.ModelMojoReader;
import hex.genmodel.attributes.*;
import java.io.IOException;
public abstract class SharedTreeMojoReader<M extends SharedTreeMojoModel> extends ModelMojoReader<M> {
@Override
protected void readModelData() throws IOException {
// In mojos v=1.0 this info wasn't saved.
Integer tpc = readkv("n_trees_per_class");
if (tpc == null) {
Boolean bdt = readkv("binomial_double_trees"); // This flag exists only for DRF models
tpc = _model.nclasses() == 2 && (bdt == null || !bdt)? 1 : _model.nclasses();
}
_model._ntree_groups = readkv("n_trees");
_model._ntrees_per_group = tpc;
_model._compressed_trees = new byte[_model._ntree_groups * tpc][];
_model._mojo_version = ((Number) readkv("mojo_version")).doubleValue();
if (_model._mojo_version < 1.40) {
_model._genmodel_encoding = "AUTO";
} else {
_model._genmodel_encoding = readkv("_genmodel_encoding").toString();
_model._orig_projection_array = readkv("_orig_projection_array", new double[0]);
Integer n = readkv("_n_orig_names");
if (n != null) {
_model._orig_names = readStringArray("_orig_names", n);
}
n = readkv("_n_orig_domain_values");
if (n != null) {
_model._orig_domain_values = new String[n][];
for (int i = 0; i < n; i++) {
int m = readkv("_m_orig_domain_values_" + i);
if (m > 0) {
_model._orig_domain_values[i] = readStringArray("_orig_domain_values_" + i, m);
}
}
}
}
if (_model._mojo_version > 1.0) { // In mojos v=1.0 this info wasn't saved
_model._compressed_trees_aux = new byte[_model._ntree_groups * tpc][];
}
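// Tree blobs are stored one per (class, group) pair as trees/tCC_GGG.bin, where CC is the
// zero-padded class index and GGG the group (iteration) index; for MOJO versions > 1.0 the
// auxiliary node info lives next to it as trees/tCC_GGG_aux.bin.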
for (int j = 0; j < _model._ntree_groups; j++)
for (int i = 0; i < tpc; i++) {
String blobName = String.format("trees/t%02d_%03d.bin", i, j);
if (!exists(blobName)) continue;
_model._compressed_trees[_model.treeIndex(j, i)] = readblob(blobName);
if (_model._compressed_trees_aux!=null) {
_model._compressed_trees_aux[_model.treeIndex(j, i)] = readblob(String.format("trees/t%02d_%03d_aux.bin", i, j));
}
}
// Calibration
String calibMethod = readkv("calib_method");
if (calibMethod != null) {
switch (calibMethod) {
case "platt":
_model._calib_glm_beta = readkv("calib_glm_beta", new double[0]);
break;
case "isotonic":
_model._isotonic_calibrator = readIsotonicCalibrator();
break;
default:
throw new IllegalStateException("Unknown calibration method: " + calibMethod);
}
}
_model.postInit();
}
@Override
protected SharedTreeModelAttributes readModelSpecificAttributes() {
final JsonObject modelJson = ModelJsonReader.parseModelJson(_reader);
if(modelJson != null) {
return new SharedTreeModelAttributes(modelJson, _model);
} else {
return null;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos
|
java-sources/ai/h2o/h2o-genmodel/3.46.0.7/hex/genmodel/algos/tree/SharedTreeNode.java
|
package hex.genmodel.algos.tree;
import ai.h2o.algos.tree.INode;
import ai.h2o.algos.tree.INodeStat;
import water.logging.Logger;
import water.logging.LoggerFactory;
import hex.genmodel.tools.PrintMojo;
import hex.genmodel.utils.GenmodelBitSet;
import java.io.PrintStream;
import java.util.*;
/**
* Node in a tree.
* A non-leaf node holds edges to its left and right child nodes.
*/
public class SharedTreeNode implements INode<double[]>, INodeStat {
final int internalId; // internal tree id (that links the node back to the array of nodes of the tree) - don't confuse with user-facing nodeNumber!
final SharedTreeNode parent;
final int subgraphNumber;
int nodeNumber;
float weight;
final int depth;
int colId;
String colName;
boolean leftward;
boolean naVsRest;
float splitValue = Float.NaN;
String[] domainValues;
GenmodelBitSet bs;
float predValue = Float.NaN;
float squaredError = Float.NaN;
SharedTreeNode leftChild;
public SharedTreeNode rightChild;
float gain = Float.NaN;
// Whether an NA value for this colId can reach this node.
private boolean inclusiveNa;
// When a column is categorical, the levels that can reach this node.
// This in particular reflects any earlier splits on the same colId.
private BitSet inclusiveLevels;
/**
* Create a new node.
* @param id Internal node id (index into the tree's node array)
* @param p Parent node
* @param sn Tree number
* @param d Node depth within the tree
*/
SharedTreeNode(int id, SharedTreeNode p, int sn, int d) {
internalId = id;
parent = p;
subgraphNumber = sn;
depth = d;
}
public int getDepth() {
return depth;
}
public int getNodeNumber() {
return nodeNumber;
}
@Override
public float getWeight() {
return weight;
}
public void setNodeNumber(int id) {
nodeNumber = id;
}
public void setWeight(float w) {
weight = w;
}
public void setCol(int v1, String v2) {
colId = v1;
colName = v2;
}
public int getColId() {
return colId;
}
public void setLeftward(boolean v) {
leftward = v;
}
void setNaVsRest(boolean v) {
naVsRest = v;
}
public void setSplitValue(float v) {
splitValue = v;
}
public void setColName(String colName) {
this.colName = colName;
}
void setBitset(String[] v1, GenmodelBitSet v2) {
assert (v1 != null);
domainValues = v1;
bs = v2;
}
public void setPredValue(float v) {
predValue = v;
}
public void setSquaredError(float v) {
squaredError = v;
}
/**
* Calculate whether the NA value for a particular colId can reach this node.
* @param colIdToFind Column id to find
* @return true if NA of colId reaches this node, false otherwise
*/
private boolean findInclusiveNa(int colIdToFind) {
if (parent == null) {
return true;
}
else if (parent.getColId() == colIdToFind) {
return inclusiveNa;
}
return parent.findInclusiveNa(colIdToFind);
}
private boolean calculateChildInclusiveNa(boolean includeThisSplitEdge) {
return findInclusiveNa(colId) && includeThisSplitEdge;
}
/**
* Find the set of levels for a particular categorical column that can reach this node.
* A null return value implies the full set (i.e. every level).
* @param colIdToFind Column id to find
* @return Set of levels
*/
private BitSet findInclusiveLevels(int colIdToFind) {
if (parent == null) {
return null;
}
if (parent.getColId() == colIdToFind) {
return inclusiveLevels;
}
return parent.findInclusiveLevels(colIdToFind);
}
private boolean calculateIncludeThisLevel(BitSet inheritedInclusiveLevels, int i) {
if (inheritedInclusiveLevels == null) {
// If there is no prior split history for this column, then treat the
// inherited set as complete.
return true;
}
else if (inheritedInclusiveLevels.get(i)) {
// Allow levels that flowed into this node.
return true;
}
// Filter out levels that were already discarded from a previous split.
return false;
}
/**
* Calculate the set of levels that flow through to a child.
* @param includeAllLevels true when naVsRest dictates that all (inherited) levels flow into this child
* @param discardAllLevels true when naVsRest dictates that all levels are discarded for this child
* @param nodeBitsetDoesContain true if this child is reached when the node's GenmodelBitSet contains the level (i.e. for the right child)
* @return Calculated set of levels
*/
private BitSet calculateChildInclusiveLevels(boolean includeAllLevels,
boolean discardAllLevels,
boolean nodeBitsetDoesContain) {
BitSet inheritedInclusiveLevels = findInclusiveLevels(colId);
BitSet childInclusiveLevels = new BitSet();
for (int i = 0; i < domainValues.length; i++) {
// Calculate whether this level should flow into this child node.
boolean includeThisLevel = false;
{
if (discardAllLevels) {
includeThisLevel = false;
} else if (includeAllLevels) {
includeThisLevel = calculateIncludeThisLevel(inheritedInclusiveLevels, i);
} else if (!Float.isNaN(splitValue)) {
// This branch is only used if categorical split is represented numerically
includeThisLevel = splitValue < i ^ !nodeBitsetDoesContain;
} else if (bs.isInRange(i) && bs.contains(i) == nodeBitsetDoesContain) {
includeThisLevel = calculateIncludeThisLevel(inheritedInclusiveLevels, i);
}
}
if (includeThisLevel) {
childInclusiveLevels.set(i);
}
}
return childInclusiveLevels;
}
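// Example: domain {"a","b","c"} with a split bitset containing only "c": the right child
// (nodeBitsetDoesContain == true) receives level "c", the left child receives "a" and "b",
// each intersected with the levels inherited from earlier splits on the same column.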
void setLeftChild(SharedTreeNode v) {
leftChild = v;
boolean childInclusiveNa = calculateChildInclusiveNa(leftward);
v.setInclusiveNa(childInclusiveNa);
if (! isBitset()) {
return;
}
BitSet childInclusiveLevels = calculateChildInclusiveLevels(naVsRest, false, false);
v.setInclusiveLevels(childInclusiveLevels);
}
void setRightChild(SharedTreeNode v) {
rightChild = v;
boolean childInclusiveNa = calculateChildInclusiveNa(!leftward);
v.setInclusiveNa(childInclusiveNa);
if (! isBitset()) {
return;
}
BitSet childInclusiveLevels = calculateChildInclusiveLevels(false, naVsRest, true);
v.setInclusiveLevels(childInclusiveLevels);
}
public void setInclusiveNa(boolean v) {
inclusiveNa = v;
}
public boolean getInclusiveNa() {
return inclusiveNa;
}
private void setInclusiveLevels(BitSet v) {
inclusiveLevels = v;
}
public BitSet getInclusiveLevels() {
return inclusiveLevels;
}
public String getName() {
return "Node " + nodeNumber;
}
public void print() {
print(System.out, null);
}
public void print(PrintStream out, String description) {
out.print(this.getPrintString(description));
}
public String getPrintString(String description) {
return " Node " + nodeNumber + (description != null ? " (" + description + ")" : "") +
"\n weight: " + weight +
"\n depth: " + depth +
"\n colId: " + colId +
"\n colName: " + ((colName != null) ? colName : "") +
"\n leftward: " + leftward +
"\n naVsRest: " + naVsRest +
"\n splitVal: " + splitValue +
"\n isBitset: " + isBitset() +
"\n predValue: " + predValue +
"\n squaredErr: " + squaredError +
"\n leftChild: " + ((leftChild != null) ? leftChild.getName() : "") +
"\n rightChild: " + ((rightChild != null) ? rightChild.getName() : "");
}
void printEdges() {
if (leftChild != null) {
System.out.println(" " + getName() + " ---left---> " + leftChild.getName());
leftChild.printEdges();
}
if (rightChild != null) {
System.out.println(" " + getName() + " ---right--> " + rightChild.getName());
rightChild.printEdges();
}
}
private String getDotName() {
return "SG_" + subgraphNumber + "_Node_" + nodeNumber;
}
public boolean isBitset() {
return (domainValues != null);
}
public static String escapeQuotes(String s) {
return s.replace("\"", "\\\"");
}
private void printDotNode(PrintStream os, boolean detail, PrintMojo.PrintTreeOptions treeOptions) {
os.print("\"" + getDotName() + "\"");
os.print(" [");
if (leftChild==null && rightChild==null) {
os.print("fontsize="+treeOptions._fontSize+", label=\"");
float predv = treeOptions._setDecimalPlace?treeOptions.roundNPlace(predValue):predValue;
os.print(predv);
} else if (isBitset() && (Float.isNaN(splitValue) || !treeOptions._internal)) {
os.print("shape=box, fontsize="+treeOptions._fontSize+", label=\"");
os.print(escapeQuotes(colName));
}
else {
assert(! Float.isNaN(splitValue));
float splitV = treeOptions._setDecimalPlace?treeOptions.roundNPlace(splitValue):splitValue;
os.print("shape=box, fontsize="+treeOptions._fontSize+", label=\"");
os.print(escapeQuotes(colName) + " < " + splitV);
}
if (detail) {
os.print("\\n\\nN" + getNodeNumber() + "\\n");
if (leftChild != null || rightChild != null) {
if (!Float.isNaN(predValue)) {
float predv = treeOptions._setDecimalPlace?treeOptions.roundNPlace(predValue):predValue;
os.print("\\nPred: " + predv);
}
}
if (!Float.isNaN(squaredError)) {
os.print("\\nSE: " + squaredError);
}
os.print("\\nW: " + getWeight());
if (naVsRest) {
os.print("\\n" + "nasVsRest");
}
if (leftChild != null) {
os.print("\\n" + "L: N" + leftChild.getNodeNumber());
}
if (rightChild != null) {
os.print("\\n" + "R: N" + rightChild.getNodeNumber());
}
}
os.print("\"]");
os.println("");
}
/**
* Recursively print nodes at a particular depth level in the tree. Useful to group them so they render properly.
* @param os output stream
* @param levelToPrint level number
* @param detail include additional node detail information
*/
void printDotNodesAtLevel(PrintStream os, int levelToPrint, boolean detail, PrintMojo.PrintTreeOptions treeOptions) {
if (getDepth() == levelToPrint) {
printDotNode(os, detail, treeOptions);
return;
}
assert (getDepth() < levelToPrint);
if (leftChild != null) {
leftChild.printDotNodesAtLevel(os, levelToPrint, detail, treeOptions);
}
if (rightChild != null) {
rightChild.printDotNodesAtLevel(os, levelToPrint, detail, treeOptions);
}
}
private void printDotEdgesCommon(PrintStream os, int maxLevelsToPrintPerEdge, ArrayList<String> arr,
SharedTreeNode child, float totalWeight, boolean detail,
PrintMojo.PrintTreeOptions treeOptions) {
if (isBitset() || (!Float.isNaN(splitValue) && treeOptions._internal)) { // Print categorical levels even in case of internal numerical representation
BitSet childInclusiveLevels = child.getInclusiveLevels();
int total = childInclusiveLevels.cardinality();
if ((total > 0) && (total <= maxLevelsToPrintPerEdge)) {
for (int i = childInclusiveLevels.nextSetBit(0); i >= 0; i = childInclusiveLevels.nextSetBit(i+1)) {
arr.add(domainValues[i]);
}
}
else {
arr.add(total + " levels");
}
}
if (detail) {
try {
final int max_width = 15 - 1;
float width = child.getWeight() / totalWeight * max_width;
int intWidth = Math.round(width) + 1;
os.print("penwidth=");
os.print(intWidth);
os.print(",");
} catch (Exception ignore) {
}
}
os.print("fontsize="+treeOptions._fontSize+", label=\"");
for (String s : arr) {
os.print(escapeQuotes(s) + "\n");
}
os.print("\"");
os.println("]");
}
/**
* Recursively print all edges in the tree.
* @param os output stream
* @param maxLevelsToPrintPerEdge Limit the number of individual categorical level names printed per edge
* @param totalWeight total weight of all observations (used to determine edge thickness)
* @param detail include additional edge detail information
*/
void printDotEdges(PrintStream os, int maxLevelsToPrintPerEdge, float totalWeight, boolean detail,
PrintMojo.PrintTreeOptions treeOptions) {
assert (leftChild == null) == (rightChild == null);
if (leftChild != null) {
os.print("\"" + getDotName() + "\"" + " -> " + "\"" + leftChild.getDotName() + "\"" + " [");
ArrayList<String> arr = new ArrayList<>();
if (leftChild.getInclusiveNa()) {
arr.add("[NA]");
}
if (naVsRest) {
arr.add("[Not NA]");
}
else {
if (!isBitset() || (!Float.isNaN(splitValue) && treeOptions._internal)) {
arr.add("<");
}
}
printDotEdgesCommon(os, maxLevelsToPrintPerEdge, arr, leftChild, totalWeight, detail, treeOptions);
}
if (rightChild != null) {
os.print("\"" + getDotName() + "\"" + " -> " + "\"" + rightChild.getDotName() + "\"" + " [");
ArrayList<String> arr = new ArrayList<>();
if (rightChild.getInclusiveNa()) {
arr.add("[NA]");
}
if (! naVsRest) {
if (!isBitset() || (!Float.isNaN(splitValue) && treeOptions._internal)) {
arr.add(">=");
}
}
printDotEdgesCommon(os, maxLevelsToPrintPerEdge, arr, rightChild, totalWeight, detail, treeOptions);
}
}
public Map<String, Object> toJson() {
Map<String, Object> json = new LinkedHashMap<>();
json.put("nodeNumber", nodeNumber);
if (!Float.isNaN(weight)) json.put("weight", weight);
if (isLeaf()) {
if (!Float.isNaN(predValue)) json.put("predValue", predValue);
} else {
json.put("colId", colId);
json.put("colName", colName);
json.put("leftward", leftward);
json.put("isCategorical", isBitset());
json.put("inclusiveNa", inclusiveNa);
if (!Float.isNaN(splitValue)) {
json.put("splitValue", splitValue);
}
}
if (inclusiveLevels != null) {
List<Integer> matchedDomainValues = new ArrayList<>();
for (int i = inclusiveLevels.nextSetBit(0); i >= 0; i = inclusiveLevels.nextSetBit(i+1)) {
matchedDomainValues.add(i);
}
json.put("matchValues", matchedDomainValues);
json.put("inclusiveNa", inclusiveNa);
}
if (!isLeaf()) {
json.put("rightChild", rightChild.toJson());
json.put("leftChild", leftChild.toJson());
}
return json;
}
public SharedTreeNode getParent() {
return parent;
}
public int getSubgraphNumber() {
return subgraphNumber;
}
public String getColName() {
return colName;
}
public boolean isLeftward() {
return leftward;
}
public boolean isNaVsRest() {
return naVsRest;
}
public float getSplitValue() {
return splitValue;
}
public String[] getDomainValues() {
return domainValues;
}
public void setDomainValues(String[] domainValues) {
this.domainValues = domainValues;
}
public GenmodelBitSet getBs() {
return bs;
}
public float getPredValue() {
return predValue;
}
public float getSquaredError() {
return squaredError;
}
public SharedTreeNode getLeftChild() {
return leftChild;
}
public SharedTreeNode getRightChild() {
return rightChild;
}
public boolean isInclusiveNa() {
return inclusiveNa;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
SharedTreeNode that = (SharedTreeNode) o;
return subgraphNumber == that.subgraphNumber &&
nodeNumber == that.nodeNumber &&
Float.compare(that.weight, weight) == 0 &&
depth == that.depth &&
colId == that.colId &&
leftward == that.leftward &&
naVsRest == that.naVsRest &&
Float.compare(that.splitValue, splitValue) == 0 &&
Float.compare(that.predValue, predValue) == 0 &&
Float.compare(that.squaredError, squaredError) == 0 &&
inclusiveNa == that.inclusiveNa &&
Objects.equals(colName, that.colName) &&
Arrays.equals(domainValues, that.domainValues) &&
Objects.equals(leftChild, that.leftChild) &&
Objects.equals(rightChild, that.rightChild) &&
Objects.equals(inclusiveLevels, that.inclusiveLevels);
}
@Override
public int hashCode() {
return Objects.hash(subgraphNumber, nodeNumber);
}
// This is the generic Node API (needed by TreeSHAP)
@Override
public final boolean isLeaf() {
return leftChild == null && rightChild == null;
}
@Override
public final float getLeafValue() {
return predValue;
}
@Override
public final int getSplitIndex() {
return colId;
}
@Override
public final int next(double[] value) {
final double d = value[colId];
if (
Double.isNaN(d) ||
(bs != null && !bs.isInRange((int)d)) ||
(domainValues != null && domainValues.length <= (int) d)
?
!leftward
:
!naVsRest && (bs == null ? d >= splitValue : bs.contains((int)d))
) {
// go RIGHT
return getRightChildIndex();
} else {
// go LEFT
return getLeftChildIndex();
}
}
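// Hypothetical traversal sketch (assumes the root is the first element of the node array
// returned by SharedTreeSubgraph#getNodes(); 'row' contents illustrative):
//   SharedTreeNode[] nodes = tree.getNodes();
//   SharedTreeNode n = nodes[0];
//   while (!n.isLeaf()) n = nodes[n.next(row)]; // follow split decisions to a leaf
//   float leafPrediction = n.getLeafValue();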
@Override
public final int getLeftChildIndex() {
return leftChild != null ? leftChild.internalId : -1;
}
@Override
public final int getRightChildIndex() {
return rightChild != null ? rightChild.internalId : -1;
}
public float getGain(boolean useSquaredErrorForGain) {
if (useSquaredErrorForGain) {
return this.getSquaredError() - this.getRightChild().getSquaredError() - this.getLeftChild().getSquaredError();
} else {
return gain;
}
}
public void setGain(float gain) {
this.gain = gain;
}
public String getDebugId() {
return "#" + getNodeNumber() + "[internalId=" + internalId + "]";
}
}
|