index
int64 | repo_id
string | file_path
string | content
string |
|---|---|---|---|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstCountSubstringsWords.java
|
package water.rapids.ast.prims.string;
import org.apache.commons.io.FileUtils;
import water.MRTask;
import water.fvec.*;
import water.parser.BufferedString;
import water.rapids.Env;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import java.io.File;
import java.io.IOException;
import java.util.HashSet;
/**
 * Accepts a frame of string/categorical columns and a path to a file containing words
 * (one per line). For each cell, counts how many substrings of length &gt;= 2 of the
 * cell's value appear in the word list. Returns one numeric column per input column.
 */
public class AstCountSubstringsWords extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"ary", "words"};
  }

  @Override
  public int nargs() {
    return 1 + 2;
  } // (num_valid_substrings x words)

  @Override
  public String str() {
    return "num_valid_substrings";
  }

  @Override
  public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    Frame fr = stk.track(asts[1].exec(env)).getFrame();
    String wordsPath = asts[2].exec(env).getStr();
    // Type check: report the offending column's type, not the first column's.
    for (Vec v : fr.vecs())
      if (!(v.isCategorical() || v.isString()))
        throw new IllegalArgumentException("num_valid_substrings() requires a string or categorical column. "
            + "Received " + v.get_type_str()
            + ". Please convert column to a string or categorical first.");
    HashSet<String> words;
    try {
      // Read with an explicit charset; the single-argument overload silently
      // uses the platform default, which varies between nodes/OSes.
      words = new HashSet<>(FileUtils.readLines(new File(wordsPath), "UTF-8"));
    } catch (IOException e) {
      throw new RuntimeException("Failed to read words file: " + wordsPath, e);
    }
    // Transform each vec
    Vec nvs[] = new Vec[fr.numCols()];
    int i = 0;
    for (Vec v : fr.vecs()) {
      if (v.isCategorical())
        nvs[i] = countSubstringsWordsCategoricalCol(v, words);
      else
        nvs[i] = countSubstringsWordsStringCol(v, words);
      i++;
    }
    return new ValFrame(new Frame(nvs));
  }

  /**
   * For categoricals the count is computed once per domain level on each node;
   * rows are then resolved by a lookup on their category index.
   */
  private Vec countSubstringsWordsCategoricalCol(Vec vec, final HashSet<String> words) {
    Vec res = new MRTask() {
      transient double[] catCounts; // per-level counts, rebuilt on each node in setupLocal
      @Override
      public void setupLocal() {
        String[] doms = _fr.anyVec().domain();
        catCounts = new double[doms.length];
        for (int i = 0; i < doms.length; i++) catCounts[i] = calcCountSubstringsWords(doms[i], words);
      }
      @Override
      public void map(Chunk chk, NewChunk newChk) {
        // pre-allocate since the size is known
        newChk.alloc_doubles(chk._len);
        for (int i = 0; i < chk._len; i++)
          if (chk.isNA(i))
            newChk.addNA();
          else
            newChk.addNum(catCounts[(int) chk.atd(i)]);
      }
    }.doAll(1, Vec.T_NUM, new Frame(vec)).outputFrame().anyVec();
    return res;
  }

  /** For string columns the count is computed per row. */
  private Vec countSubstringsWordsStringCol(Vec vec, final HashSet<String> words) {
    return new MRTask() {
      @Override
      public void map(Chunk chk, NewChunk newChk) {
        if (chk instanceof C0DChunk) // all NAs
          newChk.addNAs(chk.len());
        else { // UTF requires Java string methods
          BufferedString tmpStr = new BufferedString();
          for (int i = 0; i < chk._len; i++) {
            if (chk.isNA(i))
              newChk.addNA();
            else {
              String str = chk.atStr(tmpStr, i).toString();
              newChk.addNum(calcCountSubstringsWords(str, words));
            }
          }
        }
      }
    }.doAll(new byte[]{Vec.T_NUM}, vec).outputFrame().anyVec();
  }

  /**
   * Counts all substrings of {@code str} of length &gt;= 2 that are contained in
   * {@code words}. Overlapping and duplicate occurrences each count separately.
   * O(n^2) substrings are examined; acceptable since cell values are short.
   */
  private int calcCountSubstringsWords(String str, HashSet<String> words) {
    int wordCount = 0;
    int N = str.length();
    for (int i = 0; i < N - 1; i++)
      for (int j = i + 2; j < N + 1; j++) {
        if (words.contains(str.substring(i, j)))
          wordCount += 1;
      }
    return wordCount;
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstEntropy.java
|
package water.rapids.ast.prims.string;
import water.MRTask;
import water.fvec.*;
import water.parser.BufferedString;
import water.rapids.Env;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import java.util.HashMap;
/**
 * Computes the Shannon entropy (base 2) of the character distribution of each
 * string in the target columns. Returns one numeric column per input column.
 */
public class AstEntropy extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"ary"};
  }

  @Override
  public int nargs() {
    return 1 + 1;
  } // (entropy x)

  @Override
  public String str() {
    return "entropy";
  }

  @Override
  public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    Frame fr = stk.track(asts[1].exec(env)).getFrame();
    // Type check: report the offending column's type, not the first column's.
    for (Vec v : fr.vecs())
      if (!(v.isCategorical() || v.isString()))
        throw new IllegalArgumentException("entropy() requires a string or categorical column. "
            + "Received " + v.get_type_str()
            + ". Please convert column to a string or categorical first.");
    // Transform each vec
    Vec nvs[] = new Vec[fr.numCols()];
    int i = 0;
    for (Vec v : fr.vecs()) {
      if (v.isCategorical())
        nvs[i] = entropyCategoricalCol(v);
      else
        nvs[i] = entropyStringCol(v);
      i++;
    }
    return new ValFrame(new Frame(nvs));
  }

  /**
   * For categoricals the entropy is computed once per domain level on each node;
   * rows are then resolved by a lookup on their category index.
   */
  private Vec entropyCategoricalCol(Vec vec) {
    Vec res = new MRTask() {
      transient double[] catEntropies; // per-level entropies, rebuilt on each node in setupLocal
      @Override
      public void setupLocal() {
        String[] doms = _fr.anyVec().domain();
        catEntropies = new double[doms.length];
        for (int i = 0; i < doms.length; i++) catEntropies[i] = calcEntropy(doms[i]);
      }
      @Override
      public void map(Chunk chk, NewChunk newChk) {
        // pre-allocate since the size is known
        newChk.alloc_doubles(chk._len);
        for (int i = 0; i < chk._len; i++)
          if (chk.isNA(i))
            newChk.addNA();
          else
            newChk.addNum(catEntropies[(int) chk.atd(i)]);
      }
    }.doAll(1, Vec.T_NUM, new Frame(vec)).outputFrame().anyVec();
    return res;
  }

  /** For string columns: ASCII fast path when possible, Java String for UTF. */
  private Vec entropyStringCol(Vec vec) {
    return new MRTask() {
      @Override
      public void map(Chunk chk, NewChunk newChk) {
        if (chk instanceof C0DChunk) // all NAs
          newChk.addNAs(chk.len());
        else if (((CStrChunk) chk)._isAllASCII) // fast-path operations
          ((CStrChunk) chk).asciiEntropy(newChk);
        else { // UTF requires Java string methods
          BufferedString tmpStr = new BufferedString();
          for (int i = 0; i < chk._len; i++) {
            if (chk.isNA(i))
              newChk.addNA();
            else {
              String str = chk.atStr(tmpStr, i).toString();
              newChk.addNum(calcEntropy(str));
            }
          }
        }
      }
    }.doAll(new byte[]{Vec.T_NUM}, vec).outputFrame().anyVec();
  }

  /**
   * Shannon entropy in bits of the character frequencies of {@code str};
   * returns 0 for the empty string.
   */
  private double calcEntropy(String str) {
    HashMap<Character, Integer> freq = new HashMap<>();
    for (int i = 0; i < str.length(); i++) {
      char c = str.charAt(i);
      Integer count = freq.get(c);
      freq.put(c, count == null ? 1 : count + 1);
    }
    double sume = 0;
    int N = str.length();
    // Iterate values directly instead of keySet()+get() (avoids a second lookup).
    for (int count : freq.values()) {
      double n = count;
      sume += -n / N * Math.log(n / N) / Math.log(2);
    }
    return sume;
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstGrep.java
|
package water.rapids.ast.prims.string;
import water.Iced;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.parser.BufferedString;
import water.rapids.Val;
import water.rapids.ast.AstBuiltin;
import water.rapids.vals.ValFrame;
import java.util.Arrays;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Searches for matches to argument "regex" within each element
 * of a string column.
 *
 * Params:
 * - regex regular expression
 * - ignore_case if ignore_case == 1, matching is case insensitive
 * - invert if invert == 1, identifies elements that do not match the regex
 * - output_logical if output_logical == 1, result will be a logical vector, otherwise returns matching positions
 */
public class AstGrep extends AstBuiltin<AstGrep> {
@Override
public String[] args() {
return new String[]{"ary", "regex", "ignore_case", "invert", "output_logical"};
}
@Override
public int nargs() {
return 1 + 5;
} // (grep x regex ignore_case invert output_logical)
@Override
public String str() {
return "grep";
}
// Validates the input (exactly one string/categorical column) and dispatches to
// the categorical fast path (regex applied to the domain once) or the per-row
// string path.
@Override
protected Val exec(Val[] args) {
Frame fr = args[1].getFrame();
String regex = args[2].getStr();
boolean ignoreCase = args[3].getNum() == 1;
boolean invert = args[4].getNum() == 1;
boolean outputLogical = args[5].getNum() == 1;
GrepHelper grepHelper = new GrepHelper(regex, ignoreCase, invert, outputLogical);
if ((fr.numCols() != 1) || ! (fr.anyVec().isCategorical() || fr.anyVec().isString()))
throw new IllegalArgumentException("can only grep on a single categorical/string column.");
Vec v = fr.anyVec();
assert v != null;
Frame result;
if (v.isCategorical()) {
// For categoricals the regex only needs to run over the (small) domain;
// each row is then resolved by looking up its category index.
int[] filtered = grepDomain(grepHelper, v);
Arrays.sort(filtered); // sorted so GrepCatTask can use binary search
result = new GrepCatTask(grepHelper, filtered).doAll(Vec.T_NUM, v).outputFrame();
} else {
result = new GrepStrTask(grepHelper).doAll(Vec.T_NUM, v).outputFrame();
}
return new ValFrame(result);
}
// Returns the indices of the domain levels that match the regex (find(), i.e.
// substring match, consistent with GrepStrTask).
private static int[] grepDomain(GrepHelper grepHelper, Vec v) {
Pattern p = grepHelper.compilePattern();
String[] domain = v.domain();
int cnt = 0;
int[] filtered = new int[domain.length];
for (int i = 0; i < domain.length; i++) {
if (p.matcher(domain[i]).find())
filtered[cnt++] = i;
}
// Trim the scratch array down to the number of actual matches.
int[] result = new int[cnt];
System.arraycopy(filtered, 0, result, 0, cnt);
return result;
}
// Per-chunk task for categorical columns: a row matches iff its category index
// is present in the precomputed, sorted _matchingCats array.
private static class GrepCatTask extends MRTask<GrepCatTask> {
private final int[] _matchingCats;
private final GrepHelper _gh;
GrepCatTask(GrepHelper gh, int[] matchingCats) {
_matchingCats = matchingCats;
_gh = gh;
}
@Override
public void map(Chunk c, NewChunk n) {
OutputWriter w = OutputWriter.makeWriter(_gh, n, c.start());
int rows = c._len;
for (int r = 0; r < rows; r++) {
if (c.isNA(r)) {
w.addNA(r);
} else {
int cat = (int) c.at8(r);
int pos = Arrays.binarySearch(_matchingCats, cat);
w.addRow(r, pos >= 0);
}
}
}
}
// Per-chunk task for string columns: applies the regex to every row value.
private static class GrepStrTask extends MRTask<GrepStrTask> {
private final GrepHelper _gh;
GrepStrTask(GrepHelper gh) {
_gh = gh;
}
@Override
public void map(Chunk c, NewChunk n) {
OutputWriter w = OutputWriter.makeWriter(_gh, n, c.start());
Pattern p = _gh.compilePattern();
Matcher m = p.matcher(""); // single Matcher reused across rows via reset()
BufferedString bs = new BufferedString();
int rows = c._len;
for (int r = 0; r < rows; r++) {
if (c.isNA(r)) {
w.addNA(r);
} else {
m.reset(c.atStr(bs, r).toString());
w.addRow(r, m.find());
}
}
}
}
// Serializable bundle of the grep options; Pattern itself is not serialized,
// so compilePattern() is invoked on each node as needed.
private static class GrepHelper extends Iced<GrepHelper> {
private String _regex;
private boolean _ignoreCase;
private boolean _invert;
private boolean _outputLogical;
public GrepHelper() {}
GrepHelper(String regex, boolean ignoreCase, boolean invert, boolean outputLogical) {
_regex = regex;
_ignoreCase = ignoreCase;
_invert = invert;
_outputLogical = outputLogical;
}
Pattern compilePattern() {
int flags = _ignoreCase ? Pattern.CASE_INSENSITIVE | Pattern.UNICODE_CASE : 0;
return Pattern.compile(_regex, flags);
}
}
// Strategy for emitting results; _invert flips the match decision in both
// concrete writers.
private static abstract class OutputWriter {
static final double MATCH = 1;
static final double NO_MATCH = 0;
NewChunk _nc;
long _start; // global row offset of the chunk (used by PositionWriter)
boolean _invert;
OutputWriter(NewChunk nc, long start, boolean invert) {
_nc = nc;
_start = start;
_invert = invert;
}
abstract void addNA(int row);
abstract void addRow(int row, boolean matched);
static OutputWriter makeWriter(GrepHelper gh, NewChunk nc, long start) {
return gh._outputLogical ? new IndicatorWriter(nc, start, gh._invert) : new PositionWriter(nc, start, gh._invert);
}
}
// Emits one 0/1 flag per row. NA rows are treated as non-matches, so they
// become MATCH when inverted.
private static class IndicatorWriter extends OutputWriter {
IndicatorWriter(NewChunk nc, long start, boolean invert) {
super(nc, start, invert);
}
@Override
void addNA(int row) {
_nc.addNum(_invert ? MATCH : NO_MATCH);
}
@Override
void addRow(int row, boolean matched) {
_nc.addNum(matched != _invert ? MATCH : NO_MATCH);
}
}
// Emits the global row numbers of matching rows only. NA rows count as
// non-matches and are therefore emitted when inverted.
private static class PositionWriter extends OutputWriter {
PositionWriter(NewChunk nc, long start, boolean invert) {
super(nc, start, invert);
}
@Override
void addNA(int row) {
if (_invert)
_nc.addNum(_start + row);
}
@Override
void addRow(int row, boolean matched) {
if (matched != _invert)
_nc.addNum(_start + row);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstLStrip.java
|
package water.rapids.ast.prims.string;
import org.apache.commons.lang.StringUtils;
import water.MRTask;
import water.fvec.*;
import water.parser.BufferedString;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.util.VecUtils;
import java.util.ArrayList;
import java.util.HashMap;
/**
 * Accepts a frame with string/categorical columns and a character set.
 * Returns a new string column containing the lstripped versions of the strings in the target column.
 * Stripping removes all leading characters of each string that are contained in the
 * user-provided set.
 */
public class AstLStrip extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"ary", "set"};
  }

  @Override
  public int nargs() {
    return 1 + 2;
  }

  @Override
  public String str() {
    return "lstrip";
  }

  @Override
  public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    Frame fr = stk.track(asts[1].exec(env)).getFrame();
    String set = asts[2].exec(env).getStr();
    // Type check: name the actual op and report the offending column's type.
    for (Vec v : fr.vecs())
      if (!(v.isCategorical() || v.isString()))
        throw new IllegalArgumentException("lstrip() requires a string or categorical column. "
            + "Received " + v.get_type_str()
            + ". Please convert column to a string or categorical first.");
    // Transform each vec
    Vec nvs[] = new Vec[fr.numCols()];
    int i = 0;
    for (Vec v : fr.vecs()) {
      if (v.isCategorical())
        nvs[i] = lstripCategoricalCol(v, set);
      else
        nvs[i] = lstripStringCol(v, set);
      i++;
    }
    return new ValFrame(new Frame(nvs));
  }

  /**
   * Strips the domain only (cheap for categoricals). If two levels collapse to
   * the same label after stripping, the vec is deduplicated; otherwise a simple
   * domain swap is enough.
   */
  private Vec lstripCategoricalCol(Vec vec, String set) {
    String[] doms = vec.domain().clone();
    HashMap<String, ArrayList<Integer>> strippedToOldDomainIndices = new HashMap<>();
    String stripped;
    for (int i = 0; i < doms.length; i++) {
      stripped = StringUtils.stripStart(doms[i], set);
      doms[i] = stripped;
      if (!strippedToOldDomainIndices.containsKey(stripped)) {
        ArrayList<Integer> val = new ArrayList<>();
        val.add(i);
        strippedToOldDomainIndices.put(stripped, val);
      } else {
        strippedToOldDomainIndices.get(stripped).add(i);
      }
    }
    // Check for duplicated domains
    if (strippedToOldDomainIndices.size() < doms.length)
      return VecUtils.DomainDedupe.domainDeduper(vec, strippedToOldDomainIndices);
    return vec.makeCopy(doms);
  }

  /** Strips string columns row-by-row; ASCII fast path when both chunk and set allow it. */
  private Vec lstripStringCol(Vec vec, String set) {
    final String charSet = set;
    return new MRTask() {
      @Override
      public void map(Chunk chk, NewChunk newChk) {
        if (chk instanceof C0DChunk) // all NAs
          for (int i = 0; i < chk.len(); i++)
            newChk.addNA();
        else if (((CStrChunk) chk)._isAllASCII && StringUtils.isAsciiPrintable(charSet)) { // fast-path operations
          ((CStrChunk) chk).asciiLStrip(newChk, charSet);
        } else {
          BufferedString tmpStr = new BufferedString();
          for (int i = 0; i < chk.len(); i++) {
            if (chk.isNA(i))
              newChk.addNA();
            else
              newChk.addStr(StringUtils.stripStart(chk.atStr(tmpStr, i).toString(), charSet));
          }
        }
      }
    }.doAll(new byte[]{Vec.T_STR}, vec).outputFrame().anyVec();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstRStrip.java
|
package water.rapids.ast.prims.string;
import org.apache.commons.lang.StringUtils;
import water.MRTask;
import water.fvec.*;
import water.parser.BufferedString;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.util.VecUtils;
import java.util.ArrayList;
import java.util.HashMap;
/**
 * Accepts a frame with string/categorical columns and a character set.
 * Returns a new string column containing the rstripped versions of the strings in the target column.
 * Stripping removes all trailing characters of each string that are contained in the
 * user-provided set.
 */
public class AstRStrip extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"ary", "set"};
  }

  @Override
  public int nargs() {
    return 1 + 2;
  }

  @Override
  public String str() {
    return "rstrip";
  }

  @Override
  public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    Frame fr = stk.track(asts[1].exec(env)).getFrame();
    String set = asts[2].exec(env).getStr();
    // Type check: name the actual op and report the offending column's type.
    for (Vec v : fr.vecs())
      if (!(v.isCategorical() || v.isString()))
        throw new IllegalArgumentException("rstrip() requires a string or categorical column. "
            + "Received " + v.get_type_str()
            + ". Please convert column to a string or categorical first.");
    // Transform each vec
    Vec nvs[] = new Vec[fr.numCols()];
    int i = 0;
    for (Vec v : fr.vecs()) {
      if (v.isCategorical())
        nvs[i] = rstripCategoricalCol(v, set);
      else
        nvs[i] = rstripStringCol(v, set);
      i++;
    }
    return new ValFrame(new Frame(nvs));
  }

  /**
   * Strips the domain only (cheap for categoricals). If two levels collapse to
   * the same label after stripping, the vec is deduplicated; otherwise a simple
   * domain swap is enough.
   */
  private Vec rstripCategoricalCol(Vec vec, String set) {
    String[] doms = vec.domain().clone();
    HashMap<String, ArrayList<Integer>> strippedToOldDomainIndices = new HashMap<>();
    String stripped;
    for (int i = 0; i < doms.length; i++) {
      stripped = StringUtils.stripEnd(doms[i], set);
      doms[i] = stripped;
      if (!strippedToOldDomainIndices.containsKey(stripped)) {
        ArrayList<Integer> val = new ArrayList<>();
        val.add(i);
        strippedToOldDomainIndices.put(stripped, val);
      } else {
        strippedToOldDomainIndices.get(stripped).add(i);
      }
    }
    // Check for duplicated domains
    if (strippedToOldDomainIndices.size() < doms.length)
      return VecUtils.DomainDedupe.domainDeduper(vec, strippedToOldDomainIndices);
    return vec.makeCopy(doms);
  }

  /** Strips string columns row-by-row; ASCII fast path when both chunk and set allow it. */
  private Vec rstripStringCol(Vec vec, String set) {
    final String charSet = set;
    return new MRTask() {
      @Override
      public void map(Chunk chk, NewChunk newChk) {
        if (chk instanceof C0DChunk) // all NAs
          for (int i = 0; i < chk.len(); i++)
            newChk.addNA();
        else if (((CStrChunk) chk)._isAllASCII && StringUtils.isAsciiPrintable(charSet)) { // fast-path operations
          ((CStrChunk) chk).asciiRStrip(newChk, charSet);
        } else {
          BufferedString tmpStr = new BufferedString();
          for (int i = 0; i < chk.len(); i++) {
            if (chk.isNA(i))
              newChk.addNA();
            else
              newChk.addStr(StringUtils.stripEnd(chk.atStr(tmpStr, i).toString(), charSet));
          }
        }
      }
    }.doAll(new byte[]{Vec.T_STR}, vec).outputFrame().anyVec();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstReplaceAll.java
|
package water.rapids.ast.prims.string;
import water.Key;
import water.MRTask;
import water.fvec.*;
import water.parser.BufferedString;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.util.VecUtils;
import java.util.*;
import java.util.regex.Pattern;
/**
 * Accepts a frame with a single string column, a regex pattern string, a replacement substring,
 * and a boolean to indicate whether to ignore the case of the target string.
 * Returns a new string column containing the results of the replaceAll method on each string
 * in the target column.
 * <p/>
 * replaceAll - Replaces each substring of this string that matches the given regular expression
 * with the given replacement.
 * <p/>
 * NOTE: when ignore_case is set, matching is implemented by lower-casing the input before
 * applying the pattern, so the produced strings are lower-cased as well.
 */
public class AstReplaceAll extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"ary", "pattern", "replacement", "ignore_case"};
  }

  @Override
  public int nargs() {
    return 1 + 4;
  } // (sub x pattern replacement ignore.case)

  @Override
  public String str() {
    return "replaceall";
  }

  @Override
  public Val apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    final String pattern = asts[2].exec(env).getStr();
    final String replacement = asts[3].exec(env).getStr();
    Frame fr = stk.track(asts[1].exec(env)).getFrame();
    final boolean ignoreCase = asts[4].exec(env).getNum() == 1;
    // Type check: report the offending column's type, not the first column's.
    for (Vec v : fr.vecs())
      if (!(v.isCategorical() || v.isString()))
        throw new IllegalArgumentException("replaceall() requires a string or categorical column. "
            + "Received " + v.get_type_str()
            + ". Please convert column to a string or categorical first.");
    // Transform each vec
    Vec nvs[] = new Vec[fr.numCols()];
    int i = 0;
    for (Vec v : fr.vecs()) {
      if (v.isCategorical())
        nvs[i] = replaceAllCategoricalCol(v, pattern, replacement, ignoreCase);
      else
        nvs[i] = replaceAllStringCol(v, pattern, replacement, ignoreCase);
      i++;
    }
    return new ValFrame(new Frame(nvs));
  }

  /**
   * Applies the replacement to the domain only (cheap for categoricals).
   * If two levels collapse to the same label the vec is remapped; otherwise a
   * simple domain swap is enough.
   */
  private Vec replaceAllCategoricalCol(Vec vec, String pattern, String replacement, boolean ignoreCase) {
    final Pattern compiledPattern = Pattern.compile(pattern); // Compile the pattern once before replacement
    String[] doms = vec.domain().clone();
    Set<String> newDomainSet = new HashSet<>(); // The pattern might create multiple domains with the same name
    for (int i = 0; i < doms.length; ++i) {
      doms[i] = ignoreCase
          ? compiledPattern.matcher(doms[i].toLowerCase(Locale.ENGLISH)).replaceAll(replacement)
          : compiledPattern.matcher(doms[i]).replaceAll(replacement);
      newDomainSet.add(doms[i]);
    }
    if (newDomainSet.size() == doms.length) {
      // Cardinality unchanged: avoid the remapping pass.
      return vec.makeCopy(doms);
    }
    return VecUtils.remapDomain(doms, vec);
  }

  /** Applies the replacement row-by-row for string columns. */
  private Vec replaceAllStringCol(Vec vec, final String pattern, final String replacement, final boolean ignoreCase) {
    return new MRTask() {
      @Override
      public void map(Chunk chk, NewChunk newChk) {
        if (chk instanceof C0DChunk) { // all NAs
          for (int i = 0; i < chk.len(); i++)
            newChk.addNA();
        } else { // UTF requires Java string methods for accuracy
          BufferedString tmpStr = new BufferedString();
          final Pattern compiledPattern = Pattern.compile(pattern); // Compile the pattern once before replacements
          for (int i = 0; i < chk._len; i++) {
            if (chk.isNA(i))
              newChk.addNA();
            else {
              String str = chk.atStr(tmpStr, i).toString();
              newChk.addStr(ignoreCase
                  ? compiledPattern.matcher(str.toLowerCase(Locale.ENGLISH)).replaceAll(replacement)
                  : compiledPattern.matcher(str).replaceAll(replacement));
            }
          }
        }
      }
    }.doAll(new byte[]{Vec.T_STR}, vec).outputFrame().anyVec();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstReplaceFirst.java
|
package water.rapids.ast.prims.string;
import water.MRTask;
import water.fvec.*;
import water.parser.BufferedString;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.util.VecUtils;
import java.util.HashSet;
import java.util.Locale;
import java.util.Set;
import java.util.regex.Pattern;
/**
 * Accepts a frame with a single string column, a regex pattern string, a replacement substring,
 * and a boolean to indicate whether to ignore the case of the target string.
 * Returns a new string column containing the results of the replaceFirst method on each string
 * in the target column.
 * <p/>
 * replaceFirst - Replaces the first substring of this string that matches the given regular
 * expression with the given replacement.
 * <p/>
 * NOTE: when ignore_case is set, matching is implemented by lower-casing the input before
 * applying the pattern, so the produced strings are lower-cased as well.
 */
public class AstReplaceFirst extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"ary", "pattern", "replacement", "ignore_case"};
  }

  @Override
  public int nargs() {
    return 1 + 4;
  } // (sub x pattern replacement ignore.case)

  @Override
  public String str() {
    return "replacefirst";
  }

  @Override
  public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    final String pattern = asts[2].exec(env).getStr();
    final String replacement = asts[3].exec(env).getStr();
    Frame fr = stk.track(asts[1].exec(env)).getFrame();
    final boolean ignoreCase = asts[4].exec(env).getNum() == 1;
    // Type check: report the offending column's type, not the first column's.
    for (Vec v : fr.vecs())
      if (!(v.isCategorical() || v.isString()))
        throw new IllegalArgumentException("replacefirst() requires a string or categorical column. "
            + "Received " + v.get_type_str()
            + ". Please convert column to a string or categorical first.");
    // Transform each vec
    Vec nvs[] = new Vec[fr.numCols()];
    int i = 0;
    for (Vec v : fr.vecs()) {
      if (v.isCategorical())
        nvs[i] = replaceFirstCategoricalCol(v, pattern, replacement, ignoreCase);
      else
        nvs[i] = replaceFirstStringCol(v, pattern, replacement, ignoreCase);
      i++;
    }
    return new ValFrame(new Frame(fr.names(), nvs));
  }

  /**
   * Applies the replacement to the domain only (cheap for categoricals).
   * If two levels collapse to the same label the vec is remapped; otherwise a
   * simple domain swap is enough.
   */
  private Vec replaceFirstCategoricalCol(Vec vec, String pattern, String replacement, boolean ignoreCase) {
    final Pattern compiledPattern = Pattern.compile(pattern); // Compile the pattern once before replacement
    String[] doms = vec.domain().clone();
    Set<String> newDomainSet = new HashSet<>(); // The pattern might create multiple domains with the same name
    for (int i = 0; i < doms.length; ++i) {
      doms[i] = ignoreCase
          ? compiledPattern.matcher(doms[i].toLowerCase(Locale.ENGLISH)).replaceFirst(replacement)
          : compiledPattern.matcher(doms[i]).replaceFirst(replacement);
      newDomainSet.add(doms[i]);
    }
    if (newDomainSet.size() == doms.length) {
      // Cardinality unchanged: avoid the remapping pass.
      return vec.makeCopy(doms);
    }
    return VecUtils.remapDomain(doms, vec);
  }

  /** Applies the replacement row-by-row for string columns. */
  private Vec replaceFirstStringCol(Vec vec, final String pattern, final String replacement, final boolean ignoreCase) {
    return new MRTask() {
      @Override
      public void map(Chunk chk, NewChunk newChk) {
        if (chk instanceof C0DChunk) { // all NAs
          for (int i = 0; i < chk.len(); i++)
            newChk.addNA();
        } else { // UTF requires Java string methods for accuracy
          BufferedString tmpStr = new BufferedString();
          final Pattern compiledPattern = Pattern.compile(pattern); // Compile the pattern once before replacements
          for (int i = 0; i < chk._len; i++) {
            if (chk.isNA(i))
              newChk.addNA();
            else {
              String str = chk.atStr(tmpStr, i).toString();
              newChk.addStr(ignoreCase
                  ? compiledPattern.matcher(str.toLowerCase(Locale.ENGLISH)).replaceFirst(replacement)
                  : compiledPattern.matcher(str).replaceFirst(replacement));
            }
          }
        }
      }
    }.doAll(new byte[]{Vec.T_STR}, vec).outputFrame().anyVec();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstStrDistance.java
|
package water.rapids.ast.prims.string;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.parser.BufferedString;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
import water.util.comparison.string.StringComparator;
import water.util.comparison.string.StringComparatorFactory;
/**
 * Calculates string distances between elements of two frames.
 * The frames must have the same dimensions and string/categorical column types;
 * the result has one numeric column per input column pair.
 */
public class AstStrDistance extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary_x", "ary_y", "measure", "compare_empty"};
}
@Override
public int nargs() {
return 1 + 4;
} // (strDistance x y measure compare_empty)
@Override
public String str() {
return "strDistance";
}
// Validates shapes/types, then runs one MRTask over the concatenation of the
// columns of frX followed by the columns of frY.
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Frame frX = stk.track(asts[1].exec(env)).getFrame();
Frame frY = stk.track(asts[2].exec(env)).getFrame();
String measure = asts[3].exec(env).getStr();
boolean compareEmpty = asts[4].exec(env).getNum() == 1;
if ((frX.numCols() != frY.numCols()) || (frX.numRows() != frY.numRows()))
throw new IllegalArgumentException("strDistance() requires the frames to have the same number of columns and rows.");
for (int i = 0; i < frX.numCols(); i++)
if (! (isCharacterType(frX.vec(i)) && isCharacterType(frY.vec(i))))
throw new IllegalArgumentException("Types of columns of both frames need to be String/Factor");
// make sure that the name of the comparator method is correct and it can be constructed
// (fail fast here rather than inside the distributed task)
StringComparatorFactory.makeComparator(measure);
// Input layout: vecs[0..N-1] are frX's columns, vecs[N..2N-1] are frY's;
// the task emits N numeric output columns.
byte[] outputTypes = new byte[frX.numCols()];
Vec[] vecs = new Vec[frX.numCols() * 2];
for (int i = 0; i < outputTypes.length; i++) {
outputTypes[i] = Vec.T_NUM;
vecs[i] = frX.vec(i);
vecs[i + outputTypes.length] = frY.vec(i);
}
Frame distFr = new StringDistanceComparator(measure, compareEmpty).doAll(outputTypes, vecs).outputFrame();
return new ValFrame(distFr);
}
private static boolean isCharacterType(Vec v) {
return v.get_type() == Vec.T_STR || v.get_type() == Vec.T_CAT;
}
// Per-chunk task: for column pair i, compares cs[i] (from frX) against cs[i+N]
// (from frY) row by row. Emits NA when either side is NA, and also when either
// side is the empty string unless _compareEmpty is set.
private static class StringDistanceComparator extends MRTask<StringDistanceComparator> {
private final String _measure;
private final boolean _compareEmpty;
private StringDistanceComparator(String measure, boolean compareEmpty) {
_measure = measure;
_compareEmpty = compareEmpty;
}
@Override
public void map(Chunk[] cs, NewChunk[] nc) {
BufferedString tmpStr = new BufferedString();
StringComparator cmp = StringComparatorFactory.makeComparator(_measure);
int N = nc.length;
assert N * 2 == cs.length;
for (int i = 0; i < N; i++) {
Chunk cX = cs[i];
String[] domainX = _fr.vec(i).domain();
Chunk cY = cs[i + N];
String[] domainY = _fr.vec(i + N).domain();
for (int row = 0; row < cX._len; row++) {
if (cX.isNA(row) || cY.isNA(row))
nc[i].addNA();
else {
// toString() materializes each value, so sharing one BufferedString is safe
String strX = getString(tmpStr, cX, row, domainX);
String strY = getString(tmpStr, cY, row, domainY);
if (!_compareEmpty && (strX.isEmpty() || strY.isEmpty())) {
nc[i].addNA();
} else {
double dist = cmp.compare(strX, strY);
nc[i].addNum(dist);
}
}
}
}
}
// Categorical rows are decoded through the domain; string rows are read directly.
private static String getString(BufferedString tmpStr, Chunk chk, int row, String[] domain) {
if (domain != null)
return domain[(int) chk.at8(row)];
else
return chk.atStr(tmpStr, row).toString();
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstStrLength.java
|
package water.rapids.ast.prims.string;
import water.MRTask;
import water.fvec.*;
import water.parser.BufferedString;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
/**
 * Accepts a frame with string/categorical columns.
 * Returns a new integer column containing the character count for each string in the target column.
 */
public class AstStrLength extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"ary"};
  }

  @Override
  public int nargs() {
    return 1 + 1;
  }

  @Override
  public String str() {
    return "strlen";
  }

  @Override
  public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    Frame fr = stk.track(asts[1].exec(env)).getFrame();
    // Type check: report the offending column's type, not the first column's.
    for (Vec v : fr.vecs())
      if (!(v.isCategorical() || v.isString()))
        throw new IllegalArgumentException("length() requires a string or categorical column. "
            + "Received " + v.get_type_str()
            + ". Please convert column to a string or categorical first.");
    // Transform each vec
    Vec nvs[] = new Vec[fr.numCols()];
    int i = 0;
    for (Vec v : fr.vecs()) {
      if (v.isCategorical())
        nvs[i] = lengthCategoricalCol(v);
      else
        nvs[i] = lengthStringCol(v);
      i++;
    }
    return new ValFrame(new Frame(nvs));
  }

  /**
   * For categoricals, lengths are computed once per domain level on each node;
   * rows are then resolved by a lookup on their category index.
   */
  private Vec lengthCategoricalCol(Vec vec) {
    Vec res = new MRTask() {
      transient int[] catLengths; // per-level lengths, rebuilt on each node in setupLocal
      @Override
      public void setupLocal() {
        String[] doms = _fr.anyVec().domain();
        catLengths = new int[doms.length];
        for (int i = 0; i < doms.length; ++i) catLengths[i] = doms[i].length();
      }
      @Override
      public void map(Chunk chk, NewChunk newChk) {
        // pre-allocate since the size is known
        newChk.alloc_nums(chk._len);
        for (int i = 0; i < chk._len; i++)
          if (chk.isNA(i))
            newChk.addNA();
          else
            newChk.addNum(catLengths[(int) chk.atd(i)], 0);
      }
    }.doAll(1, Vec.T_NUM, new Frame(vec)).outputFrame().anyVec();
    return res;
  }

  /** For string columns: ASCII fast path when possible, Java String for UTF. */
  private Vec lengthStringCol(Vec vec) {
    return new MRTask() {
      @Override
      public void map(Chunk chk, NewChunk newChk) {
        if (chk instanceof C0DChunk) { // All NAs
          for (int i = 0; i < chk._len; i++)
            newChk.addNA();
        } else if (((CStrChunk) chk)._isAllASCII) { // fast-path operations
          ((CStrChunk) chk).asciiLength(newChk);
        } else { // UTF requires Java string methods for accuracy
          BufferedString tmpStr = new BufferedString();
          for (int i = 0; i < chk._len; i++) {
            if (chk.isNA(i)) newChk.addNA();
            else newChk.addNum(chk.atStr(tmpStr, i).toString().length(), 0);
          }
        }
      }
    }.doAll(new byte[]{Vec.T_NUM}, vec).outputFrame().anyVec();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstStrSplit.java
|
package water.rapids.ast.prims.string;
import water.MRTask;
import water.fvec.*;
import water.parser.BufferedString;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
/**
*/
public class AstStrSplit extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"ary", "split"};
  }
  @Override
  public int nargs() {
    return 1 + 2;
  } // (strsplit x split)
  @Override
  public String str() {
    return "strsplit";
  }
  /**
   * Splits every string/categorical column of the frame on the given regex,
   * producing one output column per split position; rows with fewer splits
   * than the maximum are padded with NAs.
   */
  @Override
  public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    Frame fr = stk.track(asts[1].exec(env)).getFrame();
    String splitRegEx = asts[2].exec(env).getStr();
    // Type check
    for (Vec v : fr.vecs())
      if (!(v.isCategorical() || v.isString()))
        throw new IllegalArgumentException("strsplit() requires a string or categorical column. "
            + "Received " + fr.anyVec().get_type_str()
            + ". Please convert column to a string or categorical first.");
    // Transform each vec; each input column may expand into several output columns.
    ArrayList<Vec> vs = new ArrayList<>(fr.numCols());
    for (Vec v : fr.vecs()) {
      Vec[] splits;
      if (v.isCategorical()) {
        splits = strSplitCategoricalCol(v, splitRegEx);
        for (Vec split : splits) vs.add(split);
      } else {
        splits = strSplitStringCol(v, splitRegEx);
        for (Vec split : splits) vs.add(split);
      }
    }
    return new ValFrame(new Frame(vs.toArray(new Vec[vs.size()])));
  }
  /**
   * Splits a categorical column: the per-position domains are precomputed from
   * the old domain, then each row's level is split and re-encoded against the
   * new domain of its position. Positions beyond a row's split count get NA.
   */
  private Vec[] strSplitCategoricalCol(Vec vec, String splitRegEx) {
    final String[] old_domains = vec.domain();
    final String[][] new_domains = newDomains(old_domains, splitRegEx);
    final String regex = splitRegEx;
    return new MRTask() {
      @Override
      public void map(Chunk[] cs, NewChunk[] ncs) {
        Chunk c = cs[0];
        for (int i = 0; i < c._len; ++i) {
          int cnt = 0;
          if (!c.isNA(i)) {
            int idx = (int) c.at8(i);
            String s = old_domains[idx];
            String[] ss = s.split(regex);
            for (String s1 : ss) {
              // linear search of the new domain for this split position
              int n_idx = Arrays.asList(new_domains[cnt]).indexOf(s1);
              if (n_idx == -1) ncs[cnt++].addNA();
              else ncs[cnt++].addNum(n_idx);
            }
          }
          if (cnt < ncs.length)
            for (; cnt < ncs.length; ++cnt) ncs[cnt].addNA();
        }
      }
    }.doAll(new_domains.length, Vec.T_CAT, new Frame(vec)).outputFrame(null, null, new_domains).vecs();
  }
  // each domain level may split in its own uniq way.
  // hold onto a hashset of domain levels for each "new" column
  private String[][] newDomains(String[] domains, String regex) {
    ArrayList<HashSet<String>> strs = new ArrayList<>();
    // loop over each level in the domain
    HashSet<String> x;
    for (String domain : domains) {
      String[] news = domain.split(regex);
      for (int i = 0; i < news.length; ++i) {
        // we have a "new" column, must add a new HashSet to the array
        // list and start tracking levels for this "i"
        if (strs.size() == i) {
          x = new HashSet<>();
          x.add(news[i]);
          strs.add(x);
        } else {
          // ok not a new column
          // whip out the current set of levels and add the new one
          strs.get(i).add(news[i]);
        }
      }
    }
    return listToArray(strs);
  }
  // Converts the per-position level sets into a per-position String[] domain array.
  private String[][] listToArray(ArrayList<HashSet<String>> strs) {
    String[][] doms = new String[strs.size()][];
    int i = 0;
    for (HashSet<String> h : strs)
      doms[i++] = h.toArray(new String[h.size()]);
    return doms;
  }
  /**
   * Splits a string column. A first pass (CountSplits) determines the maximum
   * number of split pieces; the second pass distributes each row's pieces
   * across that many string columns, padding short rows with NAs.
   */
  private Vec[] strSplitStringCol(Vec vec, final String splitRegEx) {
    final int newColCnt = (new AstStrSplit.CountSplits(splitRegEx)).doAll(vec)._maxSplits;
    return new MRTask() {
      @Override
      public void map(Chunk[] cs, NewChunk[] ncs) {
        Chunk chk = cs[0];
        if (chk instanceof C0DChunk) // all NAs
          for (int row = 0; row < chk.len(); row++)
            for (int col = 0; col < ncs.length; col++)
              ncs[col].addNA();
        else {
          BufferedString tmpStr = new BufferedString();
          for (int row = 0; row < chk._len; ++row) {
            int col = 0;
            if (!chk.isNA(row)) {
              String[] ss = chk.atStr(tmpStr, row).toString().split(splitRegEx);
              for (String s : ss) // distribute strings among new cols
                ncs[col++].addStr(s);
            }
            if (col < ncs.length) // fill remaining cols w/ NA
              for (; col < ncs.length; col++) ncs[col].addNA();
          }
        }
      }
    }.doAll(newColCnt, Vec.T_STR, new Frame(vec)).outputFrame().vecs();
  }
  /**
   * Run through column to figure out the maximum split that
   * any string in the column will need.
   */
  private static class CountSplits extends MRTask<AstStrSplit.CountSplits> {
    // IN
    private final String _regex;
    // OUT
    int _maxSplits = 0;
    CountSplits(String regex) {
      _regex = regex;
    }
    @Override
    public void map(Chunk chk) {
      BufferedString tmpStr = new BufferedString();
      for (int row = 0; row < chk._len; row++) {
        if (!chk.isNA(row)) {
          int split = chk.atStr(tmpStr, row).toString().split(_regex).length;
          if (split > _maxSplits) _maxSplits = split;
        }
      }
    }
    @Override
    public void reduce(AstStrSplit.CountSplits that) {
      // keep the global maximum across all chunks
      if (this._maxSplits < that._maxSplits) this._maxSplits = that._maxSplits;
    }
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstSubstring.java
|
package water.rapids.ast.prims.string;
import water.MRTask;
import water.fvec.*;
import water.parser.BufferedString;
import water.rapids.Env;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.ast.params.AstNumList;
import water.util.VecUtils;
import java.util.ArrayList;
import java.util.HashMap;
/**
*/
public class AstSubstring extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"ary", "startIndex", "endIndex"};
  }
  @Override
  public int nargs() {
    return 1 + 3;
  } // (substring x startIndex endIndex)
  @Override
  public String str() {
    return "substring";
  }
  /**
   * Takes the substring [startIndex, endIndex) of every string/categorical
   * column. A negative startIndex is clamped to 0; an AstNumList in the
   * endIndex position means "to the end of the string" (Integer.MAX_VALUE).
   * Indices past a string's length are clamped to its length.
   */
  @Override
  public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    Frame fr = stk.track(asts[1].exec(env)).getFrame();
    int startIndex = (int) asts[2].exec(env).getNum();
    if (startIndex < 0) startIndex = 0;
    int endIndex = asts[3] instanceof AstNumList ? Integer.MAX_VALUE : (int) asts[3].exec(env).getNum();
    // Type check
    for (Vec v : fr.vecs())
      if (!(v.isCategorical() || v.isString()))
        throw new IllegalArgumentException("substring() requires a string or categorical column. "
            + "Received " + fr.anyVec().get_type_str()
            + ". Please convert column to a string or categorical first.");
    // Transform each vec
    Vec nvs[] = new Vec[fr.numCols()];
    int i = 0;
    for (Vec v : fr.vecs()) {
      if (v.isCategorical())
        nvs[i] = substringCategoricalCol(v, startIndex, endIndex);
      else
        nvs[i] = substringStringCol(v, startIndex, endIndex);
      i++;
    }
    return new ValFrame(new Frame(nvs));
  }
  /**
   * Substring of a categorical column via a domain rewrite. If two old levels
   * shrink to the same substring, the column is deduplicated; an empty range
   * produces a constant column whose single level is "".
   */
  private Vec substringCategoricalCol(Vec vec, int startIndex, int endIndex) {
    if (startIndex >= endIndex) {
      // Empty range: every value maps to the empty string.
      Vec v = Vec.makeZero(vec.length());
      v.setDomain(new String[]{""});
      return v;
    }
    String[] dom = vec.domain().clone();
    // substring -> indices of the old domain levels that collapsed onto it
    HashMap<String, ArrayList<Integer>> substringToOldDomainIndices = new HashMap<>();
    String substr;
    for (int i = 0; i < dom.length; i++) {
      // Clamp both indices to the level's length to avoid out-of-bounds.
      substr = dom[i].substring(startIndex < dom[i].length() ? startIndex : dom[i].length(),
          endIndex < dom[i].length() ? endIndex : dom[i].length());
      dom[i] = substr;
      if (!substringToOldDomainIndices.containsKey(substr)) {
        ArrayList<Integer> val = new ArrayList<>();
        val.add(i);
        substringToOldDomainIndices.put(substr, val);
      } else {
        substringToOldDomainIndices.get(substr).add(i);
      }
    }
    //Check for duplicated domains
    if (substringToOldDomainIndices.size() < dom.length)
      return VecUtils.DomainDedupe.domainDeduper(vec, substringToOldDomainIndices);
    return vec.makeCopy(dom);
  }
  /**
   * Substring of a string column; uses the all-ASCII fast path when possible,
   * otherwise Java's String.substring with indices clamped to each length.
   */
  private Vec substringStringCol(Vec vec, final int startIndex, final int endIndex) {
    return new MRTask() {
      @Override
      public void map(Chunk chk, NewChunk newChk) {
        if (chk instanceof C0DChunk) // all NAs
          for (int i = 0; i < chk.len(); i++)
            newChk.addNA();
        else if (startIndex >= endIndex) {
          // Empty range: every non-NA row becomes the empty string.
          for (int i = 0; i < chk.len(); i++)
            newChk.addStr("");
        } else if (((CStrChunk) chk)._isAllASCII) { // fast-path operations
          ((CStrChunk) chk).asciiSubstring(newChk, startIndex, endIndex);
        } else { //UTF requires Java string methods
          BufferedString tmpStr = new BufferedString();
          for (int i = 0; i < chk._len; i++) {
            if (chk.isNA(i))
              newChk.addNA();
            else {
              String str = chk.atStr(tmpStr, i).toString();
              newChk.addStr(str.substring(startIndex < str.length() ? startIndex : str.length(),
                  endIndex < str.length() ? endIndex : str.length()));
            }
          }
        }
      }
    }.doAll(new byte[]{Vec.T_STR}, vec).outputFrame().anyVec();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstToLower.java
|
package water.rapids.ast.prims.string;
import water.MRTask;
import water.fvec.*;
import water.parser.BufferedString;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import java.util.Locale;
/**
* Accepts a frame with a single string column.
* Returns a new string column containing the results of the toLower method on each string in the
* target column.
* <p/>
* toLower - Converts all of the characters in this String to lower case.
*/
public class AstToLower extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"ary"};
  }

  @Override
  public int nargs() {
    return 1 + 1;
  } //(tolower x)

  @Override
  public String str() {
    return "tolower";
  }

  /**
   * Lower-cases every string/categorical column of the frame and returns the
   * result as a new frame; non-textual columns are rejected up front.
   */
  @Override
  public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    Frame fr = stk.track(asts[1].exec(env)).getFrame();
    // Reject any column that is not textual before doing any work.
    for (Vec v : fr.vecs())
      if (!(v.isCategorical() || v.isString()))
        throw new IllegalArgumentException("tolower() requires a string or categorical column. "
            + "Received " + fr.anyVec().get_type_str()
            + ". Please convert column to a string or categorical first.");
    Vec[] lowered = new Vec[fr.numCols()];
    int col = 0;
    for (Vec v : fr.vecs()) {
      lowered[col] = v.isCategorical() ? toLowerCategoricalCol(v) : toLowerStringCol(v);
      col++;
    }
    return new ValFrame(new Frame(lowered));
  }

  /** Lower-cases a categorical column by rewriting the domain on a copy of the vec. */
  private Vec toLowerCategoricalCol(Vec vec) {
    String[] levels = vec.domain().clone();
    for (int idx = 0; idx < levels.length; idx++)
      levels[idx] = levels[idx].toLowerCase(Locale.ENGLISH);
    return vec.makeCopy(levels);
  }

  /** Lower-cases a string column; all-ASCII chunks take the byte-wise fast path. */
  public static Vec toLowerStringCol(Vec vec) {
    return new MRTask() {
      @Override
      public void map(Chunk chk, NewChunk newChk) {
        if (chk instanceof C0DChunk) { // chunk of nothing but NAs
          for (int row = 0; row < chk.len(); row++)
            newChk.addNA();
          return;
        }
        CStrChunk strChk = (CStrChunk) chk;
        if (strChk._isAllASCII) { // fast path
          strChk.asciiToLower(newChk);
        } else { // UTF needs Java's String methods for correctness
          BufferedString buf = new BufferedString();
          for (int row = 0; row < chk._len; row++) {
            if (chk.isNA(row))
              newChk.addNA();
            else // Locale.ENGLISH keeps the result locale-independent
              newChk.addStr(chk.atStr(buf, row).toString().toLowerCase(Locale.ENGLISH));
          }
        }
      }
    }.doAll(new byte[]{Vec.T_STR}, vec).outputFrame().anyVec();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstToUpper.java
|
package water.rapids.ast.prims.string;
import water.MRTask;
import water.fvec.*;
import water.parser.BufferedString;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import java.util.Locale;
/**
* Accepts a frame with a single string column.
* Returns a new string column containing the results of the toUpper method on each string in the
* target column.
* <p/>
* toUpper - Converts all of the characters in this String to upper case.
*/
public class AstToUpper extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"ary"};
  }

  @Override
  public int nargs() {
    return 1 + 1;
  } //(toupper x)

  @Override
  public String str() {
    return "toupper";
  }

  /**
   * Upper-cases every string/categorical column of the frame and returns the
   * result as a new frame; non-textual columns are rejected up front.
   */
  @Override
  public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    Frame fr = stk.track(asts[1].exec(env)).getFrame();
    // Reject any column that is not textual before doing any work.
    for (Vec v : fr.vecs())
      if (!(v.isCategorical() || v.isString()))
        throw new IllegalArgumentException("toupper() requires a string or categorical column. "
            + "Received " + fr.anyVec().get_type_str()
            + ". Please convert column to a string or categorical first.");
    Vec[] uppered = new Vec[fr.numCols()];
    int col = 0;
    for (Vec v : fr.vecs()) {
      uppered[col] = v.isCategorical() ? toUpperCategoricalCol(v) : toUpperStringCol(v);
      col++;
    }
    return new ValFrame(new Frame(uppered));
  }

  /** Upper-cases a categorical column by rewriting the domain on a copy of the vec. */
  private Vec toUpperCategoricalCol(Vec vec) {
    String[] levels = vec.domain().clone();
    for (int idx = 0; idx < levels.length; idx++)
      levels[idx] = levels[idx].toUpperCase(Locale.ENGLISH);
    return vec.makeCopy(levels);
  }

  /** Upper-cases a string column; all-ASCII chunks take the byte-wise fast path. */
  private Vec toUpperStringCol(Vec vec) {
    return new MRTask() {
      @Override
      public void map(Chunk chk, NewChunk newChk) {
        if (chk instanceof C0DChunk) { // chunk of nothing but NAs
          for (int row = 0; row < chk.len(); row++)
            newChk.addNA();
          return;
        }
        CStrChunk strChk = (CStrChunk) chk;
        if (strChk._isAllASCII) { // fast path
          strChk.asciiToUpper(newChk);
        } else { // UTF needs Java's String methods for correctness
          BufferedString buf = new BufferedString();
          for (int row = 0; row < chk._len; row++) {
            if (chk.isNA(row))
              newChk.addNA();
            else // Locale.ENGLISH keeps the result locale-independent
              newChk.addStr(chk.atStr(buf, row).toString().toUpperCase(Locale.ENGLISH));
          }
        }
      }
    }.doAll(new byte[]{Vec.T_STR}, vec).outputFrame().anyVec();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstTokenize.java
|
package water.rapids.ast.prims.string;
import hex.RegexTokenizer;
import water.MRTask;
import water.fvec.*;
import water.parser.BufferedString;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
/**
 * Tokenizes every string column of the input frame on a regular expression,
 * delegating the actual work to {@link RegexTokenizer}.
 */
public class AstTokenize extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"ary", "regex"};
  }

  @Override
  public int nargs() {
    return 1 + 2;
  } // (tokenize x regex)

  @Override
  public String str() {
    return "tokenize";
  }

  /** Checks all columns are string-typed, then runs the tokenizer transform. */
  @Override
  public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    Frame fr = stk.track(asts[1].exec(env)).getFrame();
    String regex = asts[2].exec(env).getStr();
    for (Vec v : fr.vecs()) {
      if (!v.isString())
        throw new IllegalArgumentException("tokenize() requires all input columns to be of a String type. "
            + "Received " + fr.anyVec().get_type_str() + ". Please convert column to a string column first.");
    }
    return new ValFrame(new RegexTokenizer(regex).transform(fr));
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/string/AstTrim.java
|
package water.rapids.ast.prims.string;
import water.MRTask;
import water.fvec.*;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.util.VecUtils;
import java.util.ArrayList;
import java.util.HashMap;
/**
* Accepts a frame with a single string column.
* Returns a new string column containing the trimmed versions of the strings in the target column.
* Trimming removes all characters of value 0x20 or lower at the beginning and end of the
* target string. Thus this only trims one of the 17 characters UTF considers as a space.
*/
public class AstTrim extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"ary"};
  }

  @Override
  public int nargs() {
    return 1 + 1;
  } // (trim x)

  @Override
  public String str() {
    return "trim";
  }

  /**
   * Trims leading/trailing whitespace from every string/categorical column
   * and returns the result as a new frame.
   */
  @Override
  public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    Frame fr = stk.track(asts[1].exec(env)).getFrame();
    // Reject any column that is not textual before doing any work.
    for (Vec v : fr.vecs())
      if (!(v.isCategorical() || v.isString()))
        throw new IllegalArgumentException("trim() requires a string or categorical column. "
            + "Received " + fr.anyVec().get_type_str()
            + ". Please convert column to a string or categorical first.");
    Vec[] trimmed = new Vec[fr.numCols()];
    int col = 0;
    for (Vec v : fr.vecs()) {
      trimmed[col] = v.isCategorical() ? trimCategoricalCol(v) : trimStringCol(v);
      col++;
    }
    return new ValFrame(new Frame(trimmed));
  }

  /**
   * Trims a categorical column by rewriting its domain. If two old levels trim
   * to the same string, the column is deduplicated via DomainDedupe.
   */
  private Vec trimCategoricalCol(Vec vec) {
    String[] levels = vec.domain().clone();
    // trimmed level -> indices of the old levels that collapsed onto it
    HashMap<String, ArrayList<Integer>> trimmedToOldDomainIndices = new HashMap<>();
    for (int idx = 0; idx < levels.length; idx++) {
      String trimmed = levels[idx].trim();
      levels[idx] = trimmed;
      trimmedToOldDomainIndices
          .computeIfAbsent(trimmed, k -> new ArrayList<Integer>())
          .add(idx);
    }
    // Fewer distinct trimmed levels than old levels means collisions: dedupe.
    if (trimmedToOldDomainIndices.size() < levels.length)
      return VecUtils.DomainDedupe.domainDeduper(vec, trimmedToOldDomainIndices);
    return vec.makeCopy(levels);
  }

  /** Trims a string column row by row. */
  private Vec trimStringCol(Vec vec) {
    return new MRTask() {
      @Override
      public void map(Chunk chk, NewChunk newChk) {
        if (chk instanceof C0DChunk) { // every row is NA
          for (int row = 0; row < chk.len(); row++)
            newChk.addNA();
        } else {
          // Java String.trim() only strips characters <= 0x20, so the
          // ASCII-only chunk routine is sufficient; no UTF-8 path needed.
          ((CStrChunk) chk).asciiTrim(newChk);
        }
      }
    }.doAll(new byte[]{Vec.T_STR}, vec).outputFrame().anyVec();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/testing/AstSetReadForbidden.java
|
package water.rapids.ast.prims.testing;
import water.MRTask;
import water.rapids.Val;
import water.rapids.ast.AstBuiltin;
import water.rapids.vals.ValStr;
import water.testing.SandboxSecurityManager;
/**
* Internal operator that lets user set a given system property on all nodes of H2O cluster.
* It is meant for debugging of running clusters and it is not meant to be directly exposed to users.
*/
public class AstSetReadForbidden extends AstBuiltin<AstSetReadForbidden> {
  @Override
  public String[] args() {
    return new String[]{"forbidden"};
  }

  @Override
  public int nargs() {
    return 1 + 1;
  } // (testing.setreadforbidden forbidden)

  @Override
  public String str() {
    return "testing.setreadforbidden";
  }

  /**
   * Sets (or, when the list is empty, clears) the path prefixes that the
   * sandbox security manager forbids reading, on every node of the cluster.
   *
   * @return comma-separated list of the prefixes that were applied
   */
  @Override
  protected ValStr exec(Val[] args) {
    String[] forbidden = args[1].getStrs();
    if (forbidden.length > 0) {
      new SetForbiddenTask(forbidden).doAllNodes();
    } else {
      new ClearForbiddenTask().doAllNodes();
    }
    return new ValStr(String.join(", ", forbidden));
  }

  /** Installs a SandboxSecurityManager if needed and applies the forbidden prefixes locally. */
  private static class SetForbiddenTask extends MRTask<SetForbiddenTask> {
    private final String[] _forbidden;

    private SetForbiddenTask(String[] forbidden) {
      _forbidden = forbidden;
    }

    @Override
    protected void setupLocal() {
      SecurityManager sm = System.getSecurityManager();
      if (sm == null) {
        System.setSecurityManager(new SandboxSecurityManager());
        sm = System.getSecurityManager();
      }
      if (!(sm instanceof SandboxSecurityManager)) {
        throw new IllegalStateException("Unexpected Security Manager: " + sm);
      }
      ((SandboxSecurityManager) sm).setForbiddenReadPrefixes(_forbidden);
    }
  }

  /** Removes the sandbox security manager on the local node, if one is installed. */
  // Fixed: previously declared as MRTask<SetForbiddenTask>; the MRTask self-type
  // parameter must be the declaring class itself.
  private static class ClearForbiddenTask extends MRTask<ClearForbiddenTask> {
    @Override
    protected void setupLocal() {
      SecurityManager sm = System.getSecurityManager();
      if (sm == null)
        return; // nothing installed, nothing to clear
      if (!(sm instanceof SandboxSecurityManager)) {
        throw new IllegalStateException("Unexpected Security Manager: " + sm);
      }
      System.setSecurityManager(null);
    }
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstAsDate.java
|
package water.rapids.ast.prims.time;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormatter;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.parser.BufferedString;
import water.parser.ParseTime;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
/**
* Convert a String to a Time (msec since Unix Epoch) via a given parse format
*/
public class AstAsDate extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"time", "format"};
  }
  // (as.Date time format)
  @Override
  public int nargs() {
    return 1 + 2;
  }
  @Override
  public String str() {
    return "as.Date";
  }
  /**
   * Parses a single string/categorical column into epoch milliseconds using a
   * strptime-style format string, producing one numeric column.
   */
  @Override
  public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    Frame fr = stk.track(asts[1].exec(env)).getFrame();
    Vec vec = fr.vecs()[0];
    if (fr.vecs().length != 1 || !(vec.isCategorical() || vec.isString()))
      throw new IllegalArgumentException("as.Date requires a single column of factors or strings");
    final String format = asts[2].exec(env).getStr();
    if (format.isEmpty()) throw new IllegalArgumentException("as.Date requires a non-empty format string");
    // check the format string more?
    final String[] dom = vec.domain();
    final boolean isStr = dom == null && vec.isString();
    assert isStr || dom != null : "as.Date error: domain is null, but vec is not String";
    Frame fr2 = new MRTask() {
      // DateTimeFormatter is not serializable as Iced, so build it per node.
      private transient DateTimeFormatter _fmt;
      @Override
      public void setupLocal() {
        _fmt = water.util.ParseTime.forStrptimePattern(format).withZone(ParseTime.getTimezone());
      }
      @Override
      public void map(Chunk c, NewChunk nc) {
        //done on each node in lieu of rewriting DateTimeFormatter as Iced
        String date;
        BufferedString tmpStr = new BufferedString();
        for (int i = 0; i < c._len; ++i) {
          if (!c.isNA(i)) {
            // Strings are read directly; categoricals go through the domain.
            if (isStr) date = c.atStr(tmpStr, i).toString();
            else date = dom[(int) c.at8(i)];
            nc.addNum(DateTime.parse(date, _fmt).getMillis(), 0);
          } else nc.addNA();
        }
      }
    }.doAll(1, Vec.T_NUM, fr).outputFrame(fr._names, null);
    return new ValFrame(fr2);
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstDay.java
|
package water.rapids.ast.prims.time;
import org.joda.time.MutableDateTime;
/**
*/
public class AstDay extends AstTime {
public String str() {
return "day";
}
public long op(MutableDateTime dt) {
return dt.getDayOfMonth();
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstDayOfWeek.java
|
package water.rapids.ast.prims.time;
import org.joda.time.MutableDateTime;
/**
*/
public class AstDayOfWeek extends AstTime {
  // Monday-first ordering; see the trailing comment — it mirrors Joda's
  // getDayOfWeek() numbering, shifted to 0-based in op() below.
  static private final String[][] FACTORS = new String[][]{{"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"}}; // Order comes from Joda
  @Override
  protected String[][] factors() {
    return FACTORS;
  }
  @Override
  public String str() {
    return "dayOfWeek";
  }
  // Joda is 1-based (Mon == 1); subtract 1 to index into FACTORS.
  @Override
  public long op(MutableDateTime dt) {
    return dt.getDayOfWeek() - 1;
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstGetTimeZone.java
|
package water.rapids.ast.prims.time;
import water.parser.ParseTime;
import water.rapids.Env;
import water.rapids.vals.ValStr;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
// Returns the cluster's current parse time zone as a string value.
public class AstGetTimeZone extends AstPrimitive {
  @Override
  public String[] args() {
    return null;
  }
  // (getTimeZone)
  @Override
  public int nargs() {
    return 1;
  }
  @Override
  public String str() {
    return "getTimeZone";
  }
  @Override
  public ValStr apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    return new ValStr(ParseTime.getTimezone().toString());
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstHour.java
|
package water.rapids.ast.prims.time;
import org.joda.time.MutableDateTime;
/**
*/
public class AstHour extends AstTime {
public String str() {
return "hour";
}
public long op(MutableDateTime dt) {
return dt.getHourOfDay();
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstListTimeZones.java
|
package water.rapids.ast.prims.time;
import water.fvec.Frame;
import water.fvec.Vec;
import water.parser.ParseTime;
import water.rapids.Env;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
/**
*/
public class AstListTimeZones extends AstPrimitive {
  @Override
  public String[] args() {
    return null; // takes no arguments
  }

  /* (listTimeZones) */
  @Override
  public int nargs() {
    return 1;
  }

  @Override
  public String str() {
    return "listTimeZones";
  }

  /**
   * Builds a single-column categorical frame whose domain is the list of time
   * zone names known to the parser, one row per zone.
   */
  @Override
  public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    String[] zones = ParseTime.listTimezones().split("\n");
    double[] levelIdx = new double[zones.length];
    for (int row = 0; row < zones.length; row++)
      levelIdx[row] = row; // row i holds categorical level i
    Vec vec = Vec.makeVec(levelIdx, Vec.VectorGroup.VG_LEN1.addVec());
    vec.setDomain(zones);
    return new ValFrame(new Frame(new String[]{"Timezones"}, new Vec[]{vec}));
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstMillis.java
|
package water.rapids.ast.prims.time;
import org.joda.time.MutableDateTime;
/**
*/
public class AstMillis extends AstTime {
public String str() {
return "millis";
}
public long op(MutableDateTime dt) {
return dt.getMillisOfSecond();
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstMinute.java
|
package water.rapids.ast.prims.time;
import org.joda.time.MutableDateTime;
/**
*/
public class AstMinute extends AstTime {
public String str() {
return "minute";
}
public long op(MutableDateTime dt) {
return dt.getMinuteOfHour();
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstMktime.java
|
package water.rapids.ast.prims.time;
import org.joda.time.MutableDateTime;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.vals.ValNum;
import water.rapids.ast.AstExec;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.ast.params.AstId;
/**
* Convert year, month, day, hour, minute, sec, msec to Unix epoch time
*/
@Deprecated // Use {@link AstMoment} instead
public class AstMktime extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"yr", "mo", "dy", "hr", "mi", "se", "ms"};
  }
  /**
   * (mktime yr mo dy hr mi se ms)
   */
  @Override
  public int nargs() {
    return 1 + 7;
  }
  @Override
  public String str() {
    return "mktime";
  }
  /**
   * Each of the seven arguments may be a scalar or a one-column frame. If all
   * are scalars the result is a single number; otherwise scalar arguments are
   * expanded into constant vecs and a whole epoch-msec column is produced.
   * NOTE: month and day arguments are 0-based here (+1 is applied below).
   */
  @Override
  public Val apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    // Seven args, all required. See if any are arrays.
    Frame fs[] = new Frame[nargs() - 1];
    int is[] = new int[nargs() - 1];
    Frame x = null; // Sample frame (for auto-expanding constants)
    for (int i = 1; i < nargs(); i++)
      if (asts[i] instanceof AstId || asts[i] instanceof AstExec)
        fs[i - 1] = x = stk.track(asts[i].exec(env)).getFrame();
      else is[i - 1] = (int) asts[i].exec(env).getNum();
    if (x == null) { // Single point
      long msec = new MutableDateTime(
          is[0], // year
          is[1] + 1, // month
          is[2] + 1, // day
          is[3], // hour
          is[4], // minute
          is[5], // second
          is[6]) // msec
          .getMillis();
      return new ValNum(msec);
    }
    // Make constant Vecs for the constant args. Commonly, they'll all be zero
    Vec vecs[] = new Vec[7];
    for (int i = 0; i < 7; i++) {
      if (fs[i] == null) {
        vecs[i] = x.anyVec().makeCon(is[i]);
      } else {
        if (fs[i].numCols() != 1) throw new IllegalArgumentException("Expect single column");
        vecs[i] = fs[i].anyVec();
      }
    }
    // Convert whole column to epoch msec
    Frame fr2 = new MRTask() {
      @Override
      public void map(Chunk chks[], NewChunk nchks[]) {
        MutableDateTime dt = new MutableDateTime(0);
        NewChunk n = nchks[0];
        int rlen = chks[0]._len;
        for (int r = 0; r < rlen; r++) {
          dt.setDateTime(
              (int) chks[0].at8(r), // year
              (int) chks[1].at8(r) + 1,// month
              (int) chks[2].at8(r) + 1,// day
              (int) chks[3].at8(r), // hour
              (int) chks[4].at8(r), // minute
              (int) chks[5].at8(r), // second
              (int) chks[6].at8(r)); // msec
          n.addNum(dt.getMillis());
        }
      }
    }.doAll(new byte[]{Vec.T_NUM}, vecs).outputFrame(new String[]{"msec"}, null);
    // Clean up the constants: only the vecs this method created are removed.
    for (int i = 0; i < nargs() - 1; i++)
      if (fs[i] == null)
        vecs[i].remove();
    return new ValFrame(fr2);
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstMoment.java
|
package water.rapids.ast.prims.time;
import org.joda.time.Chronology;
import org.joda.time.IllegalFieldValueException;
import org.joda.time.chrono.ISOChronology;
import water.Key;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.rapids.Val;
import water.rapids.ast.AstBuiltin;
import water.rapids.vals.ValFrame;
import water.util.ArrayUtils;
import java.util.ArrayList;
/**
* Convert year, month, day, hour, minute, sec, msec to Unix epoch time
* (in milliseconds).
*
* This is a replacement for {@code AstMktime} class.
*/
public class AstMoment extends AstBuiltin<AstMoment> {
@Override public int nargs() {
return 8;
}
public String[] args() {
return new String[]{"yr", "mo", "dy", "hr", "mi", "se", "ms"};
}
@Override public String str() {
return "moment";
}
@Override
protected ValFrame exec(Val[] args) {
// Parse the input arguments, verifying their validity.
boolean naResult = false;
long numRows = -1;
int[] timeparts = new int[7];
ArrayList<Integer> chunksmap = new ArrayList<>(7);
ArrayList<Vec> timevecs = new ArrayList<>(7);
for (int i = 0; i < 7; i++) {
Val vi = args[i + 1];
if (vi.isFrame()) {
Frame fr = vi.getFrame();
if (fr.numCols() != 1)
throw new IllegalArgumentException("Argument " + i + " is a frame with " + fr.numCols() + " columns");
if (!fr.vec(0).isNumeric())
throw new IllegalArgumentException("Argument " + i + " is not a numeric column");
if (fr.numRows() == 0)
throw new IllegalArgumentException("Column " + i + " has 0 rows");
if (fr.numRows() == 1) {
double d = fr.vec(0).at(0);
if (Double.isNaN(d))
naResult = true;
else
timeparts[i] = (int) d;
} else {
if (numRows == -1)
numRows = fr.numRows();
if (fr.numRows() != numRows)
throw new IllegalArgumentException("Incompatible vec " + i + " having " + fr.numRows() + " rows, whereas " +
"other vecs have " + numRows + " rows.");
timevecs.add(fr.vec(0));
chunksmap.add(i);
}
} else if (vi.isNum()){
double d = vi.getNum();
if (Double.isNaN(d))
naResult = true;
else
timeparts[i] = (int) d;
} else {
throw new IllegalArgumentException("Argument " + i + " is neither a number nor a frame");
}
}
// If all arguments are scalars, return a 1x1 frame
if (timevecs.isEmpty()) {
double val = Double.NaN;
if (!naResult) {
try {
val = ISOChronology.getInstanceUTC().getDateTimeMillis(timeparts[0], timeparts[1], timeparts[2],
timeparts[3], timeparts[4], timeparts[5], timeparts[6]);
} catch (IllegalFieldValueException ignored) {}
}
return make1x1Frame(val);
}
// If the result is all-NAs, make a constant NA vec
if (naResult) {
long n = timevecs.get(0).length();
Vec v = Vec.makeCon(Double.NaN, n, Vec.T_TIME);
Frame fr = new Frame(Key.<Frame>make(), new String[]{"time"}, new Vec[]{v});
return new ValFrame(fr);
}
// Some arguments are vecs -- create a frame of the same size
Vec[] vecs = timevecs.toArray(new Vec[timevecs.size()]);
int[] cm = ArrayUtils.toPrimitive(chunksmap);
Frame fr = new SetTimeTask(timeparts, cm)
.doAll(Vec.T_TIME, vecs)
.outputFrame(new String[]{"time"}, null);
return new ValFrame(fr);
}
/**
 * Builds a single-cell frame holding one time value in a column named "time".
 *
 * @param val milliseconds-since-epoch (or NaN for a missing time)
 * @return a {@link ValFrame} wrapping the 1x1 result
 */
private ValFrame make1x1Frame(double val) {
  Vec timeVec = Vec.makeTimeVec(new double[]{val}, null);
  return new ValFrame(new Frame(new String[]{"time"}, new Vec[]{timeVec}));
}
/**
 * Distributed task that assembles epoch-millis timestamps row-by-row from a mix of
 * constant date parts (in {@code tp}) and per-row parts read from the input chunks.
 * Output is a single time column; any NA in a row's inputs yields an NA result.
 */
private static class SetTimeTask extends MRTask<SetTimeTask> {
private int[] tp;  // constant date parts: [year, month, day, hrs, mins, secs, ms]
private int[] cm;  // chunk index -> position in tp that the chunk's values overwrite
/**
* @param timeparts is the array of [year, month, day, hrs, mins, secs, ms]
* for all constant parts of the date;
* @param chunksmap is a mapping between chunks indices and the timeparts
* array. For example, if {@code chunksmap = [1, 2]},
* then the first chunk describes the "month" part of the
* date, and the second chunk the "day" part.
*/
public SetTimeTask(int[] timeparts, int[] chunksmap) {
tp = timeparts;
cm = chunksmap;
}
@Override public void map(Chunk[] chks, NewChunk nc) {
int nVecs = cm.length;
assert chks.length == nVecs;
// UTC chronology: timestamps are assembled independent of cluster timezone.
Chronology chronology = ISOChronology.getInstanceUTC();
int nChunkRows = chks[0]._len;
// Work on a private copy so the constant parts survive per-row overwrites.
int[] tpl = new int[tp.length];
System.arraycopy(tp, 0, tpl, 0, tp.length);
BYROW:
for (int i = 0; i < nChunkRows; i++) {
for (int j = 0; j < nVecs; j++) {
double d = chks[j].atd(i);
if (Double.isNaN(d)) {
// Any missing part poisons the whole row; emit NA and skip to the next row.
nc.addNum(Double.NaN);
continue BYROW;
}
tpl[cm[j]] = (int) d;
}
try {
double millis = chronology.getDateTimeMillis(tpl[0], tpl[1], tpl[2], tpl[3], tpl[4], tpl[5], tpl[6]);
nc.addNum(millis);
} catch (IllegalFieldValueException e) {
// Out-of-range field (e.g. month 13) => NA rather than task failure.
nc.addNum(Double.NaN);
}
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstMonth.java
|
package water.rapids.ast.prims.time;
import org.joda.time.MutableDateTime;
/**
*/
/**
 * Rapids builtin {@code month}: extracts the month-of-year from a time column.
 */
public class AstMonth extends AstTime {
  public String str() {
    return "month";
  }

  public long op(MutableDateTime dt) {
    return dt.getMonthOfYear();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstSecond.java
|
package water.rapids.ast.prims.time;
import org.joda.time.MutableDateTime;
/**
*/
/**
 * Rapids builtin {@code second}: extracts the second-of-minute from a time column.
 */
public class AstSecond extends AstTime {
  public String str() {
    return "second";
  }

  public long op(MutableDateTime dt) {
    return dt.getSecondOfMinute();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstSetTimeZone.java
|
package water.rapids.ast.prims.time;
import org.joda.time.DateTimeZone;
import water.parser.ParseTime;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValStr;
import java.util.Set;
/**
*/
/**
 * Rapids builtin {@code setTimeZone}: validates a timezone id against Joda's
 * known ids and installs it as the cluster-wide parse/display timezone.
 */
public class AstSetTimeZone extends AstPrimitive {
  @Override
  public String[] args() {
    return new String[]{"tz"};
  }

  // (setTimeZone "TZ")
  @Override
  public int nargs() {
    return 1 + 1;
  }

  @Override
  public String str() {
    return "setTimeZone";
  }

  @Override
  public ValStr apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    final String tz = asts[1].exec(env).getStr();
    // Reject anything Joda does not recognize before touching cluster state.
    if (!DateTimeZone.getAvailableIDs().contains(tz))
      throw new IllegalArgumentException("Unacceptable timezone " + tz + " given. For a list of acceptable names, use listTimezone().");
    //This is a distributed operation
    ParseTime.setTimezone(tz);
    return new ValStr(tz);
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstTime.java
|
package water.rapids.ast.prims.time;
import org.joda.time.MutableDateTime;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.parser.ParseTime;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.vals.ValNum;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
/**
* Basic time accessors; extract hours/days/years/etc from H2O's internal
* msec-since-Unix-epoch time
*/
public abstract class AstTime extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"time"};
}
// (op time)
@Override
public int nargs() {
return 1 + 1;
}
// Override for e.g. month and day-of-week
protected String[][] factors() {
return null;
}
// Subclasses extract one field (year, month, ...) from the mutable date.
public abstract long op(MutableDateTime dt);
// Applies op to a single millis value by reusing the caller-provided MutableDateTime.
private double op(MutableDateTime dt, double d) {
dt.setMillis((long) d);
return op(dt);
}
@Override
public Val apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Val val = asts[1].exec(env);
switch (val.type()) {
case Val.NUM:
// Scalar path: NaN propagates; otherwise interpret the number as epoch millis.
// NOTE(review): this path uses new MutableDateTime(0) with the default chronology,
// while the frame path below uses ParseTime.getTimezone() -- confirm whether the
// scalar result is intended to ignore the cluster timezone.
double d = val.getNum();
return new ValNum(Double.isNaN(d) ? d : op(new MutableDateTime(0), d));
case Val.FRM:
Frame fr = stk.track(val).getFrame();
if (fr.numCols() > 1) throw water.H2O.unimpl();
// Vector path: one MutableDateTime per chunk, reused across rows; NAs stay NA.
return new ValFrame(new MRTask() {
@Override
public void map(Chunk chk, NewChunk cres) {
MutableDateTime mdt = new MutableDateTime(0, ParseTime.getTimezone());
for (int i = 0; i < chk._len; i++)
cres.addNum(chk.isNA(i) ? Double.NaN : op(mdt, chk.at8(i)));
}
}.doAll(1, Vec.T_NUM, fr).outputFrame(fr._names, factors()));
default:
throw water.H2O.fail();
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstWeek.java
|
package water.rapids.ast.prims.time;
import org.joda.time.MutableDateTime;
/**
*/
/**
 * Rapids builtin {@code week}: extracts the week-of-weekyear from a time column.
 */
public class AstWeek extends AstTime {
  public String str() {
    return "week";
  }

  public long op(MutableDateTime dt) {
    return dt.getWeekOfWeekyear();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/time/AstYear.java
|
package water.rapids.ast.prims.time;
import org.joda.time.MutableDateTime;
/**
*/
/**
 * Rapids builtin {@code year}: extracts the year from a time column.
 */
public class AstYear extends AstTime {
  public String str() {
    return "year";
  }

  public long op(MutableDateTime dt) {
    return dt.getYear();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/timeseries/AstDiffLag1.java
|
package water.rapids.ast.prims.timeseries;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.util.ArrayUtils;
/**
* Compute a difference of a time series where lag = 1
*/
public class AstDiffLag1 extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary"};
}
@Override
public int nargs() {
return 1 + 1;
}
@Override
public String str() {
return "difflag1";
}
// Computes x[i] - x[i-1] for a single numeric column; row 0 becomes NA.
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Frame fr = stk.track(asts[1].exec(env).getFrame());
if (fr.numCols() != 1)
throw new IllegalArgumentException("Expected a single column for diff. Got: " + fr.numCols() + " columns.");
if (!fr.anyVec().isNumeric())
throw new IllegalArgumentException("Expected a numeric column for diff. Got: " + fr.anyVec().get_type_str());
// Pre-collect the last element of every chunk so each chunk can compute its
// first row's diff without peeking across chunk boundaries.
final double[] lastElemPerChk = GetLastElemPerChunkTask.get(fr.anyVec());
return new ValFrame(new MRTask() {
@Override
public void map(Chunk c, NewChunk nc) {
// First row of the very first chunk has no predecessor -> NA.
if (c.cidx() == 0) nc.addNA();
else nc.addNum(c.atd(0) - lastElemPerChk[c.cidx() - 1]);
for (int row = 1; row < c._len; ++row)
nc.addNum(c.atd(row) - c.atd(row - 1));
}
}.doAll(fr.types(), fr).outputFrame(fr.names(), fr.domains()));
}
// Gathers the last value of each chunk into a per-chunk-index array.
private static class GetLastElemPerChunkTask extends MRTask<GetLastElemPerChunkTask> {
double[] _res;
GetLastElemPerChunkTask(Vec v) {
_res = new double[v.espc().length];
}
static double[] get(Vec v) {
GetLastElemPerChunkTask t = new GetLastElemPerChunkTask(v);
t.doAll(v);
return t._res;
}
@Override
public void map(Chunk c) {
_res[c.cidx()] = c.atd(c._len - 1);
}
@Override
public void reduce(GetLastElemPerChunkTask t) {
// Each chunk writes a disjoint slot and slots start at 0, so element-wise
// addition merges partial results correctly.
ArrayUtils.add(_res, t._res);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/timeseries/AstIsax.java
|
package water.rapids.ast.prims.timeseries;
import org.apache.commons.math3.distribution.NormalDistribution;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
import water.util.ArrayUtils;
import java.util.ArrayList;
import java.util.Arrays;
/**
* The iSAX algorithm is a time series indexing strategy that reduces the dimensionality of a time series along the time axis.
* For example, if a time series had 1000 unique values with data across 500 rows, reduce this data set to a time series that
* uses 100 unique values, across 10 buckets along the time span.
*
* References:
* http://www.cs.ucr.edu/~eamonn/SAX.pdf
* http://www.cs.ucr.edu/~eamonn/iSAX_2.0.pdf
*
* Note: This approach assumes the frame has the form of TS-i x T where TS-i is a single time series and T is time:
*
* T-1, T-2, T-3, T-4, ... , T-N
* TS-1 ...
* TS-2 ...
* TS-3 ...
* .
* .
* .
* TS-N ...
*
* @author markchan & navdeepgill
*/
public class AstIsax extends AstPrimitive {
// Per-word sorted token domains discovered when optimize_card is on; NaN = unused slot.
protected double[][] _domain_hm = null;
@Override
public String[] args() { return new String[]{"ary", "numWords", "maxCardinality", "optimize_card"}; }
@Override
public int nargs() { return 1 + 4; } // (ary isax numWords maxCardinality optimize_card)
@Override
public String str() { return "isax"; }
// Tokenizes each row (a time series) into numWords SAX symbols of cardinality
// <= maxCardinality and returns a string index column plus the numeric word columns.
@Override
public Val apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Frame fr = stk.track(asts[1].exec(env)).getFrame();
AstRoot n = asts[2];
AstRoot mc = asts[3];
boolean optm_card = asts[4].exec(env).getNum() == 1;
//Check vecs are numeric
for(Vec v : fr.vecs()){
if(!v.isNumeric()){
throw new IllegalArgumentException("iSax only applies to numeric columns!");
}
}
int numWords = (int) n.exec(env).getNum();
int maxCardinality = (int) mc.exec(env).getNum();
//Check numWords and maxCardinality are >=0
if(numWords < 0 ){
throw new IllegalArgumentException("numWords must be greater than 0!");
}
if(maxCardinality < 0 ){
throw new IllegalArgumentException("maxCardinality must be greater than 0!");
}
ArrayList<String> columns = new ArrayList<>();
for (int i = 0; i < numWords; i++) {
columns.add("c"+i);
}
// First pass: z-normalize each series and bucket each word's mean into [0, maxCardinality).
Frame fr2 = new AstIsax.IsaxTask(numWords, maxCardinality)
.doAll(numWords, Vec.T_NUM, fr).outputFrame(null, columns.toArray(new String[numWords]), null);
int[] maxCards = new int[numWords];
if(optm_card) {
_domain_hm = new double[numWords][maxCardinality];
for (double[] r : _domain_hm) Arrays.fill(r,Double.NaN);
// see if we can reduce the cardinality by checking all unique tokens in all series in a word
for (int i=0; i<fr2.numCols(); i++) {
String[] domains = fr2.vec(i).toCategoricalVec().domain();
for (int j = 0; j < domains.length; j++){
_domain_hm[i][j] = Double.valueOf(domains[j]);
}
}
// get the cardinalities of each word
for (int i = 0; i < numWords; i++) {
int cnt = 0;
for (double d : _domain_hm[i]) {
if (Double.isNaN(d)) break;
else cnt++;
}
maxCards[i] = cnt;
}
// Remap tokens to their rank within the reduced per-word domain.
Frame fr2_reduced = new AstIsax.IsaxReduceCard(_domain_hm, maxCardinality).doAll(numWords, Vec.T_NUM, fr2)
.outputFrame(null, columns.toArray(new String[numWords]), null);
Frame fr3 = new AstIsax.IsaxStringTask(maxCards).doAll(1, Vec.T_STR, fr2_reduced)
.outputFrame(null, new String[]{"iSax_index"}, null);
fr2.delete(); //Not needed anymore
fr3.add(fr2_reduced);
return new ValFrame(fr3);
}
// No cardinality optimization: every word keeps the requested max cardinality.
for(int i = 0; i < numWords; ++i){
maxCards[i] = maxCardinality;
}
Frame fr3 = new AstIsax.IsaxStringTask(maxCards).doAll(1, Vec.T_STR, fr2)
.outputFrame(null, new String[]{"iSax_index"}, null);
fr3.add(fr2);
return new ValFrame(fr3);
}
// Maps raw tokens to their index in the (possibly reduced) per-word domain.
public static class IsaxReduceCard extends MRTask<AstIsax.IsaxReduceCard> {
double[][] _domain_hm;
int maxCardinality;
IsaxReduceCard(double[][] dm, int mc) {
_domain_hm = dm;
maxCardinality = mc;
}
@Override
public void map(Chunk cs[], NewChunk nc[]){
for (int i = 0; i<cs.length; i++) {
// A NaN in the domain sum means this word uses fewer than maxCardinality tokens,
// so look the token up by binary search; otherwise the token already is the index.
boolean ltMaxCardFlag = Double.isNaN(ArrayUtils.sum(_domain_hm[i]));
for (int j = 0; j<cs[i].len(); j++) {
int idxOf;
if (ltMaxCardFlag) {
idxOf = Arrays.binarySearch(_domain_hm[i],(int) cs[i].at8(j));
} else {
idxOf = (int) cs[i].at8(j);
}
nc[i].addNum(idxOf);
}
}
}
}
// Formats the per-word tokens into the "token^cardinality_..." iSAX index string.
public static class IsaxStringTask extends MRTask<AstIsax.IsaxStringTask> {
int[] maxCards;
IsaxStringTask(int[] mc) { maxCards = mc; }
@Override
public void map(Chunk cs[], NewChunk nc[]) {
int csize = cs[0].len();
for (int c_i = 0; c_i < csize; c_i++) {
StringBuffer sb = new StringBuffer("");
for (int cs_i = 0; cs_i < cs.length; cs_i++) {
sb.append(cs[cs_i].at8(c_i) + "^" + maxCards[cs_i] + "_");
}
// Drop the trailing '_' separator.
nc[0].addStr(sb.toString().substring(0,sb.length()-1));
}
}
}
// Z-normalizes each row-series and discretizes word means via normal-distribution breakpoints.
public static class IsaxTask extends MRTask<AstIsax.IsaxTask> {
private int nw;
private int mc;
private static NormalDistribution nd = new NormalDistribution();
private ArrayList<Double> probBoundaries; // for tokenizing Sax
IsaxTask(int numWords, int maxCardinality) {
nw = numWords;
mc = maxCardinality;
// come up with NormalDist boundaries
double step = 1.0 / mc;
probBoundaries = new ArrayList<Double>(); //cumulative dist function boundaries R{0-1}
for (int i = 0; i < mc; i++) {
probBoundaries.add(nd.inverseCumulativeProbability(i*step));
}
}
@Override
public void map(Chunk cs[],NewChunk[] nc) {
// NOTE(review): integer division means trailing columns beyond nw*step are
// ignored when cs.length is not a multiple of nw -- confirm this is intended.
int step = cs.length/nw;
int chunkSize = cs[0].len();
int w_i = 0; //word iterator
double[] seriesSums = new double[chunkSize];
double[] seriesCounts = new double[chunkSize];
double[] seriesSSE = new double[chunkSize];
double[][] chunkMeans = new double[chunkSize][nw];
// Loop by words in the time series
for (int i = 0; i < cs.length; i+=step) {
// Loop by each series in the chunk
for (int j = 0; j < chunkSize; j++) {
double mySum = 0.0;
double myCount = 0.0;
// Loop through all the data in the chunk for the given series in the given subset (word)
for (Chunk c : ArrayUtils.subarray(cs,i,i+step)) {
if (c != null) {
// Calculate mean and sigma in one pass
double oldMean = myCount < 1 ? 0.0 : mySum/myCount;
mySum += c.atd(j);
seriesSums[j] += c.atd(j);
myCount++;
seriesCounts[j] += 1;
seriesSSE[j] += (c.atd(j) - oldMean) * (c.atd(j) - mySum/myCount);
}
}
chunkMeans[j][w_i] = mySum / myCount;
}
w_i++;
if (w_i>= nw) break;
}
//
for (int w = 0; w < nw; w++) {
for (int i = 0; i < chunkSize; i++) {
double seriesMean = seriesSums[i] / seriesCounts[i];
double seriesStd = Math.sqrt(seriesSSE[i] / (seriesCounts[i] - 1));
double zscore = (chunkMeans[i][w] - seriesMean) / seriesStd;
// Find the highest breakpoint still below the z-score; caps at mc-1.
int p_i = 0;
while (probBoundaries.get(p_i + 1) < zscore) {
p_i++;
if (p_i == mc - 1) break;
}
nc[w].addNum(p_i,0);
}
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/transforms/H2OBinaryOp.java
|
package water.rapids.transforms;
import water.H2O;
import water.rapids.ast.AstExec;
import water.rapids.ast.params.AstStr;
import java.util.HashMap;
@SuppressWarnings("unused") // called thru reflection
@SuppressWarnings("unused") // called thru reflection
public class H2OBinaryOp extends H2OColOp {
boolean _leftIsCol;  // true when the binary op's left operand is a column reference
boolean _riteIsCol;  // true when the right operand is a column reference
String _binCol; // !=null only if _leftIsCol || _riteIsCol
public boolean getIsLeftColumn() {
return _leftIsCol;
}
public boolean getIsRightColumn() {
return _riteIsCol;
}
// Reports the second source column too, when the op consumes two columns.
@Override
public String[] getOldNames() { return _binCol == null ? new String[]{_oldCol} : new String[]{_oldCol, _binCol}; }
// Rapids operator symbol -> GenMunger method name used in generated code.
private static final HashMap<String,String> binaryOps = new HashMap<>();
static {
binaryOps.put("+", "plus");
binaryOps.put("-", "minus");
binaryOps.put("*", "multiply");
binaryOps.put("/", "divide");
binaryOps.put("<", "lessThan");
binaryOps.put("<=","lessThanEquals");
binaryOps.put(">", "greaterThan");
binaryOps.put(">=","greaterThanEquals");
binaryOps.put("==", "equals");
binaryOps.put("!=", "notEquals");
binaryOps.put("^", "pow");
binaryOps.put("%", "mod");
binaryOps.put("%%", "mod");
binaryOps.put("&", "and");
binaryOps.put("&&", "and");
binaryOps.put("|", "or");
binaryOps.put("||", "or");
binaryOps.put("intDiv", "intDiv");
binaryOps.put("strDistance", "strDistance");
}
public H2OBinaryOp(String name, String ast, boolean inplace, String[] newNames) {
super(name, ast, inplace, newNames);
}
// Detects whether the i-th argument is a column expression and records which side it is on.
@Override protected void setupParamsImpl(int i, String[] args) {
if( _ast._asts[i+1] instanceof AstExec) {
if( !isBinaryOp(_fun) ) throw H2O.unimpl("unimpl: " + lookup(_fun));
if (args[i].equals("leftArg") || args[i].equals("ary_x")) {
_leftIsCol = true;
} else if (args[i].equals("rightArg") || args[i].equals("ary_y")) {
_riteIsCol = true;
}
_binCol = ((AstExec)_ast._asts[i+1])._asts[2].str();
_params.put(args[i], new AstStr(((AstExec) _ast._asts[i + 1])._asts[2].str()));
} else super.setupParamsImpl(i,args);
}
@Override protected String lookup(String op) { return binaryOps.get(op)==null?op:binaryOps.get(op); }
@Override protected boolean paramIsRow() { return _leftIsCol || _riteIsCol; }
// Emits code that rebinds the column-side argument from the current row at scoring time.
@Override protected String addRowParam() {
return " _params.put(\""+ (_riteIsCol?"rightArg":"leftArg") + "\", " +
"new String[]{String.valueOf(row.get(\"" +_binCol+ "\"))}); // write over the previous value\n";
}
private static boolean isBinaryOp(String op) { return binaryOps.get(op)!=null; }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/transforms/H2OColOp.java
|
package water.rapids.transforms;
import org.apache.commons.lang.ArrayUtils;
import water.DKV;
import water.H2O;
import water.fvec.Frame;
import water.rapids.*;
import water.rapids.ast.AstExec;
import water.rapids.ast.AstParameter;
import water.rapids.ast.AstRoot;
import water.rapids.ast.params.AstId;
import water.rapids.ast.prims.mungers.AstColPySlice;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.Queue;
// Transform that applies a single Rapids column operation to a frame and can
// generate equivalent standalone Java scoring code for it.
public class H2OColOp extends Transform<H2OColOp> {
private static final String FRAME_ID_PLACEHOLDER = "dummy";
protected final String _fun;  // Rapids operator name, e.g. "cos" or "+"
protected String _oldCol;     // source column name extracted from the AST
private String[] _newCol;     // output column name(s), resolved during transform
private String _newJavaColTypes;
private String _newColTypes;
boolean _multiColReturn;      // true when the op produced more than one column
@Override
public String[] getNewNames() { return _newCol; }
@Override
public String[] getNewTypes() {
String[] result = new String[_newCol.length == 0 ? 1 : _newCol.length];
Arrays.fill(result, _newColTypes);
return result;
}
public String[] getOldNames() { return new String[]{_oldCol}; }
public H2OColOp(String name, String ast, boolean inplace, String[] newNames) { // (op (cols fr cols) {extra_args})
super(name,ast,inplace,newNames);
_fun = _ast._asts[0].str();
_oldCol = null;
// The first nested AstExec holds the column-selection sub-expression.
for(int i=1; i<_ast._asts.length; ++i) {
if (_ast._asts[i] instanceof AstExec) {
_oldCol = findOldName((AstExec)_ast._asts[i]);
break;
}
}
setupParams();
}
private void setupParams() {
String[] args = _ast.getArgs();
if( args!=null && args.length > 1 ) { // first arg is the frame
for(int i=0; i < args.length; ++i)
setupParamsImpl(i,args);
}
}
// Records plain (non-column) parameters; overridden by H2OBinaryOp for column args.
protected void setupParamsImpl(int i, String[] args) {
if (_ast._asts[i + 1] instanceof AstParameter) {
_params.put(args[i], (AstParameter) _ast._asts[i + 1]);
}
}
@Override public Transform<H2OColOp> fit(Frame f) { return this; }
// Executes the AST against f, then splices the result column(s) into f
// (replacing the source column when _inplace).
@Override protected Frame transformImpl(Frame f) {
substitutePlaceholders(_ast, f);
Session ses = new Session();
Frame fr = ses.exec(_ast, null).getFrame();
_newCol = _newNames==null?new String[fr.numCols()]:_newNames;
_newColTypes = fr.anyVec().get_type_str();
_newJavaColTypes = toJavaPrimitive(_newColTypes);
if( (_multiColReturn=fr.numCols() > 1) ) {
for(int i=0;i<_newCol.length;i++) {
if(_newNames==null) _newCol[i] = f.uniquify(i > 0 ? _newCol[i - 1] : _oldCol);
f.add(_newCol[i], fr.vec(i));
}
if( _inplace ) f.remove(f.find(_oldCol)).remove();
} else {
_newCol = _newNames==null?new String[]{_inplace ? _oldCol : f.uniquify(_oldCol)}:_newCol;
if( _inplace ) f.replace(f.find(_oldCol), fr.anyVec()).remove();
else f.add(_newNames == null ? _newCol[0] : _newNames[0], fr.anyVec());
}
DKV.put(f);
return f;
}
// Rewrites every AstId equal to FRAME_ID_PLACEHOLDER into a reference to f,
// walking the AST breadth-first.
private void substitutePlaceholders(AstExec root, Frame f) {
Queue<AstExec> execs = new LinkedList<>();
execs.add(root);
while (! execs.isEmpty()) {
AstExec exec = execs.poll();
for (int i = 1; i < exec._asts.length; i++) {
AstRoot<?> ast = exec._asts[i];
if (ast instanceof AstExec)
execs.add((AstExec) ast);
else if (ast instanceof AstId) {
AstId id = (AstId) ast;
if (FRAME_ID_PLACEHOLDER.equals(id.str()))
exec._asts[i] = new AstId(f);
}
}
}
}
// Breadth-first search for the (cols_py dummy <oldName>) sub-expression; returns
// the selected column name, or null if no such pattern exists.
private static String findOldName(AstExec root) {
AstColPySlice py = new AstColPySlice();
Queue<AstExec> execs = new LinkedList<>();
execs.add(root);
String oldName = null;
while (! execs.isEmpty()) {
AstExec exec = execs.poll();
// (cols_py dummy <oldName>)
if (exec._asts.length == 3 && py.str().equals(exec._asts[0].str()) && FRAME_ID_PLACEHOLDER.equals(exec._asts[1].str())) {
oldName = exec._asts[2].str();
break;
}
for (int i = 1; i < exec._asts.length; i++) {
AstRoot<?> ast = exec._asts[i];
if (ast instanceof AstExec)
execs.add((AstExec) ast);
}
}
return oldName;
}
@Override Frame inverseTransform(Frame f) { throw H2O.unimpl(); }
// Generates the Java transform(RowData) method body for standalone scoring.
@Override public String genClassImpl() {
final int typeId = ArrayUtils.indexOf(_inNames, _oldCol);
if (typeId < 0)
throw new IllegalStateException("Unknown column " + _oldCol + " (known: " + Arrays.toString(_inNames));
String typeCast = _inTypes[typeId].equals("Numeric")?"Double":"String";
if( _multiColReturn ) {
StringBuilder sb = new StringBuilder(
" @Override public RowData transform(RowData row) {\n"+
(paramIsRow() ? addRowParam() : "") +
" "+_newJavaColTypes+"[] res = GenMunger."+lookup(_fun)+"(("+typeCast+")row.get(\""+_oldCol+"\"), _params);\n");
for(int i=0;i<_newCol.length;i++)
sb.append(
" row.put(\""+_newCol[i]+"\",("+i+">=res.length)?\"\":res["+i+"]);\n");
sb.append(
" return row;\n" +
" }\n");
return sb.toString();
} else {
return " @Override public RowData transform(RowData row) {\n"+
(paramIsRow() ? addRowParam() : "") +
" "+_newJavaColTypes+" res = GenMunger."+lookup(_fun)+"(("+typeCast+")row.get(\""+_oldCol+"\"), _params);\n"+
" row.put(\""+_newCol[0]+"\", res);\n" +
" return row;\n" +
" }\n";
}
}
protected boolean paramIsRow() { return false; }
protected String addRowParam() { return ""; }
protected String lookup(String op) { return op.replaceAll("\\.",""); }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/transforms/H2OColSelect.java
|
package water.rapids.transforms;
import water.DKV;
import water.H2O;
import water.Key;
import water.Scope;
import water.fvec.Frame;
import water.rapids.*;
import water.rapids.ast.AstParameter;
import water.rapids.ast.AstRoot;
import water.rapids.ast.params.AstId;
import water.rapids.ast.params.AstStr;
import water.rapids.ast.params.AstStrList;
/**
 * Transform that selects a subset of columns from a frame and can generate
 * equivalent standalone Java scoring code for the selection.
 */
public class H2OColSelect extends Transform<H2OColSelect> {
  // Selected column names; null when the AST selects by something other than
  // a string / string-list (e.g. numeric indices), which codegen cannot handle.
  private final String[] _cols;

  // not a public constructor -- used by the REST api only; must be public for stupid java.lang.reflect
  public H2OColSelect(String name, String ast, boolean inplace, String[] newNames) {
    super(name, ast, inplace, newNames);
    AstParameter cols = ((AstParameter) _ast._asts[2]);
    if (cols instanceof AstStrList) _cols = ((AstStrList) cols)._strs;
    else if (cols instanceof AstStr) _cols = new String[]{((AstStr) cols).getStr()};
    else _cols = null;
  }

  @Override public Transform<H2OColSelect> fit(Frame f) { return this; }

  /** Executes the selection AST against f and tracks/publishes the result frame. */
  @Override protected Frame transformImpl(Frame f) {
    _ast._asts[1] = new AstId(f);
    Session ses = new Session();
    Frame fr = ses.exec(_ast, null).getFrame();
    // Session results may be anonymous; give the frame a stable key before DKV.put.
    if (fr._key == null) fr = new Frame(Key.<Frame>make("H2OColSelect_" + f._key.toString()), fr.names(), fr.vecs());
    Scope.track(fr);
    DKV.put(fr);
    return fr;
  }

  @Override Frame inverseTransform(Frame f) { throw H2O.unimpl(); }

  /**
   * Generates the Java transform(RowData) method that copies the selected columns.
   *
   * @throws IllegalStateException if the column selection was not expressed as
   *         names (previously this failed with a bare NullPointerException).
   */
  public String genClassImpl() {
    if (_cols == null)
      throw new IllegalStateException("Cannot generate code for " + name() + ": column selection is not given by name");
    StringBuilder sb = new StringBuilder();
    sb.append("  @Override public RowData transform(RowData row) {\n");
    sb.append("    RowData colSelect = new RowData();\n");
    for (String s : _cols)
      sb.append("    colSelect.put(\"" + s + "\", row.get(\"" + s + "\"));\n");
    sb.append("    return colSelect;\n").append("  }\n");
    return sb.toString();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/transforms/H2OScaler.java
|
package water.rapids.transforms;
import hex.genmodel.GenMunger;
import water.H2O;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.util.ArrayUtils;
// Transform that standardizes numeric columns: (x - mean) / sigma, with the
// per-column statistics captured at fit time.
public class H2OScaler extends Transform<H2OScaler> {
double[] means;  // per-column means captured by fit()
double[] sdevs;  // per-column standard deviations captured by fit()
H2OScaler(String name, String ast, boolean inplace, String[] newNames) { super(name,ast,inplace,newNames); }
@Override public Transform<H2OScaler> fit(Frame f) {
means = new double[f.numCols()];
sdevs = new double[f.numCols()];
for(int i=0;i<f.numCols();++i) {
means[i] = f.vec(i).mean();
sdevs[i] = f.vec(i).sigma();
}
return this;
}
// TODO: handle Categorical, String, NA
@Override protected Frame transformImpl(Frame f) {
final double[] fmeans = means;
// NOTE(review): a constant column has sigma == 0; confirm ArrayUtils.invert's
// behavior for zero entries (otherwise scaled values become Inf/NaN).
final double[] fmults = ArrayUtils.invert(sdevs);
return new MRTask() {
@Override public void map(Chunk[] cs, NewChunk[] ncs) {
double[] in = new double[cs.length];
for(int row=0; row<cs[0]._len; row++) {
for(int col=0; col<cs.length; col++)
in[col] = cs[col].atd(row);
// Same scaling routine the generated scoring code uses, for train/score parity.
GenMunger.scaleInPlace(fmeans, fmults, in);
for(int col=0; col<ncs.length; col++)
ncs[col].addNum(in[col]);
}
}
}.doAll(f.numCols(), Vec.T_NUM, f).outputFrame(f.names(), f.domains());
}
@Override Frame inverseTransform(Frame f) { throw H2O.unimpl(); }
@Override public String genClassImpl() {
throw H2O.unimpl();
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/transforms/Transform.java
|
package water.rapids.transforms;
import water.Iced;
import water.fvec.Frame;
import water.rapids.ast.AstExec;
import water.rapids.ast.AstParameter;
import water.rapids.Rapids;
import water.util.IcedHashMap;
import water.util.SB;
// Base class for fit/transform munging steps that can also emit standalone
// Java scoring code (via genClass) reproducing the transformation.
public abstract class Transform<T> extends Iced {
protected final String _name;
protected final AstExec _ast;       // parsed Rapids expression this step executes
protected final boolean _inplace;   // whether the result replaces the source column
protected final String[] _newNames; // caller-requested output names, or null
protected final IcedHashMap<String,AstParameter> _params;  // extra op parameters
protected String[] _inNames;   // captured from the input frame on transform()
protected String[] _inTypes;
protected String[] _outTypes;  // captured from the output frame on transform()
protected String[] _outNames;
public String[] getInputNames() { return _inNames; }
public String[] getInputTypes() { return _inTypes; }
public String[] getOutputNames() { return _outNames; }
public String[] getOutputTypes(){ return _outTypes; }
public String[] getNewNames() { return new String[0]; }
public String[] getNewTypes() { return new String[0]; }
public AstExec getAst() { return _ast; }
public boolean isInPlace() { return _inplace; }
public IcedHashMap<String,AstParameter> getParams() { return _params; }
Transform(String name, String ast, boolean inplace, String[] newNames) {
_name=name;
_ast = (AstExec) Rapids.parse(ast);
_inplace = inplace;
_newNames = newNames;
_params = new IcedHashMap<>();
}
public String name() { return _name; }
public abstract Transform<T> fit(Frame f);
// Template method: records input/output schema around the subclass's transformImpl.
public Frame transform(Frame f) {
_inNames = f.names();
_inTypes = f.typesStr();
Frame ff = transformImpl(f);
_outTypes= ff.typesStr();
_outNames= ff.names();
return ff;
}
protected abstract Frame transformImpl(Frame f);
abstract Frame inverseTransform(Frame f);
public Frame fitTransform(Frame f) { return fit(f).transform(f); }
// Subclasses emit the body of the generated transform(RowData) method.
public abstract String genClassImpl();
// Emits a complete generated Step subclass: constructor with schema + params,
// followed by the subclass-specific transform body.
public StringBuilder genClass() {
String stepName = name();
StringBuilder sb = new StringBuilder();
sb.append("  class " + stepName + " extends Step<" + stepName + "> {\n");
sb.append("    public " + stepName + "() { super(new String[]{" + toJavaString(_inNames) +"},\n");
sb.append("                          new String[]{" + toJavaString(_inTypes) + "}," +
"                          new String[]{" + toJavaString(_outNames) +"});\n");
for (String k : _params.keySet()) {
String v = _params.get(k).toJavaString();
sb.append(
"      _params.put(\""+k+"\", new String[]{"+v.replace("\\","\\\\")+"});\n"
);
}
sb.append("    }\n");
return sb.append(genClassImpl()).append("  }\n");
}
// Renders a string array as a comma-separated list of quoted Java literals;
// null/empty arrays render as the literal "null" string.
private static String toJavaString(String[] strs) {
if( strs==null || strs.length==0 ) return "\"null\"";
SB sb = new SB();
for(int i=0;i<strs.length;++i) {
sb.p("\"").p(strs[i]).p("\"");
if( i==strs.length-1) return sb.toString();
sb.p(',');
}
throw new RuntimeException("Should never be here");
}
// Maps an H2O vec type name to the Java primitive used in generated code.
protected static String toJavaPrimitive(String vecType) {
if( vecType.equals("String") || vecType.equals("Enum") ) return "String";
return "double";
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/vals/ValFrame.java
|
package water.rapids.vals;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Val;
/**
* Value that represents an H2O dataframe ({@link Frame}).
*/
/**
 * Value that represents an H2O dataframe ({@link Frame}).
 */
public class ValFrame extends Val {
  private final Frame _fr;

  public ValFrame(Frame fr) {
    assert fr != null : "Cannot construct a Frame from null";
    _fr = fr;
  }

  @Override public int type() { return FRM; }
  @Override public boolean isFrame() { return true; }
  @Override public Frame getFrame() { return _fr; }
  @Override public String toString() { return _fr.toString(); }

  /**
   * Extract row from a single-row frame.
   * @return Array of row elements.
   */
  @Override public double[] getRow() {
    if (_fr.numRows() != 1)
      throw new IllegalArgumentException("Trying to get a single row from a multirow frame: " + _fr.numRows() + "!=1");
    int ncols = _fr.numCols();
    double[] row = new double[ncols];
    for (int col = 0; col < ncols; col++)
      row[col] = _fr.vec(col).at(0);
    return row;
  }

  /** Builds a single-row frame whose columns are the given constant values. */
  public static ValFrame fromRow(double... values) {
    Vec[] vecs = new Vec[values.length];
    for (int i = 0; i < values.length; i++)
      vecs[i] = Vec.makeCon(values[i], 1);
    return new ValFrame(new Frame(vecs));
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/vals/ValFun.java
|
package water.rapids.vals;
import water.rapids.Val;
import water.rapids.ast.AstPrimitive;
/**
* A Rapids function
*/
/**
 * A Rapids function
 */
public class ValFun extends Val {
  private final AstPrimitive _fn;

  public ValFun(AstPrimitive ast) {
    _fn = ast;
  }

  @Override public int type() { return FUN; }
  @Override public boolean isFun() { return true; }
  @Override public AstPrimitive getFun() { return _fn; }
  @Override public String toString() { return _fn.toString(); }

  /** Argument names declared by the wrapped primitive. */
  public String[] getArgs() {
    return _fn.args();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/vals/ValKeyed.java
|
package water.rapids.vals;
import water.Keyed;
import water.rapids.Val;
/** Value wrapping an arbitrary DKV-keyed object. */
public class ValKeyed extends Val {
  private final Keyed _k;

  public ValKeyed(Keyed k) {
    assert k != null : "Cannot construct a Keyed from null";
    _k = k;
  }

  @Override public int type() { return KEYED; }
  @Override public boolean isKeyed() { return true; }
  @Override public Keyed getKeyed() { return _k; }
  @Override public String toString() { return _k.toString(); }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/vals/ValMapFrame.java
|
package water.rapids.vals;
import water.fvec.Frame;
import water.rapids.Val;
import java.util.Map;
/**
* Value that represents a map of Frames.
*/
/**
 * Value that represents a map of Frames.
 */
public class ValMapFrame extends Val {
  private final Map<String, Frame> _map;

  /**
   * @param fr non-null map of frames keyed by name
   */
  public ValMapFrame(Map<String, Frame> fr) {
    // Match the non-null contract of the sibling wrappers (ValFrame/ValKeyed/ValModel).
    assert fr != null : "Cannot construct a MapFrame from null";
    _map = fr;
  }

  @Override public int type() { return MFRM; }
  @Override public boolean isMapFrame() { return true; }
  @Override public Map<String, Frame> getMapFrame() { return _map; }
  @Override public String toString() { return "ValMapFrame"; }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/vals/ValModel.java
|
package water.rapids.vals;
import hex.Model;
import water.rapids.Val;
/** Value wrapping a built H2O model. */
public class ValModel extends Val {
  private final Model _m;

  public ValModel(Model m) {
    assert m != null : "Cannot construct a Model from null";
    _m = m;
  }

  @Override public int type() { return MOD; }
  @Override public boolean isModel() { return true; }
  @Override public Model getModel() { return _m; }
  @Override public String toString() { return _m.toString(); }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/vals/ValNum.java
|
package water.rapids.vals;
import water.rapids.Val;
/**
 * Numeric value. We do not distinguish between integers and floating point numbers.
 */
public class ValNum extends Val {
  private double _d;

  public ValNum(double d) {
    _d = d;
  }

  @Override
  public int type() {
    return NUM;
  }

  @Override
  public boolean isNum() {
    return true;
  }

  @Override
  public double getNum() {
    return _d;
  }

  @Override
  public String getStr() {
    // NaN converts to a missing string (null); everything else defers to the base class.
    if (Double.isNaN(_d))
      return null;
    return super.getStr();
  }

  @Override
  public String toString() {
    return String.valueOf(_d);
  }

  /** Replaces the stored numeric value in place. */
  public void setNum(double d) {
    _d = d;
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/vals/ValNums.java
|
package water.rapids.vals;
import water.rapids.Val;
import java.util.Arrays;
/**
 * Array of numbers
 */
public class ValNums extends Val {
  private final double[] _ds;

  public ValNums(double[] ds) {
    _ds = ds;
  }

  @Override
  public int type() {
    return NUMS;
  }

  @Override
  public boolean isNums() {
    return true;
  }

  @Override
  public double[] getNums() {
    return _ds;
  }

  @Override
  public String getStr() {
    // An empty value has no single-string representation.
    return isEmpty() ? null : super.getStr();
  }

  @Override
  public String toString() {
    return Arrays.toString(_ds);
  }

  @Override
  public String[] getStrs() {
    // A zero-length numeric array converts to a zero-length string array.
    return _ds.length == 0 ? new String[0] : super.getStrs();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/vals/ValRow.java
|
package water.rapids.vals;
import water.rapids.Val;
import java.util.Arrays;
/**
 * Row (array) of numbers.
 */
public class ValRow extends Val {
  private final double[] _ds;
  private final String[] _names;

  public ValRow(double[] ds, String[] names) {
    _ds = ds;
    _names = names;
    // Only validate when both arrays are present; either may legitimately be null.
    boolean bothPresent = ds != null && names != null;
    if (bothPresent && ds.length != names.length)
      throw new IllegalArgumentException("Lengths of data and names mismatch: " +
              Arrays.toString(ds) + " and " + Arrays.toString(names));
  }

  @Override
  public int type() {
    return ROW;
  }

  @Override
  public boolean isRow() {
    return true;
  }

  @Override
  public double[] getRow() {
    return _ds;
  }

  @Override
  public double[] getNums() {
    return _ds;
  }

  @Override
  public String toString() {
    return Arrays.toString(_ds);
  }

  /** @return column names paired with the row values */
  public String[] getNames() {
    return _names;
  }

  /**
   * Creates a new ValRow by selecting elements at the specified indices.
   * @param cols array of indices to select. We do not check for AIOOB errors.
   * @return new ValRow object
   */
  public ValRow slice(int[] cols) {
    int n = cols.length;
    double[] pickedVals = new double[n];
    String[] pickedNames = new String[n];
    for (int j = 0; j < n; j++) {
      int col = cols[j];
      pickedVals[j] = _ds[col];
      pickedNames[j] = _names[col];
    }
    return new ValRow(pickedVals, pickedNames);
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/vals/ValStr.java
|
package water.rapids.vals;
import water.rapids.Val;
/**
 * A string
 */
public class ValStr extends Val {
  private final String _str;

  public ValStr(String str) {
    _str = str;
  }

  @Override public int type() { return STR; }
  @Override public boolean isStr() { return true; }
  @Override public String getStr() { return _str; }
  @Override public String[] getStrs() { return new String[]{_str}; }

  /**
   * Renders the value as a double-quoted string literal. Backslashes and embedded
   * double quotes are escaped so the result stays a single well-formed literal
   * (resolves the old TODO about quotation marks and backslashes).
   */
  @Override public String toString() {
    if (_str == null)
      return "\"null\"";  // preserves the previous behavior of concatenating a null reference
    return '"' + _str.replace("\\", "\\\\").replace("\"", "\\\"") + '"';
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/vals/ValStrs.java
|
package water.rapids.vals;
import water.rapids.Val;
import java.util.Arrays;
/**
 * Array of strings.
 */
public class ValStrs extends Val {
  private final String[] _strs;

  public ValStrs(String[] strs) {
    _strs = strs;
  }

  @Override
  public int type() {
    return STRS;
  }

  @Override
  public boolean isStrs() {
    return true;
  }

  @Override
  public String[] getStrs() {
    return _strs;
  }

  @Override
  public String toString() {
    return Arrays.toString(_strs);
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/server/LeaderNodeRequestFilter.java
|
package water.server;
import water.H2O;
import water.init.AbstractEmbeddedH2OConfig;
import water.webserver.iface.RequestAuthExtension;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
/**
 * Only the H2O leader node should be queried by the end user at any time (reproducibility). While running eg. on Kubernetes,
 * it is easy for to misconfigure services and let the traffic flow to non-leader nodes. If an H2O cluster is used
 * in such a way, the results provided to the user might be irreproducible/incorrect and the user has no way to easily find out.
 * <p>
 * To solve this issue, this filter blocks all incoming requests on non-leader nodes when H2O is configured to have
 * non-leader node access disabled.
 * Some APIs intended for internal use (mainly used by other products such as Sparkling Water) might still remain active.
 * All user-facing APIs, mostly the ones used by Python/R/Flow clients are disabled.
 */
public class LeaderNodeRequestFilter implements RequestAuthExtension {

  /** Context paths that stay reachable on every node, not just the leader. */
  private final Set<String> allowedContextPaths;

  public LeaderNodeRequestFilter() {
    Set<String> allowed = new HashSet<>();
    // always enabled servlets (eg. XGBoost external endpoints)
    allowed.addAll(ServletService.INSTANCE.getAlwaysEnabledServlets().keySet());
    // websockets are always enabled - there almost no chance deployment would be configured to hit REST API correctly and only WS be misconfigured
    allowed.addAll(ServletService.INSTANCE.getAllWebsockets().keySet());
    allowedContextPaths = Collections.unmodifiableSet(allowed);
  }

  /**
   * @return true when this filter handled (rejected) the request, false when the request
   *         should continue through the rest of the servlet chain.
   */
  @Override
  public boolean handle(String target, HttpServletRequest request, HttpServletResponse response) throws IOException {
    if (H2O.SELF == null) {
      // If H2O clustering process has not yet finished, disable API on all nodes to prevent premature cluster locking.
      // Send HTTP 403 - Forbidden and indicate the clustering process has not finished yet.
      response.sendError(HttpServletResponse.SC_FORBIDDEN, "H2O Node didn't start yet. H2O API is inaccessible at the moment.");
      // Bug fix: return immediately. Falling through used to invoke sendError a second time on an
      // already-committed response, which throws IllegalStateException.
      return true;
    } else if (H2O.SELF.isLeaderNode()) {
      // If clustering is finished and this node is the leader node, than the request landed correctly.
      // Mark as not handled by this filter and do nothing - the request will be handled by the rest of the servlet
      // chain.
      return false;
    } else if (allowedContextPaths.contains(target)) {
      // If this is not the leader node, yet it is the part of API that should be enabled on every node, indicate this request
      // not been handled by this filter and do nothing.
      return false;
    }
    // If API is disabled on this node and the context path of the request is not listed in allowedPaths,
    // Then send HTTP 403 - Forbidden and indicate the request has been handled by this filter.
    response.sendError(HttpServletResponse.SC_FORBIDDEN, "Deployment configuration error - request reached a non-leader H2O node.");
    return true;
  }

  /** Filter is only active when the embedded config explicitly disables non-leader node access. */
  @Override
  public boolean isEnabled() {
    AbstractEmbeddedH2OConfig config = H2O.getEmbeddedH2OConfig();
    return config != null && config.disableNonLeaderNodeAccess();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/server/ServletMeta.java
|
package water.server;
import javax.servlet.http.HttpServlet;
import java.util.Objects;
/**
 * Describes how to register a Servlet in H2O Server
 */
public class ServletMeta {

  // Immutable registration data; only _alwaysEnabled is mutable so the Builder can toggle it post-construction.
  private final String _contextPath;
  private final Class<? extends HttpServlet> _servletClass;
  private boolean _alwaysEnabled;

  /**
   * Constructs a new instance of {@link ServletMeta} with the alwaysEnabled functionality turned off.
   *
   * @param contextPath Context path the underlying servlet handles
   * @param servletClass Specific implementation of the {@link HttpServlet} to handle the context path
   */
  public ServletMeta(final String contextPath, final Class<? extends HttpServlet> servletClass) {
    Objects.requireNonNull(contextPath);
    Objects.requireNonNull(servletClass);
    _contextPath = contextPath;
    _servletClass = servletClass;
    _alwaysEnabled = false;
  }

  /** @return context path the underlying servlet handles */
  public String getContextPath() {
    return _contextPath;
  }

  /** @return servlet implementation class registered for the context path */
  public Class<? extends HttpServlet> getServletClass() {
    return _servletClass;
  }

  /** @return true when the servlet must stay enabled even on non-leader nodes */
  public boolean isAlwaysEnabled(){
    return _alwaysEnabled;
  }

  public static class Builder {
    private final ServletMeta servletMeta;

    /**
     * Constructs a new instance of {@link ServletMeta.Builder} with basic required parameters
     *
     * @param contextPath Context path the underlying servlet handles
     * @param servletClass Specific implementation of the {@link HttpServlet} to handle the context path
     */
    public Builder(final String contextPath, final Class<? extends HttpServlet> servletClass) {
      // ServletMeta's constructor performs the null validation.
      servletMeta = new ServletMeta(contextPath, servletClass);
    }

    /**
     * @return The underlying ServletMeta object. Returns reference to the same object if called multiple times. Never null.
     */
    public ServletMeta build(){
      return servletMeta;
    }

    /**
     * @param alwaysEnabled whether the servlet should be reachable on every node (not just the leader)
     * @return This builder
     */
    public ServletMeta.Builder withAlwaysEnabled(final boolean alwaysEnabled){
      servletMeta._alwaysEnabled = alwaysEnabled;
      return this;
    }
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/server/ServletProvider.java
|
package water.server;
import java.util.Collections;
import java.util.List;
public interface ServletProvider {
  /**
   * Provides a collection of Servlets that should be registered.
   * @return a list of servlet descriptors (context path + servlet class), never null
   */
  List<ServletMeta> servlets();
  /**
   * Provides a collection of Websockets that should be registered.
   * @return a list of websocket descriptors; empty by default for providers that register no websockets
   */
  default List<WebsocketMeta> websockets() {
    return Collections.emptyList();
  }
  /**
   * Provider priority, providers with higher priority will be used first. H2O Core Provider will be used last and
   * will override any mappings previously registered with the same context path. It is thus not possible to override
   * the H2O Core Servlets.
   *
   * A typical application will have just one custom provider and users don't need to worry about setting a priority.
   * If your use case requires multiple Servlet Providers, please make sure your priorities are set properly and or
   * the context paths do not overlap.
   *
   * @return a positive integer number (0 priority is reserved for H2O Core servlets)
   */
  default int priority() {
    return 1;
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/server/ServletService.java
|
package water.server;
import water.webserver.iface.H2OWebsocketServlet;
import javax.servlet.http.HttpServlet;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
public final class ServletService {
  public static final ServletService INSTANCE = new ServletService();

  private final ServiceLoader<ServletProvider> _loader;

  private ServletService() {
    _loader = ServiceLoader.load(ServletProvider.class);
  }

  /**
   * All discovered providers sorted by descending priority. Because the collectors below
   * use a "latest wins" merge, the lowest-priority provider (H2O Core, priority 0) ends up
   * overriding duplicates - matching the contract documented on {@link ServletProvider#priority()}.
   */
  private java.util.stream.Stream<ServletProvider> providersByPriority() {
    return StreamSupport
            .stream(_loader.spliterator(), false)
            .sorted(Comparator.comparing(ServletProvider::priority).reversed());
  }

  /** @return context path to servlet class mapping for servlets that stay enabled on every node */
  public synchronized LinkedHashMap<String, Class<? extends HttpServlet>> getAlwaysEnabledServlets() {
    return providersByPriority()
            .flatMap(provider -> provider.servlets().stream())
            .filter(ServletMeta::isAlwaysEnabled)
            .collect(Collectors.toMap(ServletMeta::getContextPath, ServletMeta::getServletClass,
                    (val1, val2) -> val2, // Latest always wins
                    LinkedHashMap::new));
  }

  /** @return context path to servlet class mapping for all registered servlets */
  public synchronized LinkedHashMap<String, Class<? extends HttpServlet>> getAllServlets() {
    return providersByPriority()
            .flatMap(provider -> provider.servlets().stream())
            .collect(Collectors.toMap(ServletMeta::getContextPath, ServletMeta::getServletClass,
                    (val1, val2) -> val2, // Latest always wins
                    LinkedHashMap::new));
  }

  /** @return context path to websocket servlet class mapping for all registered websockets */
  public synchronized LinkedHashMap<String, Class<? extends H2OWebsocketServlet>> getAllWebsockets() {
    return providersByPriority()
            .flatMap(provider -> provider.websockets().stream())
            .collect(Collectors.toMap(WebsocketMeta::getContextPath, WebsocketMeta::getHandlerClass,
                    (val1, val2) -> val2, // Latest always wins
                    LinkedHashMap::new));
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/server/ServletUtils.java
|
package water.server;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import water.H2O;
import water.H2OError;
import water.api.RapidsHandler;
import water.api.RequestServer;
import water.api.schemas3.H2OErrorV3;
import water.exceptions.H2OAbstractRuntimeException;
import water.exceptions.H2OFailException;
import water.rapids.Session;
import water.util.HttpResponseStatus;
import water.util.Log;
import water.util.StringUtils;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URLDecoder;
import java.util.Arrays;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Utilities supporting HTTP server-side functionality, without depending on specific version of Jetty, or on Jetty at all.
 */
public class ServletUtils {
  // NOTE(review): logger is registered under RequestServer's category, not ServletUtils' —
  // presumably intentional so request logs group with the request server; confirm.
  private static final Logger LOG = LogManager.getLogger(RequestServer.class);
  /**
   * Adds headers that disable browser-side Cross-Origin Resource checks - allows requests
   * to this server from any origin.
   */
  private static final boolean DISABLE_CORS = Boolean.getBoolean(H2O.OptArgs.SYSTEM_PROP_PREFIX + "disable.cors");
  /**
   * Sets header that allows usage in i-frame. Off by default for security reasons.
   */
  private static final boolean ENABLE_XFRAME_SAMEORIGIN = Boolean.getBoolean(H2O.OptArgs.SYSTEM_PROP_PREFIX + "enable.xframe.sameorigin");
  private static final String TRACE_METHOD = "TRACE";
  // Per-request bookkeeping; thread-locals assume each request is processed start-to-finish
  // on a single thread — TODO confirm against the servlet container's dispatch model.
  private static final ThreadLocal<Long> _startMillis = new ThreadLocal<>();
  private static final ThreadLocal<Integer> _status = new ThreadLocal<>();
  private static final ThreadLocal<Transaction> _transaction = new ThreadLocal<>();
  private ServletUtils() {
    // not instantiable
  }
  /**
   * Called from JettyHTTPD.
   */
  public static void startRequestLifecycle() {
    _startMillis.set(System.currentTimeMillis());
    // 999 acts as a placeholder status until a real one is recorded via setStatus().
    _status.set(999);
  }
  private static void setStatus(int sc) {
    _status.set(sc);
  }
  private static int getStatus() {
    return _status.get();
  }
  private static long getStartMillis() {
    return _startMillis.get();
  }
  public static void startTransaction(String userAgent, String sessionKey) {
    _transaction.set(new Transaction(userAgent, sessionKey));
  }
  public static void endTransaction() {
    _transaction.remove();
  }
  // Immutable per-request metadata captured at transaction start.
  private static class Transaction {
    final String _userAgent;
    final String _sessionKey;
    Transaction(String userAgent, String sessionKey) {
      _userAgent = userAgent;
      _sessionKey = sessionKey;
    }
  }
  /**
   * @return Thread-local User-Agent for this transaction.
   */
  public static String getUserAgent() {
    Transaction t = _transaction.get();
    return t != null ? t._userAgent : null;
  }
  // Looks up a property on the Rapids session tied to the current transaction;
  // falls back to defaultValue whenever no transaction/session is available.
  public static String getSessionProperty(String key, String defaultValue) {
    Transaction t = _transaction.get();
    if (t == null || t._sessionKey == null) {
      return defaultValue;
    }
    Session session = RapidsHandler.getSession(t._sessionKey);
    if (session == null) {
      return defaultValue;
    }
    return session.getProperty(key, defaultValue);
  }
  // Records the status in the thread-local (for logRequest) AND on the response itself.
  public static void setResponseStatus(HttpServletResponse response, int sc) {
    setStatus(sc);
    response.setStatus(sc);
  }
  public static void sendResponseError(HttpServletResponse response, int sc, String msg) throws java.io.IOException {
    setStatus(sc);
    response.sendError(sc, msg);
  }
  // Returns the raw request stream for octet-stream/untyped uploads, otherwise the
  // payload of the first multipart part.
  public static InputStream extractInputStream(HttpServletRequest request, HttpServletResponse response) throws IOException {
    final InputStream is;
    final String contentType = request.getContentType();
    // The Python client sends requests with null content-type when uploading large files,
    // whereas Sparkling Water proxy sends requests with content-type set to application/octet-stream.
    if (contentType == null || contentType.equals("application/octet-stream")) {
      is = request.getInputStream();
    } else {
      is = extractPartInputStream(request, response);
    }
    return is;
  }
  // Hand-rolled multipart parser: skips the part headers and returns a stream that ends
  // at the multipart boundary. Returns null (after writing a 400) on malformed input.
  public static InputStream extractPartInputStream (HttpServletRequest request, HttpServletResponse response) throws
      IOException {
    String ct = request.getContentType();
    if (! ct.startsWith("multipart/form-data")) {
      setResponseStatus(response, HttpServletResponse.SC_BAD_REQUEST);
      response.getWriter().write("Content type must be multipart/form-data");
      return null;
    }
    String boundaryString;
    int idx = ct.indexOf("boundary=");
    if (idx < 0) {
      setResponseStatus(response, HttpServletResponse.SC_BAD_REQUEST);
      response.getWriter().write("Boundary missing");
      return null;
    }
    boundaryString = ct.substring(idx + "boundary=".length());
    byte[] boundary = StringUtils.bytesOf(boundaryString);
    // Consume headers of the mime part.
    InputStream is = request.getInputStream();
    String line = readLine(is);
    while ((line != null) && (line.trim().length()>0)) {
      line = readLine(is);
    }
    return new InputStreamWrapper(is, boundary);
  }
  // Maps an exception to an HTTP error response. H2OFailException is fatal and rethrown;
  // everything else is logged and serialized as an H2OErrorV3 JSON body.
  public static void sendErrorResponse(HttpServletResponse response, Exception exception, String uri) {
    if (exception instanceof H2OFailException) {
      final H2OFailException ee = (H2OFailException) exception;
      final H2OError error = ee.toH2OError(uri);
      Log.fatal("Caught exception (fatal to the cluster): " + error.toString());
      throw(H2O.fail(error.toString()));
    }
    else if (exception instanceof H2OAbstractRuntimeException) {
      final H2OAbstractRuntimeException ee = (H2OAbstractRuntimeException) exception;
      final H2OError error = ee.toH2OError(uri);
      Log.warn("Caught exception: " + error.toString());
      setResponseStatus(response, HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
      writeResponseErrorBody(response, error);
    }
    else { // make sure that no Exception is ever thrown out from the request
      final H2OError error = new H2OError(exception, uri);
      // some special cases for which we return 400 because it's likely a problem with the client request:
      if (exception instanceof IllegalArgumentException) {
        error._http_status = HttpResponseStatus.BAD_REQUEST.getCode();
      } else if (exception instanceof FileNotFoundException) {
        error._http_status = HttpResponseStatus.BAD_REQUEST.getCode();
      } else if (exception instanceof MalformedURLException) {
        error._http_status = HttpResponseStatus.BAD_REQUEST.getCode();
      }
      setResponseStatus(response, error._http_status);
      Log.warn("Caught exception: " + error.toString());
      writeResponseErrorBody(response, error);
    }
  }
  private static void writeResponseErrorBody(HttpServletResponse response, H2OError error) {
    // Note: don't use Schema.schema(version, error) because we have to work at bootstrap:
    try {
      @SuppressWarnings("unchecked")
      final String s = new H2OErrorV3().fillFromImpl(error).toJsonString();
      response.getWriter().write(s);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
  public static String getDecodedUri(HttpServletRequest request) {
    try {
      return URLDecoder.decode(request.getRequestURI(), "UTF-8");
    }
    catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
  // Extracts numParams capture groups from the URI, or null (after a 400) when it doesn't match.
  public static String[] parseUriParams(String uri, HttpServletResponse response, Pattern p, int numParams) throws IOException {
    Matcher m = p.matcher(uri);
    if (!m.matches()) {
      ServletUtils.setResponseStatus(response, HttpServletResponse.SC_BAD_REQUEST);
      response.getWriter().write("Improperly formatted URI");
      return null;
    } else {
      String[] result = new String[numParams];
      for (int i = 0; i < numParams; i++) {
        result[i] = m.group(i+1);
      }
      return result;
    }
  }
  public static boolean isXhrRequest(final HttpServletRequest request) {
    final String requestedWithHeader = request.getHeader("X-Requested-With");
    return "XMLHttpRequest".equals(requestedWithHeader);
  }
  public static boolean isTraceRequest(final HttpServletRequest request) {
    return TRACE_METHOD.equalsIgnoreCase(request.getMethod());
  }
  // Applies caching, CORS, versioning and security headers common to all responses.
  public static void setCommonResponseHttpHeaders(HttpServletResponse response, final boolean xhrRequest) {
    if (xhrRequest) {
      response.setHeader("Cache-Control", "no-cache");
    }
    if (DISABLE_CORS) {
      response.setHeader("Access-Control-Allow-Origin", "*");
      response.setHeader("Access-Control-Allow-Headers", "*");
      response.setHeader("Access-Control-Allow-Methods", "*");
    }
    response.setHeader("X-h2o-build-project-version", H2O.ABV.projectVersion());
    response.setHeader("X-h2o-rest-api-version-max", Integer.toString(water.api.RequestServer.H2O_REST_API_VERSION));
    response.setHeader("X-h2o-cluster-id", Long.toString(H2O.CLUSTER_ID));
    response.setHeader("X-h2o-cluster-good", Boolean.toString(H2O.CLOUD.healthy()));
    // Security headers
    if (ENABLE_XFRAME_SAMEORIGIN) {
      response.setHeader("X-Frame-Options", "sameorigin");
    } else {
      response.setHeader("X-Frame-Options", "deny");
    }
    response.setHeader("X-XSS-Protection", "1; mode=block");
    response.setHeader("X-Content-Type-Options", "nosniff");
    response.setHeader("Content-Security-Policy", "default-src 'self' 'unsafe-eval' 'unsafe-inline'; img-src 'self' data:");
    // Note: ^^^ unsafe-eval/-inline are essential for Flow to work
    // this will also kill the component "Star H2O on Github" in Flow - see https://github.com/h2oai/private-h2o-3/issues/44
    // Custom headers - using addHeader - can be multi-value and cannot overwrite the security headers
    for (H2O.KeyValueArg header : H2O.ARGS.extra_headers) {
      response.addHeader(header._key, header._value);
    }
  }
  // NOTE(review): the 'response' parameter is unused; status comes from the thread-local set
  // earlier in the request lifecycle.
  public static void logRequest(String method, HttpServletRequest request, HttpServletResponse response) {
    LOG.info(
        String.format(
            "  %-6s  %3d  %6d ms  %s",
            method, getStatus(), System.currentTimeMillis() - getStartMillis(), request.getRequestURI()
        )
    );
  }
  // Reads a single header line (CR/LF stripped) from the stream; returns null at end of input.
  private static String readLine(InputStream in) throws IOException {
    StringBuilder sb = new StringBuilder();
    byte[] mem = new byte[1024];
    while (true) {
      int sz = readBufOrLine(in,mem);
      sb.append(new String(mem,0,sz));
      if (sz < mem.length)
        break;
      if (mem[sz-1]=='\n')
        break;
    }
    if (sb.length()==0)
      return null;
    String line = sb.toString();
    if (line.endsWith("\r\n"))
      line = line.substring(0,line.length()-2);
    else if (line.endsWith("\n"))
      line = line.substring(0,line.length()-1);
    return line;
  }
  // Fills 'mem' until buffer-full or a newline (LF or CRLF) is reached; returns bytes read.
  // NOTE(review): the return value of in.read() is ignored — a stream that signals EOF by
  // returning -1 (rather than throwing EOFException) would leave bb[0] stale and could loop
  // on the last buffered byte; verify the servlet input stream's EOF behavior.
  @SuppressWarnings("all")
  private static int readBufOrLine(InputStream in, byte[] mem) throws IOException {
    byte[] bb = new byte[1];
    int sz = 0;
    while (true) {
      byte b;
      byte b2;
      if (sz==mem.length)
        break;
      try {
        in.read(bb,0,1);
        b = bb[0];
        mem[sz++] = b;
      } catch (EOFException e) {
        break;
      }
      if (b == '\n')
        break;
      if (sz==mem.length)
        break;
      if (b == '\r') {
        try {
          in.read(bb,0,1);
          b2 = bb[0];
          mem[sz++] = b2;
        } catch (EOFException e) {
          break;
        }
        if (b2 == '\n')
          break;
      }
    }
    return sz;
  }
  // Stream decorator that reports end-of-stream once the multipart boundary
  // ("\r\n--<boundary>") is encountered in the wrapped stream.
  @SuppressWarnings("all")
  private static final class InputStreamWrapper extends InputStream {
    static final byte[] BOUNDARY_PREFIX = { '\r', '\n', '-', '-' };
    final InputStream _wrapped;
    final byte[] _boundary;
    // Holds bytes read past the caller's buffer while checking a partial boundary match.
    final byte[] _lookAheadBuf;
    // Number of buffered look-ahead bytes; -1 once the boundary has been consumed (EOF state).
    int _lookAheadLen;
    public InputStreamWrapper(InputStream is, byte[] boundary) {
      _wrapped = is;
      _boundary = Arrays.copyOf(BOUNDARY_PREFIX, BOUNDARY_PREFIX.length + boundary.length);
      System.arraycopy(boundary, 0, _boundary, BOUNDARY_PREFIX.length, boundary.length);
      _lookAheadBuf = new byte[_boundary.length];
      _lookAheadLen = 0;
    }
    @Override public void close() throws IOException { _wrapped.close(); }
    @Override public int available() throws IOException { return _wrapped.available(); }
    @Override public long skip(long n) throws IOException { return _wrapped.skip(n); }
    @Override public void mark(int readlimit) { _wrapped.mark(readlimit); }
    @Override public void reset() throws IOException { _wrapped.reset(); }
    @Override public boolean markSupported() { return _wrapped.markSupported(); }
    @Override public int read() throws IOException { throw new UnsupportedOperationException(); }
    @Override public int read(byte[] b) throws IOException { return read(b, 0, b.length); }
    @Override public int read(byte[] b, int off, int len) throws IOException {
      if(_lookAheadLen == -1)
        return -1;
      int readLen = readInternal(b, off, len);
      if (readLen != -1) {
        int pos = findBoundary(b, off, readLen);
        if (pos != -1) {
          _lookAheadLen = -1;
          return pos - off;
        }
      }
      return readLen;
    }
    // Serves buffered look-ahead bytes first, then reads from the wrapped stream.
    private int readInternal(byte b[], int off, int len) throws IOException {
      if (len < _lookAheadLen ) {
        System.arraycopy(_lookAheadBuf, 0, b, off, len);
        _lookAheadLen -= len;
        System.arraycopy(_lookAheadBuf, len, _lookAheadBuf, 0, _lookAheadLen);
        return len;
      }
      if (_lookAheadLen > 0) {
        System.arraycopy(_lookAheadBuf, 0, b, off, _lookAheadLen);
        off += _lookAheadLen;
        len -= _lookAheadLen;
        int r = Math.max(_wrapped.read(b, off, len), 0) + _lookAheadLen;
        _lookAheadLen = 0;
        return r;
      } else {
        return _wrapped.read(b, off, len);
      }
    }
    // Returns the start index of the boundary within b[off..off+len), or -1 if absent.
    // A partial match at the end of the buffer is resolved by reading ahead into _lookAheadBuf.
    private int findBoundary(byte[] b, int off, int len) throws IOException {
      int bidx = -1; // start index of boundary
      int idx = 0; // actual index in boundary[]
      for(int i = off; i < off+len; i++) {
        if (_boundary[idx] != b[i]) { // reset
          idx = 0;
          bidx = -1;
        }
        if (_boundary[idx] == b[i]) {
          if (idx == 0) bidx = i;
          if (++idx == _boundary.length) return bidx; // boundary found
        }
      }
      if (bidx != -1) { // it seems that there is boundary but we did not match all boundary length
        assert _lookAheadLen == 0; // There should not be not read lookahead
        _lookAheadLen = _boundary.length - idx;
        int readLen = _wrapped.read(_lookAheadBuf, 0, _lookAheadLen);
        if (readLen < _boundary.length - idx) { // There is not enough data to match boundary
          _lookAheadLen = readLen;
          return -1;
        }
        for (int i = 0; i < _boundary.length - idx; i++)
          if (_boundary[i+idx] != _lookAheadBuf[i])
            return -1; // There is not boundary => preserve lookahead buffer
        // Boundary found => do not care about lookAheadBuffer since all remaining data are ignored
      }
      return bidx;
    }
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/server/WebsocketMeta.java
|
package water.server;
import water.webserver.iface.H2OWebsocketServlet;
/**
 * Describes how to register a Websocket in H2O Server
 */
public class WebsocketMeta {
  private final String _contextPath;
  private final Class<? extends H2OWebsocketServlet> _servletClass;

  /**
   * @param contextPath  context path the websocket handler is mounted on
   * @param servletClass websocket servlet implementation handling that path
   */
  public WebsocketMeta(String contextPath, Class<? extends H2OWebsocketServlet> servletClass) {
    _contextPath = contextPath;
    _servletClass = servletClass;
  }

  /** @return the context path this websocket is registered under */
  public String getContextPath() {
    return _contextPath;
  }

  /** @return the websocket servlet class handling requests on the context path */
  public Class<? extends H2OWebsocketServlet> getHandlerClass() {
    return _servletClass;
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/testing/SandboxSecurityManager.java
|
package water.testing;
import java.security.Permission;
/**
 * Permissive SecurityManager for tests: allows everything except reads of files
 * whose paths start with one of the configured forbidden prefixes.
 */
public class SandboxSecurityManager extends SecurityManager {
  private String[] _forbidden = new String[0];

  @Override
  public void checkPermission(Permission perm) {
    // noop - intentionally allow everything; only file reads are restricted below
  }

  @Override
  public void checkPermission(Permission perm, Object context) {
    // noop
  }

  /**
   * @param file file path being read
   * @throws SecurityException when the path starts with a forbidden prefix
   */
  @Override
  public void checkRead(String file) {
    for (String forbidden : _forbidden) {
      if (file.startsWith(forbidden))
        throw new SecurityException("Access to '" + file + "' is forbidden (rule: '" + forbidden + "').");
    }
  }

  @Override
  public void checkRead(String file, Object context) {
    checkRead(file);
  }

  /**
   * Replaces the set of forbidden path prefixes.
   * @param forbidden prefixes to block; null clears all restrictions
   */
  public void setForbiddenReadPrefixes(String[] forbidden) {
    // Defensive copy: the original stored the caller's array by reference, so later external
    // mutation could silently change the policy; a null argument also used to NPE in checkRead.
    _forbidden = forbidden == null ? new String[0] : forbidden.clone();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/tools/EncryptionTool.java
|
package water.tools;
import org.apache.commons.io.IOUtils;
import javax.crypto.Cipher;
import javax.crypto.CipherOutputStream;
import javax.crypto.spec.SecretKeySpec;
import java.io.*;
import java.security.GeneralSecurityException;
import java.security.KeyStore;
/**
 * Command-line tool that encrypts a file with a symmetric key loaded from a Java KeyStore.
 */
public class EncryptionTool {
  private File _keystore_file; // where to find Java KeyStore file
  private String _keystore_type; // what kind of KeyStore is used
  private String _key_alias; // what is the alias of the key in the keystore
  private char[] _password; // password to the keystore and to the keyentry
  private String _cipher_spec; // specification of the cipher (and padding)

  /**
   * Loads the secret key identified by {@code _key_alias} from the configured keystore.
   * @return key material wrapped as a {@link SecretKeySpec}
   * @throws IllegalArgumentException when the alias is missing or does not refer to a key entry
   * @throws RuntimeException when the keystore cannot be read or the key cannot be recovered
   */
  public SecretKeySpec readKey() {
    try (InputStream ksStream = new FileInputStream(_keystore_file)) {
      KeyStore keystore = KeyStore.getInstance(_keystore_type);
      keystore.load(ksStream, _password);
      if (! keystore.containsAlias(_key_alias)) {
        throw new IllegalArgumentException("Key for alias='" + _key_alias + "' not found.");
      }
      java.security.Key key = keystore.getKey(_key_alias, _password);
      if (key == null) {
        // containsAlias also passes for certificate-only entries; getKey returns null for those,
        // which previously caused an NPE inside the SecretKeySpec constructor.
        throw new IllegalArgumentException("Alias '" + _key_alias + "' does not refer to a key entry.");
      }
      return new SecretKeySpec(key.getEncoded(), key.getAlgorithm());
    } catch (GeneralSecurityException e) {
      throw new RuntimeException("Unable to load key '" + _key_alias + "' from keystore '" + _keystore_file.getAbsolutePath() + "'.", e);
    } catch (IOException e) {
      throw new RuntimeException("Failed to read keystore '" + _keystore_file.getAbsolutePath() + "'.", e);
    }
  }

  /**
   * Encrypts {@code input} into {@code output} using the configured cipher and keystore key.
   */
  public void encrypt(File input, File output) throws IOException, GeneralSecurityException {
    SecretKeySpec key = readKey();
    Cipher cipher = Cipher.getInstance(_cipher_spec);
    cipher.init(Cipher.ENCRYPT_MODE, key);
    try (FileInputStream inputStream = new FileInputStream(input);
         FileOutputStream outputStream = new FileOutputStream(output);
         CipherOutputStream cipherStream = new CipherOutputStream(outputStream, cipher);
    ) {
      IOUtils.copyLarge(inputStream, cipherStream);
    }
  }

  public static void main(String[] args) throws GeneralSecurityException, IOException {
    mainInternal(args);
  }

  /**
   * Entry point with explicit argument validation.
   * Expected arguments: keystore-file, keystore-type, key-alias, password, cipher-spec, input-file, output-file.
   */
  public static void mainInternal(String[] args) throws GeneralSecurityException, IOException {
    if (args.length < 7) {
      // Fail with a descriptive message instead of an ArrayIndexOutOfBoundsException.
      throw new IllegalArgumentException(
          "Expected 7 arguments: keystore-file keystore-type key-alias password cipher-spec input-file output-file");
    }
    EncryptionTool et = new EncryptionTool();
    et._keystore_file = new File(args[0]);
    et._keystore_type = args[1];
    et._key_alias = args[2];
    et._password = args[3].toCharArray();
    et._cipher_spec = args[4];
    et.encrypt(new File(args[5]), new File(args[6]));
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/CDistributionFunc.java
|
package water.udf;
/**
 * Custom Distribution Function Interface to customize loss and prediction calculation in GBM algorithm
 *
 * The function has four parts:
 * - link: link function transforms the probability of response variable to a continuous scale that is unbounded
 * - init: computes numerator and denominator of the initial value.
 * - gradient: computes (Negative half) Gradient of deviance function at predicted value for actual response
 * - gamma: computes numerator and denominator of terminal node estimate
 */
public interface CDistributionFunc extends CFunc {
  /**
   * Type of Link function.
   * @return name of link function. Possible functions: log, logit, identity, inverse, ologit, ologlog, oprobit
   */
  String link();
  /**
   * Contribution for initial value computation (numerator and denominator).
   * @param w weight
   * @param o offset
   * @param y response
   * @return [weighted contribution to init numerator, weighted contribution to init denominator]
   */
  double[] init(double w, double o, double y);
  /**
   * (Negative half) Gradient of deviance function at predicted value f, for actual response y.
   * Important for customization of a loss function.
   * @param y (actual) response
   * @param f (predicted) response in link space (including offset)
   * @return gradient
   */
  double gradient(double y, double f);
  /**
   * (Negative half) Gradient of deviance function at predicted value f, for actual response y,
   * restricted to a single class label (overload used for multi-class problems).
   * Important for customization of a loss function.
   * @param y (actual) response
   * @param f (predicted) response in link space (including offset)
   * @param l (class label) label of a class (converted lexicographically from original labels to 0-number of class - 1)
   * @return gradient
   */
  double gradient(double y, double f, int l);
  /**
   * Contribution for GBM's leaf node prediction (numerator and denominator).
   * Important for customization of a loss function.
   * @param w weight
   * @param y response
   * @param z residual
   * @param f predicted value (including offset)
   * @return [weighted contribution to gamma numerator, weighted contribution to gamma denominator]
   */
  double[] gamma(double w, double y, double z, double f);
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/CFunc.java
|
package water.udf;
/**
 * Custom function marker interface.
 *
 * Tag interface implemented by all user-defined function types in this package
 * (e.g. {@link CDistributionFunc}, {@link CMetricFunc}); declares no methods.
 */
interface CFunc {}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/CFuncLoader.java
|
package water.udf;
/**
 * Custom function loader interface.
 *
 * The custom function loader provides a way of
 * instantiating a given function for a specific source language
 * (see {@link #getLang()}). Implementations are discovered via Java SPI
 * by {@link CFuncLoaderService}.
 */
abstract public class CFuncLoader {
/**
 * Supported language.
 * @return language of function this provider can instantiate.
 */
public abstract String getLang();
/**
 * Convenience overload: instantiates the function using the current thread's
 * context classloader for class resolution.
 */
public <F> F load(String jfuncName, Class<? extends F> targetKlazz) {
return load(jfuncName, targetKlazz, Thread.currentThread().getContextClassLoader());
}
/**
 * Instantiate demanded function.
 *
 * @param jfuncName function name - this is target language specific!
 * @param targetKlazz requested function Java interface
 * @param classLoader classloader to use for function search
 * @param <F> type of function
 * @return return an object implementing given interface or null.
 */
public abstract <F> F load(String jfuncName, Class<? extends F> targetKlazz, ClassLoader classLoader);
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/CFuncLoaderService.java
|
package water.udf;
import java.util.Iterator;
import java.util.ServiceLoader;
/**
 * Loader for custom function providers.
 *
 * A provider provides a way to instantiate a given function reference
 * (given as {@link CFuncRef}). It needs to publish a {@link CFuncLoader}
 * implementation via Java SPI.
 */
public class CFuncLoaderService {

  public static CFuncLoaderService INSTANCE = new CFuncLoaderService();

  private final ServiceLoader<CFuncLoader> loader;

  public CFuncLoaderService() {
    loader = ServiceLoader.load(CFuncLoader.class);
  }

  /**
   * Finds the first registered loader that supports the given language.
   *
   * @param lang language identifier (e.g. "java")
   * @return matching loader, or {@code null} when none is registered
   */
  synchronized public CFuncLoader getByLang(String lang) {
    for (CFuncLoader candidate : loader) {
      if (candidate.getLang().equals(lang)) {
        return candidate;
      }
    }
    return null;
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/CFuncObject.java
|
package water.udf;
import water.Iced;
/**
 * Serializable holder of a custom-function reference that lazily instantiates
 * the referenced function on the local node via {@link CFuncLoaderService}.
 */
abstract public class CFuncObject<T extends CFunc> extends Iced<CFuncObject> {

  protected final CFuncRef cFuncRef;
  protected transient T func;

  public CFuncObject(CFuncRef cFuncRef) {
    this.cFuncRef = cFuncRef;
  }

  /** Instantiates {@link #func} once per node; no-op when unset or already loaded. */
  protected void setupLocal() {
    if (cFuncRef == null || func != null) {
      return;
    }
    ClassLoader funcCl = getFuncClassLoader();
    CFuncLoader langLoader = CFuncLoaderService.INSTANCE.getByLang(cFuncRef.language);
    if (langLoader != null) {
      func = langLoader.load(cFuncRef.funcName, getFuncType(), funcCl);
    }
  }

  /** Classloader backed by the jar stored in DKV under the reference's key. */
  protected ClassLoader getFuncClassLoader() {
    return new DkvClassLoader(cFuncRef.getKey(), getParentClassloader());
  }

  protected ClassLoader getParentClassloader() {
    return Thread.currentThread().getContextClassLoader();
  }

  /** Java interface the loaded function is expected to implement. */
  abstract protected Class<T> getFuncType();

  /** Returns the function, loading it on first access. */
  public T getFunc() {
    if (func == null) {
      setupLocal();
    }
    return func;
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/CFuncRef.java
|
package water.udf;
import water.Iced;
import water.Key;
/**
 * Reference to a custom function: source language, DKV key holding the function's
 * jar, and the (language-specific) function name.
 *
 * String form is {@code "lang:keyName=funcName"} (see {@link #toRef()}).
 */
public class CFuncRef extends Iced<CFuncRef> {
  /** Sentinel meaning "no custom function". */
  public static final CFuncRef NOP = null;

  public final String keyName;
  public final String funcName;
  public final String language;

  public CFuncRef(String language, String keyName, String funcName) {
    this.language = language;
    this.keyName = keyName;
    this.funcName = funcName;
  }

  /**
   * Create function definition from "lang:keyName=funcName"
   *
   * @param def function definition
   * @return instance of function, or NOP if the definition is null/empty
   * @throws IllegalArgumentException if the definition does not match `lang:key=funcName`
   */
  public static CFuncRef from(String def) {
    if (def == null || def.isEmpty()) {
      return NOP;
    }
    // Validate explicitly instead of with `assert`: assertions are disabled by
    // default at runtime, which previously let malformed input fall through to
    // ArrayIndexOutOfBoundsException below.
    String[] parts = def.split("=");
    if (parts.length != 2) {
      throw new IllegalArgumentException("Input should be `lang:key=funcName`, but was: " + def);
    }
    String[] langParts = parts[0].split(":");
    if (langParts.length != 2) {
      throw new IllegalArgumentException("Input should be `lang:key=funcName`, but was: " + def);
    }
    return new CFuncRef(langParts[0], langParts[1], parts[1]);
  }

  public static CFuncRef from(String lang, String keyName, String funcName) {
    return new CFuncRef(lang, keyName, funcName);
  }

  /** Name of the DKV key holding the function's jar. */
  public String getName() {
    return keyName;
  }

  public Key getKey() {
    return Key.make(keyName);
  }

  /** Serializes back to the "lang:keyName=funcName" form accepted by {@link #from(String)}. */
  public String toRef() {
    return String.format("%s:%s=%s", language, keyName, funcName);
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/CFuncTask.java
|
package water.udf;
import water.MRTask;
/**
 * Low-level MRTask to invoke given function stored in DKV.
 *
 * Subclasses access the instantiated function via the {@link #func} field;
 * it is transient and re-created on each node by {@link #setupLocal()}.
 */
abstract public class CFuncTask<T extends CFunc, S extends CFuncTask<T,S>> extends MRTask<S> {
// Reference identifying language, DKV jar key and function name; serialized with the task.
protected final CFuncRef cFuncRef;
// Per-node function instance; rebuilt locally, never shipped over the wire.
protected transient T func;
public CFuncTask(CFuncRef cFuncRef) {
this.cFuncRef = cFuncRef;
}
// Runs once per node before map calls: instantiate the function from the DKV-stored jar.
@Override
protected void setupLocal() {
if (cFuncRef != null && func == null) {
ClassLoader localCl = getFuncClassLoader();
CFuncLoader loader = CFuncLoaderService.INSTANCE.getByLang(cFuncRef.language);
if (loader != null) {
func = loader.load(cFuncRef.funcName, getFuncType(), localCl);
}
}
}
// Classloader backed by the jar content stored in DKV under the reference's key.
protected ClassLoader getFuncClassLoader() {
return new DkvClassLoader(cFuncRef.getKey(), getParentClassloader());
}
protected ClassLoader getParentClassloader() {
return Thread.currentThread().getContextClassLoader();
}
// The Java interface the loaded function must implement.
abstract protected Class<T> getFuncType();
// TODO: we should cleanup loader
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/CMetricFunc.java
|
package water.udf;
import hex.Model;
/**
 * Custom metric evaluation function.
 *
 * The function has 3 parts:
 * - map: the function is executed per row and return computation state in form of array of doubles
 * - reduce: combine 2 map results
 * - metric: compute final metric based on the computed state
 */
public interface CMetricFunc extends CFunc {
/**
 * Compute temporary state for given row of data.
 *
 * The method is invoked per row represented by prediction and actual response
 * and returns temporary computation state that is
 * later combined together with other map-results to form final value of metric.
 *
 * @param preds predicted response value
 * @param yact actual response value
 * @param weight weight of row
 * @param offset offset of row
 * @param m model
 * @return temporary result in form of array of doubles.
 */
double[] map(double preds[], float yact[],double weight, double offset, Model m);
/**
 * Combine two map-call results together.
 *
 * Must be associative and commutative so results can be merged in any order
 * during the distributed reduce phase.
 *
 * @param l a result of map call
 * @param r a result of map call
 * @return combined results
 */
double[] reduce(double[] l, double r[]);
/**
 * Get value of metric for given computation state.
 * @param r computation state
 * @return value of metric
 */
double metric(double[] r);
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/ChunkFactory.java
|
package water.udf;
import water.fvec.Chunk;
import water.util.fp.Function;
/**
 * This factory creates a TypedChunk; there's a variety of data sources,
 * can be a materialized Chunk, or a function.
 * Have to keep a byte with type code, since, well, it's H2O.
 */
public interface ChunkFactory<DataType> extends Function<Chunk, DataChunk<DataType>> {
// Vec type code (one of the Vec.T_* constants) describing the data this factory produces.
byte typeCode();
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/Column.java
|
package water.udf;
import water.fvec.Vec;
import water.util.fp.Function;
/**
 * Generic typed data column
 *
 * This is a type-aware representation of id -> value accessors.
 * E.g. our Vec has no clue about the data type. This one, if built on a Vec,
 * does, because we provide the type.
 * More, type may be something totally different from the standard four data types
 * that are hard-coded in Vecs and Chunks.
 *
 * So that's why we have this interface to be an extension of {@code Function<Long, T>}.
 * Due to some hard-coded peculiar features, we need to hold a pointer to a Vec (that helps
 * us to materialize the data if needed).
 *
 * Of course somewhere deep inside, the data are split into chunks; so we have here
 * a method chunkAt(i) that returns a {@code TypedChunk<T>}.
 *
 * In general, the interface is similar, in its api, to Vecs.
 *
 * But, unlike Vec, any value T can be a type of column data. Does not have to be Serializable,
 * for instance.
 */
public interface Column<T> extends Function<Long, T>, Vec.Holder {
// Value at the given row index (primitive-long twin of the inherited apply(Long)).
T apply(long idx);
// Typed view of the i-th chunk, for distributed per-chunk computation.
TypedChunk<T> chunkAt(int i);
// True when the value at the given row is missing.
boolean isNA(long idx);
// Row layout of the backing vec; columns with equal layouts are chunk-compatible.
int rowLayout();
// Number of rows.
long size();
// True when this column and ys share a compatible chunk layout.
boolean isCompatibleWith(Column<?> ys);
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/ColumnBase.java
|
package water.udf;
import water.Iced;
import water.fvec.Frame;
import water.fvec.Vec;
/**
 * Basic common behavior for Functional Columns
 */
public abstract class ColumnBase<T> extends Iced<ColumnBase<T>> implements Column<T> {

  /** Value at the given row index; supplied by subclasses. */
  public abstract T get(long idx);

  @Override
  public T apply(long idx) {
    return get(idx);
  }

  @Override
  public T apply(Long idx) {
    return get(idx);
  }

  /** Two columns are compatible iff their backing vecs are compatible. */
  @Override
  public boolean isCompatibleWith(Column<?> ys) {
    Vec mine = vec();
    return mine.isCompatibleWith(ys.vec());
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/ColumnFactory.java
|
package water.udf;
import water.fvec.Vec;
import water.util.fp.Function;
import java.io.IOException;
import java.util.List;
/**
 * General-case factory for columns
 */
public interface ColumnFactory<T> extends ChunkFactory<T> {
// Wraps an existing materialized vec as a typed column.
DataColumn<T> newColumn(Vec vec);
// Builds a materialized column of the given length by evaluating f at each row.
DataColumn<T> newColumn(long length, final Function<Long, T> f) throws IOException;
// Builds a materialized column from an in-memory list of values.
DataColumn<T> newColumn(final List<T> xs) throws IOException;
// Evaluates a (possibly lazy) column and stores the result in a new vec.
DataColumn<T> materialize(Column<T> xs) throws IOException;
// Allocates a zero-filled vec of this factory's type, used as materialization target.
Vec buildZeroVec(long length);
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/DataChunk.java
|
package water.udf;
import water.fvec.Chunk;
import water.fvec.Vec;
/**
 * Wrapper of a chunk that knows its type, with mutability
 *
 * All positional methods delegate to the wrapped raw {@link Chunk};
 * subclasses add typed get/set on top of it.
 */
public abstract class DataChunk<T> implements TypedChunk<T> {
// The wrapped raw chunk; non-final only to allow deserialization.
protected Chunk c;
/**
 * Deserializaiton only
 */
public DataChunk() {}
public DataChunk(Chunk c) { this.c = c; }
@Override public Chunk rawChunk() { return c; }
@Override public boolean isNA(int i) { return c.isNA(i); }
@Override public long start() { return c.start(); }
@Override public int length() { return c.len(); }
// Writes a typed value at the given in-chunk index; implemented per element type.
public abstract void set(int idx, T value);
@Override public int cidx() { return c.cidx(); }
@Override public Vec vec() { return c.vec(); }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/DataColumn.java
|
package water.udf;
import water.DKV;
import water.Key;
import water.fvec.Vec;
/**
 * A Column based on actual data in a Vec (hence implementing Vec.Holder)
 *
 * The vec itself is transient; after deserialization it is lazily re-resolved
 * from DKV via its key (see {@link #vec()}).
 */
public abstract class DataColumn<T> extends ColumnBase<T> {
// Cached vec; transient, restored on demand from vecKey.
protected transient Vec vec = null;
// Key used to re-fetch the vec from DKV after deserialization.
private Key<Vec> vecKey;
// Vec.T_* type code of the stored data.
public final byte type;
// Factory producing typed chunk views of this column's vec.
private ChunkFactory<T> chunkFactory;
/**
 * Deserialization only; pls don't use
 */
public DataColumn() {
type = Vec.T_BAD;
}
public abstract T get(long idx);
public abstract void set(long idx, T value);
@Override
public T apply(Long idx) {
return get(idx);
}
@Override
public T apply(long idx) {
return get(idx);
}
@Override
public int rowLayout() {
return vec()._rowLayout;
}
@Override
public long size() {
return vec().length();
}
@Override
public TypedChunk<T> chunkAt(int i) {
return chunkFactory.apply(vec().chunkForChunkIdx(i));
}
protected DataColumn(Vec vec, ChunkFactory<T> factory) {
this.vec = vec;
this.vecKey = vec._key;
this.type = factory.typeCode();
this.chunkFactory = factory;
}
public boolean isNA(long idx) {
return vec().isNA(idx);
}
// Lazily restores the transient vec from DKV when absent (e.g. after deserialization).
public Vec vec() {
if (vec == null) vec = DKV.get(vecKey).get();
return vec;
}
@Override public String toString() {
return "DataColumn(type=" + type + ", factory=" + chunkFactory + ", vec=" + vec() + ")";
}
// Equality is identity of backing storage: same vec key and same type code.
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o instanceof DataColumn<?>) {
DataColumn<?> that = (DataColumn<?>) o;
return (type == that.type) && vecKey.equals(that.vecKey);
} else return false;
}
@Override
public int hashCode() {
return 61 * vecKey.hashCode() + type;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/DataColumns.java
|
package water.udf;
import water.fvec.Chunk;
import water.fvec.Vec;
import water.util.fp.Function;
import water.util.fp.Functions;
import java.io.IOException;
import java.util.List;
/**
 * An adapter to Vec, allows type-safe access to data
 *
 * Holds the shared {@link BaseFactory} scaffolding used by concrete typed
 * column factories (see water.udf.specialized.*).
 */
public class DataColumns {
// Not instantiable; acts as a namespace for factories and helpers.
protected DataColumns(){}
// Allocates a zero-filled vec of the requested Vec.T_* type.
public static Vec buildZeroVec(long length, byte typeCode) {
return Vec.makeCon(0.0, length, true, typeCode);
}
/**
 * Common base for typed column factories: carries the Vec type code and a
 * display name, and implements column construction in terms of TypedFrame.
 */
public static abstract class BaseFactory<T>
implements ColumnFactory<T> {
public final byte typeCode;
public final String name;
protected BaseFactory(byte typeCode, String name) {
this.typeCode = typeCode;
this.name = name;
}
public byte typeCode() { return typeCode; }
public Vec buildZeroVec(long length) {
return DataColumns.buildZeroVec(length, typeCode);
}
// Builds a zero vec chunk-aligned with the master column's vec.
public Vec buildZeroVec(Column<?> master) {
Vec vec = buildZeroVec(master.size());
vec.align(master.vec());
return vec;
}
public abstract DataChunk<T> apply(final Chunk c);
public abstract DataColumn<T> newColumn(Vec vec);
public DataColumn<T> newColumn(long length, final Function<Long, T> f) throws IOException {
return new TypedFrame<>(this, length, f).newColumn();
}
// Evaluates a lazy column into a newly materialized one.
public DataColumn<T> materialize(Column<T> xs) throws IOException {
return TypedFrame.forColumn(this, xs).newColumn();
}
public DataColumn<T> newColumn(List<T> xs) throws IOException {
return newColumn(xs.size(), Functions.onList(xs));
}
// Column of the given length where every row holds the same constant value.
public DataColumn<T> constColumn(final T t, long length) throws IOException {
return newColumn(length, Functions.<Long, T>constant(t));
}
@Override public String toString() { return name; }
}
// We may never need BufferedStrings
// public static class OfBS extends OnVector<BufferedString> {
// public OfBS(Vec vec) {
// super(vec, Vec.T_STR);
// }
//
// @Override
// public BufferedString get(long idx) {
// BufferedString bs = new BufferedString();
// return vec.atStr(bs, idx);
// }
// }
//-------------------------------------------------------------
// TODO(vlad): figure out if we should support UUIDs
// public static final Factory<UUID> UUIDs = new Factory<UUID>(Vec.T_UUID) {
//
// @Override public DataChunk<UUID> apply(final Chunk c) {
// return new DataChunk<UUID>(c) {
// @Override public UUID get(int idx) { return isNA(idx) ? null : new UUID(c.at16h(idx), c.at16l(idx)); }
// @Override public void set(int idx, UUID value) { c.set(idx, value); }
// };
// }
//
// @Override public DataColumn<UUID> newColumn(final Vec vec) {
// if (vec.get_type() != Vec.T_UUID)
// throw new IllegalArgumentException("Expected a type UUID, got " + vec.get_type_str());
// return new DataColumn<UUID>(vec, typeCode, this) {
// @Override public UUID get(long idx) { return isNA(idx) ? null : new UUID(vec.at16h(idx), vec.at16l(idx)); }
// @Override public void set(long idx, UUID value) { vec.set(idx, value); }
// };
// }
// };
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/DependentChunk.java
|
package water.udf;
/**
 * Represents a chunk that depends on another (the "master") chunk:
 * row range, length and chunk index are all delegated to the master.
 */
public abstract class DependentChunk<T> implements TypedChunk<T> {
  private final TypedChunk<?> master;

  DependentChunk(TypedChunk<?> master) {
    this.master = master;
  }

  // Fix: start() is declared on TypedChunk, so annotate it with @Override like
  // the sibling methods — the compiler now guards against signature drift.
  @Override public long start() { return master.rawChunk().start(); }
  @Override public int length() { return master.length(); }
  @Override public int cidx() { return master.cidx(); }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/DkvClassLoader.java
|
package water.udf;
import org.apache.commons.io.IOUtils;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.net.URLStreamHandler;
import java.util.Arrays;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.jar.JarEntry;
import java.util.jar.JarInputStream;
import water.DKV;
import water.Key;
import static water.udf.DkvClassLoader.DkvUrlStreamHandler.PROTO;
/**
 * A classloader which uses the content of a jar file stored in the K/V store
 * under a given key to search for and load classes.
 *
 * The whole jar is eagerly unpacked into an in-memory map (entry path -> bytes)
 * at construction time; classes and resources are then served from that cache.
 */
class DkvClassLoader extends ClassLoader {
// Cache of jar entries: entry path -> raw bytes (directories excluded).
private final Map<String, byte[]> jarCache;
// DKV key under which the jar content is stored.
private final Key jarKey;
public DkvClassLoader(CFuncRef cFuncRef, ClassLoader parent) {
this(cFuncRef.keyName, parent);
}
public DkvClassLoader(String jarKeyName, ClassLoader parent) {
this(Key.make(jarKeyName), parent);
}
public DkvClassLoader(Key jarKey, ClassLoader parent) {
super(parent);
this.jarKey = jarKey;
// Eagerly read the jar bytes out of DKV; throws RuntimeException on I/O failure.
this.jarCache = buildJarCache(jarKey);
}
@Override
protected Class<?> findClass(String name) throws ClassNotFoundException {
try {
// NOTE(review): ClassLoader.findClass always throws by default, so this try
// normally falls straight through to the cache lookup — confirm intent.
return super.findClass(name);
} catch (ClassNotFoundException e) {
// Parent does not contain the requested class, look into cache we built.
String path = name.replace('.', '/').concat(".class");
byte[] klazzBytes = jarCache.get(path);
if (klazzBytes != null && klazzBytes.length > 0) {
return defineClass(name, klazzBytes, 0, klazzBytes.length);
}
throw new ClassNotFoundException(name);
}
}
@Override
protected URL findResource(String name) {
return url(name);
}
@Override
protected Enumeration<URL> findResources(String name) {
URL url = url(name);
return url == null
? Collections.<URL>emptyEnumeration()
: Collections.enumeration(Collections.singletonList(url));
}
// Builds a "dkv:" URL for a cached resource, or null when the entry is absent.
protected URL url(String name) {
URL url = null;
byte[] content = jarCache.get(name);
if (content != null) {
try {
// Create a nice URL representing the resource following concept of JarUrl.
// Note: this is just for sake of clarity, but at this
// point we use cached content to serve content of URL.
url = new URL(PROTO, "", -1,
this.jarKey + (name.startsWith("/") ? "!" : "!/") + name,
new DkvUrlStreamHandler());
} catch (MalformedURLException e) {
// Fail quickly since this is not expected to fail
throw new RuntimeException(e);
}
}
return url;
}
// Reads the jar stored under jarKey and indexes every file entry by path.
static Map<String, byte[]> buildJarCache(Key jarKey) {
Map<String, byte[]> jarCache = new HashMap<>();
try(JarInputStream jis = new JarInputStream(new ByteArrayInputStream(DKV.get(jarKey).memOrLoad()))) {
JarEntry entry = null;
while ((entry = jis.getNextJarEntry()) != null) {
if (entry.isDirectory()) continue;
byte[] content = readJarEntry(jis, entry);
jarCache.put(entry.getName(), content);
}
} catch (IOException e) {
// Fail quickly
throw new RuntimeException(e);
}
return jarCache;
}
// Entry size may be unknown (-1); in that case read the stream to exhaustion.
static byte[] readJarEntry(JarInputStream jis, JarEntry entry) throws IOException {
int len = (int) entry.getSize();
return len > 0 ? IOUtils.toByteArray(jis, len) : IOUtils.toByteArray(jis);
}
// Serves "dkv:" URLs from the enclosing loader's in-memory jar cache.
final class DkvUrlStreamHandler extends URLStreamHandler {
public static final String PROTO = "dkv";
@Override
protected URLConnection openConnection(URL url) throws IOException {
if (!url.getProtocol().equals(PROTO)) {
throw new IOException("Cannot handle protocol: " + url.getProtocol());
}
String path = url.getPath();
int separator = path.indexOf("!/");
if (separator == -1) {
throw new MalformedURLException("Cannot find '!/' in DKV URL!");
}
String file = path.substring(separator + 2);
byte[] content = jarCache.get(file);
assert content != null : " DkvUrlStreamHandler is not created properly to point to file resource: " + url.toString();
return new ByteArrayUrlConnection(url, new ByteArrayInputStream(content));
}
}
protected static class ByteArrayUrlConnection extends URLConnection {
/**
 * The input stream to return for this connection.
 */
private final InputStream inputStream;
/**
 * Creates a new byte array URL connection.
 *
 * @param url The URL that this connection represents.
 * @param inputStream The input stream to return from this connection.
 */
protected ByteArrayUrlConnection(URL url, InputStream inputStream) {
super(url);
this.inputStream = inputStream;
}
@Override
public void connect() {
connected = true;
}
@Override
public InputStream getInputStream() {
connect(); // Mimics the semantics of an actual URL connection.
return inputStream;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/FoldingColumn.java
|
package water.udf;
import water.fvec.Chunk;
import water.fvec.RawChunk;
import water.fvec.Vec;
import water.util.fp.Foldable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import static water.util.Java7.*;
/**
 * This column depends on a plurality of columns: its value at each row is the
 * fold (reduction) of the source columns' values at that row via a {@link Foldable}.
 *
 * All source columns must be mutually compatible (same vec layout).
 */
public class FoldingColumn<X, Y> extends FunColumnBase<Y> {
  private final Foldable<X, Y> f;
  private final Column<X>[] columns;

  @Override public int rowLayout() {
    return columns.length > 0 ? columns[0].rowLayout() : 0;
  }

  /**
   * deserialization :(
   */
  public FoldingColumn() {
    f = null; columns = null;
  }

  public FoldingColumn(Foldable<X, Y> f, Column<X>... columns) {
    super(columns.length == 0 ? null : columns[0]);
    assert columns.length > 0 : "Require at least one column for folding";
    this.f = f;
    this.columns = columns;
    if (columns.length > 1) {
      Column<X> c0 = columns[0];
      for (int i = 1; i < columns.length; i++) {
        Column<X> c = columns[i];
        assert c0.isCompatibleWith(c) : "Columns must be compatible; " + c0 + " vs #" + i + ": " + c;
      }
    }
  }

  @SuppressWarnings("unchecked")
  public FoldingColumn(Foldable<X, Y> f, Iterable<Column<X>> columns) {
    super(columns.iterator().next());
    this.f = f;
    ArrayList<Column<X>> list = new ArrayList<>();
    for (Column<X> column : columns) {
      list.add(column);
    }
    // Fix: list.toArray() returns Object[], and casting Object[] to Column[]
    // throws ClassCastException at runtime. The typed toArray(T[]) overload
    // allocates an actual Column[] instead.
    this.columns = (Column<X>[]) list.toArray(new Column[list.size()]);
  }

  /** Folds all source values at the given row, starting from f.initial(). */
  @Override public Y get(long idx) {
    Y y = f.initial();
    for (Column<X> col : columns) y = f.apply(y, col.apply(idx));
    return y;
  }

  @Override
  public TypedChunk<Y> chunkAt(int i) {
    List<TypedChunk<X>> chunks = new LinkedList<>();
    for (Column<X> c : columns) chunks.add(c.chunkAt(i));
    return new FunChunk(chunks);
  }

  /** A row is NA if it is NA in any of the source columns. */
  @Override public boolean isNA(long idx) {
    for (Column<X> col : columns) if (col.isNA(idx)) return true;
    return false;
  }

  /** Per-chunk view of the fold, for distributed calculations. */
  private class FunChunk extends DependentChunk<Y> {
    private final List<TypedChunk<X>> chunks;

    public FunChunk(List<TypedChunk<X>> chunks) {
      super(chunks.get(0));
      this.chunks = chunks;
    }

    private RawChunk myChunk = new RawChunk(this);

    @Override public Vec vec() { return FoldingColumn.this.vec(); }
    @Override public Chunk rawChunk() { return myChunk; }

    @Override public boolean isNA(int i) {
      for (TypedChunk<X> c : chunks) if (c.isNA(i)) return true;
      return false;
    }

    @Override
    public Y get(int idx) {
      Y y = f.initial();
      for (TypedChunk<X> c : chunks) y = f.apply(y, c.get(idx));
      return y;
    }
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o instanceof FoldingColumn) {
      FoldingColumn other = (FoldingColumn) o;
      return Objects.equals(f, other.f) && Arrays.equals(columns, other.columns);
    }
    return false;
  }

  @Override
  public int hashCode() {
    return 61 * Arrays.hashCode(columns) + Objects.hashCode(f);
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/Fun2Column.java
|
package water.udf;
import water.fvec.Chunk;
import water.fvec.RawChunk;
import water.fvec.Vec;
import water.util.fp.Function2;
import static water.util.Java7.*;
/**
 * This column depends on two other columns: its value at each row is
 * f(xs[row], ys[row]), computed lazily. The two source columns must be compatible.
 */
public class Fun2Column<X, Y, Z> extends FunColumnBase<Z> {
  private final Function2<X, Y, Z> f;
  private final Column<X> xs;
  private final Column<Y> ys;

  @Override public int rowLayout() { return xs.rowLayout(); }

  /**
   * deserialization :(
   */
  public Fun2Column() {
    f = null; xs = null; ys = null;
  }

  public Fun2Column(Function2<X, Y, Z> f, Column<X> xs, Column<Y> ys) {
    super(xs);
    this.f = f;
    this.xs = xs;
    this.ys = ys;
    assert xs.isCompatibleWith(ys) : "Columns must be compatible: " + xs + ", " + ys;
  }

  /** Applies f to the row's source values; null when either source is NA. */
  @Override public Z get(long idx) {
    return isNA(idx) ? null : f.apply(xs.apply(idx), ys.apply(idx));
  }

  @Override
  public TypedChunk<Z> chunkAt(int i) {
    return new FunChunk(xs.chunkAt(i), ys.chunkAt(i));
  }

  @Override public boolean isNA(long idx) { return xs.isNA(idx) || ys.isNA(idx); }

  /**
   * Pretends to be a chunk of a column, for distributed calculations.
   * Has type, and is not materialized
   */
  public class FunChunk extends DependentChunk<Z> {
    private final TypedChunk<X> cx;
    private final TypedChunk<Y> cy;
    private RawChunk myChunk = new RawChunk(this);

    @Override public Chunk rawChunk() { return myChunk; }
    @Override public Vec vec() { return Fun2Column.this.vec(); }

    public FunChunk(TypedChunk<X> cx, TypedChunk<Y> cy) {
      super(cx);
      this.cx = cx;
      this.cy = cy;
    }

    @Override public boolean isNA(int i) { return cx.isNA(i) || cy.isNA(i); }
    @Override public Z get(int i) { return f.apply(cx.get(i), cy.get(i)); }
  }

  // Fix: equals/hashCode previously ignored ys, so two columns differing only
  // in the second source compared equal. Both now include all three components.
  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o instanceof Fun2Column) {
      Fun2Column other = (Fun2Column) o;
      return Objects.equals(f, other.f) && xs.equals(other.xs) && ys.equals(other.ys);
    }
    return false;
  }

  @Override
  public int hashCode() {
    return 61 * (61 * xs.hashCode() + ys.hashCode()) + Objects.hashCode(f);
  }

  @Override public String toString() { return "Fun2Column(" + f.getClass().getSimpleName() + "," + xs + "," + ys + ")"; }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/Fun3Column.java
|
package water.udf;
import water.fvec.Chunk;
import water.fvec.RawChunk;
import water.fvec.Vec;
import water.util.fp.Function3;
import static water.util.Java7.*;
/**
 * This column depends on three other columns: its value at each row is
 * f(xs[row], ys[row], zs[row]), computed lazily. All three sources must be compatible.
 */
public class Fun3Column<X, Y, Z, T> extends FunColumnBase<T> {
  private final Function3<X, Y, Z, T> f;
  private final Column<X> xs;
  private final Column<Y> ys;
  private final Column<Z> zs;

  @Override public int rowLayout() { return xs.rowLayout(); }

  /**
   * deserialization :(
   */
  public Fun3Column() {
    f = null; xs = null; ys = null; zs = null;
  }

  public Fun3Column(Function3<X, Y, Z, T> f, Column<X> xs, Column<Y> ys, Column<Z> zs) {
    super(xs);
    this.f = f;
    this.xs = xs;
    this.ys = ys;
    this.zs = zs;
    assert xs.isCompatibleWith(ys) : "Columns 1 and 2 must be compatible: " + xs + ", " + ys;
    assert xs.isCompatibleWith(zs) : "Columns 1 and 3 must be compatible: " + xs + ", " + zs;
  }

  /** Applies f to the row's source values; null when any source is NA. */
  @Override public T get(long idx) {
    return isNA(idx) ? null : f.apply(xs.apply(idx), ys.apply(idx), zs.apply(idx));
  }

  @Override
  public TypedChunk<T> chunkAt(int i) {
    return new FunChunk(xs.chunkAt(i), ys.chunkAt(i), zs.chunkAt(i));
  }

  // Fix: zs was omitted from the NA check, disagreeing with FunChunk.isNA below,
  // so rows missing only in the third column were treated as present.
  @Override public boolean isNA(long idx) { return xs.isNA(idx) || ys.isNA(idx) || zs.isNA(idx); }

  /**
   * Pretends to be a chunk of a column, for distributed calculations.
   * Has type, and is not materialized
   */
  public class FunChunk extends DependentChunk<T> {
    private final TypedChunk<X> cx;
    private final TypedChunk<Y> cy;
    private final TypedChunk<Z> cz;

    public FunChunk(TypedChunk<X> cx, TypedChunk<Y> cy, TypedChunk<Z> cz) {
      super(cx);
      this.cx = cx;
      this.cy = cy;
      this.cz = cz;
    }

    private RawChunk myChunk = new RawChunk(this);

    @Override public Chunk rawChunk() { return myChunk; }
    @Override public Vec vec() { return Fun3Column.this.vec(); }
    @Override public boolean isNA(int i) { return cx.isNA(i) || cy.isNA(i) || cz.isNA(i); }
    @Override public T get(int i) {
      return f.apply(cx.get(i), cy.get(i), cz.get(i));
    }
  }

  // Fix: equals/hashCode previously ignored ys and zs, so columns differing only
  // in the later sources compared equal. All components are now included.
  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o instanceof Fun3Column) {
      Fun3Column other = (Fun3Column) o;
      return Objects.equals(f, other.f) && xs.equals(other.xs)
          && ys.equals(other.ys) && zs.equals(other.zs);
    }
    return false;
  }

  @Override
  public int hashCode() {
    return 61 * (61 * (61 * xs.hashCode() + ys.hashCode()) + zs.hashCode()) + Objects.hashCode(f);
  }

  @Override public String toString() { return "Fun3Column(" + f.getClass().getSimpleName() + "," + xs + "," + ys+ "," + zs + ")"; }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/FunColumn.java
|
package water.udf;
import water.fvec.Chunk;
import water.fvec.RawChunk;
import water.fvec.Vec;
import water.util.fp.Function;
import static water.util.Java7.*;
/**
 * A lazy column whose value at each row is obtained by applying a function
 * to the corresponding value of another column.
 */
public class FunColumn<X, Y> extends FunColumnBase<Y> {
  private final Function<X, Y> f;
  private final Column<X> xs;

  /**
   * deserialization :(
   */
  public FunColumn() {
    f = null;
    xs = null;
  }

  public FunColumn(Function<X, Y> f, Column<X> xs) {
    super(xs);
    this.f = f;
    this.xs = xs;
  }

  @Override public int rowLayout() { return xs.rowLayout(); }

  @Override public TypedChunk<Y> chunkAt(int i) {
    return new FunChunk(xs.chunkAt(i));
  }

  /** Applies f to the source value; null when the source is NA. */
  public Y get(long idx) {
    if (isNA(idx)) return null;
    return f.apply(xs.apply(idx));
  }

  @Override public boolean isNA(long idx) { return xs.isNA(idx); }

  /**
   * Pretends to be a chunk of a column, for distributed calculations.
   * Has type, and is not materialized.
   */
  public class FunChunk extends DependentChunk<Y> {
    private final TypedChunk<X> cx;
    private RawChunk myChunk = new RawChunk(this);

    public FunChunk(TypedChunk<X> cx) {
      super(cx);
      this.cx = cx;
    }

    @Override public Vec vec() { return FunColumn.this.vec(); }
    @Override public int length() { return cx.length(); }
    @Override public Chunk rawChunk() { return myChunk; }
    @Override public boolean isNA(int i) { return cx.isNA(i); }
    @Override public Y get(int i) { return f.apply(cx.get(i)); }
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (!(o instanceof FunColumn)) return false;
    FunColumn<?, ?> that = (FunColumn<?, ?>) o;
    return Objects.equals(f, that.f) && xs.equals(that.xs);
  }

  @Override
  public int hashCode() {
    return 61 * xs.hashCode() + Objects.hashCode(f);
  }

  @Override public String toString() { return "FunColumn(" + f.getClass().getSimpleName() + "," + xs + ")"; }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/FunColumnBase.java
|
package water.udf;
import water.fvec.Vec;
/**
 * Basic common behavior for Functional Columns: size and backing vec are
 * delegated to the "master" column this one is derived from.
 */
public abstract class FunColumnBase<T> extends ColumnBase<T> implements Column<T> {
  Column<?> master;

  /**
   * deserialization :(
   */
  public FunColumnBase() {}

  FunColumnBase(Column<?> master) {
    this.master = master;
  }

  @Override public Vec vec() { return master.vec(); }

  @Override public long size() { return master.size(); }

  /** Value at the given row; supplied by concrete subclasses. */
  public abstract T get(long idx);

  @Override public T apply(long idx) { return get(idx); }

  @Override public T apply(Long idx) { return get(idx); }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/JavaCFuncLoader.java
|
package water.udf;
/**
 * Custom function loader of java-based functions.
 *
 * This loader is a tiny wrapper around {@link ClassLoader#loadClass(String)}
 * plus no-arg instantiation of the loaded class.
 */
public class JavaCFuncLoader extends CFuncLoader {

  @Override
  public String getLang() {
    return "java";
  }

  /**
   * Loads the named class via the given classloader and instantiates it.
   *
   * @throws RuntimeException wrapping any failure (class not found, no accessible
   *         no-arg constructor, constructor threw, or loaded class has wrong type)
   */
  @Override
  public <F> F load(String jfuncName, Class<? extends F> targetKlazz, ClassLoader classLoader) {
    try {
      // Class#cast replaces the previous unchecked (F) cast: a class of the wrong
      // type now fails fast here instead of surfacing as a CCE at a later call site.
      return targetKlazz.cast(classLoader.loadClass(jfuncName).newInstance());
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/TypedChunk.java
|
package water.udf;
import water.fvec.Chunk;
import water.fvec.Vec;
/**
 * Represents a chunk that knows its type
 */
public interface TypedChunk<T> extends Vec.Holder {
// Typed value at the given in-chunk index.
T get(int i);
// True when the value at the given in-chunk index is missing.
boolean isNA(int i);
// Number of rows in this chunk.
int length();
// Global row index of this chunk's first row.
long start();
// The underlying untyped chunk (may be a synthetic wrapper for lazy chunks).
Chunk rawChunk();
// Index of this chunk within its vec.
int cidx();
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/TypedFrame.java
|
package water.udf;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.fp.Function;
import water.udf.specialized.Enums;
import static water.udf.DataColumns.*;
import java.io.IOException;
/**
 * Single column frame that knows its data type
 */
public class TypedFrame<X> extends Frame {
  private final ColumnFactory<X> factory;     // builds typed chunks/columns of X
  private final long length;                  // number of rows
  private final Function<Long, X> function;   // absolute row index -> value
  private Column<X> column;                   // NOTE(review): never assigned in this block; looks vestigial — confirm before removing
  /**
   * deserialization :(
   */
  public TypedFrame() {
    factory = null;
    length = -1;
    function = null;
  }
  public TypedFrame(BaseFactory<X> factory, long length, Function<Long, X> function) {
    super();
    this.factory = factory;
    this.length = length;
    this.function = function;
  }
  /** Frame over an existing column; the zero-vec is shaped after that column's layout. */
  public static <X> TypedFrame<X> forColumn(final BaseFactory<X> factory, final Column<X> column) {
    return new TypedFrame<X>(factory, column.size(), column) {
      @Override protected Vec buildZeroVec() { return factory.buildZeroVec(column); }
    };
  }
  /** Specialization for categoricals: carries the domain alongside the ordinal values. */
  public final static class EnumFrame extends TypedFrame<Integer> {
    private final String[] domain;
    public EnumFrame(long length, Function<Long, Integer> function, String[] domain) {
      super(Enums.enums(domain), length, function);
      this.domain = domain;
    }
  }
  protected Vec buildZeroVec() { return factory.buildZeroVec(length); }
  /**
   * Materializes the function into a vec: a single distributed pass writes
   * function.apply(absolute row index) into every row of a freshly built zero-vec.
   */
  protected Vec makeVec() throws IOException {
    final Vec vec0 = buildZeroVec();
    MRTask task = new MRTask() {
      @Override public void map(Chunk[] cs) {
        for (Chunk c : cs) {
          DataChunk<X> tc = factory.apply(c);  // typed view over the raw chunk
          for (int r = 0; r < c._len; r++) {
            long i = r + c.start();            // chunk-local row -> absolute row
            tc.set(r, function.apply(i));
          }
        }
      }
    };
    MRTask mrTask = task.doAll(vec0);
    return mrTask._fr.vecs()[0];
  }
  /** Wraps an already-materialized vec into a typed column. */
  protected DataColumn<X> newColumn(Vec vec) throws IOException {
    return factory.newColumn(vec);
  }
  /** Builds the vec (see {@link #makeVec()}) and wraps it into a typed column. */
  public DataColumn<X> newColumn() throws IOException {
    return newColumn(makeVec());
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/UnfoldingColumn.java
|
package water.udf;
import water.H2O;
import water.util.fp.Unfoldable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import static water.util.Java7.*;
/**
 * This column depends a plurality of columns
 */
public class UnfoldingColumn<X, Y> extends FunColumnBase<List<Y>> {
  private final Unfoldable<X, Y> f;
  private final Column<X> column;
  private int requiredSize;

  @Override public long size() { return column.size(); }
  @Override public int rowLayout() { return column.rowLayout(); }

  /** deserialization :( */
  public UnfoldingColumn() {
    f = null;
    column = null;
  }

  /** Unfolds with no size normalization: lists are returned exactly as produced by {@code f}. */
  public UnfoldingColumn(Unfoldable<X, Y> f, Column<X> column) {
    this(f, column, 0);
  }

  /** Unfolds and trims/null-pads every produced list to exactly {@code requiredSize} elements. */
  public UnfoldingColumn(Unfoldable<X, Y> f, Column<X> column, int requiredSize) {
    super(column);
    this.f = f;
    this.column = column;
    this.requiredSize = requiredSize;
  }

  /**
   * Unfolded values at row {@code idx}; an NA row yields an empty list. When
   * {@code requiredSize > 0} the result is trimmed or padded with nulls to that length.
   */
  public List<Y> get(long idx) {
    List<Y> raw = isNA(idx) ? Collections.<Y>emptyList() : f.apply(column.apply(idx));
    if (requiredSize == 0 || raw.size() == requiredSize) return raw;
    List<Y> trimmed = raw.subList(0, Math.min(raw.size(), requiredSize));
    if (trimmed.size() == requiredSize) return trimmed;
    List<Y> padded = new ArrayList<>(requiredSize);
    padded.addAll(trimmed);
    while (padded.size() < requiredSize) padded.add(null);
    return padded;
  }

  @Override public List<Y> apply(long idx) { return get(idx); }
  @Override public List<Y> apply(Long idx) { return get(idx); }

  @Override
  public TypedChunk<List<Y>> chunkAt(int i) {
    throw H2O.unimpl("Will have to think how to implement multi-string chunks...");
  }

  @Override public boolean isNA(long idx) {
    return column.isNA(idx);
  }

  /** Joins items with the given delimiter (like String.join, but for any Iterable). */
  public static String join(String delimiter, Iterable<?> xs) {
    StringBuilder sb = new StringBuilder();
    for (Object x : xs) {
      if (sb.length() > 0) sb.append(delimiter);
      sb.append(x);
    }
    return sb.toString();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (!(o instanceof UnfoldingColumn)) return false;
    UnfoldingColumn<?, ?> that = (UnfoldingColumn<?, ?>) o;
    return requiredSize == that.requiredSize
        && Objects.equals(f, that.f)
        && column.equals(that.column);
  }

  @Override
  public int hashCode() {
    return 19 * (61 * column.hashCode() + Objects.hashCode(f)) + requiredSize;
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/UnfoldingFrame.java
|
package water.udf;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.fp.Function;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static water.udf.specialized.Enums.enums;
/**
 * Single-column frame that knows its data type and can unfold
 */
public class UnfoldingFrame<X> extends Frame {
  protected final ColumnFactory<X> factory;          // builds typed chunks/columns of X
  protected final long len;                          // number of rows
  protected final Function<Long, List<X>> function;  // absolute row index -> values for that row
  protected final int width;                         // number of output columns

  /** for deserialization (sigh) */
  public UnfoldingFrame() {
    factory = null;
    len = -1;
    function = null;
    width = -1;
  }

  public UnfoldingFrame(ColumnFactory<X> factory, long len, Function<Long, List<X>> function, int width) {
    super();
    this.factory = factory;
    this.len = len;
    this.function = function;
    this.width = width;
    assert len >= 0: "Frame must have a nonnegative length, but found"+len;
    assert width >= 0: "Multicolumn frame must have a nonnegative width, but found"+width;
  }

  /** Builds an unfolding frame whose zero-vecs share the master column's layout. */
  public static <X> UnfoldingFrame<X> unfoldingFrame(final ColumnFactory<X> factory, final Column<List<X>> master, int width) {
    return new UnfoldingFrame<X>(factory, master.size(), master, width) {
      @Override protected Vec buildZeroVec() {
        Vec v0 = DataColumns.buildZeroVec(this.len, factory.typeCode());
        v0.align(master.vec());
        return v0;
      }
    };
  }

  /** Categorical specialization: carries the domain for the unfolded ordinal columns. */
  static class UnfoldingEnumFrame extends UnfoldingFrame<Integer> {
    private final String[] domain;
    /** for deserialization */
    public UnfoldingEnumFrame() {domain = null; }
    public UnfoldingEnumFrame(long length, Function<Long, List<Integer>> function, int width, String[] domain) {
      super(enums(domain), length, function, width);
      this.domain = domain;
      assert domain != null : "An enum must have a domain";
      assert domain.length > 0 : "Domain cannot be empty";
    }
  }

  public static <X> UnfoldingEnumFrame UnfoldingEnumFrame(final Column<List<Integer>> master, int width, String[] domain) {
    return new UnfoldingEnumFrame(master.size(), master, width, domain) {
      @Override protected Vec buildZeroVec() {
        Vec v0 = DataColumns.buildZeroVec(this.len, Vec.T_CAT);
        v0.align(master.vec());
        return v0;
      }
    };
  }

  protected Vec buildZeroVec() {
    return DataColumns.buildZeroVec(len, factory.typeCode());
  }

  /**
   * Fills {@code width} zero-vecs in one distributed pass: row i of column j receives the
   * j-th element of {@code function.apply(i)}, or NA when the list has fewer than j+1 elements.
   */
  protected List<Vec> makeVecs() throws IOException {
    Vec[] vecs = new Vec[width];
    for (int j = 0; j < width; j++) {
      vecs[j] = buildZeroVec();
    }
    MRTask task = new MRTask() {
      @Override
      public void map(Chunk[] cs) {
        int size = cs[0].len(); // TODO(vlad): find a solution for empty
        long start = cs[0].start();
        // Wrap each raw chunk in its typed view once per map() call; previously the
        // wrapper was re-created for every (row, column) pair.
        List<DataChunk<X>> typed = new ArrayList<>(cs.length);
        for (Chunk c : cs) typed.add(factory.apply(c));
        for (int r = 0; r < size; r++) {
          List<X> values = function.apply(r + start);
          for (int j = 0; j < cs.length; j++) {
            typed.get(j).set(r, j < values.size() ? values.get(j) : null);
          }
        }
      }
    };
    MRTask mrTask = task.doAll(vecs);
    return Arrays.asList(mrTask._fr.vecs());
  }

  /** Runs {@link #makeVecs()} and wraps each resulting vec in a typed column. */
  public List<DataColumn<X>> materialize() throws IOException {
    List<Vec> vecs = makeVecs();
    List<DataColumn<X>> result = new ArrayList<>(width);
    for (Vec vec : vecs) result.add(factory.newColumn(vec));
    return result;
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/specialized/Dates.java
|
package water.udf.specialized;
import water.fvec.Chunk;
import water.fvec.Vec;
import water.udf.ColumnFactory;
import water.udf.DataChunk;
import water.udf.DataColumn;
import water.udf.DataColumns;
import java.util.Date;
/**
 * Specialized factory for dates
 */
public class Dates extends DataColumns.BaseFactory<Date> {
  public static final water.udf.specialized.Dates Dates = new Dates();

  public Dates() {
    super(Vec.T_TIME, "Time");
  }

  /** Typed chunk view: epoch-millis longs exposed as {@link Date}s. */
  static class DateChunk extends DataChunk<Date> {
    /** for deserialization */
    public DateChunk(){}
    public DateChunk(Chunk c) {
      super(c);
    }

    /** @return the date at row {@code idx}, or null for a missing value */
    @Override
    public Date get(int idx) {
      if (isNA(idx)) return null;
      return new Date(c.at8(idx));
    }

    /** Stores {@code value} as epoch millis; null marks the row as missing. */
    @Override
    public void set(int idx, Date value) {
      if (value != null) c.set(idx, value.getTime());
      else c.setNA(idx);
    }
  }

  @Override
  public DataChunk<Date> apply(final Chunk c) {
    return new DateChunk(c);
  }

  /** Whole-column counterpart of {@link DateChunk}. */
  static class Column extends DataColumn<Date> {
    public Column() {}
    public Column(Vec v, ColumnFactory<Date> factory) {
      super(v, factory);
    }

    /** @return the date at row {@code idx}, or null for a missing value */
    @Override
    public Date get(long idx) {
      if (isNA(idx)) return null;
      return new Date(vec().at8(idx));
    }

    /** Stores {@code value} as epoch millis; null marks the row as missing. */
    @Override
    public void set(long idx, Date value) {
      if (value != null) vec().set(idx, value.getTime());
      else vec().setNA(idx);
    }
  }

  /**
   * Wraps a vec into a date column.
   * @throws IllegalArgumentException unless the vec is of time or numeric type
   */
  @Override
  public DataColumn<Date> newColumn(final Vec vec) {
    boolean compatible = vec.get_type() == Vec.T_TIME || vec.get_type() == Vec.T_NUM;
    if (!compatible)
      throw new IllegalArgumentException("Expected a type compatible with Dates, got " + vec.get_type_str());
    return new Column(vec, this);
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/specialized/Doubles.java
|
package water.udf.specialized;
import water.fvec.Chunk;
import water.fvec.Vec;
import water.udf.ColumnFactory;
import water.udf.DataChunk;
import water.udf.DataColumn;
import water.udf.DataColumns;
/**
 * Specialized factory for double numbers
 */
public class Doubles extends DataColumns.BaseFactory<Double> {
  public static final water.udf.specialized.Doubles Doubles = new Doubles();

  public Doubles() {
    super(Vec.T_NUM, "Doubles");
  }

  /** Typed chunk view over a numeric chunk. */
  @Override
  public DataChunk<Double> apply(final Chunk c) {
    return new DoubleChunk(c);
  }

  /**
   * Wraps a numeric vec into a doubles column.
   * @throws IllegalArgumentException if the vec is not numeric
   */
  @Override
  public DataColumn<Double> newColumn(final Vec vec) {
    if (vec.get_type() != Vec.T_NUM)
      throw new IllegalArgumentException("Expected type T_NUM, got " + vec.get_type_str());
    return new Column(vec, this);
  }

  static class DoubleChunk extends DataChunk<Double> {
    /**
     * deserialization wants it
     */
    public DoubleChunk() {}
    DoubleChunk(Chunk c) {
      super(c);
    }
    /** @return the value at row {@code idx}, or null for a missing value */
    @Override public Double get(int idx) { return c.isNA(idx) ? null : c.atd(idx); }
    @Override public void set(int idx, Double value) {
      if (value == null) c.setNA(idx); else c.set(idx, value);
    }
    /** Primitive overload; avoids boxing. */
    public void set(int idx, double value) { c.set(idx, value); }
  }

  static class Column extends DataColumn<Double> {
    /**
     * deserialization wants it
     */
    public Column() {}
    Column(Vec vec, ColumnFactory<Double> factory) {
      super(vec, factory);
    }
    /**
     * Returns null for a missing value, consistent with {@link DoubleChunk#get(int)} and
     * the other specialized columns (previously this returned Vec.at's NaN for NAs).
     */
    public Double get(long idx) { return isNA(idx) ? null : vec().at(idx); }
    @Override public Double apply(Long idx) { return get(idx); }
    @Override public Double apply(long idx) { return get(idx); }
    @Override public void set(long idx, Double value) {
      if (value == null) vec().setNA(idx); else vec().set(idx, value);
    }
    /** Primitive overload; avoids boxing. */
    public void set(long idx, double value) { vec().set(idx, value); }
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/specialized/Enums.java
|
package water.udf.specialized;
import water.fvec.Chunk;
import water.fvec.Vec;
import water.udf.*;
import water.util.fp.Function;
import java.io.IOException;
/**
 * Specialized factory for enums (aka Cats)
 */
public class Enums extends DataColumns.BaseFactory<Integer> {
  private final String[] domain;

  /** deserialization :( */
  public Enums() {
    super(Vec.T_CAT, "Cats");
    domain = null;
  }

  /** @param domain categorical levels backing columns built by this factory */
  public Enums(String[] domain) {
    super(Vec.T_CAT, "Cats");
    this.domain = domain;
  }

  /** Convenience factory method. */
  public static Enums enums(String[] domain) {
    return new Enums(domain);
  }

  /** Typed chunk view: category ordinals exposed as Integers. */
  public static class EnumChunk extends DataChunk<Integer> {
    /** deserialization :( */
    EnumChunk() {}
    EnumChunk(Chunk c) { super(c); }

    /** @return the category ordinal at row {@code idx}, or null for a missing value */
    @Override
    public Integer get(int idx) {
      if (c.isNA(idx)) return null;
      return (int) c.at8(idx);
    }

    /** Stores the ordinal; null marks the row as missing. */
    @Override
    public void set(int idx, Integer value) {
      if (value != null) c.set(idx, value);
      else c.setNA(idx);
    }

    /** Primitive overload; avoids boxing. */
    public void set(int idx, int value) {
      c.set(idx, value);
    }
  }

  @Override
  public DataChunk<Integer> apply(final Chunk c) {
    return new EnumChunk(c);
  }

  /** Materializes a new enum column of the given length from a row-to-ordinal function. */
  public DataColumn<Integer> newColumn(long length, final Function<Long, Integer> f) throws IOException {
    return new TypedFrame.EnumFrame(length, f, domain).newColumn();
  }

  /** Whole-column counterpart of {@link EnumChunk}; carries the domain. */
  static class Column extends DataColumn<Integer> {
    private final String[] domain;
    /** deserialization :( */
    public Column() { domain = null; }
    Column(Vec v, Enums factory) {
      super(v, factory);
      domain = factory.domain;
      assert domain != null && domain.length > 0 : "Need a domain for enums";
    }

    /** @return the ordinal at row {@code idx}, or null for a missing value */
    @Override
    public Integer get(long idx) {
      if (isNA(idx)) return null;
      return (int) vec().at8(idx);
    }

    /** Stores the ordinal; null marks the row as missing. */
    @Override
    public void set(long idx, Integer value) {
      if (value != null) vec().set(idx, value);
      else vec().setNA(idx);
    }

    /** Primitive overload; avoids boxing. */
    public void set(long idx, int value) {
      vec().set(idx, value);
    }
  }

  /**
   * Wraps a categorical vec into an enum column, installing this factory's domain on it.
   * @throws IllegalArgumentException if the vec is not categorical
   */
  @Override
  public DataColumn<Integer> newColumn(final Vec vec) {
    if (vec.get_type() != Vec.T_CAT)
      throw new IllegalArgumentException("Expected type T_CAT, got " + vec.get_type_str());
    vec.setDomain(domain);
    return new Column(vec, this);
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/udf/specialized/Strings.java
|
package water.udf.specialized;
import water.fvec.Chunk;
import water.fvec.Vec;
import water.parser.BufferedString;
import water.udf.ColumnFactory;
import water.udf.DataChunk;
import water.udf.DataColumn;
import water.udf.DataColumns;
/**
 * Factory for strings column
 */
public class Strings extends DataColumns.BaseFactory<String> {
  public static final Strings Strings = new Strings();

  public Strings() {
    super(Vec.T_STR, "Strings");
  }

  /** Null-safe toString. */
  private static String asString(Object x) {
    if (x == null) return null;
    return x.toString();
  }

  /** Typed chunk view over a raw string chunk. */
  static class StringChunk extends DataChunk<String> {
    /** deserialization :( */
    public StringChunk() {}
    public StringChunk(Chunk c) { super(c); }

    /** @return the string at row {@code idx} (null for a missing value) */
    @Override
    public String get(int idx) {
      BufferedString holder = new BufferedString();
      return asString(c.atStr(holder, idx));
    }

    @Override
    public void set(int idx, String value) {
      c.set(idx, value);
    }
  }

  @Override
  public DataChunk<String> apply(final Chunk c) {
    return new StringChunk(c);
  }

  /** Whole-column counterpart of {@link StringChunk}. */
  static class StringColumn extends DataColumn<String> {
    /** deserialization :( */
    public StringColumn() {}
    StringColumn(Vec vec, ColumnFactory<String> factory) { super(vec, factory); }

    /** @return the string at row {@code idx}, or null for a missing value */
    @Override
    public String get(long idx) {
      if (isNA(idx)) return null;
      return asString(vec().atStr(new BufferedString(), idx));
    }

    @Override
    public void set(long idx, String value) {
      vec().set(idx, value);
    }
  }

  /**
   * Wraps a string vec into a string column.
   * @throws IllegalArgumentException if the vec is not of string type
   */
  @Override
  public DataColumn<String> newColumn(final Vec vec) {
    if (vec.get_type() != Vec.T_STR)
      throw new IllegalArgumentException("Expected type T_STR, got " + vec.get_type_str());
    return new StringColumn(vec, this);
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/ArrayUtils.java
|
package water.util;
import Jama.Matrix;
import water.*;
import water.fvec.*;
import java.text.DecimalFormat;
import java.util.*;
import static java.lang.StrictMath.sqrt;
import static water.util.RandomUtils.getRNG;
/* Bulk Array Utilities */
public class ArrayUtils {
private static final byte[] EMPTY_BYTE_ARRAY = new byte[] {};
public static int[] cumsum(final int[] from) {
int arryLen = from.length;
int[] cumsumR = new int[arryLen];
int result=0;
for (int index = 0; index < arryLen; index++) {
result += from[index];
cumsumR[index] = result;
}
return cumsumR;
}
/***
* Given an array with first dimension J and second dimension q, this function will flatten the 2-D array into
* 1-D array of length J*q. It basically concates each row of arr into one big 1-D array.
*/
public static double[] flattenArray(double[][] arr) {
int numRandCoeff = arr[0].length;
int numLevel2 = arr.length;
int len = numRandCoeff * numLevel2;
double[] flatA = new double[len];
int longIndex;
for (int index2 = 0; index2 < numLevel2; index2++) {
for (int coefInd = 0; coefInd < numRandCoeff; coefInd++) {
longIndex = index2*numRandCoeff+coefInd;
flatA[longIndex] = arr[index2][coefInd];
}
}
return flatA;
}
public static String[] flattenArray(String[][] arr) {
int numRandCoeff = arr[0].length;
int numLevel2 = arr.length;
int len = numRandCoeff * numLevel2;
String[] flatA = new String[len];
int longIndex;
for (int index2 = 0; index2 < numLevel2; index2++) {
for (int coefInd = 0; coefInd < numRandCoeff; coefInd++) {
longIndex = index2*numRandCoeff+coefInd;
flatA[longIndex] = arr[index2][coefInd];
}
}
return flatA;
}
public static void copy2DArray(double[][] src_array, double[][] dest_array) {
int numRows = src_array.length;
for (int colIdx = 0; colIdx < numRows; colIdx++) { // save zMatrix for debugging purposes or later scoring on training dataset
System.arraycopy(src_array[colIdx], 0, dest_array[colIdx], 0,
src_array[colIdx].length);
}
}
  // Deep-copies a 2D array. Despite the historical "square" wording, this handles any
  // rectangular array; all rows are allocated with src_array[0].length columns.
  public static double[][] copy2DArray(double[][] src_array) {
    double[][] dest_array = MemoryManager.malloc8d(src_array.length, src_array[0].length);
    copy2DArray(src_array, dest_array);
    return dest_array;
  }
public static void copy2DArray(int[][] src_array, int[][] dest_array) {
int numRows = src_array.length;
for (int colIdx = 0; colIdx < numRows; colIdx++) { // save zMatrix for debugging purposes or later scoring on training dataset
System.arraycopy(src_array[colIdx], 0, dest_array[colIdx], 0,
src_array[colIdx].length);
}
}
/***
* This method will take a 2D array and expand it to be of size numLevel2*tmat.length. Basically, it will copy tmat
* into the diagonal block a bigger matrix of size numLevel2*tmat.length.
*/
public static double[][] expandMat(double[][] tmat, int numLevel2) {
int numRandomCoeff = tmat.length;
int qTimesJ = numRandomCoeff * numLevel2;
double[][] gMat = new double[qTimesJ][qTimesJ];
int colInd = 0;
int rowInd = 0;
for (int ind2 = 0; ind2 < numLevel2; ind2++) {
for (int ind = 0; ind < numRandomCoeff; ind++) {
System.arraycopy(tmat[ind], 0, gMat[rowInd], colInd, numRandomCoeff);
rowInd++;
}
colInd += numRandomCoeff;
}
return gMat;
}
// Sum elements of an array
public static long sum(final long[] from) {
long result = 0;
for (long d: from) result += d;
return result;
}
public static long sum(final long[] from, int startIdx, int endIdx) {
long result = 0;
for (int i = startIdx; i < endIdx; i++) result += from[i];
return result;
}
public static int sum(final int[] from) {
int result = 0;
for( int d : from ) result += d;
return result;
}
public static long suml(final int[] from) {
long result = 0;
for( int d : from ) result += d;
return result;
}
public static void elementwiseSumSymmetricArrays(double[][] a, double[][] b) {
int arrSize = a.length;
for (int index=0; index<arrSize; index++) {
a[index][index] += b[index][index];
for (int index2 = index+1; index2 < arrSize; index2++) {
a[index][index2] += b[index][index2];
a[index2][index] = a[index][index2];
}
}
}
public static float sum(final float[] from) {
float result = 0;
for (float d: from) result += d;
return result;
}
public static double sum(final double[] from) {
double result = 0;
for (double d: from) result += d;
return result;
}
public static float[] reduceMin(float[] a, float[] b) {
for (int i=0; i<a.length; ++i)
a[i] = Math.min(a[i], b[i]);
return a;
}
public static float[] reduceMax(float[] a, float[] b) {
for (int i=0; i<a.length; ++i)
a[i] = Math.max(a[i], b[i]);
return a;
}
public static double innerProduct(double [] x, double [] y){
double result = 0;
for (int i = 0; i < x.length; i++)
result += x[i] * y[i];
return result;
}
public static double innerProductPartial(double [] x, int[] x_index, double [] y){
double result = 0;
for (int i = 0; i < y.length; i++)
result += x[x_index[i]] * y[i];
return result;
}
public static double [] mmul(double [][] M, double [] V) {
double [] res = new double[M.length];
for(int i = 0; i < M.length; ++i) {
double d = 0;
for (int j = 0; j < V.length; ++j) {
d += M[i][j] * V[j];
}
res[i] = d;
}
return res;
}
public static double[][] outerProduct(double[] x, double[] y){
return outerProduct(null, x, y);
}
public static double[][] outerProduct(double[][] result, double[] x, double[] y) {
if (result == null)
result = new double[x.length][y.length];
for(int i = 0; i < x.length; i++) {
for(int j = 0; j < y.length; j++)
result[i][j] = x[i] * y[j];
}
return result;
}
public static double[][] outerProductCum(double[][] result, double[] x, double[] y) {
if (result == null)
result = new double[x.length][y.length];
for(int i = 0; i < x.length; i++) {
for(int j = 0; j < y.length; j++)
result[i][j] += x[i] * y[j];
}
return result;
}
  /**
   * Accumulates the symmetric outer product into result: result += x * x^T, filling both
   * triangles. ("output" in the name looks like a typo for "outer", but the method is
   * public so the name is left as-is.)
   * @throws IllegalArgumentException if result is null
   */
  public static void outputProductSymCum(double[][] result, double[] x) {
    if (result == null)
      throw new IllegalArgumentException("result should have been a double[][] array of size x.length.");
    int xLen = x.length;
    for (int rInd = 0; rInd < xLen; rInd++)
      for (int cInd=0; cInd <= rInd; cInd++) {
        result[rInd][cInd] += x[rInd] * x[cInd];
        if (rInd != cInd)
          result[cInd][rInd] = result[rInd][cInd];
      }
  }
// return the sqrt of each element of the array. Will overwrite the original array in this case
public static double[] sqrtArr(double [] x){
assert (x != null);
int len = x.length;
for (int index = 0; index < len; index++) {
assert (x[index]>=0.0);
x[index] = sqrt(x[index]);
}
return x;
}
public static double l2norm2(double [] x){ return l2norm2(x, false); }
public static double l2norm2(double [][] xs, boolean skipLast){
double res = 0;
for(double [] x:xs)
res += l2norm2(x,skipLast);
return res;
}
public static double l2norm2(double [] x, boolean skipLast){
double sum = 0;
int last = x.length - (skipLast?1:0);
for(int i = 0; i < last; ++i)
sum += x[i]*x[i];
return sum;
}
public static double l2norm2(double[] x, double[] y) { // Computes \sum_{i=1}^n (x_i - y_i)^2
assert x.length == y.length;
double sse = 0;
for(int i = 0; i < x.length; i++) {
double diff = x[i] - y[i];
sse += diff * diff;
}
return sse;
}
public static double l2norm2(double[][] x, double[][] y) {
assert x.length == y.length && x[0].length == y[0].length;
double sse = 0;
for(int i = 0; i < x.length; i++)
sse += l2norm2(x[i], y[i]);
return sse;
}
public static double l1norm(double [] x){ return l1norm(x, false); }
public static double l1norm(double [] x, boolean skipLast){
double sum = 0;
int last = x.length -(skipLast?1:0);
for(int i = 0; i < last; ++i)
sum += x[i] >= 0?x[i]:-x[i];
return sum;
}
  /**
   * Like the R norm for matrices, this function will calculate the maximum absolute col sum if type='o' or
   * return the maximum absolute row sum otherwise
   * <p>
   * NOTE(review): for type=='o' the inner loop sums arr[rind][cind] over cind, i.e. it walks
   * along row rind — which reads as a ROW sum, the opposite of what the sentence above claims.
   * Either the doc or the index order is wrong; confirm against callers before changing.
   * NOTE(review): both loops run to arr.length, so the matrix is assumed square.
   * @param arr
   * @param type
   * @return
   */
  public static double rNorm(double[][] arr, char type) {
    double rnorm = Double.NEGATIVE_INFINITY;
    int numArr = arr.length;
    for (int rind = 0; rind < numArr; rind++) {
      double tempSum = 0.0;
      for (int cind = 0; cind < numArr; cind++) {
        tempSum += type == 'o' ? Math.abs(arr[rind][cind]) : Math.abs(arr[cind][rind]);
      }
      if (tempSum > rnorm)
        rnorm = tempSum;
    }
    return rnorm;
  }
public static double linfnorm(double [] x, boolean skipLast){
double res = Double.NEGATIVE_INFINITY;
int last = x.length -(skipLast?1:0);
for(int i = 0; i < last; ++i) {
if(x[i] > res) res = x[i];
if(-x[i] > res) res = -x[i];
}
return res;
}
public static double l2norm(double[] x) { return Math.sqrt(l2norm2(x)); }
public static double l2norm(double [] x, boolean skipLast){
return Math.sqrt(l2norm2(x, skipLast));
}
public static double l2norm(double[] x, double[] y) { return Math.sqrt(l2norm2(x,y)); }
public static double l2norm(double[][] x, double[][] y) { return Math.sqrt(l2norm2(x,y)); }
// Add arrays, element-by-element
public static byte[] add(byte[] a, byte[] b) {
for(int i = 0; i < a.length; i++ ) a[i] += b[i];
return a;
}
public static int[] add(int[] a, int[] b) {
for(int i = 0; i < a.length; i++ ) a[i] += b[i];
return a;
}
public static int[][] add(int[][] a, int[][] b) {
for(int i = 0; i < a.length; i++ ) add(a[i],b[i]);
return a;
}
public static long[] add(long[] a, long[] b) {
if( b==null ) return a;
for(int i = 0; i < a.length; i++ ) a[i] += b[i];
return a;
}
public static long[][] add(long[][] a, long[][] b) {
for(int i = 0; i < a.length; i++ ) add(a[i],b[i]);
return a;
}
public static long[][][] add(long[][][] a, long[][][] b) {
for(int i = 0; i < a.length; i++ ) add(a[i],b[i]);
return a;
}
public static float[] add(float[] a, float[] b) {
if( b==null ) return a;
for(int i = 0; i < a.length; i++ ) a[i] += b[i];
return a;
}
public static float[] add(float ca, float[] a, float cb, float[] b) {
for(int i = 0; i < a.length; i++ ) a[i] = (ca * a[i]) + (cb * b[i]);
return a;
}
public static float[][] add(float[][] a, float[][] b) {
for(int i = 0; i < a.length; i++ ) add(a[i],b[i]);
return a;
}
public static boolean[] or(boolean[] a, boolean[] b) {
if (b==null)return a;
for (int i = 0; i < a.length; i++) a[i] |= b[i];
return a;
}
public static double[][] deepClone(double [][] ary){
double [][] res = ary.clone();
for(int i = 0 ; i < res.length; ++i)
res[i] = ary[i].clone();
return res;
}
public static <T extends Iced> T[][] deepClone(T [][] ary){
T [][] res = ary.clone();
for(int i = 0 ; i < res.length; ++i)
res[i] = deepClone(res[i]);
return res;
}
public static <T extends Iced> T[] deepClone(T [] ary){
T [] res = ary.clone();
for(int j = 0; j < res.length; ++j)
if(res[j] != null)
res[j] = (T)res[j].clone();
return res;
}
public static double[] add(double[] a, double[] b) {
if( a==null ) return b;
for(int i = 0; i < a.length; i++ ) a[i] += b[i];
return a;
}
public static double[] add(double[] a, double b) {
for(int i = 0; i < a.length; i++ ) a[i] += b;
return a;
}
public static int[] add(int[] a, int b) {
for(int i = 0; i < a.length; i++ ) a[i] += b;
return a;
}
public static double[] wadd(double[] a, double[] b, double w) {
if( a==null ) return b;
for(int i = 0; i < a.length; i++ )
a[i] += w*b[i];
return a;
}
public static double[] wadd(double[] a, double[] b, double [] c, double w) {
if( a==null ) return b;
for(int i = 0; i < a.length; i++ )
c[i] = a[i] + w*b[i];
return c;
}
// a <- b + c
public static double[] add(double[] a, double[] b, double [] c) {
for(int i = 0; i < a.length; i++ )
a[i] = b[i] + c[i];
return a;
}
/**
* Note that this add is cumulative, meaning if you have double matrices a, b, c and you do
* add(a, b) followed by add(a, c), you will get a+b+c.
*/
public static double[][] add(double[][] a, double[][] b) {
if (a == null) return b;
for(int i = 0; i < a.length; i++ ) a[i] = add(a[i], b[i]);
return a;
}
/**
* This add is not cumulative. It will simply return result = a+b.
*/
public static void add(double[][] result, double[][] a, double[][] b) {
if (result == null || result.length != a.length || result[0].length != a[0].length || a.length != b.length ||
a[0].length != b[0].length)
throw new IllegalArgumentException("matrices must be of the same size.");
int numRow = a.length;
int numCol = a[0].length;
for (int rInd = 0; rInd < numRow; rInd++)
for (int cInd = 0; cInd < numCol; cInd++)
result[rInd][cInd] = a[rInd][cInd] + b[rInd][cInd];
}
public static void minus(double[] result, double[] a, double[] b){
if (result == null || result.length != a.length || a.length != b.length)
throw new IllegalArgumentException("matrices must be of the same size.");
int numRow = a.length;
for (int rInd = 0; rInd < numRow; rInd++)
result[rInd] = a[rInd] - b[rInd];
}
public static void minus(double[][] result, double[][] a, double[][] b){
if (result == null || result.length != a.length || a.length != b.length || result[0].length != a[0].length || a[0].length != b[0].length)
throw new IllegalArgumentException("matrices must be of the same size.");
int numRow = a.length;
for (int rInd = 0; rInd < numRow; rInd++)
minus(result[rInd], a[rInd], b[rInd]);
}
public static double[][][] add(double[][][] a, double[][][] b) {
for(int i = 0; i < a.length; i++ ) a[i] = add(a[i],b[i]);
return a;
}
public static double avg(double[] nums) {
double sum = 0;
for(double n: nums) sum+=n;
return sum/nums.length;
}
public static double avg(long[] nums) {
long sum = 0;
for(long n: nums) sum+=n;
return sum/nums.length;
}
public static long[] add(long[] nums, long a) {
for (int i=0;i<nums.length;i++) nums[i] += a;
return nums;
}
public static float[] div(float[] nums, int n) {
for (int i=0; i<nums.length; i++) nums[i] /= n;
return nums;
}
public static float[] div(float[] nums, float n) {
assert !Float.isInfinite(n) : "Trying to divide " + Arrays.toString(nums) + " by " + n; // Almost surely not what you want
for (int i=0; i<nums.length; i++) nums[i] /= n;
return nums;
}
public static double[] div(double[] nums, double n) {
assert !Double.isInfinite(n) : "Trying to divide " + Arrays.toString(nums) + " by " + n; // Almost surely not what you want
for (int i=0; i<nums.length; i++) nums[i] /= n;
return nums;
}
public static double[][] div(double[][] ds, long[] n) {
for (int i=0; i<ds.length; i++) div(ds[i],n[i]);
return ds;
}
public static double[][] div(double[][] ds, double[] n) {
for (int i=0; i<ds.length; i++) div(ds[i],n[i]);
return ds;
}
public static double[] div(double[] ds, long[] n) {
for (int i=0; i<ds.length; i++) ds[i]/=n[i];
return ds;
}
public static double[] div(double[] ds, double[] n) {
for (int i=0; i<ds.length; i++) ds[i]/=n[i];
return ds;
}
public static double[][] mult(double[][] ds, double[] n) {
for (int i=0; i<ds.length; i++) mult(ds[i],n[i]);
return ds;
}
public static float[] mult(float[] nums, float n) {
// assert !Float.isInfinite(n) : "Trying to multiply " + Arrays.toString(nums) + " by " + n; // Almost surely not what you want
for (int i=0; i<nums.length; i++) nums[i] *= n;
return nums;
}
public static double[][] mult(double[][] ary, double n) {
if(ary == null) return null;
for (double[] row : ary) mult(row, n);
return ary;
}
public static double[] mult(double[] source, double[] dest, double n) {
if (source != null && dest != null && source.length==dest.length)
for (int i=0; i<source.length; i++)
dest[i]=source[i]*n;
return dest;
}
public static void mult(double[][] source, double[][] dest, double n) {
if (dest != null && source.length == dest.length && source[0].length == dest[0].length) {
int numRow = source.length;
for (int i=0; i<numRow; i++)
mult(source[i], dest[i], n);
}
}
public static double[] multCum(double[] source, double[] dest, double n) {
if (source != null && dest != null && source.length==dest.length)
for (int i=0; i<source.length; i++)
dest[i]+=source[i]*n;
return dest;
}
public static double[] mult(double[] nums, double n) {
// assert !Double.isInfinite(n) : "Trying to multiply " + Arrays.toString(nums) + " by " + n; // Almost surely not what you want
if (nums != null)
for (int i=0; i<nums.length; i++) nums[i] *= n;
return nums;
}
  /**
   * result += op1 * op2 (matrix product). NOTE: this ACCUMULATES into result — callers
   * wanting a plain product must pass a zeroed result array.
   * @throws IllegalArgumentException if result is null or its row count differs from op1's
   */
  public static void matrixMult(double[][] result, double[][] op1, double[][] op2) {
    if (result == null)
      throw new IllegalArgumentException("Result should be a double[][] array already allocated.");
    if (op1.length != result.length)
      throw new IllegalArgumentException("Number of rows of result should equal to number of rows of op1.");
    int numRow = op1.length;
    int numCol = op1[0].length;
    int resultNumCol = op2[0].length;
    for (int rInd = 0; rInd < numRow; rInd++)
      for (int cInd = 0; cInd < resultNumCol; cInd++)
        for (int tempInd = 0; tempInd < numCol; tempInd++) {
          result[rInd][cInd] += op1[rInd][tempInd]*op2[tempInd][cInd];
        }
  }
  /**
   * result += matrix * vector. NOTE: this ACCUMULATES into result — pass a zeroed array
   * for a plain product.
   * @throws IllegalArgumentException if result is null
   */
  public static void matrixVectorMult(double[] result, double[][] matrix, double[] vector) {
    if (result == null)
      throw new IllegalArgumentException("Result should be a double[] array already allocated.");
    int numRow = matrix.length;
    int numCol = matrix[0].length;
    for (int rInd = 0; rInd < numRow; rInd++)
      for (int cInd = 0; cInd < numCol; cInd++)
        result[rInd] += matrix[rInd][cInd]*vector[cInd];
  }
public static double[] mult(double[] nums, double[] nums2) {
for (int i=0; i<nums.length; i++) nums[i] *= nums2[i];
return nums;
}
/**
*
* @param data vector (1 x n)
* @param p vector (1 x n)
* @param n vector (1 x n)
* @return Result of matrix operation (data - p) * n
*/
public static double subAndMul(double[] data, double[] p, double[] n) {
double res = 0;
for (int col=0; col<data.length; col++)
res += (data[col] - p[col]) * n[col];
return res;
}
public static double[] invert(double[] ary) {
if(ary == null) return null;
for(int i=0;i<ary.length;i++) ary[i] = 1. / ary[i];
return ary;
}
public static double[] multArrVec(double[][] ary, double[] nums) {
if(ary == null) return null;
double[] res = new double[ary.length];
return multArrVec(ary, nums, res);
}
public static double[] multArrVecPartial(double[][] ary, double[] nums, int[] numColInd) {
if(ary == null) return null;
double[] res = new double[ary.length];
for (int ind = 0; ind < ary.length; ind++) {
res[ind] = innerProductPartial(nums, numColInd, ary[ind]);
}
return res;
}
public static double[] diagArray(double[][] ary) {
if(ary == null) return null;
int arraylen = ary.length;
double[] res = new double[ary.length];
for (int index=0; index < arraylen; index++)
res[index] = ary[index][index];
return res;
}
/***
* Return the index of an element val that is less than tol array from an element of the array arr.
* Note that arr does not need to be sorted.
*
* @param arr: double array possibly containing an element of interest.
* @param val: val to be found in array arr
* @param tol: maximum difference between value of interest val and an element of array
* @return the index of element that is within tol away from val or -1 if not found
*/
public static int locate(double[] arr, double val, double tol) {
int arrLen = arr.length;
for (int index = 0; index < arrLen; index++) {
if (Math.abs(arr[index]-val) < tol)
return index;
}
return -1;
}
public static double[] multArrVec(double[][] ary, double[] nums, double[] res) {
if(ary == null || nums == null) return null;
assert ary[0].length == nums.length : "Inner dimensions must match: Got " + ary[0].length + " != " + nums.length;
for(int i = 0; i < ary.length; i++)
res[i] = innerProduct(ary[i], nums);
return res;
}
public static double trace(double[][] mat) {
double temp = 0;
int length = mat.length;
for (int index=0; index<length; index++)
temp += mat[index][index];
return temp;
}
public static double[] multVecArr(double[] nums, double[][] ary) {
if(ary == null || nums == null) return null;
assert nums.length == ary.length : "Inner dimensions must match: Got " + nums.length + " != " + ary.length;
double[] res = new double[ary[0].length]; // number of columns
for(int j = 0; j < ary[0].length; j++) { // go through each column
res[j] = 0;
for(int i = 0; i < ary.length; i++) // inner product of nums with each column of ary
res[j] += nums[i] * ary[i][j];
}
return res;
}
/*
with no memory allocation for results. We assume the memory is already allocated.
*/
public static double[][] multArrArr(double[][] ary1, double[][] ary2, double[][] res) {
if(ary1 == null || ary2 == null) return null;
// Inner dimensions must match
assert ary1[0].length == ary2.length : "Inner dimensions must match: Got " + ary1[0].length + " != " + ary2.length;
for(int i = 0; i < ary1.length; i++) {
for(int j = 0; j < ary2[0].length; j++) {
double tmp = 0;
for(int k = 0; k < ary1[0].length; k++)
tmp += ary1[i][k] * ary2[k][j];
res[i][j] = tmp;
}
}
return res;
}
/*
with memory allocation for results
*/
public static double[][] multArrArr(double[][] ary1, double[][] ary2) {
if(ary1 == null || ary2 == null) return null;
double[][] res = new double[ary1.length][ary2[0].length];
return multArrArr(ary1, ary2, res);
}
public static double[][] transpose(double[][] ary) {
if(ary == null) return null;
double[][] res = new double[ary[0].length][ary.length];
for(int i = 0; i < res.length; i++) {
for(int j = 0; j < res[0].length; j++)
res[i][j] = ary[j][i];
}
return res;
}
public static double[][] expandArray(double[][] ary, int newColNum) {
if(ary == null) return null;
assert ary.length < newColNum : "new array should be greater than original array in second dimension.";
int oldMatRow = ary.length;
double[][] res = new double[newColNum][newColNum];
for(int i = 0; i < oldMatRow; i++) {
System.arraycopy(ary[i], 0, res[i], 0, oldMatRow);
}
return res;
}
  /***
   * Transpose of a triangular matrix stored in jagged (packed) form: only the nonzero part of
   * each row is allocated. If the original matrix is lower triangular, the return matrix will be
   * upper triangular and vice versa.
   *
   * NOTE(review): the flag appears to describe the packed layout — when upperTriangular is true,
   * result row r is allocated r+1 entries; when false it gets rowNums-r entries. Each result cell
   * reads ary[colIndex+rowIndex][rowIndex], i.e. it walks a column of the input. Confirm the
   * intended orientation against callers before relying on the parameter name.
   *
   * @param ary jagged triangular matrix (row lengths change by one per row)
   * @return transposed triangular matrix, also in packed (jagged) form
   */
  public static double[][] transposeTriangular(double[][] ary, boolean upperTriangular) {
    if(ary == null) return null;
    int rowNums = ary.length;
    double[][] res = new double[ary.length][]; // allocate as many rows as original matrix
    for (int rowIndex=0; rowIndex < rowNums; rowIndex++) {
      int colNum = upperTriangular?(rowIndex+1):(rowNums-rowIndex);  // packed width of this result row
      res[rowIndex] = new double[colNum];
      for (int colIndex=0; colIndex < colNum; colIndex++)
        res[rowIndex][colIndex] = ary[colIndex+rowIndex][rowIndex];  // gather column rowIndex of ary
    }
    return res;
  }
public static <T> T[] cloneOrNull(T[] ary){return ary == null?null:ary.clone();}
public static <T> T[][] transpose(T[][] ary) {
if(ary == null|| ary.length == 0) return ary;
T [][] res = Arrays.copyOf(ary,ary[0].length);
for(int i = 0; i < res.length; ++i)
res[i] = Arrays.copyOf(ary[0],ary.length);
for(int i = 0; i < res.length; i++) {
for(int j = 0; j < res[0].length; j++)
res[i][j] = ary[j][i];
}
return res;
}
/**
* Provide array from start to end in steps of 1
* @param start beginning value (inclusive)
* @param end ending value (inclusive)
* @return specified range of integers
*/
public static int[] range(int start, int end) {
int[] r = new int[end-start+1];
for(int i=0;i<r.length;i++)
r[i] = i+start;
return r;
}
/**
* Given a n by k matrix X, form its Gram matrix
* @param x Matrix of real numbers
* @param transpose If true, compute n by n Gram of rows = XX'
* If false, compute k by k Gram of cols = X'X
* @return A symmetric positive semi-definite Gram matrix
*/
public static double[][] formGram(double[][] x, boolean transpose) {
if (x == null) return null;
int dim_in = transpose ? x[0].length : x.length;
int dim_out = transpose ? x.length : x[0].length;
double[][] xgram = new double[dim_out][dim_out];
// Compute all entries on and above diagonal
if(transpose) {
for (int i = 0; i < dim_in; i++) {
// Outer product = x[i] * x[i]', where x[i] is col i
for (int j = 0; j < dim_out; j++) {
for (int k = j; k < dim_out; k++)
xgram[j][k] += x[j][i] * x[k][i];
}
}
} else {
for (int i = 0; i < dim_in; i++) {
// Outer product = x[i]' * x[i], where x[i] is row i
for (int j = 0; j < dim_out; j++) {
for (int k = j; k < dim_out; k++)
xgram[j][k] += x[i][j] * x[i][k];
}
}
}
// Fill in entries below diagonal since Gram is symmetric
for (int i = 0; i < dim_in; i++) {
for (int j = 0; j < dim_out; j++) {
for (int k = 0; k < j; k++)
xgram[j][k] = xgram[k][j];
}
}
return xgram;
}
  /** Convenience overload: Gram of the columns, i.e. X'X. */
  public static double[][] formGram(double[][] x) { return formGram(x, false); }
public static double[] permute(double[] vec, int[] idx) {
if(vec == null) return null;
assert vec.length == idx.length : "Length of vector must match permutation vector length: Got " + vec.length + " != " + idx.length;
double[] res = new double[vec.length];
for(int i = 0; i < vec.length; i++)
res[i] = vec[idx[i]];
return res;
}
public static double[][] permuteCols(double[][] ary, int[] idx) {
if(ary == null) return null;
assert ary[0].length == idx.length : "Number of columns must match permutation vector length: Got " + ary[0].length + " != " + idx.length;
double[][] res = new double[ary.length][ary[0].length];
for(int j = 0; j < ary[0].length; j++) {
for(int i = 0; i < ary.length; i++)
res[i][j] = ary[i][idx[j]];
}
return res;
}
public static double[][] permuteRows(double[][] ary, int[] idx) {
if(ary == null) return null;
assert ary.length == idx.length : "Number of rows must match permutation vector length: Got " + ary.length + " != " + idx.length;
double[][] res = new double[ary.length][ary[0].length];
for(int i = 0; i < ary.length; i++)
res[i] = permute(ary[i], idx);
return res;
}
public static double [][] generateLineSearchVecs(double [] srcVec, double [] gradient, int n, final double step) {
double [][] res = new double[n][];
double x = step;
for(int i = 0; i < res.length; ++i) {
res[i] = MemoryManager.malloc8d(srcVec.length);
for(int j = 0; j < res[i].length; ++j)
res[i][j] = srcVec[j] + gradient[j] * x;
x *= step;
}
return res;
}
public static String arrayToString(int[] ary) {
if (ary == null || ary.length==0 ) return "";
int m = ary.length - 1;
StringBuilder sb = new StringBuilder();
for (int i = 0; ; i++) {
sb.append(ary[i]);
if (i == m) return sb.toString();
sb.append(", ");
}
}
// Convert array of primitives to an array of Strings.
public static String[] toString(long[] dom) {
String[] result = new String[dom.length];
for (int i=0; i<dom.length; i++) result[i] = String.valueOf(dom[i]);
return result;
}
public static String[] toString(int[] dom) {
String[] result = new String[dom.length];
for (int i=0; i<dom.length; i++) result[i] = String.valueOf(dom[i]);
return result;
}
public static String[] toString(Object[] ary) {
String[] result = new String[ary.length];
for (int i=0; i<ary.length; i++) {
Object o = ary[i];
if (o != null && o.getClass().isArray()) {
Class klazz = ary[i].getClass();
result[i] = byte[].class.equals(klazz) ? Arrays.toString((byte[]) o) :
short[].class.equals(klazz) ? Arrays.toString((short[]) o) :
int[].class.equals(klazz) ? Arrays.toString((int[]) o) :
long[].class.equals(klazz) ? Arrays.toString((long[]) o) :
boolean[].class.equals(klazz) ? Arrays.toString((boolean[]) o) :
float[].class.equals(klazz) ? Arrays.toString((float[]) o) :
double[].class.equals(klazz) ? Arrays.toString((double[]) o) : Arrays.toString((Object[]) o);
} else {
result[i] = String.valueOf(o);
}
}
return result;
}
  /** Quoted rendering of all elements with no truncation (maxItems disabled). */
  public static String toStringQuotedElements(Object[] a) {
    return toStringQuotedElements(a, -1);
  }
  /**
   * Render the elements as a bracketed, comma-separated list of "quoted" items.
   * When {@code maxItems} is positive and smaller than the array length, the middle of the
   * listing is elided with an {@code ...k not listed...} marker (head and tail are kept).
   */
  public static String toStringQuotedElements(Object[] a, int maxItems) {
    if (a == null)
      return "null";
    if (a.length == 0)
      return "[]";
    int max = a.length;
    int ellipsisIdx = max+1;  // out of loop range -> no ellipsis emitted by default
    if (maxItems > 0 && maxItems < a.length) {
      max = maxItems + 1;     // one extra iteration slot is consumed by the ellipsis marker
      ellipsisIdx = max / 2;  // elide from the middle
    }
    StringBuilder b = new StringBuilder();
    b.append('[');
    for (int i = 0; i < max; i++) {
      // idx == -1 flags the ellipsis position; before it indices are direct, after it shifted to the tail
      int idx = i == ellipsisIdx ? -1
          : i < ellipsisIdx ? i
          : a.length - max + i;
      if (idx >= 0)
        b.append('"').append(a[idx]).append('"');
      else
        b.append("...").append(a.length - maxItems).append(" not listed...");
      if (i < max-1) b.append(", ");
    }
    return b.append(']').toString();
  }
public static <T> boolean contains(T[] arr, T target) {
if (null == arr) return false;
for (T t : arr) {
if (t == target) return true;
if (t != null && t.equals(target)) return true;
}
return false;
}
static public boolean contains(byte[] a, byte d) {
for (byte anA : a) if (anA == d) return true;
return false;
}
static public boolean contains(int[] a, int d) {
for (int anA : a) if (anA == d) return true;
return false;
}
  /** Copy of a[off .. off+len-1]. */
  public static byte[] subarray(byte[] a, int off, int len) {
    return Arrays.copyOfRange(a,off,off+len);
  }
  /** Copy of a[off .. off+len-1], preserving the runtime component type. */
  public static <T> T[] subarray(T[] a, int off, int len) {
    return Arrays.copyOfRange(a,off,off+len);
  }
  /** Shallow (lazy) 2-D slice: copies only the outer array; inner rows are shared with {@code a}. */
  public static <T> T[][] subarray2DLazy(T[][] a, int columnOffset, int len) {
    return Arrays.copyOfRange(a, columnOffset, columnOffset + len);
  }
/** Returns the index of the largest value in the array.
* In case of a tie, an the index is selected randomly.
*/
public static int maxIndex(int[] from, Random rand) {
assert rand != null;
int result = 0;
int maxCount = 0; // count of maximal element for a 1 item reservoir sample
for( int i = 1; i < from.length; ++i ) {
if( from[i] > from[result] ) {
result = i;
maxCount = 1;
} else if( from[i] == from[result] ) {
if( rand.nextInt(++maxCount) == 0 ) result = i;
}
}
return result;
}
public static int maxIndex(float[] from, Random rand) {
assert rand != null;
int result = 0;
int maxCount = 0; // count of maximal element for a 1 item reservoir sample
for( int i = 1; i < from.length; ++i ) {
if( from[i] > from[result] ) {
result = i;
maxCount = 1;
} else if( from[i] == from[result] ) {
if( rand.nextInt(++maxCount) == 0 ) result = i;
}
}
return result;
}
public static int maxIndex(double[] from, Random rand) {
assert rand != null;
int result = 0;
int maxCount = 0; // count of maximal element for a 1 item reservoir sample
for( int i = 1; i < from.length; ++i ) {
if( from[i] > from[result] ) {
result = i;
maxCount = 1;
} else if( from[i] == from[result] ) {
if( rand.nextInt(++maxCount) == 0 ) result = i;
}
}
return result;
}
public static int maxIndex(int[] from) {
int result = 0;
for (int i = 1; i<from.length; ++i)
if (from[i]>from[result]) result = i;
return result;
}
public static int maxIndex(long[] from) {
int result = 0;
for (int i = 1; i<from.length; ++i)
if (from[i]>from[result]) result = i;
return result;
}
public static int maxIndex(long[] from, int off) {
int result = off;
for (int i = off+1; i<from.length; ++i)
if (from[i]>from[result]) result = i;
return result;
}
public static int maxIndex(float[] from) {
int result = 0;
for (int i = 1; i<from.length; ++i)
if (from[i]>from[result]) result = i;
return result;
}
public static int maxIndex(double[] from) {
int result = 0;
for (int i = 1; i<from.length; ++i)
if (from[i]>from[result]) result = i;
return result;
}
public static int minIndex(int[] from) {
int result = 0;
for (int i = 1; i<from.length; ++i)
if (from[i]<from[result]) result = i;
return result;
}
public static int minIndex(float[] from) {
int result = 0;
for (int i = 1; i<from.length; ++i)
if (from[i]<from[result]) result = i;
return result;
}
public static int minIndex(double[] from) {
int result = 0;
for (int i = 1; i<from.length; ++i)
if (from[i]<from[result]) result = i;
return result;
}
  /** Maximum over the whole array; requires at least one element. */
  public static double maxValue(double[] ary) {
    return maxValue(ary,0,ary.length);
  }
public static double maxValue(double[] ary, int from, int to) {
double result = ary[from];
for (int i = from+1; i<to; ++i)
if (ary[i]>result) result = ary[i];
return result;
}
  /** Maximum over the whole array; requires at least one element. */
  public static float maxValue(float[] ary) {
    return maxValue(ary,0,ary.length);
  }
public static float maxValue(float[] ary, int from, int to) {
float result = ary[from];
for (int i = from+1; i<to; ++i)
if (ary[i]>result) result = ary[i];
return result;
}
public static float minValue(float[] from) {
float result = from[0];
for (int i = 1; i<from.length; ++i)
if (from[i]<result) result = from[i];
return result;
}
public static double minValue(double[] ary, int from, int to) {
double result = ary[from];
for (int i = from+1; i<to; ++i)
if (ary[i]<result) result = ary[i];
return result;
}
  /** Minimum over the whole array; throws NoSuchElementException when empty. */
  public static double minValue(double[] from) {
    return Arrays.stream(from).min().getAsDouble();
  }
/**
* Find minimum and maximum in array in the same time
*
* @return Array with 2 fields. First field is minimum and second field is maximum.
*/
public static double[] minMaxValue(double[] from) {
double min = Double.MAX_VALUE;
double max = Double.MIN_VALUE;
for (int i = 0; i < from.length; ++i) {
if (from[i] < min) min = from[i];
if (from[i] > max) max = from[i];
}
return new double[]{min, max};
}
  /** Maximum over the whole array; throws NoSuchElementException when empty. */
  public static long maxValue(long[] from) {
    return Arrays.stream(from).max().getAsLong();
  }
  /** Maximum over the boxed array; throws NoSuchElementException when empty, NPE on null elements. */
  public static int maxValue(Integer[] from) {
    return Arrays.stream(from).max(Integer::compare).get();
  }
  /** Maximum over the whole array; throws NoSuchElementException when empty. */
  public static int maxValue(int[] from) {
    return Arrays.stream(from).max().getAsInt();
  }
public static long minValue(long[] from) {
long result = from[0];
for (int i = 1; i<from.length; ++i)
if (from[i]<result) result = from[i];
return result;
}
public static long minValue(int[] from) {
int result = from[0];
for (int i = 1; i<from.length; ++i)
if (from[i]<result) result = from[i];
return result;
}
public static double maxMag(double[] arr) {
int len = arr.length;
double maxVal = 0;
double oneEle;
for (int index = 0; index < len; index++) {
oneEle = Math.abs(arr[index]);
if (!Double.isNaN(oneEle) && oneEle > maxVal)
maxVal = oneEle;
}
return maxVal;
}
public static double maxMag(double[][] arr) {
int numRow = arr.length;
double maxVal = 0;
double oneRowMax;
for (int index = 0; index < numRow; index++) {
oneRowMax = maxMag(arr[index]);
if (oneRowMax > maxVal)
maxVal = oneRowMax;
}
return maxVal;
}
  // Find an element with linear search & return it's index, or -1
  /** Linear search from index 0; see the three-argument overload for semantics. */
  public static <T> int find(T[] ts, T elem) {return find(ts,elem,0);}
// Find an element with linear search & return it's index, or -1
public static <T> int find(T[] ts, T elem, int off) {
for (int i = off; i < ts.length; i++)
if (elem == ts[i] || elem.equals(ts[i]))
return i;
return -1;
}
  /**
   * Find an element with prefix with linear search & return it's index if find exactly,
   * -index-2 if find first occurrence with prefix or -1
   */
  public static int findWithPrefix(String[] array, String prefix) {
    return findWithPrefix(array, prefix, 0);
  }
/**
* Find an element with prefix with linear search & return it's index if find exactly,
* -index-2 if find first occurrence with prefix or -1
*/
public static int findWithPrefix(String[] array, String prefix, int off) {
for (int i = off; i < array.length; i++) {
if(array[i].equals(prefix)){
return i;
}
if (array[i].startsWith(prefix)) {
return -i - 2;
}
}
return -1;
}
public static int find(long[] ls, long elem) {
for(int i=0; i<ls.length; ++i )
if( elem==ls[i] ) return i;
return -1;
}
public static int find(int[] ls, int elem) {
for(int i=0; i<ls.length; ++i )
if( elem==ls[i] ) return i;
return -1;
}
// behaves like Arrays.binarySearch, but is slower -> Just good for tiny arrays (length<20)
public static int linearSearch(double[] vals, double v) {
final int N=vals.length;
for (int i=0; i<N; ++i) {
if (vals[i]==v) return i;
if (vals[i]>v) return -i-1;
}
return -1;
}
  // Default number format used by pprint: up to 5 fractional digits, no grouping.
  private static final DecimalFormat default_dformat = new DecimalFormat("0.#####");
  /** Pretty-print a matrix using the default decimal format. */
  public static String pprint(double[][] arr){
    return pprint(arr, default_dformat);
  }
  // pretty print Matrix(2D array of doubles)
  /**
   * Render a (possibly jagged) matrix with decimal points vertically aligned per column.
   * First pass measures the integer-part width of every column; second pass emits each value
   * left-padded to that width and right-padded with zeros to a fixed fractional width.
   */
  public static String pprint(double[][] arr,DecimalFormat dformat) {
    int colDim = 0;
    for( double[] line : arr )
      colDim = Math.max(colDim, line.length);
    StringBuilder sb = new StringBuilder();
    int max_width = 0;
    int[] ilengths = new int[colDim];  // widest integer part seen per column
    Arrays.fill(ilengths, -1);
    for( double[] line : arr ) {
      for( int c = 0; c < line.length; ++c ) {
        double d = line[c];
        String dStr = dformat.format(d);
        if( dStr.indexOf('.') == -1 ) dStr += ".0";  // force a decimal point for alignment
        ilengths[c] = Math.max(ilengths[c], dStr.indexOf('.'));
        int prefix = (d >= 0 ? 1 : 2);  // reserve room for a sign on negatives
        max_width = Math.max(dStr.length() + prefix, max_width);
      }
    }
    for( double[] line : arr ) {
      for( int c = 0; c < line.length; ++c ) {
        double d = line[c];
        String dStr = dformat.format(d);
        if( dStr.indexOf('.') == -1 ) dStr += ".0";
        for( int x = dStr.indexOf('.'); x < ilengths[c] + 1; ++x )
          sb.append(' ');  // left-pad so decimal points line up
        sb.append(dStr);
        if( dStr.indexOf('.') == -1 ) sb.append('.');
        for( int i = dStr.length() - Math.max(0, dStr.indexOf('.')); i <= 5; ++i )
          sb.append('0');  // right-pad the fraction to a constant width
      }
      sb.append("\n");
    }
    return sb.toString();
  }
public static int[] unpackInts(long... longs) {
int len = 2*longs.length;
int result[] = new int[len];
int i = 0;
for (long l : longs) {
result[i++] = (int) (l & 0xffffffffL);
result[i++] = (int) (l>>32);
}
return result;
}
private static void swap(long[] a, int i, int change) {
long helper = a[i];
a[i] = a[change];
a[change] = helper;
}
private static void swap(int[] a, int i, int change) {
int helper = a[i];
a[i] = a[change];
a[change] = helper;
}
  /**
   * Extract a shuffled array of integers
   * @param a input array
   * @param n number of elements to extract
   * @param result array to store the results into (will be of size n)
   * @param seed random number seed
   * @param startIndex offset into a
   * @return result (note: when a fresh array is allocated the caller's {@code result}
   *         reference is NOT updated — callers must use the return value)
   */
  public static int[] shuffleArray(int[] a, int n, int result[], long seed, int startIndex) {
    if (n<=0) return result;
    Random random = getRNG(seed);
    if (result == null || result.length != n)
      result = new int[n];  // local reassignment only; original caller array is untouched
    result[0] = a[startIndex];
    // Inside-out Fisher-Yates: place a[startIndex+i] at a random slot j <= i
    for (int i = 1; i < n; i++) {
      int j = random.nextInt(i+1);
      if (j!=i) result[i] = result[j];
      result[j] = a[startIndex+i];
    }
    for (int i = 0; i < n; ++i)
      assert(ArrayUtils.contains(result, a[startIndex+i]));
    return result;
  }
public static void shuffleArray(int[] a, Random rng) {
int n = a.length;
for (int i = 0; i < n; i++) {
int change = i + rng.nextInt(n - i);
swap(a, i, change);
}
}
/**
* Generates a random array of n distinct non-negative long values. Values are sorted for reproducibility.
*
* @param n desired size of the array
* @param bound (exclusive) upper bound of maximum long-value that can be included
* @param rng random generator
* @return long array of length n holding values [0, bound)
*/
public static long[] distinctLongs(int n, long bound, Random rng) {
if (n > bound)
throw new IllegalArgumentException("argument bound (=" + bound + ") needs to be lower or equal to n (=" + n + ")");
if (!(rng instanceof RandomBase))
throw new IllegalArgumentException("Random implementation needs to be created by RandomUtils and inherit from RandomBase");
Set<Long> rows = new HashSet<>();
while (rows.size() < n) {
rows.add(((RandomBase) rng).nextLong(bound));
}
return rows.stream().sorted().mapToLong(Long::longValue).toArray();
}
  // Generate a n by m array of random numbers drawn from the standard normal distribution
  /** Convenience overload: seeds the RNG from the wall clock, so results are not reproducible. */
  public static double[][] gaussianArray(int n, int m) { return gaussianArray(n, m, System.currentTimeMillis()); }
public static double[][] gaussianArray(int n, int m, long seed) {
if(n <= 0 || m <= 0) return null;
double[][] result = new double[n][m];
Random random = getRNG(seed);
for(int i = 0; i < n; i++) {
for(int j = 0; j < m; j++)
result[i][j] = random.nextGaussian();
}
return result;
}
  /** New vector of n N(0,1) draws, seeded from the wall clock (not reproducible). */
  public static double[] gaussianVector(int n) { return gaussianVector(n, System.currentTimeMillis()); }
  /** New vector of n N(0,1) draws from an RNG built with the given seed. */
  public static double[] gaussianVector(int n, long seed) { return gaussianVector(n, getRNG(seed)); }
/**
* Make a new array initialized to random Gaussian N(0,1) values with the given seed.
*
* @param n length of generated vector
* @return array with gaussian values. Randomly selected {@code zeroNum} item values are zeros.
*/
public static double[] gaussianVector(int n, Random random) {
if(n <= 0) return null;
double[] result = new double[n]; // ToDo: Get rid of this new action.
for(int i = 0; i < n; i++)
result[i] = random.nextGaussian();
return result;
}
/** Remove the array allocation in this one */
public static double[] gaussianVector(long seed, double[] vseed) {
if (vseed == null)
return null;
Random random = getRNG(seed);
int arraySize = vseed.length;
for (int i=0; i < arraySize; i++) {
vseed[i] = random.nextGaussian();
}
return vseed;
}
/** Remove the array allocation in this one */
public static double[][] gaussianVector(Random random, double[][] vseed, int firstInd, int secondInd) {
if (vseed == null) {
vseed = new double[firstInd][secondInd];
} else {
firstInd = vseed.length;
secondInd = vseed[0].length;
}
for (int rowInd = 0; rowInd < firstInd; rowInd++)
for (int colInd=0; colInd < secondInd; colInd++) {
vseed[rowInd][colInd] = random.nextGaussian();
}
return vseed;
}
/** Returns number of strings which represents a number. */
public static int numInts(String... a) {
int cnt = 0;
for(String s : a) if (isInt(s)) cnt++;
return cnt;
}
public static boolean isInt(String... ary) {
for(String s:ary) {
if (s == null || s.isEmpty()) return false;
int i = s.charAt(0) == '-' ? 1 : 0;
for (; i < s.length(); i++) if (!Character.isDigit(s.charAt(i))) return false;
}
return true;
}
public static int[] toInt(String[] a, int off, int len) {
int[] res = new int[len];
for(int i=0; i<len; i++) res[i] = Integer.valueOf(a[off + i]);
return res;
}
public static Integer[] toIntegers(int[] a, int off, int len) {
Integer [] res = new Integer[len];
for(int i = 0; i < len; ++i)
res[i] = a[off+i];
return res;
}
public static int[] toInt(Integer[] a, int off, int len) {
int [] res = new int[len];
for(int i = 0; i < len; ++i)
res[i] = a[off+i];
return res;
}
  /** Clever union of String arrays.
   *
   * For union of numeric arrays (strings represent integers) it is expecting numeric ordering.
   * For pure string domains it is expecting lexicographical ordering.
   * For mixed domains it always expects lexicographical ordering since such a domain were produce
   * by a parser which sort string with Array.sort().
   *
   * PRECONDITION - string domain was sorted by Array.sort(String[]), integer domain by Array.sort(int[]) and switched to Strings !!!
   *
   * NOTE(review): the mixed-domain branch additionally assumes that ALL numeric strings occupy
   * the first cIinA/cIinB positions of a/b (toInt reads a prefix). Digits do sort before letters
   * lexicographically, but e.g. a leading '-' would violate this — confirm against the parser.
   *
   * @param a a set of strings
   * @param b a set of strings
   * @return union of arrays
   * // TODO: add tests
   */
  public static String[] domainUnion(String[] a, String[] b) {
    if (a == null) return b;
    if (b == null) return a;
    int cIinA = numInts(a);
    int cIinB = numInts(b);
    // Trivial case - all strings or ints, sorted
    if (cIinA==0 && cIinB==0 // only strings
        || cIinA==a.length && cIinB==b.length ) // only integers
      return union(a, b, cIinA==0);
    // Be little bit clever here: sort string representing numbers first and append
    // a,b were sorted by Array.sort() but can contain some numbers.
    // So sort numbers in numeric way, and then string in lexicographical order
    int[] ai = toInt(a, 0, cIinA); Arrays.sort(ai); // extract int part but sort it in numeric order
    int[] bi = toInt(b, 0, cIinB); Arrays.sort(bi);
    String[] ri = toString(union(ai,bi)); // integer part
    String[] si = union(a, b, cIinA, a.length - cIinA, cIinB, b.length - cIinB, true);
    return join(ri, si);
  }
  /** Union of given String arrays.
   *
   * The method expects ordering of domains in given order (lexicographical, numeric)
   *
   * @param a first array
   * @param b second array
   * @param lexo - true if domains are sorted in lexicographical order or false for numeric domains
   * @return union of values in given arrays.
   *
   * precondition lexo ? a,b are lexicographically sorted : a,b are sorted numerically
   * precondition a!=null && b!=null (a null operand is nevertheless tolerated and the other returned)
   */
  public static String[] union(String[] a, String[] b, boolean lexo) {
    if (a == null) return b;
    if (b == null) return a;
    return union(a, b, 0, a.length, 0, b.length, lexo);
  }
public static String[] union(String[] a, String[] b, int aoff, int alen, int boff, int blen, boolean lexo) {
assert a!=null && b!=null : "Union expect non-null input!";
String[] r = new String[alen+blen];
int ia = aoff, ib = boff, i = 0;
while (ia < aoff+alen && ib < boff+blen) {
int c = lexo ? a[ia].compareTo(b[ib]) : Integer.valueOf(a[ia]).compareTo(Integer.valueOf(b[ib]));
if ( c < 0) r[i++] = a[ia++];
else if (c == 0) { r[i++] = a[ia++]; ib++; }
else r[i++] = b[ib++];
}
if (ia < aoff+alen) while (ia<aoff+alen) r[i++] = a[ia++];
if (ib < boff+blen) while (ib<boff+blen) r[i++] = b[ib++];
return Arrays.copyOf(r, i);
}
/** Returns a union of given sorted arrays. */
public static int[] union(int[] a, int[] b) {
assert a!=null && b!=null : "Union expect non-null input!";
int[] r = new int[a.length+b.length];
int ia = 0, ib = 0, i = 0;
while (ia < a.length && ib < b.length) {
int c = a[ia]-b[ib];
if ( c < 0) r[i++] = a[ia++];
else if (c == 0) { r[i++] = a[ia++]; ib++; }
else r[i++] = b[ib++];
}
if (ia < a.length) while (ia<a.length) r[i++] = a[ia++];
if (ib < b.length) while (ib<b.length) r[i++] = b[ib++];
return Arrays.copyOf(r, i);
}
public static long[] join(long[] a, long[] b) {
long[] res = Arrays.copyOf(a, a.length+b.length);
System.arraycopy(b, 0, res, a.length, b.length);
return res;
}
public static float [] join(float[] a, float[] b) {
float[] res = Arrays.copyOf(a, a.length+b.length);
System.arraycopy(b, 0, res, a.length, b.length);
return res;
}
public static <T> T[] join(T[] a, T[] b) {
T[] res = Arrays.copyOf(a, a.length+b.length);
System.arraycopy(b, 0, res, a.length, b.length);
return res;
}
public static boolean hasNaNsOrInfs(double [] ary){
for(double d:ary)
if(Double.isNaN(d) || Double.isInfinite(d))
return true;
return false;
}
public static boolean hasNaNs(double [] ary){
for(double d:ary)
if(Double.isNaN(d))
return true;
return false;
}
public static boolean hasNaNsOrInfs(float [] ary){
for(float d:ary)
if(Double.isNaN(d) || Double.isInfinite(d))
return true;
return false;
}
/** Generates sequence (start, stop) of integers: (start, start+1, ...., stop-1) */
static public int[] seq(int start, int stop) {
assert start<stop;
int len = stop-start;
int[] res = new int[len];
for(int i=start; i<stop;i++) res[i-start] = i;
return res;
}
// warning: Non-Symmetric! Returns all elements in a that are not in b (but NOT the other way around)
static public int[] difference(int a[], int b[]) {
if (a == null) return new int[]{};
if (b == null) return a.clone();
int[] r = new int[a.length];
int cnt = 0;
for (int x : a) {
if (!contains(b, x)) r[cnt++] = x;
}
return Arrays.copyOf(r, cnt);
}
// warning: Non-Symmetric! Returns all elements in a that are not in b (but NOT the other way around)
static public String[] difference(String a[], String b[]) {
if (a == null) return new String[]{};
if (b == null) return a.clone();
String[] r = new String[a.length];
int cnt = 0;
for (String s : a) {
if (!contains(b, s)) r[cnt++] = s;
}
return Arrays.copyOf(r, cnt);
}
static public double[][] append( double[][] a, double[][] b ) {
if( a==null ) return b;
if( b==null ) return a;
if( a.length==0 ) return b;
if( b.length==0 ) return a;
assert a[0].length==b[0].length;
double[][] c = Arrays.copyOf(a,a.length+b.length);
System.arraycopy(b,0,c,a.length,b.length);
return c;
}
static public byte[] append( byte[] a, byte... b ) {
if( a==null ) return b;
if( b==null ) return a;
if( a.length==0 ) return b;
if( b.length==0 ) return a;
byte[] c = Arrays.copyOf(a,a.length+b.length);
System.arraycopy(b,0,c,a.length,b.length);
return c;
}
static public int[] append( int[] a, int[] b ) {
if( a==null ) return b;
if( b==null ) return a;
if( a.length==0 ) return b;
if( b.length==0 ) return a;
int[] c = Arrays.copyOf(a,a.length+b.length);
System.arraycopy(b,0,c,a.length,b.length);
return c;
}
static public long[] append( long[] a, long[] b ) {
if( a==null ) return b;
if( b==null ) return a;
if( a.length==0 ) return b;
if( b.length==0 ) return a;
long[] c = Arrays.copyOf(a,a.length+b.length);
System.arraycopy(b,0,c,a.length,b.length);
return c;
}
static public double[] append( double[] a, double[] b ) {
if( a==null ) return b;
if( b==null ) return a;
if( a.length==0 ) return b;
if( b.length==0 ) return a;
double[] c = Arrays.copyOf(a,a.length+b.length);
System.arraycopy(b,0,c,a.length,b.length);
return c;
}
static public String[] append( String[] a, String[] b ) {
if( a==null ) return b;
if( b==null ) return a;
if( a.length==0 ) return b;
if( b.length==0 ) return a;
String[] c = Arrays.copyOf(a,a.length+b.length);
System.arraycopy(b,0,c,a.length,b.length);
return c;
}
static public String[] insert( String[] a, String[] b, int pos ) {
if( a==null ) return b;
if( b==null ) return a;
if( a.length==0 ) return b;
if( b.length==0 ) return a;
String[] c = new String[a.length + b.length];
System.arraycopy(a, 0, c, 0, pos);
System.arraycopy(b, 0, c, pos, b.length);
System.arraycopy(a, pos, c, pos + b.length, a.length - pos);
return c;
}
// Java7+ @SafeVarargs
public static <T> T[] append(T[] a, T... b) {
if( a==null ) return b;
T[] tmp = Arrays.copyOf(a,a.length+b.length);
System.arraycopy(b,0,tmp,a.length,b.length);
return tmp;
}
public static int[] append(int[] a, int b) {
if( a==null || a.length == 0) return new int[]{b};
int[] tmp = Arrays.copyOf(a,a.length+1);
tmp[a.length] = b;
return tmp;
}
public static double[] append(double[] a, double b) {
if( a==null || a.length == 0) return new double[]{b};
double[] tmp = Arrays.copyOf(a,a.length+1);
tmp[a.length] = b;
return tmp;
}
static public String[] prepend(String[] ary, String s) {
if (ary==null) return new String[] { s };
String[] nary = new String[ary.length+1];
nary[0] = s;
System.arraycopy(ary,0,nary,1,ary.length);
return nary;
}
static public <T> T[] copyAndFillOf(T[] original, int newLength, T padding) {
if(newLength < 0) throw new NegativeArraySizeException("The array size is negative.");
T[] newArray = Arrays.copyOf(original, newLength);
if(original.length < newLength) {
System.arraycopy(original, 0, newArray, 0, original.length);
Arrays.fill(newArray, original.length, newArray.length, padding);
} else
System.arraycopy(original, 0, newArray, 0, newLength);
return newArray;
}
static public double[] copyAndFillOf(double[] original, int newLength, double padding) {
if(newLength < 0) throw new NegativeArraySizeException("The array size is negative.");
double[] newArray = new double[newLength];
if(original.length < newLength) {
System.arraycopy(original, 0, newArray, 0, original.length);
Arrays.fill(newArray, original.length, newArray.length, padding);
} else
System.arraycopy(original, 0, newArray, 0, newLength);
return newArray;
}
static public long[] copyAndFillOf(long[] original, int newLength, long padding) {
if(newLength < 0) throw new NegativeArraySizeException("The array size is negative.");
long[] newArray = new long[newLength];
if(original.length < newLength) {
System.arraycopy(original, 0, newArray, 0, original.length);
Arrays.fill(newArray, original.length, newArray.length, padding);
} else
System.arraycopy(original, 0, newArray, 0, newLength);
return newArray;
}
static public int[] copyAndFillOf(int[] original, int newLength, int padding) {
if(newLength < 0) throw new NegativeArraySizeException("The array size is negative.");
int[] newArray = new int[newLength];
if(original.length < newLength) {
System.arraycopy(original, 0, newArray, 0, original.length);
Arrays.fill(newArray, original.length, newArray.length, padding);
} else
System.arraycopy(original, 0, newArray, 0, newLength);
return newArray;
}
static public double[] copyFromIntArray(int[] a) {
double[] da = new double[a.length];
for(int i=0;i<a.length;++i) da[i] = a[i];
return da;
}
public static int [] sortedMerge(int[] a, int [] b) {
int [] c = MemoryManager.malloc4(a.length + b.length);
int i = 0, j = 0;
for(int k = 0; k < c.length; ++k){
if(i == a.length) c[k] = b[j++];
else if(j == b.length)c[k] = a[i++];
else if(b[j] < a[i]) c[k] = b[j++];
else c[k] = a[i++];
}
return c;
}
public static double [] sortedMerge(double[] a, double [] b) {
double [] c = MemoryManager.malloc8d(a.length + b.length);
int i = 0, j = 0;
for(int k = 0; k < c.length; ++k){
if(i == a.length) c[k] = b[j++];
else if(j == b.length)c[k] = a[i++];
else if(b[j] < a[i]) c[k] = b[j++];
else c[k] = a[i++];
}
return c;
}
  // sparse sortedMerge (ids and vals)
  /**
   * Merge two sparse vectors (parallel id/value arrays, ids ascending) into the
   * pre-allocated result arrays; resIds/resVals must have length aIds.length + bIds.length.
   * When one input runs out, the remainder of the other is bulk-copied and the loop exits.
   * On equal ids, a's entry is taken first (the aIds[i] > bIds[j] test is strict).
   */
  public static void sortedMerge(int[] aIds, double [] aVals, int[] bIds, double [] bVals, int [] resIds, double [] resVals) {
    int i = 0, j = 0;
    for(int k = 0; k < resIds.length; ++k){
      if(i == aIds.length){
        // a exhausted: bulk-copy the rest of b and stop
        System.arraycopy(bIds,j,resIds,k,resIds.length-k);
        System.arraycopy(bVals,j,resVals,k,resVals.length-k);
        j = bIds.length;
        break;
      }
      if(j == bIds.length) {
        // b exhausted: bulk-copy the rest of a and stop
        System.arraycopy(aIds,i,resIds,k,resIds.length-k);
        System.arraycopy(aVals,i,resVals,k,resVals.length-k);
        i = aIds.length;
        break;
      }
      if(aIds[i] > bIds[j]) {
        resIds[k] = bIds[j];
        resVals[k] = bVals[j];
        ++j;
      } else {
        resIds[k] = aIds[i];
        resVals[k] = aVals[i];
        ++i;
      }
    }
    assert i == aIds.length && j == bIds.length;
  }
public static String[] select(String[] ary, int[] idxs) {
String [] res = new String[idxs.length];
for(int i = 0; i < res.length; ++i)
res[i] = ary[idxs[i]];
return res;
}
public static String[] select(String[] ary, byte[] idxs) {
String [] res = new String[idxs.length];
for(int i = 0; i < res.length; ++i)
res[i] = ary[idxs[i]];
return res;
}
public static double[] select(double[] ary, int[] idxs) {
double [] res = MemoryManager.malloc8d(idxs.length);
for(int i = 0; i < res.length; ++i)
res[i] = ary[idxs[i]];
return res;
}
public static int[] select(int[] ary, int[] idxs) {
int [] res = MemoryManager.malloc4(idxs.length);
for(int i = 0; i < res.length; ++i)
res[i] = ary[idxs[i]];
return res;
}
public static byte[] select(byte[] array, int[] idxs) {
byte[] res = MemoryManager.malloc1(idxs.length);
for(int i = 0; i < res.length; ++i)
res[i] = array[idxs[i]];
return res;
}
public static double [] expandAndScatter(double [] ary, int N, int [] ids) {
assert ary.length == ids.length:"ary.length = " + ary.length + " != " + ids.length + " = ids.length";
double [] res = MemoryManager.malloc8d(N);
for(int i = 0; i < ids.length; ++i) res[ids[i]] = ary[i];
return res;
}
  /**
   * Sort an integer array of indices based on values
   * Updates indices in place, keeps values the same
   * Delegates with the default insertion-sort cutoff (500) and ascending order (+1).
   * @param idxs indices
   * @param values values
   */
  public static void sort(final int[] idxs, final double[] values) {
    sort(idxs, values, 500, 1);
  }
  /** Ascending index sort with a caller-supplied insertion-sort cutoff. */
  public static void sort(final int[] idxs, final double[] values, int cutoff) {
    sort(idxs, values, cutoff, 1);
  }
  // set increasing to 1 for ascending sort and -1 for descending sort
  /**
   * Sort {@code idxs} in place so values[idxs[k]] is ordered; {@code values} itself is not moved.
   * Below {@code cutoff} a hand-rolled insertion sort avoids any allocation; otherwise the
   * indices are boxed so a custom Comparator can be used, then copied back.
   * Multiplying both sides by {@code increasing} (+1/-1) flips the direction with one code path.
   * NOTE(review): the comparator multiplies doubles, so NaN inputs make both comparisons false
   * and compare as "equal" — confirm callers only pass finite values.
   */
  public static void sort(final int[] idxs, final double[] values, int cutoff, int increasing) {
    if (idxs.length < cutoff) {
      //hand-rolled insertion sort
      for (int i = 0; i < idxs.length; i++) {
        for (int j = i; j > 0 && values[idxs[j - 1]]*increasing > values[idxs[j]]*increasing; j--) {
          int tmp = idxs[j];
          idxs[j] = idxs[j - 1];
          idxs[j - 1] = tmp;
        }
      }
    } else {
      // box indices so Arrays.sort can take a comparator over the values array
      Integer[] d = new Integer[idxs.length];
      for (int i = 0; i < idxs.length; ++i) d[i] = idxs[i];
//      Arrays.parallelSort(d, new Comparator<Integer>() {
      Arrays.sort(d, new Comparator<Integer>() {
        @Override
        public int compare(Integer x, Integer y) {
          return values[x]*increasing < values[y]*increasing ? -1 :
              (values[x]*increasing > values[y]*increasing ? 1 : 0);
        }
      });
      for (int i = 0; i < idxs.length; ++i) idxs[i] = d[i];
    }
  }
public static double [] subtract (double [] a, double [] b) {
double [] c = MemoryManager.malloc8d(a.length);
subtract(a,b,c);
return c;
}
public static double [][] subtract (double [][] a, double [][] b) {
double [][] c = MemoryManager.malloc8d(a.length, a[0].length);
for (int rowIndex = 0; rowIndex < c.length; rowIndex++) {
c[rowIndex] = subtract(a[rowIndex], b[rowIndex], c[rowIndex]);
}
return c;
}
public static int [] subtract (int [] a, int [] b) {
int [] c = MemoryManager.malloc4 (a.length);
for (int i = 0; i < a.length; i++)
c[i] = a[i]-b[i];
return c;
}
public static double[] subtract (double [] a, double [] b, double [] c) {
for(int i = 0; i < a.length; ++i)
c[i] = a[i] - b[i];
return c;
}
/** Flatenize given array (skips null arrays)
*
* Example: [[1,2], null, [3,null], [4]] -> [1,2,3,null,4]
* @param arr array of arrays
* @param <T> any type
* @return flattened array, if input was null return null, if input was empty return null
*/
public static <T> T[] flat(T[][] arr) {
if (arr == null) return null;
if (arr.length == 0) return null;
int tlen = 0;
for (T[] t : arr) tlen += (t != null) ? t.length : 0;
T[] result = Arrays.copyOf(arr[0], tlen);
int j = arr[0].length;
for (int i = 1; i < arr.length; i++) {
if (arr[i] == null)
continue;
System.arraycopy(arr[i], 0, result, j, arr[i].length);
j += arr[i].length;
}
return result;
}
public static double [][] convertTo2DMatrix(double [] x, int N) {
assert x.length % N == 0: "number of coefficient should be divisible by number of coefficients per class ";
int len = x.length/N; // N is number of coefficients per class
double [][] res = new double[len][];
for(int i = 0; i < len; ++i) { // go through each class
res[i] = MemoryManager.malloc8d(N);
System.arraycopy(x,i*N,res[i],0,N);
}
return res;
}
public static Object[][] zip(Object[] a, Object[] b) {
if (a.length != b.length) throw new IllegalArgumentException("Cannot zip arrays of different lengths!");
Object[][] result = new Object[a.length][2];
for (int i = 0; i < a.length; i++) {
result[i][0] = a[i];
result[i][1] = b[i];
}
return result;
}
public static <K, V> int crossProductSize(Map<K, V[]> hyperSpace) {
int size = 1;
for (Map.Entry<K,V[]> entry : hyperSpace.entrySet()) {
V[] value = entry.getValue();
size *= value != null ? value.length : 1;
}
return size;
}
  /** Inclusive integer range [start..end] with step 1. */
  public static Integer[] interval(Integer start, Integer end) {
    return interval(start, end, 1);
  }
public static Integer[] interval(Integer start, Integer end, Integer step) {
int len = 1 + (end - start) / step; // Include both ends of interval
Integer[] result = new Integer[len];
for(int i = 0, value = start; i < len; i++, value += step) {
result[i] = value;
}
return result;
}
public static Float[] interval(Float start, Float end, Float step) {
int len = 1 + (int)((end - start) / step); // Include both ends of interval
Float[] result = new Float[len];
Float value = start;
for(int i = 0; i < len; i++, value = start + i*step) {
result[i] = value;
}
return result;
}
public static Double[] interval(Double start, Double end, Double step) {
int len = 1 + (int)((end - start) / step); // Include both ends of interval
Double[] result = new Double[len];
Double value = start;
for(int i = 0; i < len; i++, value = start + i*step) {
result[i] = value;
}
return result;
}
  /**
   * Copy of ary with every element equal to s removed (String.equals); returns the input
   * array itself when s is null or not present.
   * NOTE(review): find() presumably returns the index of the next occurrence at/after the
   * given position — confirm; also throws NPE if ary contains null elements (x.equals below).
   */
  public static String [] remove(String [] ary, String s) {
    if(s == null)return ary;
    // first pass: count occurrences so the result can be sized exactly
    int cnt = 0;
    int idx = find(ary,s);
    while(idx >= 0) {
      ++cnt;
      idx = find(ary,s,++idx);
    }
    if(cnt == 0)return ary;
    // second pass: copy everything that is not s
    String [] res = new String[ary.length-cnt];
    int j = 0;
    for(String x:ary)
      if(!x.equals(s))
        res[j++] = x;
    return res;
  }
public static int[] sorted_set_diff(int[] x, int[] y) {
assert isSorted(x);
assert isSorted(y);
int [] res = new int[x.length];
int j = 0, k = 0;
for(int i = 0; i < x.length; i++){
while(j < y.length && y[j] < x[i])j++;
if(j == y.length || y[j] != x[i])
res[k++] = x[i];
}
return Arrays.copyOf(res,k);
}
  /*
  This class is written to copy the contents of a frame to a 2-D double array.
   */
  /**
   * MRTask that copies columns [_startColIndex.._endColIndex] of a Frame into the shared
   * 2-D array _frameContent (rows x selected columns). Each map() fills its chunk's row
   * range; reduce() element-wise adds partial results via ArrayUtils.add.
   * NOTE(review): rowNum is narrowed to int, so the whole frame must fit a Java array —
   * confirm callers only use this on small frames.
   */
  public static class FrameToArray extends MRTask<FrameToArray> {
    int _startColIndex; // first column index to extract
    int _endColIndex; // last column index to extract (inclusive)
    int _rowNum; // number of rows in the frame (narrowed from long)
    public double[][] _frameContent;
    /** Allocate the destination (rowNum x endCol-startCol+1), or zero and reuse the given one. */
    public FrameToArray(int startCol, int endCol, long rowNum, double[][] frameContent) {
      assert ((startCol >= 0) && (endCol >= startCol) && (rowNum > 0));
      _startColIndex = startCol;
      _endColIndex = endCol;
      _rowNum = (int) rowNum;
      int colNum = endCol-startCol+1;
      if (frameContent == null) { // allocate memory here if user has not provided one
        _frameContent = MemoryManager.malloc8d(_rowNum, colNum);
      } else { // make sure we are passed the correct size 2-D double array
        assert (_rowNum == frameContent.length && frameContent[0].length == colNum);
        for (int index = 0; index < _rowNum; index++) { // zero fill use array
          Arrays.fill(frameContent[index], 0.0);
        }
        _frameContent = frameContent;
      }
    }
    /** Copy this chunk's rows into the matching global-row slice of _frameContent. */
    @Override public void map(Chunk[] c) {
      assert _endColIndex < c.length;
      int endCol = _endColIndex+1;
      int rowOffset = (int) c[0].start(); // real row index
      int chkRows = c[0]._len;
      for (int rowIndex = 0; rowIndex < chkRows; rowIndex++) {
        for (int colIndex = _startColIndex; colIndex < endCol; colIndex++) {
          _frameContent[rowIndex+rowOffset][colIndex-_startColIndex] = c[colIndex].atd(rowIndex);
        }
      }
    }
    /** Element-wise add merges the disjoint row ranges filled by other copies of the task. */
    @Override public void reduce(FrameToArray other) {
      ArrayUtils.add(_frameContent, other._frameContent);
    }
    public double[][] getArray() {
      return _frameContent;
    }
  }
  /*
  This class copies a 2-D double array back into a frame, reusing the frame's storage
  instead of allocating new memory every time.
   */
  public static class CopyArrayToFrame extends MRTask<CopyArrayToFrame> {
    int _startColIndex; // first column index to fill
    int _endColIndex; // last column index to fill (inclusive)
    int _rowNum; // number of rows in the frame (narrowed from long)
    public double[][] _frameContent; // source array, rows x selected columns
    /** The provided array must be exactly rowNum x (endCol-startCol+1). */
    public CopyArrayToFrame(int startCol, int endCol, long rowNum, double[][] frameContent) {
      assert ((startCol >= 0) && (endCol >= startCol) && (rowNum > 0));
      _startColIndex = startCol;
      _endColIndex = endCol;
      _rowNum = (int) rowNum;
      int colNum = endCol-startCol+1;
      assert (_rowNum == frameContent.length && frameContent[0].length == colNum);
      _frameContent = frameContent;
    }
    /** Write the chunk's row range of the array into the frame's chunks. */
    @Override public void map(Chunk[] c) {
      assert _endColIndex < c.length;
      int endCol = _endColIndex+1;
      int rowOffset = (int) c[0].start(); // real row index
      int chkRows = c[0]._len;
      for (int rowIndex = 0; rowIndex < chkRows; rowIndex++) {
        for (int colIndex = _startColIndex; colIndex < endCol; colIndex++) {
          c[colIndex].set(rowIndex, _frameContent[rowIndex+rowOffset][colIndex-_startColIndex]);
        }
      }
    }
  }
  /** Create a new frame based on given row data.
   * @param key Key for the frame; when non-null the frame is also published to the DKV
   * @param names names of frame columns (may be null, otherwise must match the row width)
   * @param rows data given in the form of rows
   * @return new frame which contains columns named according given names and including given data */
  public static Frame frame(Key<Frame> key, String[] names, double[]... rows) {
    assert names == null || names.length == rows[0].length;
    Futures fs = new Futures();
    Vec[] vecs = new Vec[rows[0].length];
    Key<Vec>[] keys = Vec.VectorGroup.VG_LEN1.addVecs(vecs.length);
    int rowLayout = -1;
    for( int c = 0; c < vecs.length; c++ ) {
      AppendableVec vec = new AppendableVec(keys[c], Vec.T_NUM);
      NewChunk chunk = new NewChunk(vec, 0);
      for (double[] row : rows) chunk.addNum(row[c]); // column c gathers one value per row
      chunk.close(0, fs);
      // all columns share one row layout, computed once from the first closed column
      if( rowLayout== -1) rowLayout = vec.compute_rowLayout();
      vecs[c] = vec.close(rowLayout,fs);
    }
    fs.blockForPending();
    Frame fr = new Frame(key, names, vecs);
    if( key != null ) DKV.put(key, fr);
    return fr;
  }
  /** Rows-only variant: null column names; a fresh Frame key is made downstream. */
  public static Frame frame(double[]... rows) { return frame(null, rows); }
  /** Named-columns variant: publishes under a freshly made Frame key. */
  public static Frame frame(String[] names, double[]... rows) { return frame(Key.<Frame>make(), names, rows); }
  /** Wrap a single existing Vec into a new (keyless) Frame under the given column name. */
  public static Frame frame(String name, Vec vec) { Frame f = new Frame(); f.add(name, vec); return f; }
/**
* Remove b from a, both a,b are assumed to be sorted.
*/
public static int[] removeSorted(int [] a, int [] b) {
int [] indeces = new int[b.length];
indeces[0] = Arrays.binarySearch(a,0,a.length,b[0]);
if(indeces[0] < 0)
throw new NoSuchElementException("value " + b[0] + " not found in the first array.");
for(int i = 1; i < b.length; ++i) {
indeces[i] = Arrays.binarySearch(a,indeces[i-1],a.length,b[i]);
if(indeces[i] < 0)
throw new NoSuchElementException("value " + b[i] + " not found in the first array.");
}
return removeIds(a,indeces);
}
public static int[] removeIds(int[] x, int[] ids) {
int [] res = new int[x.length-ids.length];
int j = 0;
for(int i = 0; i < x.length; ++i)
if(j == ids.length || i != ids[j]) res[i-j] = x[i]; else ++j;
return res;
}
public static double[] removeIds(double[] x, int[] ids) {
double [] res = new double[x.length-ids.length];
int j = 0;
for(int i = 0; i < x.length; ++i)
if(j == ids.length || i != ids[j]) res[i-j] = x[i]; else ++j;
return res;
}
public static boolean hasNzs(double[] x) {
if(x == null)
return false;
for(double d:x)
if(d != 0) return true;
return false;
}
public static int countNonzeros(double[] beta) {
int res = 0;
for(double d:beta)
if(d != 0)++res;
return res;
}
public static long[] subtract(long n, long[] nums) {
for (int i=0; i<nums.length; i++) nums[i] = n - nums[i];
return nums;
}
public static <T> T[] remove( T[] ary, int id) {
if(id < 0 || id >= ary.length) return Arrays.copyOf(ary,ary.length);
if(id == ary.length-1) return Arrays.copyOf(ary,id);
if(id == 0) return Arrays.copyOfRange(ary,1,ary.length);
return append(Arrays.copyOf(ary,id), Arrays.copyOfRange(ary,id+1,ary.length));
}
public static byte[] remove(byte[] ary, int id) {
if(id < 0 || id >= ary.length) return Arrays.copyOf(ary,ary.length);
if(id == ary.length-1) return Arrays.copyOf(ary,id);
if(id == 0) return Arrays.copyOfRange(ary,1,ary.length);
return append(Arrays.copyOf(ary,id), Arrays.copyOfRange(ary,id+1,ary.length));
}
public static int[] remove(int[] ary, int id) {
if(id < 0 || id >= ary.length) return Arrays.copyOf(ary,ary.length);
if(id == ary.length-1) return Arrays.copyOf(ary,id);
if(id == 0) return Arrays.copyOfRange(ary,1,ary.length);
return append(Arrays.copyOf(ary,id), Arrays.copyOfRange(ary,id+1,ary.length));
}
public static long[] remove(long[] ary, int id) {
if(id < 0 || id >= ary.length) return Arrays.copyOf(ary,ary.length);
if(id == ary.length-1) return Arrays.copyOf(ary,id);
if(id == 0) return Arrays.copyOfRange(ary,1,ary.length);
return append(Arrays.copyOf(ary,id), Arrays.copyOfRange(ary,id+1,ary.length));
}
public static double[] padUniformly(double[] origPoints, int newLength) {
int origLength = origPoints.length;
if (newLength <= origLength || origLength<=1) return origPoints;
int extraPoints = newLength - origLength;
int extraPointsPerBin = extraPoints/(origLength-1);
double[] res = new double[newLength];
int pos=0;
int rem = extraPoints - extraPointsPerBin*(origLength-1);
for (int i=0;i<origLength-1;++i) {
double startPos = origPoints[i];
double delta = origPoints[i+1]-startPos;
int ext = extraPointsPerBin + (i<rem ? 1 : 0);
res[pos++] = startPos;
for (int j=0;j<ext;++j)
res[pos++] = startPos + (j+0.5) / ext * delta;
}
res[pos] = origPoints[origLength-1];
return res;
}
  // See HistogramTest JUnit for tests
  /**
   * Clamp split points to [min, maxEx) and drop consecutive duplicates. The first kept
   * point is forced to exactly {@code min}; scanning stops at the first point > maxEx.
   * Returns {min} alone when no point overlaps the range.
   * NOTE(review): relies on splitPoints being sorted ascending (early break, adjacent-only
   * dedup) — confirm with callers.
   */
  public static double[] makeUniqueAndLimitToRange(double[] splitPoints, double min, double maxEx) {
    double last= splitPoints[0];
    double[] uniqueValidPoints = new double[splitPoints.length+2]; // +2: room for forced min + first in-range point
    int count=0;
    // keep all unique points that are minimally overlapping with min..maxEx
    for (int i = 0; i< splitPoints.length; ++i) {
      double pos = splitPoints[i];
      // first one
      if (pos >= min && count==0) {
        uniqueValidPoints[count++]= min;
        if (pos> min) uniqueValidPoints[count++]=pos;
        last=pos;
      }
      //last one
      else if (pos > maxEx) {
        break;
      }
      // regular case: add to uniques
      else if (pos > min && pos < maxEx && (i==0 || pos != last)) {
        uniqueValidPoints[count++] = pos;
        last = pos;
      }
    }
    if (count==0) {
      return new double[]{min};
    }
    return Arrays.copyOfRange(uniqueValidPoints,0,count);
  }
  // See HistogramTest JUnit for tests
  /**
   * Slice sortedSplitPoints down to the part covering [min, maxEx): the slice starts at
   * the last point <= min (so the range start is covered) and ends before the first
   * point >= maxEx. At least one value is always returned.
   * @param sortedSplitPoints non-empty, sorted ascending
   */
  public static double[] limitToRange(double[] sortedSplitPoints, double min, double maxEx) {
    int start=Arrays.binarySearch(sortedSplitPoints, min);
    if (start<0) start=-start-1; // insertion point when min itself is absent
    // go back one more to return at least one value
    if (start==sortedSplitPoints.length) start--;
    // go back one more to include the min (inclusive)
    if (sortedSplitPoints[start] > min && start>0) start--;
    assert(start>=0);
    assert(sortedSplitPoints[start] <= min);
    int end=Arrays.binarySearch(sortedSplitPoints, maxEx);
    if (end<0) end=-end-1; // insertion point: index of first element >= maxEx
    assert(end>0 && end<= sortedSplitPoints.length): "End index ("+end+") should be > 0 and <= split points size ("+sortedSplitPoints.length+"). "+collectArrayInfo(sortedSplitPoints);
    assert(end>=start): "End index ("+end+") should be >= start index ("+start+"). " + collectArrayInfo(sortedSplitPoints);
    assert(sortedSplitPoints[end-1] < maxEx): "Split valued at index end-1 ("+sortedSplitPoints[end-1]+") should be < maxEx value ("+maxEx+"). "+collectArrayInfo(sortedSplitPoints);
    return Arrays.copyOfRange(sortedSplitPoints,start,end);
  }
private static String collectArrayInfo(double[] array){
StringBuilder info = new StringBuilder("Array info - length: "+array.length + " values: ");
for(double value: array){
info.append(value+" ");
}
return info.toString();
}
public static double[] extractCol(int i, double[][] ary) {
double [] res = new double[ary.length];
for(int j = 0; j < ary.length; ++j)
res[j] = ary[j][i];
return res;
}
  /** Little-endian encode the whole byte array (at most 8 bytes) into a long. */
  public static long encodeAsLong(byte[] b) {
    return encodeAsLong(b, 0, b.length);
  }
public static long encodeAsLong(byte[] b, int off, int len) {
assert len <= 8 : "Cannot encode more then 8 bytes into long: len = " + len;
long r = 0;
int shift = 0;
for(int i = 0; i < len; i++) {
r |= (b[i + off] & 0xFFL) << shift;
shift += 8;
}
return r;
}
public static int encodeAsInt(byte[] b) {
assert b.length == 4 : "Cannot encode more than 4 bytes into int: len = " + b.length;
return (b[0]&0xFF)+((b[1]&0xFF)<<8)+((b[2]&0xFF)<<16)+((b[3]&0xFF)<<24);
}
public static int encodeAsInt(byte[] bs, int at) {
if (at + 4 > bs.length) throw new IndexOutOfBoundsException("Cannot encode more than 4 bytes into int: len = " + bs.length + ", pos=" + at);
return (bs[at]&0xFF)+((bs[at+1]&0xFF)<<8)+((bs[at+2]&0xFF)<<16)+((bs[at+3]&0xFF)<<24);
}
public static byte[] decodeAsInt(int what, byte[] bs, int at) {
if (bs.length < at + 4) throw new IndexOutOfBoundsException("Wrong position " + at + ", array length is " + bs.length);
for (int i = at; i < at+4 && i < bs.length; i++) {
bs[i] = (byte)(what&0xFF);
what >>= 8;
}
return bs;
}
/** Transform given long numbers into byte array.
* Highest 8-bits of the first long will stored in the first field of returned byte array.
*
* Example:
* 0xff18000000000000L -> new byte[] { 0xff, 0x18, 0, 0, 0, 0, 0, 0}
*/
public static byte[] toByteArray(long ...nums) {
if (nums == null || nums.length == 0) return EMPTY_BYTE_ARRAY;
byte[] result = new byte[8*nums.length];
int c = 0;
for (long n : nums) {
for (int i = 0; i < 8; i++) {
result[c*8 + i] = (byte) ((n >>> (56 - 8 * i)) & 0xFF);
}
c++;
}
return result;
}
public static byte[] toByteArray(int[] ary) {
byte[] r = new byte[ary.length];
for (int i = 0; i < ary.length; i++) {
r[i] = (byte) (ary[i] & 0xff);
}
return r;
}
public static boolean equalsAny(long value, long...lhs) {
if (lhs == null || lhs.length == 0) return false;
for (long lhValue : lhs) {
if (value == lhValue) return true;
}
return false;
}
/**
* Convert an array of primitive types into an array of corresponding boxed types. Due to quirks of Java language
* this cannot be done in any generic way -- there should be a separate function for each use case...
* @param arr input array of `char`s
* @return output array of `Character`s
*/
public static Character[] box(char[] arr) {
Character[] res = new Character[arr.length];
for (int i = 0; i < arr.length; i++)
res[i] = arr[i];
return res;
}
/**
* Convert an ArrayList of Integers to a primitive int[] array.
*/
public static int[] toPrimitive(ArrayList<Integer> arr) {
int[] res = new int[arr.size()];
for (int i = 0; i < res.length; i++)
res[i] = arr.get(i);
return res;
}
public static boolean isSorted(int[] vals) {
for (int i = 1; i < vals.length; ++i)
if (vals[i - 1] > vals[i]) return false;
return true;
}
public static boolean isSorted(double[] vals) {
for (int i = 1; i < vals.length; ++i)
if (vals[i - 1] > vals[i]) return false;
return true;
}
public static byte[] constAry(int len, byte b) {
byte[] ary = new byte[len];
Arrays.fill(ary, b);
return ary;
}
public static double[] constAry(int len, double c) {
double[] ary = new double[len];
Arrays.fill(ary, c);
return ary;
}
public static double[] toDouble(float[] floats) {
if (floats == null)
return null;
double[] ary = new double[floats.length];
for (int i = 0; i < floats.length; i++)
ary[i] = floats[i];
return ary;
}
public static double[] toDouble(int[] ints) {
if (ints == null)
return null;
double[] ary = new double[ints.length];
for (int i = 0; i < ints.length; i++)
ary[i] = ints[i];
return ary;
}
public static boolean isInstance(Object object, Class[] comparedClasses) {
for (Class c : comparedClasses) {
if (c.isInstance(object)) return true;
}
return false;
}
/**
* Count number of occurrences of element in given array.
*
* @param array array in which number of occurrences should be counted.
* @param element element whose occurrences should be counted.
*
* @return number of occurrences of element in given array.
*/
public static int occurrenceCount(byte[] array, byte element) {
int cnt = 0;
for (byte b : array)
if (b == element)
cnt++;
return cnt;
}
public static String findLongestCommonPrefix(String inputArray[]) {
String referenceWord = inputArray[0];
String result = "";
for (int j = 1; j <= referenceWord.length(); j++) {
String prefix = referenceWord.substring(0, j);
if (isPresentInAllWords(prefix, inputArray) && result.length() < prefix.length()) {
result = prefix;
}
}
return result;
}
private static boolean isPresentInAllWords(String prefix, String[] words) {
int n = words.length, k;
for (k = 1; k < n; k++) {
if (!words[k].startsWith(prefix)) {
return false;
}
}
return true;
}
  /**
   *
   * @return Array dimension of array.length with values from uniform distribution with bounds taken from array.
   * For example first value of the result is from Unif(First column min value, First column max value)
   */
  public static double[] uniformDistFromArray(double[][] array, long seed) {
    double[] p = new double[array.length];
    Random random = RandomUtils.getRNG(seed); // deterministic for a fixed seed
    for (int col = 0; col < array.length; col++) {
      double[] minMax = ArrayUtils.minMaxValue(array[col]); // [0]=min, [1]=max of this row
      double min = minMax[0];
      double max = minMax[1];
      p[col] = min + random.nextDouble() * (max - min); // uniform draw scaled into [min, max)
    }
    return p;
  }
/*
* Linear interpolation values in the array with Double.NaN values.
* The interpolation always starts from the first item of the array.
* The last element of array cannot be Double.NaN.
*
* @param array input array with Double.NaN values
*/
public static void interpolateLinear(double[] array){
assert array.length > 0 && !Double.isNaN(array[array.length-1]):
"Input array length should be > 0 and the first item should not be NaN";
if (array.length == 1){
return;
}
List<Integer> nonNullIdx = new ArrayList<>();
List<Integer> steps = new ArrayList<>();
int tmpStep = 0;
for (int i = 0; i < array.length; i++) {
if (!Double.isNaN(array[i])) {
nonNullIdx.add(i);
if (tmpStep != 0) {
steps.add(tmpStep);
}
tmpStep = 0;
}
else {
tmpStep++;
}
}
if(nonNullIdx.size() == 0) return;
double start = Double.NaN, end = Double.NaN, step = Double.NaN, mean = Double.NaN;
for (int i=0; i<array.length; i++) {
// always begin with 0
if(i == 0 && Double.isNaN(array[i])) {
start = 0;
end = array[nonNullIdx.get(0)];
step = 1.0 / (double)(steps.get(0) + 1);
mean = step;
array[i] = start * (1 - mean) + end * mean;
mean += step;
} else if (!Double.isNaN(array[i]) && nonNullIdx.size() > 1 && steps.size() > 0) {
start = array[nonNullIdx.get(0)];
end = array[nonNullIdx.get(1)];
step = 1.0 / (double)(steps.get(0) + 1);
mean = step;
nonNullIdx.remove(0);
steps.remove(0);
} else if (Double.isNaN(array[i])) {
array[i] = start * (1 - mean) + end * mean;
mean += step;
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/AtomicUtils.java
|
package water.util;
import sun.misc.Unsafe;
import water.nbhm.UtilUnsafe;
/**
 * Lock-free atomic updates on bare primitive arrays via sun.misc.Unsafe CAS.
 * Unlike the java.util.concurrent.atomic wrappers, the raw array stays exposed
 * so readers can index it directly without any indirection.
 */
public abstract class AtomicUtils {
  // Atomically-updated float array
  public abstract static class FloatArray {
    private static final Unsafe _unsafe = UtilUnsafe.getUnsafe();
    private static final int _Fbase = _unsafe.arrayBaseOffset(float[].class);
    private static final int _Fscale = _unsafe.arrayIndexScale(float[].class);
    // Raw memory offset of ary[idx], for use with Unsafe CAS.
    private static long rawIndex(final float[] ary, final int idx) {
      assert idx >= 0 && idx < ary.length;
      return _Fbase + idx * _Fscale;
    }
    // Atomic ds[i] += y: CAS on the float's raw int bit pattern (floatToRawIntBits),
    // retrying with a re-read until no concurrent writer interferes.
    static public void add( float ds[], int i, float y ) {
      long adr = rawIndex(ds,i);
      float old = ds[i];
      while( !_unsafe.compareAndSwapInt(ds,adr, Float.floatToRawIntBits(old), Float.floatToRawIntBits(old+y) ) )
        old = ds[i];
    }
    // Pretty-print; +/-Float.MAX_VALUE render as "max"/"min". Note: every element
    // (including the last) is followed by a comma.
    static public String toString( float fs[] ) {
      SB sb = new SB();
      sb.p('[');
      for( float f : fs )
        sb.p(f==Float.MAX_VALUE ? "max": (f==-Float.MAX_VALUE ? "min": Float.toString(f))).p(',');
      return sb.p(']').toString();
    }
  }
  // Atomically-updated double array
  public static class DoubleArray {
    private static final Unsafe _unsafe = UtilUnsafe.getUnsafe();
    private static final int _Dbase = _unsafe.arrayBaseOffset(double[].class);
    private static final int _Dscale = _unsafe.arrayIndexScale(double[].class);
    // Raw memory offset of ary[idx], for use with Unsafe CAS.
    private static long rawIndex(final double[] ary, final int idx) {
      assert idx >= 0 && idx < ary.length;
      return _Dbase + idx * _Dscale;
    }
    // CAS the 8-byte slot via its raw long bit pattern (doubleToRawLongBits).
    static public boolean CAS( double[] ds, int i, double old, double newd ) {
      return _unsafe.compareAndSwapLong(ds,rawIndex(ds,i), Double.doubleToRawLongBits(old), Double.doubleToRawLongBits(newd) );
    }
    // Atomic ds[i] += y (CAS retry loop).
    static public void add( double ds[], int i, double y ) {
      double old;
      while( !CAS(ds,i,old=ds[i],old+y) ) ;
    }
    // Atomic ds[i] = Math.min(ds[i], min).
    static public void min( double ds[], int i, double min ) {
      double old;
      while( !CAS(ds,i,old=ds[i],Math.min(old,min)) ) ;
    }
    // Atomic ds[i] = Math.max(ds[i], max).
    static public void max( double ds[], int i, double max ) {
      double old;
      while( !CAS(ds,i,old=ds[i],Math.max(old,max)) ) ;
    }
  }
  // Atomically-updated long array. Instead of using the similar JDK pieces,
  // allows the bare array to be exposed for fast readers.
  public static class LongArray {
    private static final Unsafe _unsafe = UtilUnsafe.getUnsafe();
    private static final int _Lbase = _unsafe.arrayBaseOffset(long[].class);
    private static final int _Lscale = _unsafe.arrayIndexScale(long[].class);
    // Raw memory offset of ary[idx], for use with Unsafe CAS.
    private static long rawIndex(final long[] ary, final int idx) {
      assert idx >= 0 && idx < ary.length;
      return _Lbase + idx * _Lscale;
    }
    static public void incr( long ls[], int i ) { add(ls,i,1); }
    // Atomic ls[i] += x (CAS retry loop with re-read on failure).
    static public void add( long ls[], int i, long x ) {
      long adr = rawIndex(ls,i);
      long old = ls[i];
      while( !_unsafe.compareAndSwapLong(ls,adr, old, old+x) )
        old = ls[i];
    }
  }
  // Atomically-updated int array. Instead of using the similar JDK pieces,
  // allows the bare array to be exposed for fast readers.
  public static class IntArray {
    private static final Unsafe _unsafe = UtilUnsafe.getUnsafe();
    private static final int _Ibase = _unsafe.arrayBaseOffset(int[].class);
    private static final int _Iscale = _unsafe.arrayIndexScale(int[].class);
    // Raw memory offset of ary[idx], for use with Unsafe CAS.
    private static long rawIndex(final int[] ary, final int idx) {
      assert idx >= 0 && idx < ary.length;
      return _Ibase + idx * _Iscale;
    }
    static public void incr( int is[], int i ) { add(is,i,1); }
    // Atomic is[i] += x (CAS retry loop with re-read on failure).
    static public void add( int is[], int i, int x ) {
      long adr = rawIndex(is,i);
      int old = is[i];
      while( !_unsafe.compareAndSwapInt(is,adr, old, old+x) )
        old = is[i];
    }
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/BinaryFileTransfer.java
|
package water.util;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
/** Hex-string &lt;-&gt; byte-array conversion plus a small binary-file writer. */
public class BinaryFileTransfer {
  /**
   * Hexadecimal string to brute-force convert into an array of bytes.
   * The length of the string must be even.
   * The length of the string is 2x the length of the byte array.
   *
   * @param s Hexadecimal string
   * @return byte array
   * @throws RuntimeException if the string length is odd
   */
  public static byte[] convertStringToByteArr(String s) {
    if ((s.length() % 2) != 0) {
      throw new RuntimeException("String length must be even (was " + s.length() + ")");
    }
    // Decode each hex pair straight into the result; no boxed intermediate list needed.
    byte[] byteArr = new byte[s.length() / 2];
    for (int i = 0; i < byteArr.length; i++) {
      byteArr[i] = (byte) Integer.parseInt(s.substring(2 * i, 2 * i + 2), 16);
    }
    return byteArr;
  }

  /**
   * Write the given bytes to the named file.
   * Fix: the stream is now closed even when a write fails (try-with-resources) — the old
   * version leaked the FileOutputStream on any IOException — and the array is written in
   * one call instead of one byte at a time.
   */
  public static void writeBinaryFile(String fileName, byte[] byteArr) throws IOException {
    try (FileOutputStream out = new FileOutputStream(fileName)) {
      out.write(byteArr);
    }
  }

  /**
   * Array of bytes to brute-force convert into a hexadecimal string.
   * The length of the returned string is byteArr.length * 2.
   *
   * @param byteArr byte array to convert
   * @return hexadecimal string
   */
  public static String convertByteArrToString(byte[] byteArr) {
    StringBuilder sb = new StringBuilder(byteArr.length * 2); // exact final size
    for (byte b : byteArr) {
      sb.append(String.format("%02x", b & 0xff));
    }
    return sb.toString();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/ByteStreams.java
|
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package water.util;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
/**
 * Several helper methods inspired by Guava library - https://github.com/google/guava/. We want to avoid bringing guava dependency when possible.
 *
 * Duplicating some code from that library is a small sacrifice for not bringing the whole dependency
 */
public class ByteStreams {
  /** Fill b completely from the stream, throwing EOFException if it ends early. */
  public static void readFully(InputStream in, byte[] b) throws IOException {
    readFully(in, b, 0, b.length);
  }
  private static void readFully(InputStream in, byte[] b, int off, int len) throws IOException {
    int got = read(in, b, off, len);
    if (got != len) {
      throw new EOFException(
          "reached end of stream after reading " + got + " bytes; " + len + " bytes expected");
    }
  }
  /**
   * Read up to len bytes into b starting at off, looping over short reads.
   * @return the number of bytes actually read; less than len only at end-of-stream
   */
  public static int read(InputStream in, byte[] b, int off, int len) throws IOException {
    checkNotNull(in);
    checkNotNull(b);
    if (len < 0) {
      throw new IndexOutOfBoundsException("len is negative");
    }
    int total = 0;
    for (;;) {
      if (total >= len) break;
      int got = in.read(b, off + total, len - total);
      if (got < 0) break; // end of stream
      total += got;
    }
    return total;
  }
  /** Null-check helper mirroring Guava's Preconditions.checkNotNull. */
  private static <T> T checkNotNull(T reference) {
    if (reference == null) {
      throw new NullPointerException();
    }
    return reference;
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/ChunkSummary.java
|
package water.util;
import water.H2O;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.CategoricalWrappedVec;
import water.fvec.Vec;
/**
* Simple summary of how many chunks of each type are in a Frame
*/
public class ChunkSummary extends MRTask<ChunkSummary> {
  // Run at a raised priority: one level above the current fork/join worker when invoked
  // from one, otherwise just below the high-priority band, so the summary finishes promptly.
  ChunkSummary() { super((byte)(Thread.currentThread() instanceof H2O.FJWThr ? currThrPriority()+1 : H2O.MIN_HI_PRIORITY - 2)); }
  // Lookup table of {chunk class-name prefix, human-readable description}; the linear scan
  // in map() walks this array, so entries are roughly ordered by expected frequency.
  public static final String[][] chunkTypes = new String[][]{
    {"C0L","Constant long"},
    {"C0D","Constant double"},
    {"CBS","Binary"},
    {"CXI","Sparse Integers"}, // Sparse ints
    {"CXF","Sparse Reals"}, // Sparse reals
    {"C1","1-Byte Integers"},
    {"C1N","1-Byte Integers (w/o NAs)"},
    {"C1S","1-Byte Fractions"},
    {"C2","2-Byte Integers"},
    {"C2S","2-Byte Fractions"},
    {"CDecimal","N-Byte Fractions"},
    {"C4","4-Byte Integers"},
    {"C4S","4-Byte Fractions"},
    {"C4F","4-byte Reals"},
    {"C8","8-byte Integers"},
    {"C16","UUIDs"},
    {"CStr","Strings"},
    {"CUD","Unique Reals"},
    {"C8D","64-bit Reals"},
  };
  // OUTPUT
  private long[] chunk_counts;                 // per chunk-type counts, indexed like chunkTypes
  private long total_chunk_count;
  private long[] chunk_byte_sizes;             // per chunk-type byte totals, indexed like chunkTypes
  private long total_chunk_byte_size;
  private long[] byte_size_per_node; //averaged over all chunks
  private double byte_size_per_node_mean;
  private double byte_size_per_node_min;
  private double byte_size_per_node_max;
  private double byte_size_per_node_stddev;
  private long total_row_count;
  private long[] row_count_per_node;           // indexed by H2O cloud node index
  private double row_count_per_node_mean;
  private double row_count_per_node_min;
  private double row_count_per_node_max;
  private double row_count_per_node_stddev;
  private long total_chunk_count_per_col;
  private long[] chunk_count_per_col_per_node; // indexed by H2O cloud node index
  private double chunk_count_per_col_per_node_mean;
  private double chunk_count_per_col_per_node_min;
  private double chunk_count_per_col_per_node_max;
  private double chunk_count_per_col_per_node_stddev;
  // Classifies every chunk of one chunk-row by its concrete Chunk class and accumulates
  // counts/sizes locally; each node only ever writes its own H2O.SELF.index() slot.
  @Override
  public void map(Chunk[] cs) {
    chunk_counts = new long[chunkTypes.length];
    chunk_byte_sizes = new long[chunkTypes.length];
    byte_size_per_node = new long[H2O.CLOUD.size()];
    row_count_per_node = new long[H2O.CLOUD.size()];
    chunk_count_per_col_per_node = new long[H2O.CLOUD.size()];
    for( Chunk c : cs ) { // Can be a big loop, for high column counts
      // Pull out the class name; trim a trailing "Chunk"
      String cname = c.getClass().getSimpleName();
      int nlen = cname.length();
      assert nlen > 5 && cname.charAt(nlen-5)=='C' && cname.charAt(nlen-1)=='k';
      String sname = cname.substring(0,nlen-5);
      // Categorical wrappers delegate storage to an inner chunk; classify by the wrapped type.
      if (sname.equals("CategoricalWrapped")) {
        Chunk ec = ((CategoricalWrappedVec.CategoricalWrappedChunk)c)._c;
        cname = ec.getClass().getSimpleName();
        nlen = cname.length();
        assert nlen > 5 && cname.charAt(nlen-5)=='C' && cname.charAt(nlen-1)=='k';
        sname = cname.substring(0,nlen-5);
      }
      // Table lookup, roughly sorted by frequency
      int j;
      for( j = 0; j < chunkTypes.length; ++j )
        if( sname.equals(chunkTypes[j][0]) )
          break;
      if( j==chunkTypes.length ) throw H2O.fail("Unknown Chunk Type: " + sname);
      chunk_counts[j]++;
      chunk_byte_sizes[j] += c.byteSize();
      byte_size_per_node[H2O.SELF.index()] += c.byteSize();
    }
    // All chunks in cs cover the same row range, so row/chunk-set counters are
    // bumped once per map() call using the first chunk's length.
    row_count_per_node[H2O.SELF.index()] += cs[0].len();
    total_row_count += cs[0].len();
    chunk_count_per_col_per_node[H2O.SELF.index()]++;
    total_chunk_count_per_col++;
  }
  // Element-wise merge of another task's accumulators into this one.
  @Override
  public void reduce(ChunkSummary mrt) {
    ArrayUtils.add(chunk_counts,mrt.chunk_counts);
    ArrayUtils.add(chunk_byte_sizes,mrt.chunk_byte_sizes);
    ArrayUtils.add(byte_size_per_node,mrt.byte_size_per_node);
    ArrayUtils.add(row_count_per_node,mrt.row_count_per_node);
    ArrayUtils.add(chunk_count_per_col_per_node,mrt.chunk_count_per_col_per_node);
    total_row_count += mrt.total_row_count;
    total_chunk_count_per_col += mrt.total_chunk_count_per_col;
  }
  // Computes the final totals and min/max/mean/stddev statistics once all nodes reduced.
  @Override
  protected void postGlobal() {
    // Arrays stay null when the frame had no chunks to map over; nothing to summarize then.
    if (chunk_counts == null || chunk_byte_sizes == null || byte_size_per_node == null) return;
    assert(total_row_count == _fr.numRows()): "total_row_count["+total_row_count+"] != _fr.numRows()["+_fr.numRows()+"]. ";
    // compute counts and sizes
    total_chunk_byte_size = 0;
    total_chunk_count = 0;
    for (int j = 0; j < chunkTypes.length; ++j) {
      total_chunk_byte_size += chunk_byte_sizes[j];
      total_chunk_count += chunk_counts[j];
    }
    long check = 0;
    for (Vec v : _fr.vecs())
      check += v.nChunks();
    assert(total_chunk_count == check);
    // This doesn't always hold, FileVecs have File-based byte size, while Vecs have Chunk-based byte size.
    // assert(total_chunk_byte_size == _fr.byteSize());
    // res layout per MathUtils.min_max_mean_stddev: [min, max, mean, stddev]
    double[] res=MathUtils.min_max_mean_stddev(byte_size_per_node);
    byte_size_per_node_min = res[0];
    byte_size_per_node_max = res[1];
    byte_size_per_node_mean = res[2];
    byte_size_per_node_stddev = res[3];
    res=MathUtils.min_max_mean_stddev(row_count_per_node);
    row_count_per_node_min = res[0];
    row_count_per_node_max = res[1];
    row_count_per_node_mean = res[2];
    row_count_per_node_stddev = res[3];
    res=MathUtils.min_max_mean_stddev(chunk_count_per_col_per_node);
    chunk_count_per_col_per_node_min = res[0];
    chunk_count_per_col_per_node_max = res[1];
    chunk_count_per_col_per_node_mean = res[2];
    chunk_count_per_col_per_node_stddev = res[3];
  }
  // Right-aligns a pretty-printed byte count into a fixed 10-character column.
  String display(long val) { return String.format("%10s", val == 0 ? " 0 B" : PrettyPrint.bytes(val)); }
  /** Builds a table with one row per chunk type actually present, showing counts and sizes. */
  public TwoDimTable toTwoDimTableChunkTypes() {
    final String tableHeader = "Chunk compression summary";
    // Only chunk types with a non-zero count get a row.
    int rows = 0;
    for (int j = 0; j < chunkTypes.length; ++j) if (chunk_counts != null && chunk_counts[j] > 0) rows++;
    final String[] rowHeaders = new String[rows];
    final String[] colHeaders = new String[]{"Chunk Type", "Chunk Name", "Count", "Count Percentage", "Size", "Size Percentage"};
    final String[] colTypes = new String[]{"string", "string", "int", "float", "string", "float"};
    final String[] colFormats = new String[]{"%8s", "%s", "%10d", "%10.3f %%", "%10s", "%10.3f %%"};
    final String colHeaderForRowHeaders = null;
    TwoDimTable table = new TwoDimTable(tableHeader, null, rowHeaders, colHeaders, colTypes, colFormats, colHeaderForRowHeaders);
    int row = 0;
    for (int j = 0; j < chunkTypes.length; ++j) {
      if (chunk_counts != null && chunk_counts[j] > 0) {
        table.set(row, 0, chunkTypes[j][0]);
        table.set(row, 1, chunkTypes[j][1]);
        table.set(row, 2, chunk_counts[j]);
        table.set(row, 3, (float) chunk_counts[j] / total_chunk_count * 100.f);
        table.set(row, 4, display(chunk_byte_sizes[j]));
        table.set(row, 5, (float) chunk_byte_sizes[j] / total_chunk_byte_size * 100.f);
        row++;
      }
    }
    return table;
  }
  /**
   * Builds a per-node distribution table: one row per cloud node, followed by five
   * summary rows (mean, min, max, stddev, total).
   */
  public TwoDimTable toTwoDimTableDistribution() {
    final String tableHeader = "Frame distribution summary";
    // One row per node plus the five trailing aggregate rows.
    int rows = H2O.CLOUD.size() + 5;
    final String[] rowHeaders = new String[rows];
    int row;
    for (row=0; row<rows-5; ++row) {
      rowHeaders[row] = H2O.CLOUD._memary[row].getIpPortString();
    }
    rowHeaders[row++] = "mean";
    rowHeaders[row++] = "min";
    rowHeaders[row++] = "max";
    rowHeaders[row++] = "stddev";
    rowHeaders[row  ] = "total";
    final String[] colHeaders = new String[]{"Size", "Number of Rows", "Number of Chunks per Column", "Number of Chunks"};
    final String[] colTypes = new String[]{"string", "float", "float", "float"};
    final String[] colFormats = new String[]{"%s", "%f", "%f", "%f"};
    final String colHeaderForRowHeaders = "";
    TwoDimTable table = new TwoDimTable(tableHeader, null, rowHeaders, colHeaders, colTypes, colFormats, colHeaderForRowHeaders);
    for (row = 0; row < rows-5; ++row) {
      if (byte_size_per_node != null) {
        table.set(row, 0, display(byte_size_per_node[row]));
        table.set(row, 1, row_count_per_node[row]);
        table.set(row, 2, chunk_count_per_col_per_node[row]);
        // Total chunks per node = chunks-per-column times the number of columns.
        table.set(row, 3, _fr.numCols() * chunk_count_per_col_per_node[row]);
      }
    }
    table.set(row, 0, display((long)byte_size_per_node_mean));
    table.set(row, 1, row_count_per_node_mean);
    table.set(row, 2, chunk_count_per_col_per_node_mean);
    table.set(row++, 3, _fr.numCols()*chunk_count_per_col_per_node_mean);
    table.set(row, 0, display((long)byte_size_per_node_min));
    table.set(row, 1, row_count_per_node_min);
    table.set(row, 2, chunk_count_per_col_per_node_min);
    table.set(row++, 3, _fr.numCols()*chunk_count_per_col_per_node_min);
    table.set(row, 0, display((long)byte_size_per_node_max));
    table.set(row, 1, row_count_per_node_max);
    table.set(row, 2, chunk_count_per_col_per_node_max);
    table.set(row++, 3, _fr.numCols()*chunk_count_per_col_per_node_max);
    table.set(row, 0, display((long)byte_size_per_node_stddev));
    table.set(row, 1, row_count_per_node_stddev);
    table.set(row, 2, chunk_count_per_col_per_node_stddev);
    table.set(row++, 3, _fr.numCols()*chunk_count_per_col_per_node_stddev);
    table.set(row, 0, display(total_chunk_byte_size));
    table.set(row, 1, total_row_count);
    table.set(row, 2, total_chunk_count_per_col);
    table.set(row, 3, _fr.numCols()*total_chunk_count_per_col);
    return table;
  }
  // Renders both tables; warns when data is skewed (stddev > 20% of mean across nodes).
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append(toTwoDimTableChunkTypes().toString());
    sb.append(toTwoDimTableDistribution().toString());
    if (H2O.CLOUD.size() > 1 && byte_size_per_node_stddev > 0.2 * byte_size_per_node_mean) {
      sb.append("** Note: Dataset is not well distributed, consider rebalancing **\n");
    }
    return sb.toString();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/CollectionUtils.java
|
package water.util;
import java.util.*;
/**
*/
public class CollectionUtils {

  // Static utility holder — not instantiable (consistent with DKVUtils).
  private CollectionUtils() {}

  /**
   * Builds a HashMap pairing {@code keys[i]} with {@code values[i]}.
   *
   * @param keys   array of keys
   * @param values array of values, same length as {@code keys}
   * @return a mutable map with one entry per key/value pair
   * @throws IllegalArgumentException if the arrays differ in length
   */
  public static <K, V> Map<K, V> createMap(K[] keys, V[] values) {
    // Explicit check instead of `assert`: the precondition must hold even when
    // assertions are disabled (the default in production JVMs).
    if (keys.length != values.length)
      throw new IllegalArgumentException("Lengths of keys and values should be the same: "
          + keys.length + " != " + values.length);
    // Size for the default 0.75 load factor so the map never rehashes while filling.
    Map<K, V> res = new HashMap<>(Math.max(16, (int) (keys.length / 0.75f) + 1));
    for (int i = 0; i < keys.length; i++)
      res.put(keys[i], values[i]);
    return res;
  }

  /** Convert a Collection of Bytes to a primitive array byte[]. */
  public static byte[] unboxBytes(Collection<Byte> coll) {
    byte[] res = new byte[coll.size()];
    int i = 0;
    for (Byte elem : coll)
      res[i++] = elem;
    return res;
  }

  /** Convert a Collection of Strings to a plain array String[]. */
  public static String[] unboxStrings(Collection<String> coll) {
    return coll.toArray(new String[coll.size()]);
  }

  /** Convert a Collection of Strings[] to a plain array String[][]. */
  public static String[][] unboxStringArrays(Collection<String[]> coll) {
    return coll.toArray(new String[coll.size()][]);
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/CompressionFactory.java
|
package water.util;
import com.github.luben.zstd.ZstdOutputStream;
import water.Iced;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Constructor;
import java.util.zip.GZIPOutputStream;
public class CompressionFactory extends Iced<CompressionFactory> {

  /** Compression scheme: a well-known alias or a fully qualified OutputStream-wrapper class name. */
  private final String _name;

  private CompressionFactory(String name) {
    _name = name;
  }

  /**
   * Wraps {@code os} in the compressing stream selected by this factory's name.
   * Known aliases are handled directly; anything else is treated as a class name
   * and instantiated reflectively.
   */
  OutputStream wrapOutputStream(OutputStream os) throws IOException {
    String alias = _name.toLowerCase();
    if (alias.equals("none")) {
      return os;
    } else if (alias.equals("gzip")) {
      return new GZIPOutputStream(os);
    } else if (alias.equals("zstd")) {
      return new ZstdOutputStream(os);
    } else if (alias.equals("bzip2")) {
      return wrapDynamic("org.python.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream", os);
    } else if (alias.equals("snappy")) {
      return wrapDynamic("org.xerial.snappy.SnappyOutputStream", os);
    } else {
      return wrapDynamic(_name, os);
    }
  }

  // Instantiates className reflectively via its (OutputStream) constructor.
  private OutputStream wrapDynamic(String className, OutputStream os) {
    try {
      Constructor<?> ctor = Class.forName(className).getConstructor(OutputStream.class);
      return (OutputStream) ctor.newInstance(os);
    } catch (ReflectiveOperationException e) {
      throw new IllegalArgumentException("Cannot create a compressor using class " + className, e);
    }
  }

  // Eagerly wraps a throwaway stream so a missing/broken compressor fails at make() time.
  private void checkAvailability() {
    try {
      wrapOutputStream(new ByteArrayOutputStream());
    } catch (IOException e) {
      throw new IllegalStateException("Initialization failed for compression method " + _name, e);
    }
  }

  /** Creates a factory for the given scheme, verifying up front that it is usable. */
  public static CompressionFactory make(String name) {
    CompressionFactory factory = new CompressionFactory(name);
    factory.checkAvailability();
    return factory;
  }

  public String getName() {
    return _name;
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/Countdown.java
|
package water.util;
import water.Iced;
import java.util.Date;
/**
* Simple countdown to encapsulate timeouts and durations.
* time_limit_millis <= 0 is interpreted as infinite countdown (no timeout).
* All durations in this class are milliseconds:
* <li>
* <ul>{@link #duration()}</ul>
* <ul>{@link #elapsedTime()}</ul>
* <ul>{@link #remainingTime()}</ul>
* </li>
*/
public class Countdown extends Iced<Countdown> {
  // All timestamps are epoch milliseconds; 0 means "not set".  Field names are kept
  // unchanged because Iced serialization is field-based.
  private long _time_limit_millis;
  private long _start_time;
  private long _stop_time;

  /** Builds a countdown from a duration in seconds; non-positive means no time limit. */
  public static Countdown fromSeconds(double seconds) {
    return new Countdown(seconds <= 0 ? 0 : (long) Math.ceil(seconds * 1000));
  }

  public Countdown(long time_limit_millis) {
    _time_limit_millis = time_limit_millis;
  }

  public Countdown(long _time_limit_millis, boolean start) {
    this(_time_limit_millis);
    if (start) start();
  }

  public Date start_time() {
    return new Date(_start_time);
  }

  public Date stop_time() {
    return new Date(_stop_time);
  }

  /** Total measured time; valid only after the countdown was started and then stopped. */
  public long duration() {
    if (ended()) return elapsedTime();
    throw new IllegalStateException("Countdown was never started or stopped.");
  }

  /** Starts (or restarts after reset) the countdown; fails if it is already ticking. */
  public void start() {
    if (running()) throw new IllegalStateException("Countdown is already running.");
    reset();
    _start_time = now();
  }

  /** Started and not yet stopped. */
  public boolean running() {
    return _start_time > 0 && _stop_time == 0;
  }

  /** Started and already stopped. */
  public boolean ended() {
    return _start_time > 0 && _stop_time > 0;
  }

  /** Stops the countdown if it is running; returns the elapsed time either way. */
  public long stop() {
    if (running()) _stop_time = now();
    return elapsedTime();
  }

  /** Milliseconds elapsed so far (running), total (ended), or 0 (never started). */
  public long elapsedTime() {
    if (running()) return now() - _start_time;
    return ended() ? _stop_time - _start_time : 0;
  }

  /** Milliseconds until the limit expires; Long.MAX_VALUE when there is no limit. */
  public long remainingTime() {
    if (ended()) return 0;
    if (!running()) return _time_limit_millis > 0 ? _time_limit_millis : Long.MAX_VALUE;
    return _time_limit_millis > 0
        ? Math.max(0, _start_time + _time_limit_millis - now())
        : Long.MAX_VALUE;
  }

  /** Clears both timestamps so the countdown can be started again. */
  public void reset() {
    _start_time = 0;
    _stop_time = 0;
  }

  /** True once a started countdown's elapsed time exceeds a positive limit. */
  public boolean timedOut() {
    return _start_time > 0 && _time_limit_millis > 0 && elapsedTime() > _time_limit_millis;
  }

  private long now() {
    return System.currentTimeMillis();
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/DCTTransformer.java
|
package water.util;
import hex.Transformer;
import water.DKV;
import water.H2O;
import water.Job;
import water.Key;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.Frame;
public class DCTTransformer extends Transformer<Frame> {
  /** Input dataset to transform */
  public Frame _dataset;
  /** Dimensions of the input array (3 ints) Length, Width, Depth */
  int[] _dimensions = null;
  /** Whether to do inverse DCT; hard-wired to forward transform here */
  final boolean _inverse = false;
  public DCTTransformer(Key<Frame> dest) {
    super(dest, Frame.class.getName(), "DCTTransformer job");
  }
  /**
   * Validates the requested dimensions against the dataset, then starts a job that
   * runs the DCT and publishes the transformed Frame under the job's result key.
   * Dispatches to the 1-D, 2-D or 3-D transform depending on which trailing
   * dimensions equal 1.
   *
   * @return the started {@link Job} producing the transformed Frame
   * @throws H2OIllegalArgumentException on missing dataset or invalid dimensions
   */
  @Override protected Job<Frame> execImpl() {
    if (_dimensions.length != 3)
      throw new H2OIllegalArgumentException("Need 3 dimensions (width/height/depth): WxHxD (1D: Wx1x1, 2D: WxHx1, 3D: WxHxD)");
    if (ArrayUtils.minValue(_dimensions) < 1)
      throw new H2OIllegalArgumentException("Dimensions must be >= 1");
    if( _dataset == null ) throw new H2OIllegalArgumentException("Missing dataset");
    // The flattened WxHxD volume must fit within the frame's columns.
    if (_dataset.numCols() < _dimensions[0] * _dimensions[1] * _dimensions[2])
      throw new H2OIllegalArgumentException("Product of dimensions WxHxD must be <= #columns (" + _dataset.numCols() + ")");
    MathUtils.DCT.initCheck(_dataset, _dimensions[0], _dimensions[1], _dimensions[2]);
    return _job.start(
      new H2O.H2OCountedCompleter() {
        @Override
        public void compute2() {
          Frame fft;
          // Use the lowest-dimensional transform that covers the request:
          // H==D==1 -> 1-D, D==1 -> 2-D, otherwise full 3-D.
          if (_dimensions[1] == 1 && _dimensions[2] == 1) {
            fft = MathUtils.DCT.transform1D(_dataset, _dimensions[0], _inverse);
          } else if (_dimensions[2] == 1) {
            fft = MathUtils.DCT.transform2D(_dataset, _dimensions[0], _dimensions[1], _inverse);
          } else {
            fft = MathUtils.DCT.transform3D(_dataset, _dimensions[0], _dimensions[1], _dimensions[2], _inverse);
          }
          // Publish the result Frame into the DKV under the job's result key.
          Frame dest = new Frame(_job._result, fft.names(), fft.vecs());
          DKV.put(dest);
          tryComplete();
        }
      }, 1);
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/DKVUtils.java
|
package water.util;
import water.Job;
import water.Key;
import water.Lockable;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
public final class DKVUtils {
  // Static utility holder — not instantiable.
  private DKVUtils() {}
  /**
   * Runs {@code update} on {@code target} while holding the target's distributed write lock,
   * then publishes the modified target via {@code target.update(jobKey)}.  The lock is always
   * released, even when {@code update} or the publish throws.
   *
   * @param target the DKV-locked object to mutate
   * @param update mutation to apply while the write lock is held
   * @param jobKey job on whose behalf the lock is taken
   */
  public static <T extends Lockable<T>> void atomicUpdate(Lockable<T> target, Runnable update, Key<Job> jobKey) {
    target.write_lock(jobKey);
    try {
      update.run();
      target.update(jobKey);
    } finally {
      target.unlock(jobKey);
    }
  }
  /**
   * Like {@link #atomicUpdate(Lockable, Runnable, Key)}, additionally serialized through a
   * JVM-local {@link ReadWriteLock} to coordinate threads on this node.
   *
   * NOTE(review): when the current thread already holds the reentrant write lock, only
   * {@code update.run()} executes — the distributed lock is NOT re-taken and
   * {@code target.update(jobKey)} is NOT called; presumably the outermost caller performs
   * the DKV publish — confirm against call sites before relying on this.
   */
  public static <T extends Lockable<T>> void atomicUpdate(Lockable<T> target, Runnable update, Key<Job> jobKey, ReadWriteLock lock) {
    final Lock writeLock = lock.writeLock();
    if (lock instanceof ReentrantReadWriteLock && ((ReentrantReadWriteLock.WriteLock)writeLock).isHeldByCurrentThread()) {
      // Reentrant case: this thread is already inside an atomicUpdate on this lock.
      writeLock.lock();
      try {
        update.run();
      } finally {
        writeLock.unlock();
      }
    } else {
      // First entry: take the local lock, then perform the full distributed update.
      writeLock.lock();
      try {
        atomicUpdate(target, update, jobKey);
      } finally {
        writeLock.unlock();
      }
    }
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/DecompressionFactory.java
|
package water.util;
import com.github.luben.zstd.ZstdInputStream;
import water.Iced;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Constructor;
import java.util.zip.GZIPInputStream;
public class DecompressionFactory extends Iced<DecompressionFactory> {

  /** Decompression scheme: a well-known alias or a fully qualified InputStream-wrapper class name. */
  private final String _name;

  private DecompressionFactory(String name) {
    _name = name;
  }

  /**
   * Wraps {@code is} in the decompressing stream selected by this factory's name.
   * Known aliases are handled directly; anything else is treated as a class name
   * and instantiated reflectively.
   */
  InputStream wrapInputStream(InputStream is) throws IOException {
    String alias = _name.toLowerCase();
    if (alias.equals("none")) {
      return is;
    } else if (alias.equals("gzip")) {
      return new GZIPInputStream(is);
    } else if (alias.equals("zstd")) {
      return new ZstdInputStream(is);
    } else if (alias.equals("bzip2")) {
      return wrapDynamic("org.python.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream", is);
    } else if (alias.equals("snappy")) {
      return wrapDynamic("org.xerial.snappy.SnappyInputStream", is);
    } else {
      return wrapDynamic(_name, is);
    }
  }

  // Instantiates className reflectively via its (InputStream) constructor.
  private InputStream wrapDynamic(String className, InputStream os) {
    try {
      Constructor<?> ctor = Class.forName(className).getConstructor(InputStream.class);
      return (InputStream) ctor.newInstance(os);
    } catch (ReflectiveOperationException e) {
      throw new IllegalArgumentException("Cannot create a decompressor using class " + className, e);
    }
  }

  /** Creates a factory for the given scheme (no eager availability check). */
  public static DecompressionFactory make(String name) {
    return new DecompressionFactory(name);
  }
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/util/DistributedException.java
|
package water.util;
import water.AutoBuffer;
import water.Freezable;
import water.H2O;
import water.TypeMap;
import java.io.Serializable;
import java.util.Arrays;
/**
* Created by tomas on 3/2/16.
*/
public final class DistributedException extends RuntimeException {

  /** Creates an exception with no cause; the local stack trace is truncated at the HTTP layer. */
  public DistributedException(){truncateStackTrace(true);}

  public DistributedException(Throwable cause){ this(cause,true);}

  /**
   * Wraps a remote failure, recording the originating node in the message.
   *
   * @param cause          the remote failure being propagated
   * @param keepStackTrace false drops the local stack trace entirely
   */
  public DistributedException(Throwable cause, boolean keepStackTrace){
    super("DistributedException from " + H2O.SELF + ": '" + cause.getMessage() + "'",cause);
    truncateStackTrace(keepStackTrace);
  }

  public DistributedException(String msg, Throwable cause){
    super(msg,cause);
    try {
      truncateStackTrace(true);
    }catch(Throwable t) {
      // just in case it throws, do nothing, truncating stacktrace not really that important
    }
  }

  @Override
  public String toString(){
    // Bug fix: the no-arg constructor leaves the cause null, and the original
    // unconditionally called getCause().toString(), throwing NPE from toString().
    Throwable cause = getCause();
    return cause == null ? getMessage() : getMessage() + ", caused by " + cause.toString();
  }

  // Cuts the stack trace off at the HTTP entry point ("JettyHTTPD.java") to hide
  // server plumbing frames; with keepStackTrace==false the trace is dropped entirely.
  private void truncateStackTrace(boolean keepStackTrace){
    if(keepStackTrace) {
      StackTraceElement[] stackTrace = getStackTrace();
      int i = 0;
      for (; i < stackTrace.length; ++i)
        if (stackTrace[i].getFileName() != null && stackTrace[i].getFileName().equals("JettyHTTPD.java"))
          break;
      setStackTrace(Arrays.copyOf(stackTrace, i));
    } else setStackTrace(new StackTraceElement[0]);
  }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.