java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/RadixOrder.java
package water.rapids;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.ArrayUtils;
import water.util.Log;
import water.util.MathUtils;
import java.math.BigInteger;
import static java.math.BigInteger.ONE;
import static java.math.BigInteger.ZERO;
import static water.rapids.Merge.MEM_MULTIPLIER;
import static water.rapids.Merge.OPTIMAL_BATCHSIZE;
// counted completer so that left and right index can run at the same time
class RadixOrder extends H2O.H2OCountedCompleter<RadixOrder> {
private final Frame _DF;
private final boolean _isLeft;
private final int _whichCols[], _id_maps[][];
final boolean _isInt[];
final boolean _isCategorical[];
final int _shift[];
final int _bytesUsed[];
final BigInteger _base[];
final int[] _ascending; // 1 to sort ASC, -1 to sort DESC (values are multiplied by this factor below)
final long _mergeId;
RadixOrder(Frame DF, boolean isLeft, int whichCols[], int id_maps[][], int[] ascending, long mergeId) {
_DF = DF;
_isLeft = isLeft;
_whichCols = whichCols;
_id_maps = id_maps;
_shift = new int[_whichCols.length]; // currently only _shift[0] is used
_bytesUsed = new int[_whichCols.length];
//_base = new long[_whichCols.length];
_base = new BigInteger[_whichCols.length];
_isInt = new boolean[_whichCols.length];
_isCategorical = new boolean[_whichCols.length];
_ascending = ascending;
_mergeId = mergeId;
}
@Override
public void compute2() {
long t0 = System.nanoTime(), t1;
initBaseShift();
// The MSB is stored (seemingly wastefully on first glance) because we need
// it when aligning two keys in Merge()
int keySize = ArrayUtils.sum(_bytesUsed);
// 256MB is the DKV value-size limit; / 2 because we fit o and x together in one OXBatch.
int batchSize = OPTIMAL_BATCHSIZE; // larger values need more memory but fewer remote row fetches, and vice versa
// go through all node memory and reduce batchSize if needed
long minMem = Long.MAX_VALUE; // free memory of the node with the least memory
for (H2ONode h2o : H2O.CLOUD._memary) {
long mem = h2o._heartbeat.get_free_mem(); // in bytes
if (mem < minMem)
minMem = mem;
}
// At some point, an MSB's worth of compare columns will be stored on any one node. Make sure we have enough memory
// for that.
long minSortMemory = _whichCols.length*Math.max(_DF.numRows(), batchSize)*8*MEM_MULTIPLIER;
if (minMem < minSortMemory) // if not enough, just throw an error and get out
throw new RuntimeException("The minimum memory per node needed is too small to accommodate the sorting/merging " +
"operation. Make sure the smallest node has at least "+minSortMemory+" bytes of memory.");
// an array of size batchsize by numCols will be created for each sorted chunk in the end. Memory is in bytes
long dataSetMemoryPerRow = 8*((long) _DF.numCols())*MEM_MULTIPLIER; // 8 to translate 64 bits into 8 bytes, MEM_MULTIPLIER to scale up
long batchMemory = Math.max((long) batchSize*dataSetMemoryPerRow, minSortMemory); // memory needed to store one chunk of dataset frame
if (batchMemory > minMem) { // batchsize is too big for node with smallest memory, reduce it
batchSize = (int) Math.floor(minMem/dataSetMemoryPerRow);
if (batchSize == 0)
throw new RuntimeException("The minimum memory per node needed is too small to accommodate the sorting/merging " +
"operation. Make sure the smallest node has at least "+minMem*100+" bytes of memory.");
}
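// Worked example (illustrative numbers, not from the source): with numCols=10
// and MEM_MULTIPLIER=2, dataSetMemoryPerRow = 8*10*2 = 160 bytes; if the
// smallest node has minMem = 1e9 bytes free and batchMemory exceeds that,
// batchSize is cut to floor(1e9/160) = 6,250,000 rows.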
// The Math.max ensures that batches of o and x are aligned, even for wide
// keys. To save % and / in deep iteration; e.g. in insert().
Log.debug("Time to use rollup stats to determine biggestBit: " + ((t1=System.nanoTime()) - t0) / 1e9+" seconds."); t0=t1;
if( _whichCols.length > 0 ) // batchsize is not used here
new RadixCount(_isLeft, _base[0], _shift[0], _whichCols[0], _id_maps, _ascending[0], _mergeId).doAll(_DF.vec(_whichCols[0]));
Log.debug("Time of MSB count MRTask left local on each node (no reduce): " + ((t1=System.nanoTime()) - t0) / 1e9+" seconds."); t0=t1;
// NOT TO DO: we do need the full allocation of x[] and o[]. We need o[] anyway. x[] will be compressed and dense.
// o is the full ordering vector of the right size
// x is the byte key aligned with o
// o AND x are what bmerge() needs. Pushing x to each node as well as o avoids inter-node comms.
// Workaround for incorrectly blocking closeLocal() in MRTask is to do a
// double MRTask and pass a key between them to pass output from first on
// that node to second on that node.
// TODO: fix closeLocal() blocking issue and revert to simpler usage of closeLocal()
Key linkTwoMRTask = Key.make();
if( _whichCols.length > 0 )
new SplitByMSBLocal(_isLeft, _base, _shift[0], keySize, batchSize, _bytesUsed, _whichCols, linkTwoMRTask,
_id_maps, _ascending, _mergeId).doAll(_DF.vecs(_whichCols)); // postLocal needs DKV.put()
Log.debug("SplitByMSBLocal MRTask (all local per node, no network) took : " + ((t1=System.nanoTime()) - t0) / 1e9+" seconds."); t0=t1;
if( _whichCols.length > 0 )
new SendSplitMSB(linkTwoMRTask).doAllNodes();
Log.debug("SendSplitMSB across all nodes took : " + ((t1=System.nanoTime()) - t0) / 1e9+" seconds."); t0=t1;
// dispatch in parallel
RPC[] radixOrders = new RPC[256];
Log.info("Sending SingleThreadRadixOrder async RPC calls ... ");
for (int i = 0; i < 256; i++)
radixOrders[i] = new RPC<>(SplitByMSBLocal.ownerOfMSB(i), new SingleThreadRadixOrder(_DF, _isLeft, batchSize,
keySize, /*nGroup,*/ i, _mergeId)).call();
Log.debug("took : " + ((t1=System.nanoTime()) - t0) / 1e9); t0=t1;
Log.info("Waiting for RPC SingleThreadRadixOrder to finish ... ");
for( RPC rpc : radixOrders )
rpc.get();
Log.debug("took " + (System.nanoTime() - t0) / 1e9+" seconds.");
tryComplete();
// serial, do one at a time
// for (int i = 0; i < 256; i++) {
// H2ONode node = MoveByFirstByte.ownerOfMSB(i);
// SingleThreadRadixOrder radixOrder = new RPC<>(node, new SingleThreadRadixOrder(DF, batchSize, keySize, nGroup, i)).call().get();
// _o[i] = radixOrder._o;
// _x[i] = radixOrder._x;
// }
// If sum(nGroup) == nrow then the index is unique.
// 1) useful to know if an index is unique or not (when joining to it we
// know multiples can't be returned so can allocate more efficiently)
// 2) If all groups are size 1 there's no need to actually allocate an
// all-1 group size vector (perhaps user was checking for uniqueness by
// counting group sizes)
// 3) some nodes may have unique input and others may contain dups; e.g.,
// in the case of looking for rare dups. So only a few threads may have
// found dups.
// 4) can sweep again in parallel and cache-efficient finding the groups,
// and allocate known size up front to hold the group sizes.
// 5) can return to Flow early with the group count. User may now realise
// they selected wrong columns and cancel early.
}
private void initBaseShift() {
for (int i=0; i<_whichCols.length; i++) {
Vec col = _DF.vec(_whichCols[i]);
// TODO: strings that aren't already categoricals and fixed precision double.
BigInteger max=ZERO;
_isInt[i] = col.isCategorical() || col.isInt();
_isCategorical[i] = col.isCategorical();
if (col.isCategorical()) {
// simpler and more robust for now for all categorical bases to be 0,
// even though some subsets may be far above 0; i.e. forgo uncommon
// efficiency savings for now
_base[i] = ZERO;
assert _id_maps[i] != null;
max = _isLeft?BigInteger.valueOf(ArrayUtils.maxValue(_id_maps[i])):BigInteger.valueOf(col.domain().length);
} else {
double colMin = col.min();
double colMax = col.max();
if (col.isInt()) {
GetLongStatsTask glst = GetLongStatsTask.getLongStats(col);
long colMini = glst._colMin;
long colMaxi = glst._colMax;
_base[i] = BigInteger.valueOf(Math.min(colMini, colMaxi*(_ascending[i])));
max = BigInteger.valueOf(Math.max(colMaxi, colMini*(_ascending[i])));
} else{
_base[i] = MathUtils.convertDouble2BigInteger(Math.min(colMin, colMax*(_ascending[i])));
max = MathUtils.convertDouble2BigInteger(Math.max(colMax, colMin*(_ascending[i])));
}
}
// Compute the span or range between min and max. Compute a
// shift amount to bring the high order bits of the range down
// low for radix sorting. Lower the lower-bound to be an even
// power of the shift.
long chk = computeShift(max, i);
// On rare occasions, lowering the lower-bound also increases
// the span or range until another bit is needed in the sort.
// In this case, we need to re-compute the shift amount and
// perhaps use an even lower lower-bound.
if( chk == 256 ) chk = computeShift(max, i);
assert chk <= 255;
assert chk >= 0;
_bytesUsed[i] = Math.min(8, (_shift[i]+15) / 8); // should not go over 8 bytes
//assert (biggestBit-1)/8 + 1 == _bytesUsed[i];
}
}
// TODO: push these into Rollups?
private static class GetLongStatsTask extends MRTask<GetLongStatsTask> {
long _colMin=Long.MAX_VALUE;
long _colMax=Long.MIN_VALUE;
static GetLongStatsTask getLongStats(Vec col) {
return new GetLongStatsTask().doAll(col);
}
@Override public void map(Chunk c) {
for(int i=0; i<c._len; ++i) {
if( !c.isNA(i) ) {
long l = c.at8(i);
_colMin = Math.min(_colMin, l);
_colMax = Math.max(_colMax, l);
}
}
}
@Override public void reduce(GetLongStatsTask that) {
_colMin = Math.min(_colMin, that._colMin);
_colMax = Math.max(_colMax, that._colMax);
}
}
// Compute the span or range between min and max. Compute a
// shift amount to bring the high order bits of the range down
// low for radix sorting. Lower the lower-bound to be an even
// power of the shift.
private long computeShift( final BigInteger max, final int i ) {
int biggestBit = 0;
int rangeD = max.subtract(_base[i]).add(ONE).add(ONE).bitLength();
biggestBit = _isInt[i] ? rangeD : (rangeD == 64 ? 64 : rangeD + 1);
// TODO: feed back to R warnings()
if (biggestBit < 8) Log.warn("biggest bit should be >= 8 otherwise need to dip into next column (TODO)");
assert biggestBit >= 1;
_shift[i] = Math.max(8, biggestBit)-8;
long MSBwidth = 1L << _shift[i];
BigInteger msbWidth = BigInteger.valueOf(MSBwidth);
if (_base[i].mod(msbWidth).compareTo(ZERO) != 0) {
_base[i] = _isInt[i] ? msbWidth.multiply(_base[i].divide(msbWidth).add(_base[i].signum()<0 ? BigInteger.valueOf(-1L) : ZERO))
                     : msbWidth.multiply(_base[i].divide(msbWidth)); // dealing with unsigned integer here
assert _base[i].mod(msbWidth).compareTo(ZERO) == 0;
}
return max.subtract(_base[i]).add(ONE).shiftRight(_shift[i]).intValue();
}
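// Worked example (illustrative): for an int column with min=-5 and max=1000,
// base starts at -5 and range = 1000-(-5)+2 = 1007, whose bitLength is 10,
// so _shift = 10-8 = 2 and MSBwidth = 4. Since -5 is not a multiple of 4,
// base is rounded down to -8; computeShift then returns
// (1000-(-8)+1) >> 2 = 252, i.e. 252 of the 256 MSB bins are used.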
private static class SendSplitMSB extends MRTask<SendSplitMSB> {
final Key _linkTwoMRTask;
SendSplitMSB(Key linkTwoMRTask) { _linkTwoMRTask = linkTwoMRTask; }
@Override public void setupLocal() {
SplitByMSBLocal.MOVESHASH.get(_linkTwoMRTask).sendSplitMSB();
SplitByMSBLocal.MOVESHASH.remove(_linkTwoMRTask);
}
}
}
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/Rapids.java
package water.rapids;
import org.apache.commons.lang.math.NumberUtils;
import water.H2O;
import water.fvec.Frame;
import water.rapids.ast.AstExec;
import water.rapids.ast.AstFunction;
import water.rapids.ast.AstParameter;
import water.rapids.ast.AstRoot;
import water.rapids.ast.params.*;
import water.util.CollectionUtils;
import water.util.StringUtils;
import java.util.ArrayList;
import java.util.Map;
import java.util.Set;
/**
* <p> Rapids is an interpreter of abstract syntax trees.
*
* <p> This file contains the AstRoot parser and parser helper functions.
* AstRoot Execution starts in the AstExec file, but spreads throughout Rapids.
*
* <p> Trees have a Lisp-like structure with the following "reserved" special
* characters:
* <dl>
* <dt> '(' <dd> a nested function application expression till ')'
* <dt> '{' <dd> a nested function definition expression till '}'
* <dt> '[' <dd> a numeric or string list expression, till ']'
* <dt> '"' <dd> a String (double quote)
* <dt> "'" <dd> a String (single quote)
* <dt> digits: <dd> a number
* <dt> letters or other specials: <dd> an ID
* </dl>
*
* <p> Variables are lexically scoped inside 'let' expressions or at the top-level
* looked-up in the DKV directly (and must refer to a known type that is valid
* on the execution stack).
*/
public class Rapids {
private final String _str; // Statement to parse and execute
private int _x; // Parse pointer, points to the index of the next character to be consumed
/**
* Parse a Rapids expression string into an Abstract Syntax Tree object.
* @param rapids expression to parse
*/
public static AstRoot parse(String rapids) {
Rapids r = new Rapids(rapids);
AstRoot res = r.parseNext();
if (r.skipWS() != ' ')
throw new IllegalASTException("Syntax error: illegal Rapids expression `" + rapids + "`");
return res;
}
/**
* Execute a single rapids call in a short-lived session
* @param rapids expression to parse
*/
public static Val exec(String rapids) {
Session session = new Session();
try {
H2O.incrementActiveRapidsCounter();
AstRoot ast = Rapids.parse(rapids);
Val val = session.exec(ast, null);
// Any returned Frame has its REFCNT raised by +1, and the end(val) call
// will account for that, copying Vecs as needed so that the returned
// Frame is independent of the Session (which is disappearing).
return session.end(val);
} catch (Throwable ex) {
throw session.endQuietly(ex);
}
finally {
H2O.decrementActiveRapidsCounter();
}
}
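// Illustrative usage (a sketch; assumes the standard '+' builtin is registered
// as in stock H2O):
//   Val v = Rapids.exec("(+ 1 2)");      // parse + execute in a throwaway session
//   AstRoot a = Rapids.parse("[1:3 7]"); // num-list {1,2,3,7}, parsed but not run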
/**
* Compute and return a value in this session. Any returned frame shares
* Vecs with the session (is not deep copied), and so must be deleted by the
* caller (with a Rapids "rm" call) or will disappear on session exit, or is
* a normal global frame.
* @param rapids expression to parse
*/
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
public static Val exec(String rapids, Session session) {
try {
H2O.incrementActiveRapidsCounter();
AstRoot ast = Rapids.parse(rapids);
// Synchronize the session, to stop back-to-back overlapping Rapids calls
// on the same session, which Flow sometimes does
synchronized (session) {
Val val = session.exec(ast, null);
// Any returned Frame has its REFCNT raised by +1, but is exiting the
// session. If it's a global, we simply need to lower the internal refcnts
// (which won't delete on zero cnts because of the global). If it's a
// named temp, the ref cnts are accounted for by being in the temp table.
if (val.isFrame()) {
Frame frame = val.getFrame();
assert frame._key != null : "Returned frame has no key";
session.addRefCnt(frame, -1);
}
return val;
}
}
finally {
H2O.decrementActiveRapidsCounter();
}
}
//--------------------------------------------------------------------------------------------------------------------
// Private
//--------------------------------------------------------------------------------------------------------------------
// Set of characters that cannot appear inside a token
private static Set<Character> invalidTokenCharacters = StringUtils.toCharacterSet("({[]}) \t\r\n\\\"\'");
// Set of characters that cannot appear inside a number. Note that "NaN" or "nan" is also a number.
private static Set<Character> invalidNumberCharacters = StringUtils.toCharacterSet(":,({[]}) \t\r\n\\\"\'");
// List of all "simple" backslash-escape sequences (i.e. those that are only 2-characters long, i.e. '\n')
private static Map<Character, Character> simpleEscapeSequences =
CollectionUtils.createMap(StringUtils.toCharacterArray("ntrfb'\"\\"),
StringUtils.toCharacterArray("\n\t\r\f\b'\"\\"));
/**
* The constructor is private: rapids expression can be parsed into an AST tree, or executed, but the "naked" Rapids
* object has no external purpose.
* @param rapidsStr String containing a Rapids expression.
*/
private Rapids(String rapidsStr) {
_str = rapidsStr;
_x = 0;
}
/**
* Parse and return the next expression from the rapids string.
* '(' a nested function application expression ')'
* '{' a nested function definition expression '}'
* '[' a numeric list expression, till ']'
* '"' a String (double quote): attached_token
* "'" a String (single quote): attached_token
* digits: a double
* letters or other specials: an ID
*/
private AstRoot parseNext() {
switch (skipWS()) {
case '(': return parseFunctionApplication();
case '{': return parseFunctionDefinition();
case '[': return parseList();
case '\"': case '\'':
return new AstStr(string());
case ' ': throw new IllegalASTException("Expected an expression but ran out of text");
default : return parseNumberOrId();
}
}
/**
* Parse "function application" expression, i.e. pattern of the form "(func ...args)"
*/
private AstExec parseFunctionApplication() {
eatChar('(');
ArrayList<AstRoot> asts = new ArrayList<>();
while (skipWS() != ')')
asts.add(parseNext());
eatChar(')');
AstExec res = new AstExec(asts);
if (peek(0) == '-') {
eatChar('-');
eatChar('>');
AstId tmpid = new AstId(token());
res = new AstExec(new AstRoot[]{new AstId("tmp="), tmpid, res});
}
return res;
}
/**
* Parse and return a user defined function of the form "{arg1 arg2 . (expr)}"
*/
private AstFunction parseFunctionDefinition() {
eatChar('{');
// Parse the list of ids
ArrayList<String> ids = new ArrayList<>();
ids.add(""); // 1-based ID list
while (skipWS() != '.') {
String id = token();
if (!Character.isJavaIdentifierStart(id.charAt(0)))
throw new IllegalASTException("variable must be a valid Java identifier: " + id);
for (char c : id.toCharArray())
if (!Character.isJavaIdentifierPart(c))
throw new IllegalASTException("variable must be a valid Java identifier: " + id);
ids.add(id);
}
// Single dot separates the list of ids from the body of the function
eatChar('.');
// Parse the body
AstRoot body = parseNext();
if (skipWS() != '}')
throw new IllegalASTException("Expected the end of the function, but found '" + peek(0) + "'");
eatChar('}');
return new AstFunction(ids, body);
}
/**
* Parse and return a list of tokens: either a list of strings, or a list of numbers.
* We do not support lists of mixed types, or lists containing variables (for now).
*/
private AstParameter parseList() {
eatChar('[');
char nextChar = skipWS();
AstParameter res = isQuote(nextChar)? parseStringList() : parseNumList();
eatChar(']');
return res;
}
/**
* Parse a list of strings. Strings can be either in single- or in double quotes.
*/
private AstStrList parseStringList() {
ArrayList<String> strs = new ArrayList<>(10);
while (isQuote(skipWS())) {
strs.add(string());
if (skipWS() == ',') eatChar(',');
}
return new AstStrList(strs);
}
/**
* Parse a "num list". This could be either a plain list of numbers, or a range, or a list of ranges. For example
* [2 3 4 5 6 7] can also be written as [2:6] or [2:2 4:4:1]. The format of each "range" is `start:count[:stride]`,
* and it denotes the sequence {start, start + stride, ..., start + (count-1)*stride}. Here start and stride may
* be real numbers; however, count must be a positive integer. Negative strides are not allowed.
*/
private AstNumList parseNumList() {
ArrayList<Double> bases = new ArrayList<>();
ArrayList<Double> strides = new ArrayList<>();
ArrayList<Long> counts = new ArrayList<>();
while (skipWS() != ']') {
double base = number();
double count = 1;
double stride = 1;
if (skipWS() == ':') {
eatChar(':');
skipWS();
count = number();
if (count < 1 || ((long) count) != count)
throw new IllegalASTException("Count must be a positive integer, got " + count);
}
if (skipWS() == ':') {
eatChar(':');
skipWS();
stride = number();
if (stride < 0 || Double.isNaN(stride))
throw new IllegalASTException("Stride must be positive, got " + stride);
}
if (count == 1 && stride != 1)
throw new IllegalASTException("If count is 1, then stride must be one (and ignored)");
bases.add(base);
counts.add((long) count);
strides.add(stride);
// Optional comma separating span
if (skipWS() == ',') eatChar(',');
}
return new AstNumList(bases, strides, counts);
}
private AstParameter parseNumberOrId() {
String t = token();
if (NumberUtils.isNumber(t))
try {
return new AstNum(parseDouble(t));
} catch (NumberFormatException e) {
throw new IllegalASTException(e.toString());
}
else
return new AstId(t);
}
/**
* Return the character at the current parse position (or `offset` chars in the future), without advancing it.
* If there are no more characters to peek, return ' '.
*/
private char peek(int offset) {
return _x + offset < _str.length() ? _str.charAt(_x + offset) : ' ';
}
/**
* Consume the next character from the parse stream, throwing an exception if it is not `c`.
*/
private void eatChar(char c) {
if (peek(0) != c)
throw new IllegalASTException("Expected '" + c + "'. Got: '" + peek(0));
_x++;
}
/**
* Advance parse pointer to the first non-whitespace character, and return that character.
* If such non-whitespace character cannot be found, then return ' '.
*/
private char skipWS() {
char c = ' ';
while (_x < _str.length() && isWS(c = peek(0))) _x++;
return c;
}
/**
* Parse a "token" from the input stream. A token is terminated by the next whitespace, or any of the
* following characters: )}],:
*
* NOTE: our notion of "token" is very permissive. We may want to restrict it in the future...
*/
private String token() {
return token(invalidTokenCharacters);
}
private String token(Set<Character> invalidCharacters) {
int start = _x;
while (!invalidCharacters.contains(peek(0))) _x++;
if (start == _x) throw new IllegalASTException("Missing token");
return _str.substring(start, _x);
}
/**
* Parse a number from the token stream.
*/
private double number() {
return parseDouble(token(invalidNumberCharacters));
}
private double parseDouble(String s) {
if (s.toLowerCase().equals("nan")) return Double.NaN;
try {
return Double.valueOf(s);
} catch (NumberFormatException e) {
throw new IllegalASTException(e.toString());
}
}
/**
* Parse a string from the token stream.
*/
private String string() {
char quote = peek(0);
int start = ++_x;
boolean has_escapes = false;
while (_x < _str.length()) {
char c = peek(0);
if (c == '\\') {
has_escapes = true;
char cc = peek(1);
if (simpleEscapeSequences.containsKey(cc)) {
_x += 2;
} else if (cc == 'x') {
_x += 4; // e.g: \x5A
} else if (cc == 'u') {
_x += 6; // e.g: \u1234
} else if (cc == 'U') {
_x += 10; // e.g: \U0010FFFF
} else
throw new IllegalASTException("Invalid escape sequence \\" + cc);
} else if (c == quote) {
_x++;
if (has_escapes) {
StringBuilder sb = new StringBuilder();
for (int i = start; i < _x - 1; i++) {
char ch = _str.charAt(i);
if (ch == '\\') {
char cc = _str.charAt(++i);
if (simpleEscapeSequences.containsKey(cc)) {
sb.append(simpleEscapeSequences.get(cc));
} else {
int n = (cc == 'x')? 2 : (cc == 'u')? 4 : (cc == 'U')? 8 : -1;
int hex = -1;
try {
hex = StringUtils.unhex(_str.substring(i + 1, i + 1 + n));
} catch (NumberFormatException e) {
throw new IllegalASTException(e.toString());
}
if (hex > 0x10FFFF)
throw new IllegalASTException("Illegal unicode codepoint " + hex);
sb.append(Character.toChars(hex));
i += n;
}
} else {
sb.append(ch);
}
}
return sb.toString();
} else {
return _str.substring(start, _x - 1);
}
} else {
_x++;
}
}
throw new IllegalASTException("Unterminated string at " + start);
}
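// Example (illustrative): for the input 'a\\u0041b' (a 6-character \\u escape),
// the scanning loop above steps over the escape as one unit, and the rebuild
// pass decodes its 4 hex digits to 0x41 ('A'), returning "aAb".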
/**
* Return true if `c` is a whitespace character.
*/
private static boolean isWS(char c) {
return c == ' ' || c == '\t' || c == '\n' || c == '\r';
}
/**
* Return true if `c` is a quote character.
*/
private static boolean isQuote(char c) {
return c == '\'' || c == '\"';
}
// Return unparsed text, useful in error messages and debugging
// private String unparsed() {
// return _str.substring(_x, _str.length());
// }
// public AstRoot throwErr(String msg) {
// int idx = _str.length() - 1;
// int lo = _x, hi = idx;
//
// if (idx < lo) {
// lo = idx;
// hi = lo;
// }
// String s = msg + '\n' + _str + '\n';
// int i;
// for (i = 0; i < lo; i++) s += ' ';
// s += '^';
// i++;
// for (; i < hi; i++) s += '-';
// if (i <= hi) s += '^';
// s += '\n';
// throw new IllegalASTException(s);
// }
public static class IllegalASTException extends IllegalArgumentException {
public IllegalASTException(String s) {
super(s);
}
}
}
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/Session.java
package water.rapids;
import water.*;
import water.fvec.Frame;
import water.fvec.Vec;
import water.nbhm.*;
import water.rapids.ast.AstFunction;
import water.rapids.ast.AstRoot;
import water.rapids.ast.prims.operators.AstPlus;
import water.util.Log;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
/**
* Session is a long-lasting environment supporting caching and Copy-On-Write optimization of Vecs. This session may
* last over many different Rapids calls (provided they refer to the same session). When the session ends, all the
* cached Vecs will be deleted (except those in user facing Frames).
**/
public class Session {
// How often to perform sanity checks: 0 disables them, 1000 checks on every call.
private final static int sanityChecksFrequency = 1000;
private static int sanityChecksCounter = 0;
private String id;
// --------------------------------------------------------------------------
// Copy On Write optimization
// --------------------------------------------------------------------------
// COW optimization: instead of copying Vecs, they are "virtually" copied by
// simply pointer-sharing, and raising the ref-cnt here. Losing a copy can
// lower the ref-cnt, and when it goes to zero the Vec can be removed. If
// the Vec needs to be modified, and the ref-cnt is 1 - an update-in-place
// can happen. Otherwise a true data copy is made, and the private copy is
// modified.
// Ref-counts per Vec. Always positive; zero is removed from the table;
// negative is an error. At the end of any given Rapids expression the
// counts should match all the Vecs in the FRAMES set.
private NonBlockingHashMap<Key<Vec>, Integer> REFCNTS = new NonBlockingHashMap<>();
// Frames tracked by this Session and alive to the next Rapids call. When
// the whole session ends, these frames can be removed from the DKV. These
// Frames can share Vecs amongst themselves (tracked by the REFCNTS) and also
// with other global frames.
private NonBlockingHashMap<Key, Frame> FRAMES = new NonBlockingHashMap<>();
// Vec that came from global frames, and are considered immutable. Rapids
// will always copy these Vecs before mutating or deleting. Total visible
// refcnts are effectively the normal refcnts plus 1 for being in the GLOBALS
// set.
private NonBlockingHashSet<Key<Vec>> GLOBALS = new NonBlockingHashSet<>();
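// Illustrative scenario (not from the source): if a global Frame G and a
// session temp T share Vec v, then REFCNTS[v] == 1 (counting T only) while
// membership in GLOBALS adds a virtual +1, so getRefCnt(v) == 2. Removing T
// drops REFCNTS[v] to 0, but v stays alive because it is still a global.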
private final Properties properties = new Properties();
/**
* Constructor
*/
public Session() {
this(Key.make().toString());
}
public Session(String id) {
this.id = id;
cluster_init();
}
/** Return this session's id. */
public String id() {
return id;
}
public void setProperty(String key, String value) {
if (value != null) {
properties.setProperty(key, value);
} else {
properties.remove(key);
}
}
public String getProperty(String key, String defaultValue) {
return properties.getProperty(key, defaultValue);
}
/**
* Execute an AstRoot in the current Session with much assertion-checking
* @param ast Rapids expression to execute
* @param scope ?
* @return the result from the Rapids expression
*/
public Val exec(AstRoot ast, AstFunction scope) {
sanity_check_refs(null);
// Execute
Env env = new Env(this);
env._scope = scope;
Val val = ast.exec(env); // Execute
assert env.sp() == 0; // Stack balanced at end
sanity_check_refs(val);
return val; // Can return a frame, which may point to session-shared Vecs
}
/**
* Normal session exit. Returned Frames are fully deep-copied and are the responsibility of the caller to delete.
* Returned Frames have their refcnts currently up by 1 (for the returned value itself).
*/
public Val end(Val returning) {
sanity_check_refs(returning);
// Remove all temp frames
Futures fs = new Futures();
for (Frame fr : FRAMES.values()) {
fs = downRefCnt(fr, fs); // Remove internal Vecs one by one
DKV.remove(fr._key, fs); // Shallow remove, internal Vecs removed 1-by-1
}
fs.blockForPending();
FRAMES.clear(); // No more temp frames
// Copy (as needed) so the returning Frame is completely independent of the
// (disappearing) session.
if (returning != null && returning.isFrame()) {
Frame fr = returning.getFrame();
Key<Vec>[] vecs = fr.keys();
for (int i = 0; i < vecs.length; i++) {
_addRefCnt(vecs[i], -1); // Returning frame has refcnt +1, lower it now; should go to zero internal refcnts.
if (GLOBALS.contains(vecs[i])) // Copy if shared with globals
fr.replace(i, vecs[i].get().makeCopy());
}
}
GLOBALS.clear(); // No longer tracking globals
sanity_check_refs(null);
REFCNTS.clear();
return returning;
}
/**
* The Rapids call threw an exception. Best-effort cleanup, no more exceptions
*/
public RuntimeException endQuietly(Throwable ex) {
try {
GLOBALS.clear();
Futures fs = new Futures();
for (Frame fr : FRAMES.values()) {
for (Key<Vec> vec : fr.keys()) {
Integer I = REFCNTS.get(vec);
int i = (I == null ? 0 : I) - 1;
if (i > 0) REFCNTS.put(vec, i);
else {
REFCNTS.remove(vec);
Keyed.remove(vec, fs, true);
}
}
DKV.remove(fr._key, fs); // Shallow remove, internal Vecs removed 1-by-1
}
fs.blockForPending();
FRAMES.clear();
REFCNTS.clear();
} catch (Exception ex2) {
Log.warn("Exception " + ex2 + " suppressed while cleaning up Rapids Session after already throwing " + ex);
}
return ex instanceof RuntimeException ? (RuntimeException) ex : new RuntimeException(ex);
}
/**
* Internal ref-counts (not counting globals, which only ever keep things alive and always add a virtual +1 to the
* refcnt).
*/
private int _getRefCnt(Key<Vec> vec) {
Integer I = REFCNTS.get(vec);
assert I == null || I > 0; // No zero or negative counts
return I == null ? 0 : I;
}
private int _putRefCnt(Key<Vec> vec, int i) {
assert i >= 0; // No negative counts
if (i > 0) REFCNTS.put(vec, i);
else REFCNTS.remove(vec);
return i;
}
/**
* Bump internal count, not counting globals
*/
private int _addRefCnt(Key<Vec> vec, int i) {
return _putRefCnt(vec, _getRefCnt(vec) + i);
}
/**
* External refcnt: internal refcnt plus 1 for being global
*/
private int getRefCnt(Key<Vec> vec) {
return _getRefCnt(vec) + (GLOBALS.contains(vec) ? 1 : 0);
}
/**
* RefCnt +i this Vec; Global Refs can be alive with zero internal counts
*/
private int addRefCnt(Key<Vec> vec, int i) {
return _addRefCnt(vec, i) + (GLOBALS.contains(vec) ? 1 : 0);
}
/**
* RefCnt +i all Vecs in this Frame.
*/
Frame addRefCnt(Frame fr, int i) {
if (fr != null) // Allow and ignore null Frame, easier calling convention
for (Key<Vec> vec : fr.keys()) _addRefCnt(vec, i);
return fr; // Flow coding
}
/**
* Found in the DKV, if not a tracked TEMP make it a global
*/
Frame addGlobals(Frame fr) {
if (!FRAMES.containsKey(fr._key))
Collections.addAll(GLOBALS, fr.keys());
return fr; // Flow coding
}
/**
* Track a freshly minted tmp frame. This frame can be removed when the session ends (unlike global frames), or
* anytime during the session when the client removes it.
*/
public Frame track_tmp(Frame fr) {
assert fr._key != null; // Temps have names
FRAMES.put(fr._key, fr); // Track for session
addRefCnt(fr, 1); // Refcnt is also up: these Vecs stick around after single Rapids call for the next one
DKV.put(fr); // Into DKV, so e.g. Flow can view for debugging
return fr; // Flow coding
}
/**
* Remove and delete a session-tracked frame.
* Remove from all session tracking spaces.
* Remove any newly-unshared Vecs, but keep the shared ones.
*/
public void remove(Frame fr) {
if (fr == null) return;
Futures fs = new Futures();
if (!FRAMES.containsKey(fr._key)) { // In globals and not temps?
for (Key<Vec> vec : fr.keys()) {
GLOBALS.remove(vec); // Not a global anymore
if (REFCNTS.get(vec) == null) // If not shared with temps
Keyed.remove(vec, fs, true); // Remove unshared dead global
}
} else { // Else a temp and not a global
fs = downRefCnt(fr, fs); // Standard down-ref counting of all Vecs
FRAMES.remove(fr._key); // And remove from temps
}
DKV.remove(fr._key, fs); // Shallow remove, internal Vecs removed 1-by-1
fs.blockForPending();
}
/**
* Lower refcnt of all Vecs in frame, deleting Vecs that go to zero refs.
* Passed in a Futures which is returned, and set to non-null if something gets deleted.
*/
Futures downRefCnt(Frame fr, Futures fs) {
for (Key<Vec> vec : fr.keys()) // Refcnt -1 all Vecs
if (addRefCnt(vec, -1) == 0) {
if (fs == null) fs = new Futures();
Keyed.remove(vec, fs, true);
}
return fs;
}
/**
* Update a global ID, maintaining sharing of Vecs
*/
public Frame assign(Key<Frame> id, Frame src) {
if (FRAMES.containsKey(id)) throw new IllegalArgumentException("Cannot reassign temp " + id);
Futures fs = new Futures();
// Vec lifetime invariant: Globals do not share with other globals (but can
// share with temps). All the src Vecs are about to become globals. If
// the ID already exists, the global Vecs within it are about to die and thus
// may be deleted.
Frame fr = DKV.getGet(id);
if (fr != null) { // Prior frame exists
for (Key<Vec> vec : fr.keys()) {
if (GLOBALS.remove(vec) && _getRefCnt(vec) == 0)
Keyed.remove(vec, fs, true); // Remove unused global vec
}
}
// Copy (defensive) the base vecs array. Then copy any vecs which are
// already globals - this new global must be independent of any other
// global Vecs - because global Vecs get side-effected by unrelated
// operations.
Vec[] svecs = src.vecs().clone();
for (int i = 0; i < svecs.length; i++)
if (GLOBALS.contains(svecs[i]._key))
svecs[i] = svecs[i].makeCopy();
// Make and install new global Frame
Frame fr2 = new Frame(id, src._names.clone(), svecs);
DKV.put(fr2, fs);
addGlobals(fr2);
fs.blockForPending();
return fr2;
}
/**
* Support C-O-W optimizations: the given list of columns is about to be updated. Copy them as needed and
* replace in the Frame. Return the updated Frame vecs for flow-coding.
*/
public Vec[] copyOnWrite(Frame fr, int[] cols) {
Vec did_copy = null; // Did a copy?
Vec[] vecs = fr.vecs();
for (int col : cols) {
Vec vec = vecs[col];
int refcnt = getRefCnt(vec._key);
assert refcnt > 0;
if (refcnt > 1) // If refcnt is 1, we allow the update to happen in place
fr.replace(col, (did_copy = vec.makeCopy()));
}
if (did_copy != null && fr._key != null) DKV.put(fr); // Then update frame in the DKV
return vecs;
}
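// Illustrative: if cols = {0, 2} and vec 0 is shared (refcnt 2) while vec 2
// is private (refcnt 1), only vec 0 is copied before the write; vec 2 is
// mutated in place since no other frame can observe it.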
/**
* Check that ref counts are in a consistent state.
* This should only be called between calls to Rapids expressions (otherwise it may produce false positives).
* @param returning If sanity check is done at the end of the session, there is a value being returned. This value
* might be a Frame (which would not be in FRAMES). So we pass the "returning" value explicitly,
* so that all its references can be properly accounted for.
*/
private void sanity_check_refs(Val returning) {
if ((sanityChecksCounter++) % 1000 >= sanityChecksFrequency) return;
// Compute refcnts from tracked frames only. Since we are between Rapids
// calls the only tracked Vecs should be those from tracked frames.
NonBlockingHashMap<Key<Vec>, Integer> refcnts = new NonBlockingHashMap<>(REFCNTS.size());
for (Frame fr : FRAMES.values())
for (Key<Vec> vec : fr.keys()) {
Integer count = refcnts.get(vec);
refcnts.put(vec, count == null ? 1 : count + 1);
}
// Now account for the returning frame (if it is a Frame). Note that it is entirely possible that this frame is
// already in the FRAMES list; however, we need to account for it anyway -- this is how Env works...
if (returning != null && returning.isFrame())
for (Key<Vec> vec : returning.getFrame().keys()) {
Integer count = refcnts.get(vec);
refcnts.put(vec, count == null ? 1 : count + 1);
}
// Now compare computed refcnts to cached REFCNTS.
// First check that every Vec in computed refcnt is also in REFCNTS, with equal counts.
for (Map.Entry<Key<Vec>,Integer> pair : refcnts.entrySet()) {
Key<Vec> vec = pair.getKey();
Integer count = pair.getValue();
Integer savedCount = REFCNTS.get(vec);
if (savedCount == null) throw new IllegalStateException("REFCNTS missing vec " + vec);
if (count.intValue() != savedCount.intValue())
throw new IllegalStateException(
"Ref-count mismatch for vec " + vec + ": REFCNT = " + savedCount + ", should be " + count);
}
// Then check that every cached REFCNT is in the computed set as well.
if (refcnts.size() != REFCNTS.size())
for (Map.Entry<Key<Vec>, Integer> pair : REFCNTS.entrySet()) {
if (!refcnts.containsKey(pair.getKey()))
throw new IllegalStateException(
"REFCNTs contains an extra vec " + pair.getKey() + ", count = " + pair.getValue());
}
}
// To avoid a class-circularity hang, we need to force other members of the
// cluster to load the Rapids & AstRoot classes BEFORE trying to execute code
// remotely, because e.g. ddply runs functions on all nodes.
private static volatile boolean _initialized; // One-shot init
static void cluster_init() {
if (_initialized) return;
// Touch a common class to force loading
new MRTask() {
@Override
public void setupLocal() {
new AstPlus();
}
}.doAllNodes();
_initialized = true;
}
}
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/SingleThreadRadixOrder.java
package water.rapids;
// The general principle here is that several parallel, tight, branch-free
// loops are faster than one heavy DKV pass per row.
// It is intended that several of these SingleThreadRadixOrder tasks run on the
// same node, to utilize the cores available. The initial MSB needs to be split
// by num nodes * cpus per node; e.g. 256 is pretty good for 10 nodes of 32
// cores. Later, use 9 bits, or a few more bits accordingly.
// It's this 256 * 4kB = 1MB that needs to be < cache per core for cache-write
// efficiency in MoveByFirstByte(). 10 bits (1024 threads) would be 4MB, which
// is still < L2.
// Since o[] and x[] are arrays here (not Vecs) it's harder to see how to
// parallelize inside this function. Therefore avoid that issue by using more
// threads in calling split.
import water.*;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.ArrayUtils;
import java.util.Arrays;
class SingleThreadRadixOrder extends DTask<SingleThreadRadixOrder> {
private final Frame _fr;
private final int _MSBvalue; // only needed to be able to return the number of groups back to the caller RadixOrder
private final int _keySize, _batchSize;
private final boolean _isLeft;
private transient long _o[/*batch*/][];
private transient byte _x[/*batch*/][];
private transient long _otmp[][];
private transient byte _xtmp[][];
// TEMPs
private transient long counts[][];
private transient byte keytmp[];
//public long _groupSizes[][];
final long _mergeId;
// outputs ...
// o and x are changed in-place always
// iff _groupsToo==true then the following are allocated and returned
SingleThreadRadixOrder(Frame fr, boolean isLeft, int batchSize, int keySize, /*long nGroup[],*/ int MSBvalue,
long mergeId) {
_fr = fr;
_isLeft = isLeft;
_batchSize = batchSize;
_keySize = keySize;
_MSBvalue = MSBvalue;
_mergeId = mergeId;
}
@Override
public void compute2() {
keytmp = MemoryManager.malloc1(_keySize);
counts = new long[_keySize][256];
Key k;
SplitByMSBLocal.MSBNodeHeader[] MSBnodeHeader = new SplitByMSBLocal.MSBNodeHeader[H2O.CLOUD.size()];
long numRows =0;
for (int n=0; n<H2O.CLOUD.size(); n++) {
// Log.info("Getting MSB " + MSBvalue + " Node Header from node " + n + "/" + H2O.CLOUD.size() + " for Frame " + _fr._key);
// Log.info("Getting");
k = SplitByMSBLocal.getMSBNodeHeaderKey(_isLeft, _MSBvalue, n, _mergeId);
MSBnodeHeader[n] = DKV.getGet(k);
if (MSBnodeHeader[n]==null) continue;
DKV.remove(k);
numRows += ArrayUtils.sum(MSBnodeHeader[n]._MSBnodeChunkCounts); // This numRows is split into nbatch batches on that node.
// This header has the counts of each chunk (the ordered chunk numbers on that node)
}
if (numRows == 0) { tryComplete(); return; }
// Allocate final _o and _x for this MSB which is gathered together on this
// node from the other nodes.
// TO DO: as Arno suggested, wrap up into class for fixed width batching
// (to save espc overhead)
int nbatch = (int) ((numRows-1) / _batchSize +1); // at least one batch.
// the size of the last batch (could be batchSize, too if happens to be
// exact multiple of batchSize)
int lastSize = (int) (numRows - (nbatch-1)*_batchSize);
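// e.g. (illustrative): numRows=1000 and _batchSize=300 gives
// nbatch = (999/300)+1 = 4 and lastSize = 1000 - 3*300 = 100.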
_o = new long[nbatch][];
_x = new byte[nbatch][];
int b;
for (b = 0; b < nbatch-1; b++) {
_o[b] = MemoryManager.malloc8(_batchSize);
_x[b] = MemoryManager.malloc1(_batchSize * _keySize);
}
_o[b] = MemoryManager.malloc8(lastSize);
_x[b] = MemoryManager.malloc1(lastSize * _keySize);
SplitByMSBLocal.OXbatch ox[/*node*/] = new SplitByMSBLocal.OXbatch[H2O.CLOUD.size()];
int oxBatchNum[/*node*/] = new int[H2O.CLOUD.size()]; // which batch of OX are we on from that node? Initialized to 0.
for (int node=0; node<H2O.CLOUD.size(); node++) { //TO DO: why is this serial? Relying on
k = SplitByMSBLocal.getNodeOXbatchKey(_isLeft, _MSBvalue, node, /*batch=*/0, _mergeId);
// assert k.home(); // TODO: PUBDEV-3074
ox[node] = DKV.getGet(k); // get the first batch for each node for this MSB
DKV.remove(k);
}
int oxOffset[] = MemoryManager.malloc4(H2O.CLOUD.size());
int oxChunkIdx[] = MemoryManager.malloc4(H2O.CLOUD.size()); // that node has n chunks and which of those are we currently on?
int targetBatch = 0, targetOffset = 0, targetBatchRemaining = _batchSize;
final Vec vec = _fr.anyVec();
assert vec != null;
for (int c=0; c<vec.nChunks(); c++) {
int fromNode = vec.chunkKey(c).home_node().index(); // each chunk in the column may be on different nodes
// See long comment at the top of SendSplitMSB. One line from there repeated here :
// " When the helper node (i.e. this one, now) (i.e the node doing all
// the A's) gets the A's from that node, it must stack all the nodes' A's
// with the A's from the other nodes in chunk order in order to maintain
// the original order of the A's within the global table. "
// TODO: We could process these in node order and or/in parallel if we
// cumulated the counts first to know the offsets - should be doable and
// high value
if (MSBnodeHeader[fromNode] == null) continue;
// magically this works, given the outer for loop through global
// chunk. Relies on LINE_ANCHOR_1 above.
int numRowsToCopy = MSBnodeHeader[fromNode]._MSBnodeChunkCounts[oxChunkIdx[fromNode]++];
// _MSBnodeChunkCounts is a vector of the number of contributions from
// each Vec chunk. Since each chunk's length fits in an int, each count must be
// less than that too, so an int suffices. The set of data corresponding to the
// Vec chunk contributions is stored packed in the batched vectors _o and _x.
// at most batchSize remaining. No need to actually put the number of rows left in here
int sourceBatchRemaining = _batchSize - oxOffset[fromNode];
while (numRowsToCopy > 0) { // No need for class now, as this is a bit different to the other batch copier. Two isn't too bad.
int thisCopy = Math.min(numRowsToCopy, Math.min(sourceBatchRemaining, targetBatchRemaining));
System.arraycopy(ox[fromNode]._o, oxOffset[fromNode], _o[targetBatch], targetOffset, thisCopy);
System.arraycopy(ox[fromNode]._x, oxOffset[fromNode]*_keySize, _x[targetBatch], targetOffset*_keySize, thisCopy*_keySize);
numRowsToCopy -= thisCopy;
oxOffset[fromNode] += thisCopy; sourceBatchRemaining -= thisCopy;
targetOffset += thisCopy; targetBatchRemaining -= thisCopy;
if (sourceBatchRemaining == 0) {
// fetch the next batch :
k = SplitByMSBLocal.getNodeOXbatchKey(_isLeft, _MSBvalue, fromNode, ++oxBatchNum[fromNode], _mergeId);
assert k.home();
ox[fromNode] = DKV.getGet(k);
DKV.remove(k);
if (ox[fromNode] == null) {
// if the last chunk's worth of rows fills a batch exactly, the getGet above will have returned null.
// TODO: Check with Cliff that a known fetch of a non-existent key is ok e.g. won't cause a delay/block? If ok, leave as good check.
int numNonZero = 0; for (int tmp : MSBnodeHeader[fromNode]._MSBnodeChunkCounts) if (tmp>0) numNonZero++;
assert oxBatchNum[fromNode]==numNonZero;
assert ArrayUtils.sum(MSBnodeHeader[fromNode]._MSBnodeChunkCounts) % _batchSize == 0;
}
oxOffset[fromNode] = 0;
sourceBatchRemaining = _batchSize;
}
if (targetBatchRemaining == 0) {
targetBatch++;
targetOffset = 0;
targetBatchRemaining = _batchSize;
}
}
}
// We now have _o and _x collated from all the contributing nodes, in the correct original order.
// TODO save this allocation and reuse per thread? Or will heap just take care of it. Time this allocation and copy as step 1 anyway.
_xtmp = new byte[_x.length][];
_otmp = new long[_o.length][];
assert _x.length == _o.length; // i.e. aligned batch size between x and o (think 20 bytes keys and 8 bytes of long in o)
// There seems to be no deep clone available in Java; System.arraycopy would
// need the targets allocated first, so use Arrays.copyOf per batch.
for (int i=0; i<_x.length; i++) {
_xtmp[i] = Arrays.copyOf(_x[i], _x[i].length);
_otmp[i] = Arrays.copyOf(_o[i], _o[i].length);
}
// TO DO: a way to share this working memory between threads.
// Just create enough for the 4 threads active at any one time. Not 256 allocations and releases.
// We need o[] and x[] in full for the result. But this way we don't need full size xtmp[] and otmp[] at any single time.
// Currently Java will allocate and free these xtmp and otmp and maybe it does good enough job reusing heap that we don't need to explicitly optimize this reuse.
// Perhaps iterating this task through the largest bins first will help java reuse heap.
assert(_o != null);
assert(numRows > 0);
// The main work. Radix sort this batch ...
run(0, numRows, _keySize-1); // if keySize is 6 bytes, first byte is byte 5
// don't need to clear these now using private transient
// _counts = null;
// keytmp = null;
//_nGroup = null;
// tell the world how many batches and rows for this MSB
OXHeader msbh = new OXHeader(_o.length, numRows, _batchSize);
Futures fs = new Futures();
DKV.put(getSortedOXHeaderKey(_isLeft, _MSBvalue, _mergeId), msbh, fs, true);
assert _o.length == _x.length;
for (b=0; b<_o.length; b++) {
SplitByMSBLocal.OXbatch tmp = new SplitByMSBLocal.OXbatch(_o[b], _x[b]);
Value v = new Value(SplitByMSBLocal.getSortedOXbatchKey(_isLeft, _MSBvalue, b, _mergeId), tmp);
DKV.put(v._key, v, fs, true); // the OXbatchKey's on this node will be reused for the new keys
v.freeMem();
}
// TODO: check numRows is the total of the _x[b] lengths
fs.blockForPending();
tryComplete();
}
static Key getSortedOXHeaderKey(boolean isLeft, int MSBvalue, long mergeId) {
// This key holds data merged together from all nodes, so its data is not "from"
// any particular node. Therefore the node number should not be in the key.
return Key.make("__radix_order__SortedOXHeader_MSB" + MSBvalue + "_" + mergeId + (isLeft ? "_LEFT" : "_RIGHT")); // If we don't say this it's random ... (byte) 1 /*replica factor*/, (byte) 31 /*hidden user-key*/, true, H2O.SELF);
}
static class OXHeader extends Iced<OXHeader> {
OXHeader(int batches, long numRows, int batchSize) { _nBatch = batches; _numRows = numRows; _batchSize = batchSize; }
final int _nBatch;
final long _numRows;
final int _batchSize;
}
private int keycmp(byte x[], int xi, byte y[], int yi) {
// Same return value as strcmp in C. <0 => xi<yi
xi *= _keySize; yi *= _keySize;
int len = _keySize;
while (len > 1 && x[xi] == y[yi]) { xi++; yi++; len--; }
return ((x[xi] & 0xFF) - (y[yi] & 0xFF)); // 0xFF for getting back from -1 to 255
}
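// Illustrative: comparing single-byte keys 0x01 and 0xFF, the masked compare
// yields 1 - 255 < 0, so 0x01 correctly sorts first; without & 0xFF the byte
// 0xFF would be read as -1 and would wrongly sort ahead of 0x01.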
// orders both x and o by reference in-place. Fast for small vectors, low
// overhead. don't be tempted to binsearch backwards here because have to
// shift anyway
public void insert(long start, /*only for small len so len can be type int*/int len) {
int batch0 = (int) (start / _batchSize);
int batch1 = (int) ((start+len-1) / _batchSize);
long origstart = start; // just for when straddle batch boundaries
int len0 = 0; // same
byte _xbatch[];
long _obatch[];
if (batch1 != batch0) {
// small len straddles a batch boundary. Unlikely to happen often since len<=200
assert batch0 == batch1-1;
len0 = _batchSize - (int)(start % _batchSize);
// copy two halves to contiguous temp memory, do the below, then split it back to the two halves afterwards.
// Straddles batches very rarely (at most once per batch) so no speed impact at all.
_xbatch = new byte[len * _keySize];
System.arraycopy(_x[batch0], (int)((start % _batchSize)*_keySize),_xbatch, 0, len0*_keySize);
System.arraycopy( _x[batch1], 0,_xbatch, len0*_keySize, (len-len0)*_keySize);
_obatch = new long[len];
System.arraycopy(_o[batch0], (int)(start % _batchSize), _obatch, 0, len0);
System.arraycopy(_o[batch1], 0, _obatch, len0, len-len0);
start = 0;
} else {
_xbatch = _x[batch0]; // taking this outside the loop does indeed make quite a big difference (hotspot isn't catching this, then)
_obatch = _o[batch0];
}
int offset = (int) (start % _batchSize);
for (int i=1; i<len; i++) { // like bubble sort
int cmp = keycmp(_xbatch, offset+i, _xbatch, offset+i-1); // TO DO: we don't need to compare the whole key here. Set cmpLen < keySize
if (cmp < 0) {
System.arraycopy(_xbatch, (offset+i)*_keySize, keytmp, 0, _keySize);
int j = i-1;
long otmp = _obatch[offset+i];
do {
System.arraycopy(_xbatch, (offset+j)*_keySize, _xbatch, (offset+j+1)*_keySize, _keySize);
_obatch[offset+j+1] = _obatch[offset+j];
j--;
} while (j >= 0 && keycmp(keytmp, 0, _xbatch, offset+j)<0);
System.arraycopy(keytmp, 0, _xbatch, (offset+j+1)*_keySize, _keySize);
_obatch[offset + j + 1] = otmp;
}
}
if (batch1 != batch0) {
// Put the sorted data back into original two places straddling the boundary
System.arraycopy(_xbatch, 0,_x[batch0], (int)(origstart % _batchSize) *_keySize, len0*_keySize);
System.arraycopy(_xbatch, len0*_keySize,_x[batch1], 0, (len-len0)*_keySize);
System.arraycopy( _obatch, 0,_o[batch0], (int)(origstart % _batchSize), len0);
System.arraycopy(_obatch, len0,_o[batch1], 0, len-len0);
}
}
public void run(final long start, final long len, final int Byte) {
if (len < 200) { // N_SMALL=200 is guess based on limited testing. Needs calibrate().
// Was 50 based on sum(1:50)=1275 worst -vs- 256 cummulate + 256 memset +
// allowance since reverse order is unlikely.
insert(start, (int)len); // when nalast==0, iinsert will be called only from within iradix.
// TO DO: inside insert it doesn't need to compare the bytes so far as
// they're known equal, so pass Byte (NB: not Byte-1) through to insert()
// TO DO: Maybe transposing keys to be a set of _keySize byte columns
// might in fact be quicker - no harm trying. What about long and varying
// length string keys?
return;
}
final int batch0 = (int) (start / _batchSize);
final int batch1 = (int) ((start+len-1) / _batchSize);
// could well span more than one boundary when very large number of rows.
final long thisHist[] = counts[Byte];
// thisHist reused and carefully set back to 0 below so we don't need to clear it now
int idx = (int)(start%_batchSize)*_keySize + _keySize-Byte-1;
int bin=-1; // the last bin incremented. Just to see if there is only one bin with a count.
int thisLen = (int)Math.min(len, _batchSize - start%_batchSize);
final int nbatch = batch1-batch0+1; // number of batches this span of len covers. Usually 1. Minimum 1.
for (int b=0; b<nbatch; b++) {
// taking this outside the loop below does indeed make quite a big difference (hotspot isn't catching this, then)
byte _xbatch[] = _x[batch0+b];
for (int i = 0; i < thisLen; i++) {
bin = 0xff & _xbatch[idx];
thisHist[bin]++;
idx += _keySize;
// maybe TO DO: shorten key by 1 byte on each iteration, so we only
// need to thisx && 0xFF. No, because we need for construction of
// final table key columns.
}
idx = _keySize-Byte-1;
thisLen = (b==nbatch-2/*next iteration will be last batch*/ ? (int)((start+len)%_batchSize) : _batchSize);
// thisLen will be set to _batchSize for the middle batches when nbatch>=3
}
if (thisHist[bin] == len) {
// one bin has count len and the rest zero => next byte quick
thisHist[bin] = 0; // important, clear for reuse
if (Byte != 0)
run(start, len, Byte-1);
return;
}
long rollSum = 0;
for (int c = 0; c < 256; c++) {
if (rollSum == len) break; // done, all other bins are zero, no need to loop through them all
final long tmp = thisHist[c];
// important to skip zeros for logic below to undo cumulate. Worth the
// branch to save a deeply iterative memset back to zero
if (tmp == 0) continue;
thisHist[c] = rollSum;
rollSum += tmp;
}
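// Illustrative: with len=5 and counts {2, 0, 3, 0, ...}, this loop rewrites
// thisHist to exclusive offsets {0, 0, 2, 0, ...} and stops once rollSum == 5;
// untouched zero bins stay 0 and are skipped by the cleanup loop at the end.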
// Sigh. Now deal with batches here as well because Java doesn't have 64bit indexing.
int oidx = (int)(start%_batchSize);
int xidx = oidx*_keySize + _keySize-Byte-1;
thisLen = (int)Math.min(len, _batchSize - start%_batchSize);
for (int b=0; b<nbatch; b++) {
// taking these outside the loop below does indeed make quite a big
// difference (hotspot isn't catching this, then)
final long _obatch[] = _o[batch0+b];
final byte _xbatch[] = _x[batch0+b];
for (int i = 0; i < thisLen; i++) {
long target = thisHist[0xff & _xbatch[xidx]]++;
// now always write to the beginning of _otmp and _xtmp just to reuse the first hot pages
_otmp[(int)(target/_batchSize)][(int)(target%_batchSize)] = _obatch[oidx+i]; // this must be kept in 8 bytes longs
System.arraycopy(_xbatch, (oidx+i)*_keySize, _xtmp[(int)(target/_batchSize)], (int)(target%_batchSize)*_keySize, _keySize );
xidx += _keySize;
// Maybe TO DO: this can be variable byte width and smaller widths as
// descend through bytes (TO DO: reverse byte order so always doing &0xFF)
}
xidx = _keySize-Byte-1;
oidx = 0;
thisLen = (b==nbatch-2/*next iteration will be last batch*/ ? (int)((start+len)%_batchSize) : _batchSize);
}
// now copy _otmp and _xtmp back over _o and _x from the start position, allowing for boundaries
// _o, _x, _otmp and _xtmp all have the same _batchsize
runCopy(start,len,_keySize,_batchSize,_otmp,_xtmp,_o,_x);
long itmp = 0;
for (int i=0; i<256; i++) {
if (thisHist[i]==0) continue;
final long thisgrpn = thisHist[i] - itmp;
if( !(thisgrpn == 1 || Byte == 0) )
run(start+itmp, thisgrpn, Byte-1);
itmp = thisHist[i];
thisHist[i] = 0; // important, to save clearing counts on next iteration
}
}
// Hot loop, pulled out from the main run code
private static void runCopy(final long start, final long len, final int keySize, final int batchSize, final long otmp[][], final byte xtmp[][], final long o[][], final byte x[][]) {
// now copy _otmp and _xtmp back over _o and _x from the start position, allowing for boundaries
// _o, _x, _otmp and _xtmp all have the same _batchsize
// Would be really nice if Java had 64bit indexing to save programmer time.
long numRowsToCopy = len;
int sourceBatch = 0, sourceOffset = 0;
int targetBatch = (int)(start / batchSize), targetOffset = (int)(start % batchSize);
int targetBatchRemaining = batchSize - targetOffset; // 'remaining' means of the full batch, not of the numRowsToCopy
int sourceBatchRemaining = batchSize - sourceOffset; // at most batchSize remaining. No need to actually put the number of rows left in here
while (numRowsToCopy > 0) { // TO DO: put this into class as well, to ArrayCopy into batched
final int thisCopy = (int)Math.min(numRowsToCopy, Math.min(sourceBatchRemaining, targetBatchRemaining));
System.arraycopy(otmp[sourceBatch], sourceOffset, o[targetBatch], targetOffset, thisCopy);
System.arraycopy(xtmp[sourceBatch], sourceOffset*keySize, x[targetBatch], targetOffset*keySize, thisCopy*keySize);
numRowsToCopy -= thisCopy;
sourceOffset += thisCopy; sourceBatchRemaining -= thisCopy;
targetOffset += thisCopy; targetBatchRemaining -= thisCopy;
if (sourceBatchRemaining == 0) { sourceBatch++; sourceOffset = 0; sourceBatchRemaining = batchSize; }
if (targetBatchRemaining == 0) { targetBatch++; targetOffset = 0; targetBatchRemaining = batchSize; }
// 'source' and 'target' deliberately use same-length variable names, and long lines are used so we
// can easily match them up vertically to ensure they are the same
}
}
}
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/SortCombine.java
package water.rapids;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.parser.BufferedString;
import water.util.Log;
import java.util.Arrays;
/**
* Before this class is called, sorting has been completed on the sorted columns. The job of this class is to
* gather the sorted rows of one MSB into chunks which will later be stitched together to form the whole frame.
*/
class SortCombine extends DTask<SortCombine> {
long _numRowsInResult = 0; // returned to caller, so not transient
int _chunkSizes[]; // TODO: only _chunkSizes.length is needed by caller, so return that length only
double _timings[];
final FFSB _leftSB;
private transient KeyOrder _leftKO;
private transient long _leftFrom;
private transient int _retBatchSize;
private int[][] _numRowsPerCidx;
private int _chunkNum;
private boolean[] _stringCols;
private boolean[] _intCols;
final SingleThreadRadixOrder.OXHeader _leftSortedOXHeader;
final long _mergeId;
// around the cluster.
static class FFSB extends Iced<FFSB> {
private final Frame _frame;
private final Vec _vec;
private final int _chunkNode[]; // Chunk homenode index
final int _msb;
FFSB(Frame frame, int msb) {
assert -1 <= msb && msb <= 255; // left ranges from 0 to 255, right from -1 to 255
_frame = frame;
_msb = msb;
// Create fast lookups to go from chunk index to node index of that chunk
Vec vec = _vec = frame.anyVec();
_chunkNode = vec == null ? null : MemoryManager.malloc4(vec.nChunks());
if (vec == null) return; // Zero-columns for Sort
for (int i = 0; i < _chunkNode.length; i++)
_chunkNode[i] = vec.chunkKey(i).home_node().index();
}
}
SortCombine(FFSB leftSB, SingleThreadRadixOrder.OXHeader leftSortedOXHeader, long mergeId) {
_leftSB = leftSB;
_mergeId = mergeId;
int columnsInResult = _leftSB._frame.numCols();
_stringCols = MemoryManager.mallocZ(columnsInResult);
_intCols = MemoryManager.mallocZ(columnsInResult);
    // _frame is necessarily non-null here: numCols() was already called on it above
    for (int col=0; col < columnsInResult; col++) {
      if (_leftSB._frame.vec(col).isInt())
        _intCols[col] = true;
      if (_leftSB._frame.vec(col).isString())
        _stringCols[col] = true;
    }
_chunkNum = _leftSB._frame.anyVec().nChunks();
_leftSortedOXHeader = leftSortedOXHeader;
}
@Override
public void compute2() {
_timings = MemoryManager.malloc8d(20);
long t0 = System.nanoTime();
_leftKO = new KeyOrder(_leftSortedOXHeader, _mergeId);
_leftKO.initKeyOrder(_leftSB._msb,/*left=*/true);
_retBatchSize = (int) _leftKO._batchSize;
final long leftN = _leftSortedOXHeader._numRows; // store number of rows in left frame for the MSB
assert leftN >= 1;
_timings[0] += (System.nanoTime() - t0) / 1e9;
_leftFrom = -1;
long leftTo = leftN; // number of rows in left frame
long retSize = leftTo - _leftFrom - 1;
assert retSize >= 0;
if (retSize == 0) {
tryComplete();
return;
}
_numRowsInResult = retSize;
setPerNodeNumsToFetch(); // find out the number of rows to fetch from H2O nodes, number of rows to fetch per chunk
if (_numRowsInResult > 0) createChunksInDKV(_mergeId);
tryComplete();
}
  /**
   * Find the number of rows to fetch, per chunk and per batch, for an MSB.
   */
private void setPerNodeNumsToFetch() {
Vec anyVec = _leftSB._frame.anyVec();
int nbatch = _leftKO._order.length;
_numRowsPerCidx = new int[nbatch][anyVec.nChunks()];
for (int batchNum =0; batchNum < nbatch; batchNum++) {
int batchSize = _leftKO._order[batchNum].length;
for (int index=0; index < batchSize; index++) {
long globalRowNumber = _leftKO._order[batchNum][index];
int chkIdx = _leftSB._vec.elem2ChunkIdx(globalRowNumber);
_leftKO._perNodeNumRowsToFetch[_leftSB._chunkNode[chkIdx]]++;
_numRowsPerCidx[batchNum][chkIdx]++; // number of rows to fetch per Cidx for a certain MSB
}
}
}
// Holder for Key & Order info
private static class KeyOrder {
public final long _batchSize;
private final transient byte _key[/*n2GB*/][/*i mod 2GB * _keySize*/];
    private final transient long _order[/*n2GB*/][/*i mod 2GB*/];
private final transient long _perNodeNumRowsToFetch[];
final long _mergeId;
KeyOrder(SingleThreadRadixOrder.OXHeader sortedOXHeader, long mergeId) {
_batchSize = sortedOXHeader._batchSize;
final int nBatch = sortedOXHeader._nBatch;
_key = new byte[nBatch][];
_order = new long[nBatch][];
_perNodeNumRowsToFetch = new long[H2O.CLOUD.size()];
_mergeId = mergeId;
}
void initKeyOrder(int msb, boolean isLeft) {
for (int b = 0; b < _key.length; b++) {
Value v = DKV.get(SplitByMSBLocal.getSortedOXbatchKey(isLeft, msb, b, _mergeId));
SplitByMSBLocal.OXbatch ox = v.get(); //mem version (obtained from remote) of the Values gets turned into POJO version
v.freeMem(); //only keep the POJO version of the Value
_key[b] = ox._x;
_order[b] = ox._o;
}
}
}
private void chunksPopulatePerChunk(final long[][][] perNodeLeftRowsCidx, final long[][][] perNodeLeftIndices) {
int numBatch = _leftKO._order.length; // note that _order is ordered as nbatch/batchIndex
int[][] chkIndices = new int[numBatch][]; // store index for each batch per chunk
for (int nbatch=0; nbatch < numBatch; nbatch++) {
int sortedRowIndex = -1;
chkIndices[nbatch] = new int[_chunkNum];
int batchSize = _leftKO._order[nbatch].length;
      for (int rIdx=0; rIdx < batchSize; rIdx++) {
        sortedRowIndex++;
        long row = _leftKO._order[nbatch][rIdx];
int chkIdx = _leftSB._vec.elem2ChunkIdx(row); //binary search in espc
perNodeLeftRowsCidx[nbatch][chkIdx][chkIndices[nbatch][chkIdx]] = row;
perNodeLeftIndices[nbatch][chkIdx][chkIndices[nbatch][chkIdx]] = sortedRowIndex;
chkIndices[nbatch][chkIdx]++;
}
}
}
private void createChunksInDKV(long mergeId) {
long t0 = System.nanoTime(), t1;
// Create the chunks for the final frame from this MSB.
final int batchSizeUUID = _retBatchSize;
final int nbatch = (int) ((_numRowsInResult - 1) / batchSizeUUID + 1);
final int cloudSize = H2O.CLOUD.size();
final long[][][] perMSBLeftRowsCidx = new long[nbatch][_chunkNum][]; // store the global row number per cidx
final long[][][] perMSBLeftIndices = new long[nbatch][_chunkNum][]; // store the sorted index of that row
// allocate memory to arrays
for (int batchN=0; batchN < nbatch; batchN++) {
for (int chkidx=0; chkidx < _chunkNum; chkidx++) {
perMSBLeftRowsCidx[batchN][chkidx] = new long[_numRowsPerCidx[batchN][chkidx]];
perMSBLeftIndices[batchN][chkidx] = new long[_numRowsPerCidx[batchN][chkidx]];
}
}
_timings[2] += ((t1 = System.nanoTime()) - t0) / 1e9;
t0 = t1;
chunksPopulatePerChunk(perMSBLeftRowsCidx, perMSBLeftIndices); // populate perMSBLeftRowsCidx and perMSBLeftIndices
_timings[3] += ((t1 = System.nanoTime()) - t0) / 1e9;
t0 = t1;
assert nbatch >= 1;
final int lastSize = (int) (_numRowsInResult - (nbatch - 1) * batchSizeUUID); // if there is only 1 batch, this will be the size
assert lastSize > 0;
final int numColsInResult = _leftSB._frame.numCols();
final double[][][] frameLikeChunks = new double[numColsInResult][nbatch][]; //TODO: compression via int types
final long[][][] frameLikeChunksLongs = new long[numColsInResult][nbatch][]; //TODO: compression via int types
BufferedString[][][] frameLikeChunks4Strings = new BufferedString[numColsInResult][nbatch][]; // cannot allocate before hand
_chunkSizes = new int[nbatch];
final GetRawRemoteRowsPerChunk grrrsLeftPerChunk[][] = new GetRawRemoteRowsPerChunk[cloudSize][];
for (int b = 0; b < nbatch; b++) { // divide rows of a MSB into batches to process to avoid overwhelming a machine
allocateFrameLikeChunks(b, nbatch, lastSize, batchSizeUUID, frameLikeChunks, frameLikeChunks4Strings,
frameLikeChunksLongs, numColsInResult); // allocate memory for frameLikeChunks...
chunksPopulateRetFirstPerChunk(perMSBLeftRowsCidx, perMSBLeftIndices, b, grrrsLeftPerChunk, frameLikeChunks,
frameLikeChunks4Strings, frameLikeChunksLongs); // fetch and populate rows of a MSB one batch at a time.
_timings[10] += ((t1 = System.nanoTime()) - t0) / 1e9;
t0 = t1;
// compress all chunks and store them
chunksCompressAndStore(b, numColsInResult, frameLikeChunks, frameLikeChunks4Strings, frameLikeChunksLongs, mergeId);
if (nbatch > 1) {
        cleanUpMemory(grrrsLeftPerChunk, b); // clean up memory used by grrrsLeftPerChunk
}
}
_timings[11] += (System.nanoTime() - t0) / 1e9;
}
// collect all rows with the same MSB one batch at a time over all nodes in a sorted fashion
private void chunksPopulateRetFirstPerChunk(final long[][][] perMSBLeftRowsCidx, final long[][][] perMSBLeftIndices,
final int jb, final GetRawRemoteRowsPerChunk grrrsLeft[][], final double[][][] frameLikeChunks,
BufferedString[][][] frameLikeChunks4String, final long[][][] frameLikeChunksLong) {
RPC<GetRawRemoteRowsPerChunk> grrrsLeftRPC[][] = new RPC[H2O.CLOUD.size()][];
int batchSize = _leftKO._order[jb].length;
for (H2ONode node : H2O.CLOUD._memary) {
final int ni = node.index();
grrrsLeftRPC[ni] = new RPC[1];
grrrsLeft[ni] = new GetRawRemoteRowsPerChunk[1];
grrrsLeftRPC[ni][0] = new RPC<>(node, new GetRawRemoteRowsPerChunk(_leftSB._frame, batchSize,
perMSBLeftRowsCidx[jb], perMSBLeftIndices[jb])).call();
}
for (H2ONode node : H2O.CLOUD._memary) {
int ni = node.index();
_timings[5] += (grrrsLeft[ni][0] = grrrsLeftRPC[ni][0].get()).timeTaken;
}
for (H2ONode node : H2O.CLOUD._memary) {
final int ni = node.index();
// transfer entries from _chk to frameLikeChunks
long[][] chksLong = grrrsLeft[ni][0]._chkLong; // indexed by col num per batchsize
double[][] chks = grrrsLeft[ni][0]._chk;
BufferedString[][] chksString = grrrsLeft[ni][0]._chkString;
for (int cidx = 0; cidx < _chunkNum; cidx++) {
if (_leftSB._chunkNode[cidx] == ni) { // copy over rows from this node
int rowSize = perMSBLeftIndices[jb][cidx].length;
for (int row = 0; row < rowSize; row++) {
for (int col = 0; col < chks.length; col++) {
int offset = (int) perMSBLeftIndices[jb][cidx][row];
if (this._stringCols[col]) {
frameLikeChunks4String[col][jb][offset] = chksString[col][offset];
} else if (this._intCols[col]) {
frameLikeChunksLong[col][jb][offset] = chksLong[col][offset];
} else {
frameLikeChunks[col][jb][offset] = chks[col][offset];
}
}
}
}
}
}
}
private void allocateFrameLikeChunks(final int b, final int nbatch, final int lastSize, final int batchSizeUUID,
final double[][][] frameLikeChunks,
final BufferedString[][][] frameLikeChunks4Strings,
final long[][][] frameLikeChunksLongs, final int numColsInResult) {
    for (int col = 0; col < numColsInResult; col++) { // allocate memory for frameLikeChunks for this batch
      final int sz = _chunkSizes[b] = (b == nbatch - 1 ? lastSize : batchSizeUUID);
      if (this._stringCols[col]) {
        frameLikeChunks4Strings[col][b] = new BufferedString[sz];
      } else if (this._intCols[col]) {
        frameLikeChunksLongs[col][b] = MemoryManager.malloc8(sz);
        Arrays.fill(frameLikeChunksLongs[col][b], Long.MIN_VALUE); // Long.MIN_VALUE marks NA for integer columns
      } else {
        frameLikeChunks[col][b] = MemoryManager.malloc8d(sz);
        Arrays.fill(frameLikeChunks[col][b], Double.NaN);
        // NA by default to save filling with NA for no-matches when allLeft
      }
    }
}
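  // Sketch (illustrative helpers, not called above): the NA sentinels chosen at allocation
  // time. Integer columns use Long.MIN_VALUE and floating-point columns use Double.NaN, so
  // any slot never written by a fetched row decompresses back to NA.
  private static boolean isNASentinelSketch(long v) { return v == Long.MIN_VALUE; }
  private static boolean isNASentinelSketch(double d) { return Double.isNaN(d); }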
private void cleanUpMemory(GetRawRemoteRowsPerChunk[][] grrr, int batchIdx) {
if (grrr != null) {
int nodeNum = grrr.length;
for (int nodeIdx = 0; nodeIdx < nodeNum; nodeIdx++) {
int batchLimit = Math.min(batchIdx + 1, grrr[nodeIdx].length);
if ((grrr[nodeIdx] != null) && (grrr[nodeIdx].length > 0)) {
for (int bIdx = 0; bIdx < batchLimit; bIdx++) { // clean up memory
int chkLen = grrr[nodeIdx][bIdx] == null ? 0 :
(grrr[nodeIdx][bIdx]._chk == null ? 0 : grrr[nodeIdx][bIdx]._chk.length);
for (int cindex = 0; cindex < chkLen; cindex++) {
grrr[nodeIdx][bIdx]._chk[cindex] = null;
grrr[nodeIdx][bIdx]._chkString[cindex] = null;
grrr[nodeIdx][bIdx]._chkLong[cindex] = null;
}
if (chkLen > 0) {
grrr[nodeIdx][bIdx]._chk = null;
grrr[nodeIdx][bIdx]._chkString = null;
grrr[nodeIdx][bIdx]._chkLong = null;
}
}
}
}
}
}
// compress all chunks and store them
private void chunksCompressAndStore(final int b, final int numColsInResult, final double[][][] frameLikeChunks,
BufferedString[][][] frameLikeChunks4String, final long[][][] frameLikeChunksLong,
long mergeId) {
// compress all chunks and store them
Futures fs = new Futures();
for (int col = 0; col < numColsInResult; col++) {
if (this._stringCols[col]) {
NewChunk nc = new NewChunk(null, 0);
for (int index = 0; index < frameLikeChunks4String[col][b].length; index++)
nc.addStr(frameLikeChunks4String[col][b][index]);
Chunk ck = nc.compress();
DKV.put(BinaryMerge.getKeyForMSBComboPerCol(_leftSB._msb, -1, col, b, mergeId), ck, fs, true);
frameLikeChunks4String[col][b] = null; //free mem as early as possible (it's now in the store)
} else if (_intCols[col]) {
NewChunk nc = new NewChunk(null, -1);
for (long l : frameLikeChunksLong[col][b]) {
if (l == Long.MIN_VALUE) nc.addNA();
else nc.addNum(l, 0);
}
Chunk ck = nc.compress();
DKV.put(BinaryMerge.getKeyForMSBComboPerCol(_leftSB._msb, -1, col, b, mergeId), ck, fs, true);
frameLikeChunksLong[col][b] = null; //free mem as early as possible (it's now in the store)
} else {
Chunk ck = new NewChunk(frameLikeChunks[col][b]).compress();
DKV.put(BinaryMerge.getKeyForMSBComboPerCol(_leftSB._msb, -1, col, b, mergeId), ck, fs, true);
frameLikeChunks[col][b] = null; //free mem as early as possible (it's now in the store)
}
}
fs.blockForPending();
}
static class GetRawRemoteRowsPerChunk extends DTask<GetRawRemoteRowsPerChunk> {
Frame _fr;
long[][] _perNodeLeftIndices;
long[][] _perNodeLeftRowsCidx;
double[/*col*/][] _chk; //null on the way to remote node, non-null on the way back
BufferedString[][] _chkString;
long[/*col*/][] _chkLong;
    int _batchSize; // size of the batch we are working with
int _nChunks;
double timeTaken;
GetRawRemoteRowsPerChunk(Frame fr, int batchSize, long[][] leftRowsCidx, long[][] leftRowsIndices) {
_fr = fr;
_batchSize = batchSize; // size of current batch we are dealing with
_perNodeLeftIndices = leftRowsIndices;
_perNodeLeftRowsCidx = leftRowsCidx;
_nChunks = _perNodeLeftIndices.length; // number of chunks in fr.
}
private static long[][] malloc8A(int m, int n) {
long[][] res = new long[m][];
for (int i = 0; i < m; ++i)
res[i] = MemoryManager.malloc8(n);
return res;
}
@Override
public void compute2() {
assert ((_perNodeLeftIndices != null) && (_perNodeLeftRowsCidx != null));
assert (_chk == null);
long t0 = System.nanoTime();
_chk = MemoryManager.malloc8d(_fr.numCols(), _batchSize);
_chkLong = malloc8A(_fr.numCols(), _batchSize);
_chkString = new BufferedString[_fr.numCols()][_batchSize];
for (int cidx = 0; cidx < _nChunks; cidx++) { // go through each chunk and copy from frame to sorted arrays
for (int col = 0; col < _fr.numCols(); col++) {
Vec v = _fr.vec(col);
          if (!v.chunkKey(cidx).home())
            break; // go to next cidx
Chunk c = v.chunkForChunkIdx(cidx);
int chunkSize = _perNodeLeftRowsCidx[cidx].length;
if (v.isString()) {
for (int row = 0; row < chunkSize; row++) { // copy string and numeric columns
int offset = (int) (_perNodeLeftRowsCidx[cidx][row] - v.espc()[cidx]); // row number
            _chkString[col][(int) _perNodeLeftIndices[cidx][row]] = c.atStr(new BufferedString(), offset); // _chkString[col][row] stored by reference here
}
} else if (v.isInt()) {
for (int row = 0; row < chunkSize; row++) { // extract info from chunks to one place
int offset = (int) (_perNodeLeftRowsCidx[cidx][row] - v.espc()[cidx]); // row number
_chkLong[col][(int) _perNodeLeftIndices[cidx][row]] = (c.isNA(offset)) ? Long.MIN_VALUE : c.at8(offset);
}
} else {
for (int row = 0; row < chunkSize; row++) { // extract info from chunks to one place
int offset = (int) (_perNodeLeftRowsCidx[cidx][row] - v.espc()[cidx]); // row number
_chk[col][(int) _perNodeLeftIndices[cidx][row]] = c.atd(offset);
}
}
}
}
_perNodeLeftIndices = null;
_perNodeLeftRowsCidx = null;
_fr = null;
assert (_chk != null && _chkLong != null && _chkString != null);
timeTaken = (System.nanoTime() - t0) / 1e9;
tryComplete();
}
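    // Worked example (illustrative): v.espc() holds the cumulative row start of each chunk,
    // e.g. espc = {0, 100, 200}. A global row number 150 that lives in chunk 1 therefore
    // becomes within-chunk offset 150 - espc[1] = 50, which is what the loops above compute.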
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/SplitByMSBLocal.java
|
package water.rapids;
import water.*;
import water.fvec.Chunk;
import water.util.ArrayUtils;
import water.util.Log;
import water.util.MathUtils;
import water.util.PrettyPrint;
import java.math.BigInteger;
import java.util.Arrays;
import java.util.Hashtable;
import static java.math.BigInteger.ONE;
import static java.math.BigInteger.ZERO;
class SplitByMSBLocal extends MRTask<SplitByMSBLocal> {
private final boolean _isLeft;
private final int _shift, _batchSize, _bytesUsed[], _keySize;
private final BigInteger _base[];
private final int _col[];
private final Key _linkTwoMRTask;
private final int _id_maps[][];
private final int[] _ascending;
private transient long _counts[][];
private transient long _o[][][]; // transient ok because there is no reduce here between nodes, and important to save shipping back to caller.
private transient byte _x[][][];
private long _numRowsOnThisNode;
final long _mergeId;
static Hashtable<Key,SplitByMSBLocal> MOVESHASH = new Hashtable<>();
SplitByMSBLocal(boolean isLeft, BigInteger base[], int shift, int keySize, int batchSize, int bytesUsed[], int[] col,
Key linkTwoMRTask, int[][] id_maps, int[] ascending, long mergeId) {
_isLeft = isLeft;
// we only currently use the shift (in bits) for the first column for the
// MSB (which we don't know from bytesUsed[0]). Otherwise we use the
// bytesUsed to write the key's bytes.
_shift = shift;
_batchSize=batchSize; _bytesUsed=bytesUsed; _col=col; _base=base;
_keySize = keySize;
_linkTwoMRTask = linkTwoMRTask;
_id_maps = id_maps;
_ascending = ascending;
_mergeId = mergeId;
}
@Override protected void setupLocal() {
Key k = RadixCount.getKey(_isLeft, _col[0], _mergeId, H2O.SELF);
_counts = ((RadixCount.Long2DArray) DKV.getGet(k))._val; // get the sparse spine for this node, created and DKV-put above
DKV.remove(k);
// First cumulate MSB count histograms across the chunks in this node
long MSBhist[] = MemoryManager.malloc8(256);
int nc = _fr.anyVec().nChunks();
assert nc == _counts.length;
for (int c = 0; c < nc; c++) {
if (_counts[c]!=null) {
for (int h = 0; h < 256; h++) {
MSBhist[h] += _counts[c][h];
}
}
}
_numRowsOnThisNode = ArrayUtils.sum(MSBhist); // we just use this count for the DKV data transfer rate message, number of rows having values with current MSB
if (ArrayUtils.maxValue(MSBhist) > Math.max(1000, _fr.numRows() / 20 / H2O.CLOUD.size())) { // TO DO: better test of a good even split
Log.warn("RadixOrder(): load balancing on this node not optimal (max value should be <= "
+ (Math.max(1000, _fr.numRows() / 20 / H2O.CLOUD.size()))
+ " " + Arrays.toString(MSBhist) + ")");
}
    // shared between threads on the same node, all mappers write into distinct
    // locations (no conflicts, no need for atomic updates, etc.)
Log.info("Allocating _o and _x buckets on this node with known size up front ... ");
long t0 = System.nanoTime();
_o = new long[256][][];
_x = new byte[256][][]; // for each bucket, there might be > 2^31 bytes, so an extra dimension for that
for (int msb = 0; msb < 256; msb++) {
if (MSBhist[msb] == 0) continue;
int nbatch = (int) ((MSBhist[msb]-1)/_batchSize +1); // at least one batch
int lastSize = (int) (MSBhist[msb] - (nbatch-1) * _batchSize); // the size of the last batch (could be batchSize)
assert nbatch > 0;
assert lastSize > 0;
_o[msb] = new long[nbatch][];
_x[msb] = new byte[nbatch][];
int b;
for (b = 0; b < nbatch-1; b++) {
        _o[msb][b] = MemoryManager.malloc8(_batchSize);
_x[msb][b] = MemoryManager.malloc1(_batchSize * _keySize);
}
_o[msb][b] = MemoryManager.malloc8(lastSize);
_x[msb][b] = MemoryManager.malloc1(lastSize * _keySize);
}
Log.debug("done in " + (System.nanoTime() - t0) / 1e9+" seconds.");
// TO DO: otherwise, expand width. Once too wide (and interestingly large
// width may not be a problem since small buckets won't impact cache),
// start rolling up bins (maybe into pairs or even quads)
for (int msb = 0; msb < 256; msb++) {
// each of the 256 columns starts at 0 for the 0th chunk. This 0 offsets
// into x[MSBvalue][batch div][mod] and o[MSBvalue][batch div][mod]
long rollSum = 0;
for (int c = 0; c < nc; c++) {
if (_counts[c] == null) continue;
long tmp = _counts[c][msb];
// Warning: modify the POJO DKV cache, but that's fine since this node
// won't ask for the original DKV.get() version again
_counts[c][msb] = rollSum;
rollSum += tmp;
}
}
MOVESHASH.put(_linkTwoMRTask, this);
// NB: no radix skipping in this version (unlike data.table we'll use
// biggestBit and assume further bits are used).
}
@Override public void map(Chunk chk[]) {
long myCounts[] = _counts[chk[0].cidx()]; //cumulative offsets into o and x
if (myCounts == null) {
Log.debug("myCounts empty for chunk " + chk[0].cidx());
return;
}
// Loop through this chunk and write the byte key and the source row number
// into the local MSB buckets
// TODO: make this branch free and write the already-compressed _mem
// directly. Just need to normalize compression across all chunks. This
// has to loop through rows because we need the MSBValue from the first
// column to use on the others, by row. Nothing to do cache efficiency,
// although, it will be most cache efficient (holding one page of each
// column's _mem, plus a page of this_x, all contiguous. At the cost of
// more instructions.
boolean[] isIntCols = MemoryManager.mallocZ(chk.length);
for (int c=0; c < chk.length; c++){
isIntCols[c] = chk[c].vec().isCategorical() || chk[c].vec().isInt();
}
for (int r=0; r<chk[0]._len; r++) { // tight, branch free and cache efficient (surprisingly)
int MSBvalue = 0; // default for NA
BigInteger thisx = ZERO;
if (!chk[0].isNA(r)) {
// TODO: restore branch-free again, go by column and retain original
// compression with no .at8()
if (_id_maps[0]!=null) { // dealing with enum column
thisx = BigInteger.valueOf(_isLeft?_id_maps[0][(int)chk[0].at8(r)] + 1:(int)chk[0].at8(r) + 1);
MSBvalue = thisx.shiftRight(_shift).intValue();
          // may not be worth it since _base has to be the global minimum, so we can rarely
          // use the raw value; when we can, maybe do it in bulk
} else { // dealing with numeric columns (int or double), translate row value into MSB Bucket value
thisx = isIntCols[0] ?
BigInteger.valueOf(_ascending[0]*chk[0].at8(r)).subtract(_base[0]).add(ONE):
MathUtils.convertDouble2BigInteger(_ascending[0]*chk[0].atd(r)).subtract(_base[0]).add(ONE);
MSBvalue = thisx.shiftRight(_shift).intValue();
}
}
long target = myCounts[MSBvalue]++;
int batch = (int) (target / _batchSize);
int offset = (int) (target % _batchSize);
assert _o[MSBvalue] != null;
      _o[MSBvalue][batch][offset] = (long) r + chk[0].start(); // store the global (frame-wide) row number
byte this_x[] = _x[MSBvalue][batch];
offset *= _keySize; // can't overflow because batchsize was chosen above to be maxByteSize/max(keysize,8)
      byte keyArray[] = thisx.toByteArray(); // big-endian: most significant byte first
int offIndex = keyArray.length > 8 ? -1 : _bytesUsed[0] - keyArray.length;
int endLen = _bytesUsed[0] - (keyArray.length > 8 ? 8 : keyArray.length);
for (int i = _bytesUsed[0] - 1; (i >= endLen && i >= 0); i--) {
this_x[offset + i] = keyArray[i - offIndex];
}
// add on the key values with values from other columns
for (int c=1; c<chk.length; c++) { // TO DO: left align subsequent
offset += _bytesUsed[c-1]; // advance offset by the previous field width
if (chk[c].isNA(r)) continue; // NA is a zero field so skip over as java initializes memory to 0 for us always
if (_id_maps[c] != null) {
thisx = BigInteger.valueOf(_isLeft?_id_maps[c][(int)chk[c].at8(r)] + 1:(int)chk[c].at8(r) + 1);
} else { // for numerical/integer columns
thisx = isIntCols[c]?
BigInteger.valueOf(_ascending[c]*chk[c].at8(r)).subtract(_base[c]).add(ONE):
MathUtils.convertDouble2BigInteger(_ascending[c]*chk[c].atd(r)).subtract(_base[c]).add(ONE);
}
        keyArray = thisx.toByteArray(); // big-endian: most significant byte first
offIndex = keyArray.length > 8 ? -1 : _bytesUsed[c] - keyArray.length;
endLen = _bytesUsed[c] - (keyArray.length > 8 ? 8 : keyArray.length);
for (int i = _bytesUsed[c] - 1; (i >= endLen && i >= 0); i--) {
this_x[offset + i] = keyArray[i - offIndex];
}
}
}
}
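  // Illustrative sketch (assumption, not part of the original code path): right-align the
  // big-endian bytes of a non-negative BigInteger into a fixed-width key field, as the two
  // loops above do, so that unsigned byte-wise key comparison agrees with numeric order.
  private static void writeFixedWidthKeySketch(byte[] dst, int at, int width, BigInteger v) {
    byte[] src = v.toByteArray();          // big-endian, possibly with a leading 0x00 sign byte
    int n = Math.min(src.length, width);   // keep only the low-order 'width' bytes
    for (int i = 0; i < n; i++)
      dst[at + width - 1 - i] = src[src.length - 1 - i];
  }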
static H2ONode ownerOfMSB(int MSBvalue) {
    // TO DO: this isn't ideal for efficiency; the owner should ideally be the node where most of that MSB's rows already live, somehow.
// Why not getSortedOXHeader(MSBvalue).home_node() ?
//int blocksize = (int) Math.ceil(256. / H2O.CLOUD.size());
//H2ONode node = H2O.CLOUD._memary[MSBvalue / blocksize];
return H2O.CLOUD._memary[MSBvalue % H2O.CLOUD.size()]; // spread it around more.
}
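  // Worked example (illustrative): with a 4-node cloud, MSBs 0,4,8,... land on node 0,
  // MSBs 1,5,9,... on node 1, and so on, spreading the 256 buckets round-robin.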
static Key getNodeOXbatchKey(boolean isLeft, int MSBvalue, int node, int batch, long mergeId) {
return Key.make("__radix_order__NodeOXbatch_MSB" + MSBvalue + "_node" + node + "_batch" + batch + "_"
+ mergeId + (isLeft ? "_LEFT" : "_RIGHT"),
Key.HIDDEN_USER_KEY, false, SplitByMSBLocal.ownerOfMSB(MSBvalue));
}
static Key getSortedOXbatchKey(boolean isLeft, int MSBvalue, int batch, long mergeId) {
return Key.make("__radix_order__SortedOXbatch_MSB" + MSBvalue + "_batch" + batch + "_"
+ mergeId + (isLeft ? "_LEFT" : "_RIGHT"),
Key.HIDDEN_USER_KEY, false, SplitByMSBLocal.ownerOfMSB(MSBvalue));
}
static class OXbatch extends Iced {
OXbatch(long[] o, byte[] x) { _o = o; _x = x; }
final long[/*batchSize or lastSize*/] _o;
final byte[/*batchSize or lastSize*/] _x;
}
static Key getMSBNodeHeaderKey(boolean isLeft, int MSBvalue, int node, long mergeId) {
return Key.make("__radix_order__OXNodeHeader_MSB" + MSBvalue + "_node" + node + "_" + mergeId
+ (isLeft ? "_LEFT" : "_RIGHT"),
Key.HIDDEN_USER_KEY, false, SplitByMSBLocal.ownerOfMSB(MSBvalue));
}
static class MSBNodeHeader extends Iced {
MSBNodeHeader(int MSBnodeChunkCounts[/*chunks*/]) { _MSBnodeChunkCounts = MSBnodeChunkCounts;}
    int _MSBnodeChunkCounts[]; // a vector of the number of contributions from each chunk. Since each chunk is length int, this must be less than that, so int
}
// Push o/x in chunks to owning nodes
void sendSplitMSB() {
// The map() above ran above for each chunk on this node. Although this
// data was written to _o and _x in the order of chunk number (because we
// calculated those offsets in order in the prior step), the chunk numbers
// will likely have gaps because chunks are distributed across nodes not
// using a modulo approach but something like chunk1 on node1, chunk2 on
// node2, etc then modulo after that. Also, as tables undergo changes as a
// result of user action, their distribution of chunks to nodes could
// change or be changed (e.g. 'Tomas' rebalance()') for various reasons.
// When the helper node (i.e the node doing all the A's) gets the A's from
// this node, it must stack all this nodes' A's with the A's from the other
// nodes in chunk order in order to maintain the original order of the A's
    // within the global table. To do that, this node must tell the helper
    // node where the boundaries are in _o and _x. That's what the first for
    // loop below does. The helper node doesn't need to be sent the
    // corresponding chunk numbers; it already knows them from the Vec header
    // it holds locally.
// TODO: perhaps write to o_ and x_ in batches in the first place, and just
// send more and smaller objects via the DKV. This makes the stitching
// much easier on the helper node too, as it doesn't need to worry about
// batch boundaries in the source data. Then it might be easier to
// parallelize that helper part. The thinking was that if each chunk
// generated 256 objects, that would flood the DKV with keys?
// TODO: send nChunks * 256. Currently we do nNodes * 256. Or avoid DKV
// altogether if possible.
Log.info("Starting SendSplitMSB on this node (keySize is " + _keySize + " as [");
for( int bs : _bytesUsed ) Log.debug(" "+bs);
Log.debug(" ]) ...");
long t0 = System.nanoTime();
Futures myfs = new Futures(); // Private Futures instead of _fs, so can block early and get timing results
for (int msb =0; msb <_o.length /*256*/; ++msb) { // TODO this can be done in parallel, surely
// "I found my A's (msb=0) and now I'll send them to the node doing all the A's"
// "I'll send you a long vector of _o and _x (batched if very long) along with where the boundaries are."
// "You don't need to know the chunk numbers of these boundaries, because you know the node of each chunk from your local Vec header"
if(_o[msb] == null) continue;
myfs.add(H2O.submitTask(new SendOne(msb,myfs)));
}
myfs.blockForPending();
double timeTaken = (System.nanoTime() - t0) / 1e9;
long bytes = _numRowsOnThisNode*( 8/*_o*/ + _keySize) + 64;
Log.debug("took : " + timeTaken+" seconds.");
Log.debug(" DKV.put " + PrettyPrint.bytes(bytes) + " @ " +
String.format("%.3f", bytes / timeTaken / (1024*1024*1024)) + " GByte/sec [10Gbit = 1.25GByte/sec]");
}
class SendOne extends H2O.H2OCountedCompleter<SendOne> {
// Nothing on remote node here, just a local parallel loop
private final int _msb;
private final Futures _myfs;
SendOne(int msb, Futures myfs) { _msb = msb; _myfs = myfs; }
@Override public void compute2() {
int numChunks = 0; // how many of the chunks are on this node
for( long[] cnts : _counts )
if (cnts != null) // the map() allocated the 256 vector in the spine slots for this node's chunks
// even if cnts[_msb]==0 (no _msb for this chunk) we'll store
// that because needed by line marked LINE_ANCHOR_1 below.
numChunks++;
// make dense. And by construction (i.e. cumulative counts) these chunks
// contributed in order
int msbNodeChunkCounts[] = MemoryManager.malloc4(numChunks);
int j=0;
long lastCount = 0; // _counts are cumulative at this stage so need to diff
for( long[] cnts : _counts ) {
if (cnts != null) {
if (cnts[_msb] == 0) { // robust in case we skipped zeros when accumulating
msbNodeChunkCounts[j] = 0;
} else {
// _counts is long so it can be accumulated in-place iirc.
// TODO: check
msbNodeChunkCounts[j] = (int)(cnts[_msb] - lastCount);
lastCount = cnts[_msb];
}
j++;
}
}
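      // Worked example (illustrative): cumulative counts {0, 3, 7, 12} across four chunks
      // diff back to per-chunk contributions {0, 3, 4, 5}.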
MSBNodeHeader msbh = new MSBNodeHeader(msbNodeChunkCounts);
// Need dontCache==true, so data does not remain both locally and on remote.
// Use private Futures so can block independent of MRTask Futures.
DKV.put(getMSBNodeHeaderKey(_isLeft, _msb, H2O.SELF.index(), _mergeId), msbh, _myfs, true);
for (int b=0;b<_o[_msb].length; b++) {
OXbatch ox = new OXbatch(_o[_msb][b], _x[_msb][b]); // this does not copy in Java, just references
DKV.put(getNodeOXbatchKey(_isLeft, _msb, H2O.SELF.index(), b, _mergeId), ox, _myfs, true);
}
tryComplete();
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/Val.java
|
package water.rapids;
import hex.Model;
import water.Keyed;
import water.fvec.Frame;
import water.Iced;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import java.util.Map;
/**
* Generic execution values for the untyped stack.
*/
abstract public class Val extends Iced {
// Things on the execution stack
final public static int NUM = 1; // double
final public static int NUMS = 2; // array of doubles
final public static int STR = 3; // string
final public static int STRS = 4; // array of strings
final public static int FRM = 5; // Frame, not a Vec. Can be a Frame of 1 Vec
final public static int ROW = 6; // Row of data; limited to a single array of doubles
final public static int FUN = 7; // Function
final public static int MOD = 8; // Model
final public static int MFRM = 9; // Map of (String, Frame)
final public static int KEYED = 10; // Keyed
abstract public int type();
// One of these methods is overridden in each subclass
public boolean isEmpty() { return isNums() && getNums().length == 0; }
public boolean isNum() { return false; }
public boolean isNums() { return false; }
public boolean isStr() { return false; }
public boolean isStrs() { return false; }
public boolean isFrame() { return false; }
public boolean isMapFrame() { return false; }
public boolean isRow() { return false; }
public boolean isFun() { return false; }
public boolean isModel() { return false; }
public boolean isKeyed() { return false; }
// One of these methods is overridden in each subclass
public double getNum() { throw badValue("number"); }
public boolean getBool() { return getNum() == 1; }
public double[] getNums() { throw badValue("number array"); }
public String getStr() { throw badValue("String"); }
public String[] getStrs() { throw badValue("String array"); }
public Frame getFrame() { throw badValue("Frame"); }
public Map<String, Frame> getMapFrame() { throw badValue("MapFrame"); }
public double[] getRow() { throw badValue("Row"); }
public AstPrimitive getFun() { throw badValue("function"); }
public Model getModel() { throw badValue("Model"); }
public Keyed getKeyed() { throw badValue("Keyed"); }
private IllegalArgumentException badValue(String expectedType) {
return new IllegalArgumentException("Expected a " + expectedType + " but found a " + getClass());
}
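  // Illustrative usage (assumption, not part of this file): callers typically test the type
  // before extracting, e.g.
  //   Val v = ast.exec(env);
  //   double d = v.isNum() ? v.getNum() : Double.NaN;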
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/AstBuiltin.java
|
package water.rapids.ast;
import water.H2O;
import water.rapids.Env;
import water.rapids.Val;
/**
* (Replacement for AstPrimitive).
*/
public abstract class AstBuiltin<T extends AstBuiltin<T>> extends AstPrimitive<T> {
/**
* <p>Primary method to invoke this function, passing all the parameters
* as the `asts` list.</p>
*
* <p>The default implementation of this method executes all Asts within
* the provided environment, and then calls {@link #exec(Val[])} passing it
* the arguments as the list of {@link Val}s. A derived class will then only
* need to override the second `exec()` function which is much simpler.</p>
*
* <p>However for certain functions (such as short-circuit boolean operators)
* executing all arguments is not desirable -- these functions would have to
* override this more general method.</p>
*
* @param env Current execution environment. Variables are looked up here.
* @param stk TODO need clarification
* @param asts List of AstRoot expressions that are arguments to the
* function. First element in this list is the function itself.
* @return value resulting from calling the function with the provided list
* of arguments.
*/
public Val apply(Env env, Env.StackHelp stk, AstRoot[] asts) {
Val[] args = new Val[asts.length];
args[0] = null;
for (int i = 1; i < asts.length; i++) {
args[i] = stk.track(asts[i].exec(env));
}
return exec(args);
}
/**
* Most Ast* functions will want to override this method. The semantics is
* "call this function with the provided list of arguments".
*/
@SuppressWarnings("UnusedParameters")
protected Val exec(Val[] args) {
throw H2O.unimpl();
}
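  /**
   * A minimal sketch (hypothetical, not part of h2o-core) of a builtin defined by
   * overriding only the simpler {@link #exec(Val[])} hook: a two-argument numeric add.
   */
  private static final class AstDemoAdd extends AstBuiltin<AstDemoAdd> {
    @Override public String str() { return "demo_add"; }
    @Override public int nargs() { return 1 + 2; } // the function itself plus two operands
    @Override public String[] args() { return new String[]{"lhs", "rhs"}; }
    @Override protected Val exec(Val[] args) {
      // args[0] is the function slot, filled with null by apply() above
      return new water.rapids.vals.ValNum(args[1].getNum() + args[2].getNum());
    }
  }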
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/AstExec.java
|
package water.rapids.ast;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFun;
import water.util.SB;
import java.util.ArrayList;
/**
* Apply A Function. Basic function execution.
*/
public class AstExec extends AstRoot {
public final AstRoot[] _asts;
public AstExec() {
this((AstRoot[])null);
}
public AstExec(AstRoot[] asts) {
_asts = asts;
}
public AstExec(ArrayList<AstRoot> asts) {
_asts = asts.toArray(new AstRoot[asts.size()]);
}
@Override
public String str() {
SB sb = new SB().p('(');
for (AstRoot ast : _asts)
sb.p(ast.toString()).p(' ');
return sb.p(')').toString();
}
@Override
public String example() {
return "(func ...args)";
}
@Override
public String description() {
return "List of whitespace-separated tokens within parenthesis is interpreted as a function application. The " +
"first argument must be a function name (or an expression returning a function), all other tokens are passed " +
"to the function as arguments. For example: `(sqrt 16)`, `(+ 2 3)`, `(getTimeZone)`, etc.";
}
// Function application. Execute the first AstRoot and verify that it is a
// function. Then call that function's apply method. Do not evaluate other
// arguments; e.g. short-circuit logicals' apply calls may choose to not ever
  // evaluate some arguments.
@Override
public Val exec(Env env) {
Val fun = _asts[0].exec(env);
if (!fun.isFun())
throw new IllegalArgumentException("Expected a function but found " + fun.getClass());
AstPrimitive ast = fun.getFun();
int nargs = ast.nargs();
if (nargs != -1 && nargs != _asts.length)
throw new IllegalArgumentException(
"Incorrect number of arguments; '" + ast + "' expects " + (nargs - 1) + " but was passed " + (_asts.length - 1));
try (Env.StackHelp stk = env.stk()) {
return env.returning(ast.apply(env, stk, _asts));
}
}
public String[] getArgs() {
return ((ValFun) _asts[0].exec(new Env(null))).getArgs();
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/AstFrame.java
|
package water.rapids.ast;
import water.fvec.Frame;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
/**
* A Frame. Execution is just to return the constant.
*/
public class AstFrame extends AstRoot {
final ValFrame _fr;
public AstFrame() {
_fr = null;
}
public AstFrame(Frame fr) {
_fr = new ValFrame(fr);
}
@Override
public String str() {
return _fr == null ? null : _fr.toString();
}
@Override
public String example() {
return null;
}
@Override
public String description() {
return null;
}
@Override
public Val exec(Env env) {
return env.returning(_fr);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/AstFunction.java
|
package water.rapids.ast;
import water.rapids.Env;
import water.rapids.Rapids;
import water.rapids.Val;
import water.rapids.vals.ValFun;
import water.util.SB;
import java.util.ArrayList;
/**
* Define a function
* Syntax: { ids... . expr }
* IDs are bound within expr
*/
public class AstFunction extends AstPrimitive {
final String[] _ids; // Identifier names
final AstRoot _body; // The function body
// If this function is being evaluated, record the arguments and parent lexical scope
final Val[] _args; // Evaluated arguments to a function
final AstFunction _parent; // Parent lexical scope
public AstFunction() {
_ids = null;
_body = null;
_args = null;
_parent = null;
}
public AstFunction(ArrayList<String> ids, AstRoot body) {
_ids = ids.toArray(new String[ids.size()]);
_body = body;
_args = null; // This is a template of an uncalled function
_parent = null;
}
// A function applied to arguments
public AstFunction(AstFunction fun, Val[] args, AstFunction parent) {
_ids = fun._ids;
_body = fun._body;
_parent = parent;
_args = args;
}
@Override
public String str() {
SB sb = new SB().p('{');
penv(sb);
for (String id : _ids)
sb.p(id).p(' ');
sb.p(". ").p(_body.toString()).p('}');
return sb.toString();
}
@Override
public String example() {
return "{ ...args . expr }";
}
@Override
public String description() {
return "Function definition: a list of tokens in curly braces. All initial tokens (which must be valid " +
"identifiers) become function arguments, then a single dot '.' must follow, and finally an expression which " +
"is the body of the function. Functions with variable number of arguments are not supported. Example: " +
"squaring function `{x . (^ x 2)}`";
}
// Print environment
private void penv(SB sb) {
if (_parent != null) _parent.penv(sb);
if (_args != null)
for (int i = 1; i < _ids.length; i++)
sb.p(_ids[i]).p('=').p(_args[i].toString()).p(' ');
}
// Function execution. Just throw self on stack like a constant. However,
// capture the existing global scope.
@Override
public ValFun exec(Env env) {
return new ValFun(new AstFunction(this, null, env._scope));
}
// Expected argument count, plus self
@Override
public int nargs() {
return _ids.length;
}
@Override
public String[] args() { return _ids; }
// Do a ID lookup, returning the matching argument if found
public Val lookup(String id) {
for (int i = 1; i < _ids.length; i++)
if (id.equals(_ids[i]))
return _args[i]; // Hit, return found argument
return _parent == null ? null : _parent.lookup(id);
}
// Apply this function: evaluate all arguments, push a lexical scope mapping
// the IDs to the ARGs, then evaluate the body. After execution pop the
// lexical scope and return the results.
@Override
public Val apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
    // Evaluate all arguments
Val[] args = new Val[asts.length];
for (int i = 1; i < asts.length; i++)
args[i] = stk.track(asts[i].exec(env));
AstFunction old = env._scope;
env._scope = new AstFunction(this, args, _parent); // Push a new lexical scope, extended from the old
Val res = stk.untrack(_body.exec(env));
env._scope = old; // Pop the lexical scope off (by restoring the old unextended scope)
return res;
}
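  // Worked example (illustrative): applying {x . (^ x 2)} to 4 evaluates the argument,
  // pushes a scope binding x=4, executes the body (^ x 2) to get 16, then restores the
  // old scope before returning.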
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/AstParameter.java
|
package water.rapids.ast;
public abstract class AstParameter extends AstRoot {
@Override
public String example() {
return null;
}
@Override
public String description() {
return null;
}
public String toJavaString() {
return str();
}
// Select columns by number or String.
public int[] columns(String[] names) {
throw new IllegalArgumentException("Requires a number-list, but found an " + getClass().getSimpleName());
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/AstPrimitive.java
|
package water.rapids.ast;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFun;
import water.util.StringUtils;
/**
* A primitive operation. Execution just returns the function. *Application* (not execution) applies the function
* to the arguments.
*/
public abstract class AstPrimitive<T extends AstPrimitive<T>> extends AstRoot<T> {
private transient ValFun _v;
/**
* Number of function's arguments + 1. Thus, a binary operator like '+'
* should be declared with 3 nargs: ["+", lhs, rhs].
* For variable-argument functions this method should return -1.
*/
public abstract int nargs();
/**
* List of argument names. The length of the returned array should be equal
* to `nargs() - 1` (unless `nargs()` returns -1, in which case this function
* may return {"..."} or something similar).
*/
public abstract String[] args();
/**
* <p>Primary method to invoke this function, passing all the parameters
* as the `asts` list.</p>
*
* @param env Current execution environment. Variables are looked up here.
* @param stk TODO need clarification
* @param asts List of AstRoot expressions that are arguments to the
* function. First element in this list is the function itself.
* @return value resulting from calling the function with the provided list
* of arguments.
*/
public abstract Val apply(Env env, Env.StackHelp stk, AstRoot[] asts);
@Override
public ValFun exec(Env env) {
if (_v == null) _v = new ValFun(this);
return _v;
}
@Override
public String example() {
int nargs = nargs();
return nargs == 1? "(" + str() + ")" :
nargs >= 2? "(" + str() + " " + StringUtils.join(" ", args()) + ")" :
nargs == -1? "(" + str() + " ...)" :
null; // shouldn't be possible, but who knows?
}
@Override
public String description() {
return "";
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/AstRoot.java
|
package water.rapids.ast;
import water.Iced;
import water.rapids.*;
import water.rapids.ast.prims.reducers.AstMean;
import water.rapids.ast.params.*;
import water.rapids.vals.*;
/**
* Base class for all nodes in Rapids language Abstract Syntax Tree.
*/
public abstract class AstRoot<T extends AstRoot<T>> extends Iced<T> {
/**
* <p>"Execute" this AST expression, and return the result. For different ASTs
* this may have different interpretation. For example, consider this Rapids
* expression:</p>
* <pre> (mean frame True False)</pre>
*
* <p>It will be parsed into the following structure:</p>
* <pre>
* AstExec() instance with
* _asts = [AstMean() singleton instance,
* new AstId(frame),
* AstConst.TRUE,
* AstConst.FALSE]
* </pre>
*
* <p>Execution of {@link AstExec} will execute its first argument, _asts[0],
* verify that it produces a function ({@link ValFun}), then call
* {@link AstPrimitive#apply(Env, Env.StackHelp, AstRoot[])} on that function
* passing down the list of _asts arguments.</p>
*
* <p>The {@link AstMean} class will in turn execute all its arguments,
* where execution of {@link AstId} fetches the referred symbol from the
* environment, and execution of {@link AstConst} returns the value of that
* constant.</p>
*
* <p>Certain other functions may choose not to evaluate all their arguments
* (for example boolean expressions providing short-circuit evaluation).</p>
*/
public abstract Val exec(Env env);
/**
* String representation of this Ast object in the Rapids language. For
* {@link AstPrimitive}s this is the name of the function; for
* {@link AstParameter}s this is either the name of the variable, or the
* value of the numeric constant that the parameter represents. For more
* complicated constructs such as {@link AstExec} or {@link AstFunction}
* this method should return those objects as a Rapids string.
*/
public abstract String str();
// Note: the following 2 methods example() and description() really
// ought to be static. Unfortunately, Java doesn't support overriding
// static methods in subclasses, and "abstract static ..." is even a
// syntax error.
/**
* <p>Return an example of how this Ast construct ought to be used. This
* method is used to build documentation for the Rapids language. It is
* different from {@link #str()}, in particular it must provide a valid
* example even in a static context. For example, an {@link AstStr} may
* return <code>"Hello, world!"</code> as an example. At the same time,
* for different {@link AstPrimitive}s this method should generally provide
* a typical example of how that function is to be used.</p>
*
* <p>Return <code>null</code> to indicate that the object should not be
* included in the documentation.</p>
*/
public abstract String example();
/**
* <p>Return the detailed description (help) for what this language construct
* does or how it is supposed to be used. This method is used in conjunction
* with {@link #example()} to build the documentation of the Rapids
* language.</p>
*
* <p>If you need to include any formatting, then please use Markup language.
* Although it is up to the client to support it, Markup is one of the
* simplest and easiest alternatives.</p>
*
* <p>Return <code>null</code> to indicate that the object should not be
* included in the documentation.</p>
*/
public abstract String description();
@Override public String toString() {
return str();
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/AstRow.java
|
package water.rapids.ast;
import water.rapids.Env;
import water.rapids.vals.ValRow;
/**
* A Row. Execution is just to return the constant.
*/
public class AstRow extends AstRoot {
final ValRow _row;
public AstRow(double[] ds, String[] names) {
_row = new ValRow(ds, names);
}
@Override
public String str() {
return _row.toString();
}
@Override
public String example() {
return null;
}
@Override
public String description() {
return null;
}
@Override
public ValRow exec(Env env) {
return _row;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/params/AstConst.java
|
package water.rapids.ast.params;
import water.rapids.Env;
import water.rapids.Rapids;
import water.rapids.ast.AstParameter;
import water.rapids.vals.ValNum;
/**
* Class for constants
*/
public class AstConst extends AstParameter {
private final ValNum _v;
private final String name;
final public static AstConst FALSE = new AstConst("False", 0);
final public static AstConst TRUE = new AstConst("True", 1);
final public static AstConst NAN = new AstConst("NaN", Double.NaN);
final public static AstConst PI = new AstConst("Pi", Math.PI);
final public static AstConst E = new AstConst("E", Math.E);
public AstConst() {
name = null;
_v = null;
}
public AstConst(String name, double d) {
this.name = name;
this._v = new ValNum(d);
}
@Override
public String str() {
return name;
}
@Override
public ValNum exec(Env env) {
return _v;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/params/AstId.java
|
package water.rapids.ast.params;
import water.fvec.Frame;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.ast.AstParameter;
/**
* An ID. Execution does lookup in the current scope.
*/
public class AstId extends AstParameter {
private final String _id;
public AstId() {
_id = null;
}
public AstId(String id) {
_id = id;
}
public AstId(Frame f) {
_id = f._key.toString();
}
@Override
public String str() {
return _id;
}
@Override
public Val exec(Env env) {
return env.returning(env.lookup(_id));
}
@Override
public String toJavaString() {
return "\"" + str() + "\"";
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/params/AstNum.java
|
package water.rapids.ast.params;
import water.rapids.Env;
import water.rapids.vals.ValNum;
import water.rapids.ast.AstParameter;
/**
* A number literal. Execution simply returns its value.
*/
public class AstNum extends AstParameter {
private final ValNum _v;
public AstNum() {
this(0);
}
public AstNum(double d) {
_v = new ValNum(d);
}
@Override
public String str() {
return _v.toString();
}
@Override
public int[] columns(String[] names) {
return new int[]{(int) _v.getNum()};
}
public void setNum(double d) {
_v.setNum(d);
}
public double getNum() {
return _v.getNum();
}
@Override
public ValNum exec(Env env) {
return _v;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/params/AstNumList.java
|
package water.rapids.ast.params;
import water.H2O;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.ast.AstParameter;
import water.rapids.vals.ValNums;
import water.util.ArrayUtils;
import water.util.SB;
import java.util.ArrayList;
import java.util.Arrays;
/**
* A collection of base/stride/cnts.
* Syntax: { {num | num:cnt | num:cnt:stride},* }
* <p/>
* The bases can be unordered with dups (often used for column selection where
* repeated columns are allowed, and order matters). The _isList flag tracks
* that all cnts are 1 (and hence all strides are ignored and 1); these lists
* may or may not be sorted. Note that some column selection is dense
* (typical all-columns is: {0:MAX_INT}), and this has cnt>1.
* <p/>
* When cnts are > 1, bases must be sorted, with base+stride*cnt always less
* than the next base. Typical use-case might be a list of probabilities for
* computing quantiles, or grid-search parameters.
* <p/>
* Asking for a sorted integer expansion will sort the bases internally, and
* also demand no overlap between bases. The has(), min() and max() calls
* require a sorted list.
*/
public class AstNumList extends AstParameter {
public final double[] _bases;
final double _strides[];
final long _cnts[];
public final boolean _isList; // True if an unordered list of numbers (cnts are 1, stride is ignored)
public boolean _isSort; // True if bases are sorted. May get updated later.
public AstNumList(ArrayList<Double> bases, ArrayList<Double> strides, ArrayList<Long> counts) {
int n = bases.size();
// Convert to fixed-sized arrays
_bases = new double[n];
_strides = new double[n];
_cnts = new long[n];
boolean isList = true;
for (int i = 0; i < n; i++) {
_bases[i] = bases.get(i);
_cnts[i] = counts.get(i);
_strides[i] = strides.get(i);
if (_cnts[i] != 1) isList = false;
}
_isList = isList;
// Complain about unordered bases, unless it's a simple number list
boolean isSorted = true;
for (int i = 1; i < n; i++)
if (_bases[i-1] + (_cnts[i-1] - 1) * _strides[i-1] >= _bases[i]) {
if (_isList) isSorted = false;
else throw new IllegalArgumentException("Overlapping numeric ranges");
}
_isSort = isSorted;
}
// A simple AstNumList of 1 number
public AstNumList(double d) {
_bases = new double[]{d};
_strides = new double[]{1};
_cnts = new long[]{1};
_isList = _isSort = true;
}
// A simple dense range AstNumList
public AstNumList(long lo, long hi_exclusive) {
_bases = new double[]{lo};
_strides = new double[]{1};
_cnts = new long[]{hi_exclusive - lo};
_isList = false;
_isSort = true;
}
// An empty number list
public AstNumList() {
_bases = new double[0];
_strides = new double[0];
_cnts = new long[0];
_isList = _isSort = true;
}
public AstNumList(double[] list) {
_bases = list;
_strides = new double[list.length];
_cnts = new long[list.length];
_isList = true;
Arrays.fill(_strides, 1);
Arrays.fill(_cnts, 1);
}
public AstNumList(int[] list) {
this(ArrayUtils.copyFromIntArray(list));
}
@Override
public Val exec(Env env) {
return new ValNums(expand());
}
@Override
public String str() {
SB sb = new SB().p('[');
for (int i = 0; i < _bases.length; i++) {
sb.p(_bases[i]);
if (_cnts[i] != 1) {
sb.p(':').p(_bases[i] + _cnts[i] * _strides[i]);
if (_strides[i] != 1 || ((long) _bases[i]) != _bases[i])
sb.p(':').p(_strides[i]);
}
if (i < _bases.length - 1) sb.p(',');
}
return sb.p(']').toString();
}
@Override
public String toJavaString() {
double[] ary = expand();
if (ary == null || ary.length == 0) return "\"null\"";
SB sb = new SB().p('{');
    for (int i = 0; i < ary.length - 1; ++i) sb.p(ary[i]).p(',');
    return sb.p(ary[ary.length - 1]).p('}').toString();
}
// Expand the compressed form into an array of doubles.
public double[] expand() {
// Count total values
int nrows = (int) cnt(), r = 0;
// Fill in values
double[] vals = new double[nrows];
for (int i = 0; i < _bases.length; i++) {
if (Double.isNaN(_bases[i])) {
vals[r++] = Double.NaN;
} else {
for (double d = _bases[i]; d < _bases[i] + _cnts[i] * _strides[i]; d += _strides[i])
vals[r++] = d;
}
}
return vals;
}
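  // Worked example (illustrative): the Rapids list {2:3:10} parses to _bases={2}, _cnts={3},
  // _strides={10}, and expand() walks d = 2, 12, 22 to produce {2.0, 12.0, 22.0}.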
// Update-in-place sort of bases
public AstNumList sort() {
if (_isSort) return this; // Flow coding fast-path cutout
int[] idxs = ArrayUtils.seq(0, _bases.length);
ArrayUtils.sort(idxs, _bases);
double[] bases = _bases.clone();
double[] strides = _strides.clone();
long[] cnts = _cnts.clone();
for (int i = 0; i < idxs.length; i++) {
_bases[i] = bases[idxs[i]];
_strides[i] = strides[idxs[i]];
_cnts[i] = cnts[idxs[i]];
}
_isSort = true;
return this;
}
// Expand the compressed form into an array of ints;
// often used for unordered column lists
public int[] expand4() {
// Count total values
int nrows = (int) cnt(), r = 0;
// Fill in values
int[] vals = new int[nrows];
for (int i = 0; i < _bases.length; i++)
for (double d = _bases[i]; d < _bases[i] + _cnts[i] * _strides[i]; d += _strides[i])
vals[r++] = (int) d;
return vals;
}
// Expand the compressed form into an array of ints;
// often used for sorted column lists
int[] expand4Sort() {
return sort().expand4();
}
// Expand the compressed form into an array of longs;
// often used for unordered row lists
public long[] expand8() {
// Count total values
int nrows = (int) cnt(), r = 0;
// Fill in values
long[] vals = new long[nrows];
for (int i = 0; i < _bases.length; i++)
for (double d = _bases[i]; d < _bases[i] + _cnts[i] * _strides[i]; d += _strides[i])
vals[r++] = (long) d;
return vals;
}
// Expand the compressed form into an array of longs;
// often used for sorted row lists
public long[] expand8Sort() {
return sort().expand8();
}
public double max() {
assert _isSort;
return _bases[_bases.length - 1] + _cnts[_cnts.length - 1] * _strides[_strides.length - 1];
  } // largest exclusive value (weird, right?!)
public double min() {
assert _isSort;
return _bases[0];
}
public long cnt() {
return water.util.ArrayUtils.sum(_cnts);
}
public boolean isDense() {
return _cnts.length == 1 && _bases[0] == 0 && _strides[0] == 1;
}
public boolean isEmpty() {
return _bases.length == 0;
}
// check if n is in this list of numbers
// NB: all contiguous ranges have already been checked to have stride 1
public boolean has(long v) {
int idx = findBase(v);
if (idx >= 0) return true;
    idx = -idx - 2; // Arrays.binarySearch returns -(insertionPoint)-1 on a miss; we want insertionPoint-1, the last base below v (a returned -1 maps to -1, i.e. v is below all bases)
if (idx < 0) return false;
assert _bases[idx] < v; // Sanity check binary search, AND idx >= 0
return v < _bases[idx] + _cnts[idx] * _strides[idx] && (v - _bases[idx]) % _strides[idx] == 0;
}
/**
* Finds index of a given value in this number sequence, indexing start at 0.
* @param v value
* @return value index (>= 0) or -1 if value is not a member of this sequence
*/
public long index(long v) {
int bIdx = findBase(v);
if (bIdx >= 0) return water.util.ArrayUtils.sum(_cnts, 0, bIdx);
bIdx = -bIdx - 2;
if (bIdx < 0) return -1L;
assert _bases[bIdx] < v;
long offset = v - (long) _bases[bIdx];
long stride = (long) _strides[bIdx];
if ((offset >= _cnts[bIdx] * stride) || (offset % stride != 0)) return -1L;
return water.util.ArrayUtils.sum(_cnts, 0, bIdx) + (offset / stride);
}
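  // Worked example (illustrative): for _bases={0,100}, _cnts={5,5}, _strides={1,1},
  // index(102) misses in the bases, lands on bIdx=1, offset=2, and returns 5 + 2 = 7.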
private int findBase(long v) {
assert _isSort; // Only called when already sorted
// do something special for negative indexing... that does not involve
// allocating arrays, once per list element!
if (v < 0) throw H2O.unimpl();
return Arrays.binarySearch(_bases, v);
}
// Select columns by number. Numbers are capped to the number of columns +1
// - this allows R to see a single out-of-range value and throw a range check
// - this allows Python to see a single out-of-range value and ignore it
// - this allows Python to pass [0:MAXINT] without blowing out the max number of columns.
// Note that the Python front-end does not want to cap the max column size, because
// this will force eager evaluation on a standard column slice operation.
// Note that the list is often unsorted (_isSort is false).
// Note that the list is often dense with cnts>1 (_isList is false).
@Override
public int[] columns(String[] names) {
// Count total values, capped by max len+1
int nrows = 0, r = 0;
for (int i = 0; i < _bases.length; i++)
nrows += Math.min(_bases[i] + _cnts[i], names.length + 1) - Math.min(_bases[i], names.length + 1);
// Fill in values
int[] vals = new int[nrows];
for (int i = 0; i < _bases.length; i++) {
int lim = Math.min((int) (_bases[i] + _cnts[i]), names.length + 1);
for (int d = Math.min((int) _bases[i], names.length + 1); d < lim; d++)
vals[r++] = d;
}
return vals;
}
}
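// --- Illustrative sketch (editor's addition, not part of the original source). ---
// The compressed number list above stores (base, cnt, stride) triples; expand8()
// materializes base, base+stride, ..., base+(cnt-1)*stride for each triple. The
// hypothetical demo class below mirrors that loop on plain arrays.
class NumListExpandDemo {
  static long[] expand(double[] bases, long[] cnts, double[] strides) {
    int n = 0;
    for (long c : cnts) n += (int) c;        // total number of values across all triples
    long[] vals = new long[n];
    int r = 0;
    for (int i = 0; i < bases.length; i++)   // walk each (base, cnt, stride) triple
      for (double d = bases[i]; d < bases[i] + cnts[i] * strides[i]; d += strides[i])
        vals[r++] = (long) d;
    return vals;
  }
  public static void main(String[] args) {
    // bases {2,10}, cnts {3,2}, strides {1,5} encode the list {2,3,4,10,15}
    long[] out = expand(new double[]{2, 10}, new long[]{3, 2}, new double[]{1, 5});
    System.out.println(java.util.Arrays.toString(out)); // [2, 3, 4, 10, 15]
  }
}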
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/params/AstStr.java
|
package water.rapids.ast.params;
import water.rapids.Env;
import water.rapids.ast.AstParameter;
import water.rapids.vals.ValStr;
/**
* A String. Execution is just to return the constant.
*/
public class AstStr extends AstParameter {
private final ValStr _v;
public AstStr() {
this(null);
}
public AstStr(String str) {
_v = new ValStr(str);
}
@Override
public String str() {
return _v.toString().replaceAll("^\"|^\'|\"$|\'$", "");
}
@Override
public ValStr exec(Env env) {
return _v;
}
@Override
public String toJavaString() {
return "\"" + str() + "\"";
}
@Override
public int[] columns(String[] names) {
int i = water.util.ArrayUtils.find(names, _v.getStr());
if (i == -1) throw new IllegalArgumentException("Column " + _v.getStr() + " not found");
return new int[]{i};
}
public String getStr() {
return _v.getStr();
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/params/AstStrList.java
|
package water.rapids.ast.params;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.ast.AstParameter;
import water.rapids.vals.ValStrs;
import java.util.ArrayList;
import java.util.Arrays;
/**
 * A collection of Strings only. This is a syntactic form only; it never executes and never gets on the execution
* stack.
*/
public class AstStrList extends AstParameter {
public String[] _strs;
public AstStrList() {
_strs = null;
}
public AstStrList(ArrayList<String> strs) {
_strs = strs.toArray(new String[strs.size()]);
}
@Override
public Val exec(Env env) {
return new ValStrs(_strs);
}
@Override
public String str() {
return Arrays.toString(_strs);
}
// Select columns by number or String.
@Override
public int[] columns(String[] names) {
int[] idxs = new int[_strs.length];
for (int i = 0; i < _strs.length; i++) {
int idx = idxs[i] = water.util.ArrayUtils.find(names, _strs[i]);
if (idx == -1) throw new IllegalArgumentException("Column " + _strs[i] + " not found");
}
return idxs;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstCorrelation.java
|
package water.rapids.ast.prims.advmath;
import water.Key;
import water.MRTask;
import water.fvec.*;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
import water.rapids.vals.ValNum;
import water.util.ArrayUtils;
import water.util.EnumUtils;
import java.util.Arrays;
/**
* Calculate Pearson's Correlation Coefficient between columns of a frame
* <p/>
* Formula:
 * Pearson's Correlation Coefficient = Cov(X,Y) / (sigma(X) * sigma(Y))
*/
public class AstCorrelation extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary", "x", "y", "use", "method"};
}
protected enum Mode {Everything, AllObs, CompleteObs}
private enum Method {Pearson, Spearman}
@Override
public int nargs() {
return 1 + 4; /* (cor X Y use method) */
}
@Override
public String str() {
return "cor";
}
@Override
public Val apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Frame frx = stk.track(asts[1].exec(env)).getFrame();
Frame fry = stk.track(asts[2].exec(env)).getFrame();
if (frx.numRows() != fry.numRows())
throw new IllegalArgumentException("Frames must have the same number of rows, found " + frx.numRows() + " and " + fry.numRows());
String use = stk.track(asts[3].exec(env)).getStr();
Mode mode;
switch (use) {
case "everything":
mode = Mode.Everything;
break;
case "all.obs":
mode = Mode.AllObs;
break;
case "complete.obs":
mode = Mode.CompleteObs;
break;
default:
throw new IllegalArgumentException("unknown use mode: " + use);
}
final Method method = getMethodFromUserInput(stk.track(asts[4].exec(env)).getStr());
switch (method) {
case Pearson:
return fry.numRows() == 1 ? scalar(frx, fry, mode) : array(frx, fry, mode);
case Spearman:
return spearman(frx, fry, mode);
default:
throw new IllegalStateException(String.format("Given method input'%s' is not supported. Available options are: %s",
method, Arrays.toString(Method.values())));
}
}
private static Method getMethodFromUserInput(final String methodUserInput) {
return EnumUtils.valueOfIgnoreCase(Method.class, methodUserInput)
.orElseThrow(() -> new IllegalArgumentException(String.format("Unknown correlation method '%s'. Available options are: %s",
methodUserInput, Arrays.toString(Method.values()))));
}
private Val spearman(final Frame frameX, final Frame frameY, final Mode mode) {
final Frame spearmanMatrix = SpearmanCorrelation.calculate(frameX, frameY, mode);
if (frameY.numCols() == 1) {
// If there are only two columns compared, return a single number with the correlation coefficient
return new ValNum(spearmanMatrix.vec(0).at(0));
} else {
// Otherwise just return the correlation matrix
return new ValFrame(spearmanMatrix);
}
}
// Pearson Correlation for one row, which will return a scalar value.
private ValNum scalar(Frame frx, Frame fry, Mode mode) {
if (frx.numCols() != fry.numCols())
throw new IllegalArgumentException("Single rows must have the same number of columns, found " + frx.numCols() + " and " + fry.numCols());
Vec vecxs[] = frx.vecs();
Vec vecys[] = fry.vecs();
double xmean = 0;
double ymean = 0;
double xvar = 0;
double yvar = 0;
double xsd;
double ysd;
double ncols = fry.numCols();
double NACount = 0;
double xval;
double yval;
double ss = 0;
for (int r = 0; r < ncols; r++) {
xval = vecxs[r].at(0);
yval = vecys[r].at(0);
if (Double.isNaN(xval) || Double.isNaN(yval))
NACount++;
else {
xmean += xval;
ymean += yval;
}
}
xmean /= (ncols - NACount);
ymean /= (ncols - NACount);
for (int r = 0; r < ncols; r++) {
xval = vecxs[r].at(0);
yval = vecys[r].at(0);
if (!(Double.isNaN(xval) || Double.isNaN(yval))) {
//Compute variance of x and y vars
xvar += Math.pow((vecxs[r].at(0) - xmean), 2);
yvar += Math.pow((vecys[r].at(0) - ymean), 2);
//Compute sum of squares of x and y
ss += (vecxs[r].at(0) - xmean) * (vecys[r].at(0) - ymean);
}
}
xsd = Math.sqrt(xvar / (ncols - 1 - NACount)); //Sample Standard Deviation
ysd = Math.sqrt(yvar / (ncols - 1 - NACount)); //Sample Standard Deviation
double denom = xsd * ysd; //sd(x) * sd(y)
if (NACount != 0) {
if (mode.equals(Mode.AllObs)) throw new IllegalArgumentException("Mode is 'all.obs' but NAs are present");
if (mode.equals(Mode.Everything)) return new ValNum(Double.NaN);
}
return new ValNum((ss / (ncols - NACount - 1)) / denom); //Pearson's Correlation Coefficient
}
// Matrix correlation. Compute correlation between all columns from each Frame
// against each other. Return a matrix of correlations which is frx.numCols
// wide and fry.numCols tall.
private Val array(Frame frx, Frame fry, Mode mode) {
Vec[] vecxs = frx.vecs();
int ncolx = vecxs.length;
Vec[] vecys = fry.vecs();
int ncoly = vecys.length;
if (mode.equals(Mode.Everything) || mode.equals(Mode.AllObs)) {
      if (mode.equals(Mode.AllObs)) {
        for (Vec v : vecxs)
          if (v.naCnt() != 0)
            throw new IllegalArgumentException("Mode is 'all.obs' but NAs are present");
      }
//Set up CoVarTask
CoVarTask[] cvs = new CoVarTask[ncoly];
//Get mean of x vecs
double[] xmeans = new double[ncolx];
for (int x = 0; x < ncolx; x++) {
xmeans[x] = vecxs[x].mean();
}
//Set up double arrays to capture sd(x), sd(y) and sd(x) * sd(y)
double[] sigmay = new double[ncoly];
double[] sigmax = new double[ncolx];
double[][] denom = new double[ncoly][ncolx];
// Launch tasks; each does all Xs vs one Y
for (int y = 0; y < ncoly; y++) {
//Get covariance between x and y
cvs[y] = new CoVarTask(vecys[y].mean(), xmeans).dfork(new Frame(vecys[y]).add(frx));
//Get sigma of y vecs
sigmay[y] = vecys[y].sigma();
}
//Get sigma of x vecs
for (int x = 0; x < ncolx; x++) {
sigmax[x] = vecxs[x].sigma();
}
//Denominator for correlation calculation is sigma_y * sigma_x (All x sigmas vs one Y)
for (int y = 0; y < ncoly; y++) {
for (int x = 0; x < ncolx; x++) {
denom[y][x] = sigmay[y] * sigmax[x];
}
}
// 1-col returns scalar
if (ncolx == 1 && ncoly == 1) {
return new ValNum((cvs[0].getResult()._covs[0] / (fry.numRows() - 1)) / denom[0][0]);
}
//Gather final result, which is the correlation coefficient per column
Vec[] res = new Vec[ncoly];
Key<Vec>[] keys = Vec.VectorGroup.VG_LEN1.addVecs(ncoly);
for (int y = 0; y < ncoly; y++) {
res[y] = Vec.makeVec(ArrayUtils.div(ArrayUtils.div(cvs[y].getResult()._covs, (fry.numRows() - 1)), denom[y]), keys[y]);
}
return new ValFrame(new Frame(fry._names, res));
} else { //if (mode.equals(Mode.CompleteObs))
//Omit NA rows between X and Y.
//This will help with cov, sigma & mean calculations later as we only want to calculate cov, sigma, & mean
//for rows with no NAs
Frame frxy_naomit = new MRTask() {
private void copyRow(int row, Chunk[] cs, NewChunk[] ncs) {
for (int i = 0; i < cs.length; ++i) {
if (cs[i] instanceof CStrChunk) ncs[i].addStr(cs[i], row);
else if (cs[i] instanceof C16Chunk) ncs[i].addUUID(cs[i], row);
else if (cs[i].hasFloat()) ncs[i].addNum(cs[i].atd(row));
else ncs[i].addNum(cs[i].at8(row), 0);
}
}
@Override
public void map(Chunk[] cs, NewChunk[] ncs) {
int col;
for (int row = 0; row < cs[0]._len; ++row) {
for (col = 0; col < cs.length; ++col)
if (cs[col].isNA(row)) break;
if (col == cs.length) copyRow(row, cs, ncs);
}
}
}.doAll(new Frame(frx).add(fry).types(), new Frame(frx).add(fry)).outputFrame(new Frame(frx).add(fry).names(), new Frame(frx).add(fry).domains());
//Collect new vecs that do not contain NA rows
Vec[] vecxs_naomit = frxy_naomit.subframe(0, ncolx).vecs();
int ncolx_naomit = vecxs_naomit.length;
Vec[] vecys_naomit = frxy_naomit.subframe(ncolx, frxy_naomit.vecs().length).vecs();
int ncoly_naomit = vecys_naomit.length;
//Set up CoVarTask
CoVarTask[] cvs = new CoVarTask[ncoly_naomit];
//Get mean of X vecs
double[] xmeans = new double[ncolx_naomit];
for (int x = 0; x < ncolx_naomit; x++) {
xmeans[x] = vecxs_naomit[x].mean();
}
//Set up double arrays to capture sd(x), sd(y) and sd(x) * sd(y)
double[] sigmay = new double[ncoly_naomit];
double[] sigmax = new double[ncolx_naomit];
double[][] denom = new double[ncoly_naomit][ncolx_naomit];
// Launch tasks; each does all Xs vs one Y
for (int y = 0; y < ncoly_naomit; y++) {
//Get covariance between x and y
cvs[y] = new CoVarTask(vecys_naomit[y].mean(), xmeans).dfork(new Frame(vecys_naomit[y]).add(frxy_naomit.subframe(0, ncolx)));
//Get sigma of y vecs
sigmay[y] = vecys_naomit[y].sigma();
}
//Get sigma of x vecs
for (int x = 0; x < ncolx_naomit; x++) {
sigmax[x] = vecxs_naomit[x].sigma();
}
//Denominator for correlation calculation is sigma_y * sigma_x (All x sigmas vs one Y)
for (int y = 0; y < ncoly_naomit; y++) {
for (int x = 0; x < ncolx_naomit; x++) {
denom[y][x] = sigmay[y] * sigmax[x];
}
}
// 1-col returns scalar
if (ncolx_naomit == 1 && ncoly_naomit == 1) {
return new ValNum((cvs[0].getResult()._covs[0] / (frxy_naomit.numRows() - 1)) / denom[0][0]);
}
//Gather final result, which is the correlation coefficient per column
Vec[] res = new Vec[ncoly_naomit];
Key<Vec>[] keys = Vec.VectorGroup.VG_LEN1.addVecs(ncoly_naomit);
for (int y = 0; y < ncoly_naomit; y++) {
res[y] = Vec.makeVec(ArrayUtils.div(ArrayUtils.div(cvs[y].getResult()._covs, (frxy_naomit.numRows() - 1)), denom[y]), keys[y]);
}
return new ValFrame(new Frame(frxy_naomit.subframe(ncolx, frxy_naomit.vecs().length)._names, res));
}
}
private static class CoVarTask extends MRTask<CoVarTask> {
double[] _covs;
final double _xmeans[], _ymean;
CoVarTask(double ymean, double[] xmeans) {
_ymean = ymean;
_xmeans = xmeans;
}
@Override
public void map(Chunk cs[]) {
final int ncolsx = cs.length - 1;
final Chunk cy = cs[0];
final int len = cy._len;
_covs = new double[ncolsx];
double sum;
for (int x = 0; x < ncolsx; x++) {
sum = 0;
final Chunk cx = cs[x + 1];
final double xmean = _xmeans[x];
for (int row = 0; row < len; row++)
sum += (cx.atd(row) - xmean) * (cy.atd(row) - _ymean);
_covs[x] = sum;
}
}
@Override
public void reduce(CoVarTask cvt) {
ArrayUtils.add(_covs, cvt._covs);
}
}
}
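// --- Illustrative sketch (editor's addition, not part of the original source). ---
// Pearson's r = Cov(X,Y) / (sd(X) * sd(Y)), the same arithmetic scalar() performs
// above, but on plain double arrays and without NA handling. Demo names are
// hypothetical.
class PearsonDemo {
  static double pearson(double[] x, double[] y) {
    int n = x.length;
    double xmean = 0, ymean = 0;
    for (int i = 0; i < n; i++) { xmean += x[i]; ymean += y[i]; }
    xmean /= n; ymean /= n;
    double ss = 0, xvar = 0, yvar = 0;
    for (int i = 0; i < n; i++) {
      ss   += (x[i] - xmean) * (y[i] - ymean); // sum of cross-products
      xvar += (x[i] - xmean) * (x[i] - xmean);
      yvar += (y[i] - ymean) * (y[i] - ymean);
    }
    // sample covariance divided by the product of sample standard deviations
    return (ss / (n - 1)) / (Math.sqrt(xvar / (n - 1)) * Math.sqrt(yvar / (n - 1)));
  }
  public static void main(String[] args) {
    double[] x = {1, 2, 3, 4}, y = {2, 4, 6, 8};
    System.out.println(pearson(x, y)); // 1.0 for perfectly linear data
  }
}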
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstDistance.java
|
package water.rapids.ast.prims.advmath;
import water.H2O;
import water.MRTask;
import water.fvec.*;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.ast.AstBuiltin;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
import water.util.ArrayUtils;
import water.util.Log;
import java.util.Arrays;
/**
* Calculate Distance Metric between pairs of rows
*/
public class AstDistance extends AstBuiltin<AstDistance> {
@Override
public String[] args() {
return new String[]{"ary", "x", "y", "measure"};
}
@Override
public int nargs() {
return 1 + 3; /* (distance X Y measure) */
}
@Override
public String str() {
return "distance";
}
@Override
public String description() {
return "Compute a pairwise distance measure between all rows of two numeric H2OFrames.\n" +
"For a given (usually larger) reference frame (N rows x p cols),\n" +
"and a (usually smaller) query frame (M rows x p cols), we return a numeric Frame of size (N rows x M cols),\n" +
"where the ij-th element is the distance measure between the i-th reference row and the j-th query row.\n" +
"Note1: The output frame is symmetric.\n" +
"Note2: Since N x M can be very large, it may be more efficient (memory-wise) to make multiple calls with smaller query Frames.";
}
@Override
public Val apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Frame frx = stk.track(asts[1].exec(env)).getFrame();
Frame fry = stk.track(asts[2].exec(env)).getFrame();
String measure = stk.track(asts[3].exec(env)).getStr();
return computeCosineDistances(frx, fry, measure);
}
public Val computeCosineDistances(Frame references, Frame queries, String distanceMetric) {
Log.info("Number of references: " + references.numRows());
Log.info("Number of queries : " + queries.numRows());
String[] options = new String[]{"cosine","cosine_sq","l1","l2"};
if (!ArrayUtils.contains(options, distanceMetric.toLowerCase()))
throw new IllegalArgumentException("Invalid distance measure provided: " + distanceMetric + ". Mustbe one of " + Arrays.toString(options));
if (references.numRows() * queries.numRows() * 8 > H2O.CLOUD.free_mem() )
throw new IllegalArgumentException("Not enough free memory to allocate the distance matrix (" +
references.numRows() + " rows and " + queries.numRows() + " cols. Try specifying a smaller query frame.");
if (references.numCols() != queries.numCols())
throw new IllegalArgumentException("Frames must have the same number of cols, found " + references.numCols() + " and " + queries.numCols());
if (queries.numRows() > Integer.MAX_VALUE)
throw new IllegalArgumentException("Queries can't be larger than 2 billion rows.");
if (queries.numCols() != references.numCols())
throw new IllegalArgumentException("Queries and References must have the same dimensionality");
for (int i=0;i<queries.numCols();++i) {
if (!references.vec(i).isNumeric())
throw new IllegalArgumentException("References column " + references.name(i) + " is not numeric.");
if (!queries.vec(i).isNumeric())
throw new IllegalArgumentException("Queries column " + references.name(i) + " is not numeric.");
if (references.vec(i).naCnt()>0)
throw new IllegalArgumentException("References column " + references.name(i) + " contains missing values.");
if (queries.vec(i).naCnt()>0)
throw new IllegalArgumentException("Queries column " + references.name(i) + " contains missing values.");
}
return new ValFrame(new DistanceComputer(queries, distanceMetric).doAll((int)queries.numRows(), Vec.T_NUM, references).outputFrame());
}
static public class DistanceComputer extends MRTask<DistanceComputer> {
Frame _queries;
String _measure;
DistanceComputer(Frame queries, String measure) {
_queries = queries;
_measure = measure;
}
@Override
public void map(Chunk[] cs, NewChunk[] ncs) {
int p = cs.length; //dimensionality
int Q = (int) _queries.numRows();
int R = cs[0]._len;
Vec.Reader[] Qs = new Vec.Reader[p];
for (int i = 0; i < p; ++i) {
Qs[i] = _queries.vec(i).new Reader();
}
double[] denomR = null;
double[] denomQ = null;
final boolean cosine = _measure.toLowerCase().equals("cosine");
final boolean cosine_sq = _measure.toLowerCase().equals("cosine_sq");
final boolean l1 = _measure.toLowerCase().equals("l1");
final boolean l2 = _measure.toLowerCase().equals("l2");
if (cosine || cosine_sq) {
denomR = new double[R];
denomQ = new double[Q];
for (int r = 0; r < R; ++r) { // Reference row (chunk-local)
for (int c = 0; c < p; ++c) { //cols
denomR[r] += Math.pow(cs[c].atd(r), 2);
}
}
for (int q = 0; q < Q; ++q) { // Query row (global)
for (int c = 0; c < p; ++c) { //cols
denomQ[q] += Math.pow(Qs[c].at(q), 2);
}
}
}
for (int r = 0; r < cs[0]._len; ++r) { // Reference row (chunk-local)
for (int q = 0; q < Q; ++q) { // Query row (global)
double distRQ = 0;
if (l1) {
for (int c = 0; c < p; ++c) { //cols
distRQ += Math.abs(cs[c].atd(r) - Qs[c].at(q));
}
} else if (l2) {
for (int c = 0; c < p; ++c) { //cols
distRQ += Math.pow(cs[c].atd(r) - Qs[c].at(q), 2);
}
distRQ = Math.sqrt(distRQ);
} else if (cosine || cosine_sq) {
for (int c = 0; c < p; ++c) { //cols
distRQ += cs[c].atd(r) * Qs[c].at(q);
}
if (cosine_sq) {
distRQ *= distRQ;
distRQ /= denomR[r] * denomQ[q];
} else {
distRQ /= Math.sqrt(denomR[r] * denomQ[q]);
}
}
ncs[q].addNum(distRQ); // one Q distance per Reference
}
}
}
}
}
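// --- Illustrative sketch (editor's addition, not part of the original source). ---
// The four supported measures for one reference/query row pair, matching the
// per-row arithmetic in DistanceComputer.map() above. Demo names are hypothetical.
class DistanceDemo {
  static double distance(double[] r, double[] q, String measure) {
    double dot = 0, nr = 0, nq = 0, l1 = 0, l2 = 0;
    for (int c = 0; c < r.length; c++) {
      dot += r[c] * q[c];                  // inner product, for the cosine measures
      nr  += r[c] * r[c];
      nq  += q[c] * q[c];
      l1  += Math.abs(r[c] - q[c]);
      l2  += (r[c] - q[c]) * (r[c] - q[c]);
    }
    switch (measure) {
      case "l1":        return l1;
      case "l2":        return Math.sqrt(l2);
      case "cosine":    return dot / Math.sqrt(nr * nq);
      case "cosine_sq": return (dot * dot) / (nr * nq);
      default: throw new IllegalArgumentException(measure);
    }
  }
  public static void main(String[] args) {
    double[] a = {1, 0}, b = {0, 1};
    System.out.println(distance(a, b, "l2"));     // sqrt(2)
    System.out.println(distance(a, b, "cosine")); // 0.0 (orthogonal rows)
  }
}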
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstHist.java
|
package water.rapids.ast.prims.advmath;
import sun.misc.Unsafe;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.nbhm.UtilUnsafe;
import water.rapids.ast.prims.reducers.AstMad;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.ast.params.AstNum;
import water.rapids.ast.params.AstNumList;
import water.rapids.ast.params.AstStr;
import water.util.ArrayUtils;
public class AstHist extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary", "breaks"};
}
@Override
public int nargs() {
return 1 + 2;
} // (hist x breaks)
@Override
public String str() {
return "hist";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
// stack is [ ..., ary, breaks]
// handle the breaks
Frame fr2;
Frame f = stk.track(asts[1].exec(env)).getFrame();
if (f.numCols() != 1) throw new IllegalArgumentException("Hist only applies to single numeric columns.");
Vec vec = f.anyVec();
if (!vec.isNumeric()) throw new IllegalArgumentException("Hist only applies to single numeric columns.");
//TODO Add case when vec is a constant numeric
if(vec.isConst()) throw new IllegalArgumentException("Hist does not apply to constant numeric columns.");
AstRoot a = asts[2];
String algo = null;
int numBreaks = -1;
double[] breaks = null;
if (a instanceof AstStr) algo = a.str().toLowerCase();
else if (a instanceof AstNumList) breaks = ((AstNumList) a).expand();
else if (a instanceof AstNum) numBreaks = (int) a.exec(env).getNum();
AstHist.HistTask t;
double h;
double x1 = vec.max();
double x0 = vec.min();
if (breaks != null) t = new AstHist.HistTask(breaks, -1, -1/*ignored if _h==-1*/).doAll(vec);
else if (algo != null) {
switch (algo) {
case "sturges":
numBreaks = sturges(vec);
h = (x1 - x0) / numBreaks;
break;
case "rice":
numBreaks = rice(vec);
h = (x1 - x0) / numBreaks;
break;
case "sqrt":
numBreaks = sqrt(vec);
h = (x1 - x0) / numBreaks;
break;
case "doane":
numBreaks = doane(vec);
h = (x1 - x0) / numBreaks;
break;
case "scott":
h = scotts_h(vec);
numBreaks = scott(vec, h);
break; // special bin width computation
case "fd":
h = fds_h(vec);
numBreaks = fd(vec, h);
break; // special bin width computation
default:
numBreaks = sturges(vec);
h = (x1 - x0) / numBreaks; // just do sturges even if junk passed in
}
t = new AstHist.HistTask(computeCuts(vec, numBreaks), h, x0).doAll(vec);
} else {
h = (x1 - x0) / numBreaks;
t = new AstHist.HistTask(computeCuts(vec, numBreaks), h, x0).doAll(vec);
}
    // Build a new result frame here: [breaks, counts, mids]
final double[] brks = t._breaks;
final long[] cnts = t._counts;
final double[] mids_true = t._mids;
final double[] mids = new double[t._breaks.length - 1];
for (int i = 1; i < brks.length; ++i) mids[i - 1] = .5 * (t._breaks[i - 1] + t._breaks[i]);
Vec layoutVec = Vec.makeZero(brks.length);
fr2 = new MRTask() {
@Override
public void map(Chunk[] c, NewChunk[] nc) {
int start = (int) c[0].start();
for (int i = 0; i < c[0]._len; ++i) {
nc[0].addNum(brks[i + start]);
if (i == 0) {
nc[1].addNA();
nc[2].addNA();
nc[3].addNA();
} else {
nc[1].addNum(cnts[(i - 1) + start]);
nc[2].addNum(mids_true[(i - 1) + start]);
nc[3].addNum(mids[(i - 1) + start]);
}
}
}
}.doAll(4, Vec.T_NUM, new Frame(layoutVec)).outputFrame(null, new String[]{"breaks", "counts", "mids_true", "mids"}, null);
layoutVec.remove();
return new ValFrame(fr2);
}
public static int sturges(Vec v) {
return (int) Math.ceil(1 + log2(v.length()));
}
public static int rice(Vec v) {
return (int) Math.ceil(2 * Math.pow(v.length(), 1. / 3.));
}
public static int sqrt(Vec v) {
return (int) Math.sqrt(v.length());
}
public static int doane(Vec v) {
return (int) (1 + log2(v.length()) + log2(1 + (Math.abs(third_moment(v)) / sigma_g1(v))));
}
public static int scott(Vec v, double h) {
return (int) Math.ceil((v.max() - v.min()) / h);
}
public static int fd(Vec v, double h) {
return (int) Math.ceil((v.max() - v.min()) / h);
} // Freedman-Diaconis slightly modified to use MAD instead of IQR
public static double fds_h(Vec v) {
return 2 * AstMad.mad(new Frame(v), null, 1.4826) * Math.pow(v.length(), -1. / 3.);
}
public static double scotts_h(Vec v) {
return 3.5 * Math.sqrt(AstVariance.getVar(v)) / (Math.pow(v.length(), 1. / 3.));
}
public static double log2(double numerator) {
return (Math.log(numerator)) / Math.log(2) + 1e-10;
}
public static double sigma_g1(Vec v) {
    return Math.sqrt((6.0 * (v.length() - 2)) / ((v.length() + 1.0) * (v.length() + 3))); // double arithmetic; plain long division would truncate to 0
}
public static double third_moment(Vec v) {
final double mean = v.mean();
AstHist.ThirdMomTask t = new AstHist.ThirdMomTask(mean).doAll(v);
double m2 = t._ss / v.length();
double m3 = t._sc / v.length();
return m3 / Math.pow(m2, 1.5);
}
public static class ThirdMomTask extends MRTask<AstHist.ThirdMomTask> {
double _ss;
double _sc;
final double _mean;
ThirdMomTask(double mean) {
_mean = mean;
}
@Override
public void setupLocal() {
_ss = 0;
_sc = 0;
}
@Override
public void map(Chunk c) {
for (int i = 0; i < c._len; ++i) {
if (!c.isNA(i)) {
double d = c.atd(i) - _mean;
double d2 = d * d;
_ss += d2;
_sc += d2 * d;
}
}
}
@Override
public void reduce(AstHist.ThirdMomTask t) {
_ss += t._ss;
_sc += t._sc;
}
}
public static double fourth_moment(Vec v) {
final double mean = v.mean();
AstHist.FourthMomTask t = new AstHist.FourthMomTask(mean).doAll(v);
double m2 = t._ss / v.length();
double m4 = t._sc / v.length();
return m4 / Math.pow(m2, 2.0);
}
public static class FourthMomTask extends MRTask<AstHist.FourthMomTask> {
double _ss;
double _sc;
final double _mean;
FourthMomTask(double mean) {
_mean = mean;
}
@Override
public void setupLocal() {
_ss = 0;
_sc = 0;
}
@Override
public void map(Chunk c) {
for (int i = 0; i < c._len; ++i) {
if (!c.isNA(i)) {
double d = c.atd(i) - _mean;
double d2 = d * d;
_ss += d2;
_sc += d2 * d * d;
}
}
}
@Override
public void reduce(AstHist.FourthMomTask t) {
_ss += t._ss;
_sc += t._sc;
}
}
public double[] computeCuts(Vec v, int numBreaks) {
if (numBreaks <= 0) throw new IllegalArgumentException("breaks must be a positive number");
// just make numBreaks cuts equidistant from each other spanning range of [v.min, v.max]
double min;
double w = (v.max() - (min = v.min())) / numBreaks;
double[] res = new double[numBreaks];
for (int i = 0; i < numBreaks; ++i) res[i] = min + w * (i + 1);
return res;
}
public static class HistTask extends MRTask<AstHist.HistTask> {
final private double _h; // bin width
final private double _x0; // far left bin edge
final private double[] _min; // min for each bin, updated atomically
final private double[] _max; // max for each bin, updated atomically
// unsafe crap for mins/maxs of bins
private static final Unsafe U = UtilUnsafe.getUnsafe();
// double[] offset and scale
private static final int _dB = U.arrayBaseOffset(double[].class);
private static final int _dS = U.arrayIndexScale(double[].class);
private static long doubleRawIdx(int i) {
return _dB + _dS * i;
}
// long[] offset and scale
private static final int _8B = U.arrayBaseOffset(long[].class);
private static final int _8S = U.arrayIndexScale(long[].class);
private static long longRawIdx(int i) {
return _8B + _8S * i;
}
// out
private final double[] _breaks;
private final long[] _counts;
private final double[] _mids;
HistTask(double[] cuts, double h, double x0) {
_breaks = cuts;
_min = new double[_breaks.length - 1];
_max = new double[_breaks.length - 1];
_counts = new long[_breaks.length - 1];
_mids = new double[_breaks.length - 1];
_h = h;
_x0 = x0;
}
@Override
public void map(Chunk c) {
// if _h==-1, then don't have fixed bin widths... must loop over bins to obtain the correct bin #
for (int i = 0; i < c._len; ++i) {
int x = 1;
if (c.isNA(i)) continue;
double r = c.atd(i);
if (_h == -1) {
for (; x < _counts.length; x++)
if (r <= _breaks[x]) break;
x--; // back into the bin where count should go
} else
x = Math.min(_counts.length - 1, (int) Math.floor((r - _x0) / _h)); // Pick the bin floor( (x - x0) / h ) or ceil( (x-x0)/h - 1 ), choose the first since fewer ops
bumpCount(x);
setMinMax(Double.doubleToRawLongBits(r), x);
}
}
@Override
public void reduce(AstHist.HistTask t) {
if (_counts != t._counts) ArrayUtils.add(_counts, t._counts);
for (int i = 0; i < _mids.length; ++i) {
_min[i] = t._min[i] < _min[i] ? t._min[i] : _min[i];
_max[i] = t._max[i] > _max[i] ? t._max[i] : _max[i];
}
}
@Override
public void postGlobal() {
for (int i = 0; i < _mids.length; ++i) _mids[i] = 0.5 * (_max[i] + _min[i]);
}
private void bumpCount(int x) {
long o = _counts[x];
while (!U.compareAndSwapLong(_counts, longRawIdx(x), o, o + 1))
o = _counts[x];
}
private void setMinMax(long v, int x) {
double o = _min[x];
double vv = Double.longBitsToDouble(v);
while (vv < o && U.compareAndSwapLong(_min, doubleRawIdx(x), Double.doubleToRawLongBits(o), v))
o = _min[x];
setMax(v, x);
}
private void setMax(long v, int x) {
double o = _max[x];
double vv = Double.longBitsToDouble(v);
      while (vv > o && U.compareAndSwapLong(_max, doubleRawIdx(x), Double.doubleToRawLongBits(o), v)) // CAS on _max, not _min
o = _max[x];
}
}
}
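// --- Illustrative sketch (editor's addition, not part of the original source). ---
// Fixed-width binning as in HistTask.map(): with k breaks from Sturges' rule the
// bin width is h = (max - min) / k and a value r lands in bin floor((r - min) / h),
// clamped to the last bin so r == max is not dropped. Demo names are hypothetical.
class HistBinningDemo {
  public static void main(String[] args) {
    double[] data = {1, 2, 2, 3, 5, 8, 13};
    double min = 1, max = 13;
    int k = (int) Math.ceil(1 + Math.log(data.length) / Math.log(2)); // Sturges: 4 bins
    double h = (max - min) / k;
    long[] counts = new long[k];
    for (double r : data)
      counts[Math.min(k - 1, (int) Math.floor((r - min) / h))]++;    // clamp the max
    System.out.println(java.util.Arrays.toString(counts)); // [4, 1, 1, 1]
  }
}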
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstImpute.java
|
package water.rapids.ast.prims.advmath;
import hex.quantile.QuantileModel;
import water.Freezable;
import water.H2O;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.*;
import water.rapids.ast.AstFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.ast.params.AstNum;
import water.rapids.ast.params.AstNumList;
import water.rapids.ast.params.AstStr;
import water.rapids.ast.params.AstStrList;
import water.rapids.ast.prims.mungers.AstGroup;
import water.rapids.ast.prims.reducers.AstMean;
import water.rapids.ast.prims.reducers.AstMedian;
import water.rapids.vals.ValFrame;
import water.rapids.vals.ValNums;
import water.util.ArrayUtils;
import water.util.IcedDouble;
import water.util.IcedHashMap;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
/**
* Impute columns of a data frame in place.
* <p/>
* This impute can impute whole Frames or a specific Vec within the Frame. Imputation
* will be by the default mean (for numeric columns) or mode (for categorical columns).
* String, date, and UUID columns are never imputed.
* <p/>
* When a Vec is specified to be imputed, it can alternatively be imputed by grouping on
* some other columns in the Frame. If groupByCols is specified, but the user does not
* supply a column to be imputed then an IllegalArgumentException will be raised. Further,
* if the user specifies the column to impute within the groupByCols, exceptions will be
* raised.
* <p/>
* The methods that a user may impute by are as follows:
* - mean: Vec.T_NUM
* - median: Vec.T_NUM
* - mode: Vec.T_CAT
* - bfill: Any valid Vec type
* - ffill: Any valid Vec type
* <p/>
* All methods of imputation are done in place! The first three methods (mean, median,
* mode) are self-explanatory. The bfill and ffill methods will attempt to fill NAs using
* adjacent cell value (either before or forward):
* <p/>
* Vec = [ bfill_value, NA, ffill_value]
* | ^^ |
* -> || <-
* impute
* <p/>
* If the impute method is median then the combineMethod can be one of the Enum variants
* of QuantileModel.CombineMethod = { INTERPOLATE, AVERAGE, LOW, HIGH }. The Enum
* specifies how to combine quantiles on even sample sizes. This parameter is ignored in
* all other cases.
* <p/>
* Finally, the groupByFrame can be used to impute a column with a pre-computed groupby
* result.
* <p/>
* Other notes:
* <p/>
* If col is -1, then the entire Frame will be imputed using mean/mode where appropriate.
*/
public class AstImpute extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary", "col", "method", "combineMethod", "groupByCols", "groupByFrame", "values"};
}
@Override
public String str() {
return "h2o.impute";
}
@Override
public int nargs() {
return 1 + 7;
} // (h2o.impute data col method combine_method groupby groupByFrame values)
@Override
public Val apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
// Argument parsing and sanity checking
// Whole frame being imputed
Frame fr = stk.track(asts[1].exec(env)).getFrame();
// Column within frame being imputed
final int col = (int) asts[2].exec(env).getNum();
if (col >= fr.numCols())
throw new IllegalArgumentException("Column not -1 or in range 0 to " + fr.numCols());
final boolean doAllVecs = col == -1;
final Vec vec = doAllVecs ? null : fr.vec(col);
// Technique used for imputation
AstRoot method = null;
boolean ffill0 = false, bfill0 = false;
switch (asts[3].exec(env).getStr().toUpperCase()) {
case "MEAN":
method = new AstMean();
break;
case "MEDIAN":
method = new AstMedian();
break;
case "MODE":
method = new AstMode();
break;
case "FFILL":
ffill0 = true;
break;
case "BFILL":
bfill0 = true;
break;
default:
throw new IllegalArgumentException("Method must be one of mean, median or mode");
}
// Only for median, how is the median computed on even sample sizes?
QuantileModel.CombineMethod combine = QuantileModel.CombineMethod.valueOf(asts[4].exec(env).getStr().toUpperCase());
// Group-by columns. Empty is allowed, and perfectly normal.
AstRoot ast = asts[5];
AstNumList by2;
if (ast instanceof AstNumList) by2 = (AstNumList) ast;
else if (ast instanceof AstNum) by2 = new AstNumList(((AstNum) ast).getNum());
else if (ast instanceof AstStrList) {
String[] names = ((AstStrList) ast)._strs;
double[] list = new double[names.length];
int i = 0;
for (String name : ((AstStrList) ast)._strs)
list[i++] = fr.find(name);
Arrays.sort(list);
by2 = new AstNumList(list);
} else throw new IllegalArgumentException("Requires a number-list, but found a " + ast.getClass());
Frame groupByFrame = asts[6].str().equals("_") ? null : stk.track(asts[6].exec(env)).getFrame();
AstRoot vals = asts[7];
AstNumList values;
if (vals instanceof AstNumList) values = (AstNumList) vals;
else if (vals instanceof AstNum) values = new AstNumList(((AstNum) vals).getNum());
else values = null;
boolean doGrpBy = !by2.isEmpty() || groupByFrame != null;
// Compute the imputed value per-group. Empty groups are allowed and OK.
IcedHashMap<AstGroup.G, Freezable[]> group_impute_map;
if (!doGrpBy) { // Skip the grouping work
if (ffill0 || bfill0) { // do a forward/backward fill on the NA
        // TODO: requires chk.previousNonNA and chk.nextNonNA style methods (which may go across chk boundaries)
final boolean ffill = ffill0;
final boolean bfill = bfill0;
throw H2O.unimpl("No ffill or bfill imputation supported");
// new MRTask() {
// @Override public void map(Chunk[] cs) {
// int len=cs[0]._len; // end of this chk
// long start=cs[0].start(); // absolute beginning of chk s.t. start-1 bleeds into previous chk
// long absEnd = start+len; // absolute end of the chk s.t. absEnd+1 bleeds into next chk
// for(int c=0;c<cs.length;++c )
// for(int r=0;r<cs[0]._len;++r ) {
// if( cs[c].isNA(r) ) {
// if( r > 0 && r < len-1 ) {
// cs[c].set(r,ffill?)
// }
// }
// }
// }
// }.doAll(doAllVecs?fr:new Frame(vec));
// return new ValNum(Double.NaN);
} else {
final double[] res = values == null ? new double[fr.numCols()] : values.expand();
        if (values == null) { // fill res when no values were supplied by the user, the common case
if (doAllVecs) {
for (int i = 0; i < res.length; ++i)
if (fr.vec(i).isNumeric() || fr.vec(i).isCategorical())
res[i] = fr.vec(i).isNumeric() ? fr.vec(i).mean() : ArrayUtils.maxIndex(fr.vec(i).bins());
} else {
Arrays.fill(res, Double.NaN);
if (method instanceof AstMean) res[col] = vec.mean();
if (method instanceof AstMedian)
res[col] = AstMedian.median(new Frame(vec), combine);
if (method instanceof AstMode) res[col] = AstMode.mode(vec);
}
}
new MRTask() {
@Override
public void map(Chunk[] cs) {
int len = cs[0]._len;
// run down each chk
for (int c = 0; c < cs.length; ++c)
if (!Double.isNaN(res[c]))
for (int row = 0; row < len; ++row)
if (cs[c].isNA(row))
cs[c].set(row, res[c]);
}
}.doAll(fr);
return new ValNums(res);
}
} else {
if (col >= fr.numCols())
throw new IllegalArgumentException("Column not -1 or in range 0 to " + fr.numCols());
Frame imputes = groupByFrame;
if (imputes == null) {
// Build and run a GroupBy command
AstGroup ast_grp = new AstGroup();
// simple case where user specified a column... col == -1 means do all columns
if (doAllVecs) {
AstRoot[] aggs = new AstRoot[(int) (3 + 3 * (fr.numCols() - by2.cnt()))];
aggs[0] = ast_grp;
aggs[1] = new AstFrame(fr);
aggs[2] = by2;
int c = 3;
for (int i = 0; i < fr.numCols(); ++i) {
if (!by2.has(i) && (fr.vec(i).isCategorical() || fr.vec(i).isNumeric())) {
aggs[c] = fr.vec(i).isNumeric() ? new AstMean() : new AstMode();
aggs[c + 1] = new AstNumList(i, i + 1);
aggs[c + 2] = new AstStr("rm");
c += 3;
}
}
imputes = ast_grp.apply(env, stk, aggs).getFrame();
} else
imputes = ast_grp.apply(env, stk, new AstRoot[]{ast_grp, new AstFrame(fr), by2, /**/method, new AstNumList(col, col + 1), new AstStr("rm") /**/}).getFrame();
}
if (by2.isEmpty() && imputes.numCols() > 2) // >2 makes it ambiguous which columns are groupby cols and which are aggs, throw IAE
throw new IllegalArgumentException("Ambiguous group-by frame. Supply the `by` columns to proceed.");
final int[] bycols0 = ArrayUtils.seq(0, Math.max((int) by2.cnt(), 1 /* imputes.numCols()-1 */));
group_impute_map = new Gather(by2.expand4(), bycols0, fr.numCols(), col).doAll(imputes)._group_impute_map;
// Now walk over the data, replace NAs with the imputed results
final IcedHashMap<AstGroup.G, Freezable[]> final_group_impute_map = group_impute_map;
if (by2.isEmpty()) {
int[] byCols = new int[imputes.numCols() - 1];
for (int i = 0; i < byCols.length; ++i)
byCols[i] = fr.find(imputes.name(i));
by2 = new AstNumList(byCols);
}
final int[] bycols = by2.expand4();
new MRTask() {
@Override
public void map(Chunk cs[]) {
Set<Integer> _bycolz = new HashSet<>();
for (int b : bycols) _bycolz.add(b);
AstGroup.G g = new AstGroup.G(bycols.length, null);
for (int row = 0; row < cs[0]._len; row++)
for (int c = 0; c < cs.length; ++c)
if (!_bycolz.contains(c))
if (cs[c].isNA(row))
cs[c].set(row, ((IcedDouble) final_group_impute_map.get(g.fill(row, cs, bycols))[c])._val);
}
}.doAll(fr);
return new ValFrame(imputes);
}
}
// flatten the GroupBy result Frame back into a IcedHashMap
private static class Gather extends MRTask<Gather> {
private final int _imputedCol;
private final int _ncol;
private final int[] _byCols0; // actual group-by indexes
private final int[] _byCols; // index into the grouped-by frame result
private IcedHashMap<AstGroup.G, Freezable[]> _group_impute_map;
private transient Set<Integer> _localbyColzSet;
Gather(int[] byCols0, int[] byCols, int ncol, int imputeCol) {
_byCols = byCols;
_byCols0 = byCols0;
_ncol = ncol;
_imputedCol = imputeCol;
}
@Override
public void setupLocal() {
_localbyColzSet = new HashSet<>();
for (int by : _byCols0) _localbyColzSet.add(by);
}
@Override
public void map(Chunk cs[]) {
_group_impute_map = new IcedHashMap<>();
for (int row = 0; row < cs[0]._len; ++row) {
IcedDouble[] imputes = new IcedDouble[_ncol];
for (int c = 0, z = _byCols.length; c < imputes.length; ++c, ++z) { // z used to skip over the gby cols into the columns containing the aggregated columns
if (_imputedCol != -1)
imputes[c] = c == _imputedCol ? new IcedDouble(cs[cs.length - 1].atd(row)) : new IcedDouble(Double.NaN);
else imputes[c] = _localbyColzSet.contains(c) ? new IcedDouble(Double.NaN) : new IcedDouble(cs[z].atd(row));
}
_group_impute_map.put(new AstGroup.G(_byCols.length, null).fill(row, cs, _byCols), imputes);
}
}
@Override
public void reduce(Gather mrt) {
_group_impute_map.putAll(mrt._group_impute_map);
}
}
}
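// --- Illustrative sketch (editor's addition, not part of the original source). ---
// The non-grouped path of AstImpute above precomputes one fill value per column
// (mean/median/mode) and then overwrites NAs in place. The same idea on a plain
// array, with hypothetical demo names.
class MeanImputeDemo {
  public static void main(String[] args) {
    double[] col = {1.0, Double.NaN, 3.0, Double.NaN, 5.0};
    double sum = 0; int n = 0;
    for (double v : col)
      if (!Double.isNaN(v)) { sum += v; n++; } // mean over non-NA cells only
    double mean = sum / n;                     // 3.0
    for (int i = 0; i < col.length; i++)
      if (Double.isNaN(col[i])) col[i] = mean; // fill NAs in place
    System.out.println(java.util.Arrays.toString(col)); // [1.0, 3.0, 3.0, 3.0, 5.0]
  }
}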
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstKFold.java
|
package water.rapids.ast.prims.advmath;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.util.VecUtils;
import java.util.Random;
import static water.util.RandomUtils.getRNG;
public class AstKFold extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary", "nfolds", "seed"};
}
@Override
public int nargs() {
return 1 + 3;
} // (kfold_column x nfolds seed)
@Override
public String str() {
return "kfold_column";
}
public static Vec kfoldColumn(Vec v, final int nfolds, final long seed) {
new MRTask() {
@Override
public void map(Chunk c) {
long start = c.start();
for (int i = 0; i < c._len; ++i) {
int fold = Math.abs(getRNG(start + seed + i).nextInt()) % nfolds;
c.set(i, fold);
}
}
}.doAll(v);
return v;
}
public static Vec moduloKfoldColumn(Vec v, final int nfolds) {
new MRTask() {
@Override
public void map(Chunk c) {
long start = c.start();
for (int i = 0; i < c._len; ++i)
c.set(i, (int) ((start + i) % nfolds));
}
}.doAll(v);
return v;
}
public static Vec stratifiedKFoldColumn(Vec y, final int nfolds, final long seed) {
// for each class, generate a fold column (never materialized)
// therefore, have a seed per class to be used by the map call
if (!(y.isCategorical() || (y.isNumeric() && y.isInt())))
throw new IllegalArgumentException("stratification only applies to integer and categorical columns. Got: " + y.get_type_str());
final long[] classes = new VecUtils.CollectIntegerDomain().doAll(y).domain();
final int nClass = y.isNumeric() ? classes.length : y.domain().length;
final long[] seeds = new long[nClass]; // seed for each regular fold column (one per class)
for (int i = 0; i < nClass; ++i)
seeds[i] = getRNG(seed + i).nextLong();
return new MRTask() {
private int getFoldId(long absoluteRow, long seed) {
return Math.abs(getRNG(absoluteRow + seed).nextInt()) % nfolds;
}
// dress up the foldColumn (y[1]) as follows:
// 1. For each testFold and each classLabel loop over the response column (y[0])
// 2. If the classLabel is the current response and the testFold is the foldId
// for the current row and classLabel, then set the foldColumn to testFold
//
// How this balances labels per fold:
// Imagine that a KFold column was generated for each class. Observe that this
// makes the outer loop a way of selecting only the test rows from each fold
// (i.e., the holdout rows). Each fold is balanced sequentially in this way
// since y[1] is only updated if the current row happens to be a holdout row
// for the given classLabel.
//
// Next observe that looping over each classLabel filters down each KFold
// so that it contains labels for just THAT class. This is how the balancing
// can be made so that it is independent of the chunk distribution and the
// per chunk class distribution.
//
// Downside is this performs nfolds*nClass passes over each Chunk. For
// "reasonable" classification problems, this could be 100 passes per Chunk.
@Override
public void map(Chunk[] y) {
long start = y[0].start();
for (int testFold = 0; testFold < nfolds; ++testFold) {
for (int classLabel = 0; classLabel < nClass; ++classLabel) {
for (int row = 0; row < y[0]._len; ++row) {
// missing response gets spread around
if (y[0].isNA(row)) {
if ((start + row) % nfolds == testFold)
y[1].set(row, testFold);
} else {
if (y[0].at8(row) == (classes == null ? classLabel : classes[classLabel])) {
if (testFold == getFoldId(start + row, seeds[classLabel]))
y[1].set(row, testFold);
}
}
}
}
}
}
}.doAll(new Frame(y, y.makeZero()))._fr.vec(1);
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Vec foldVec = stk.track(asts[1].exec(env)).getFrame().anyVec().makeZero();
int nfolds = (int) asts[2].exec(env).getNum();
long seed = (long) asts[3].exec(env).getNum();
return new ValFrame(new Frame(kfoldColumn(foldVec, nfolds, seed == -1 ? new Random().nextLong() : seed)));
}
}
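// --- Illustrative sketch (editor's addition, not part of the original source). ---
// The two simple fold assignments above: modulo folds cycle 0..nfolds-1 by absolute
// row number, while seeded folds draw from an RNG keyed on (row + seed), as in
// kfoldColumn(). Reuses water.util.RandomUtils.getRNG; demo names are hypothetical.
class KFoldAssignDemo {
  public static void main(String[] args) {
    int nfolds = 3;
    long seed = 42;
    for (long row = 0; row < 6; row++) {
      int moduloFold = (int) (row % nfolds); // deterministic round-robin
      int seededFold = Math.abs(water.util.RandomUtils.getRNG(row + seed).nextInt()) % nfolds;
      System.out.println("row " + row + ": modulo=" + moduloFold + " seeded=" + seededFold);
    }
  }
}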
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstKurtosis.java
|
package water.rapids.ast.prims.advmath;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValNums;
public class AstKurtosis extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary", "na_rm"};
}
@Override
public String str() {return "kurtosis";}
@Override
public int nargs() {return 1 + 2;} // (kurtosis ary na.rm)
@Override
public ValNums apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Frame fr = stk.track(asts[1].exec(env)).getFrame();
boolean narm = asts[2].exec(env).getNum() == 1;
double[] ds = new double[fr.numCols()];
Vec[] vecs = fr.vecs();
for (int i = 0; i < fr.numCols(); i++)
ds[i] = kurtosis(vecs[i], narm);
return new ValNums(ds);
}
public static double kurtosis(Vec v, boolean narm) {
return !v.isNumeric() || v.length() == 0 || (!narm && v.naCnt() > 0) ?
Double.NaN : AstHist.fourth_moment(v);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstMode.java
|
package water.rapids.ast.prims.advmath;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.vals.ValNum;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.util.ArrayUtils;
import water.util.MRUtils;
/**
* Find the mode: the most popular element.
*/
public class AstMode extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary"};
}
@Override
public String str() {
return "mode";
}
@Override
public int nargs() {
return 1 + 1;
} // (mode ary)
@Override
public ValNum apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Frame fr = stk.track(asts[1].exec(env)).getFrame();
if (fr.numCols() != 1 || !fr.anyVec().isCategorical())
throw new IllegalArgumentException("mode only works on a single categorical column");
return new ValNum(mode(fr.anyVec()));
}
public static int mode(Vec v) {
if (v.isNumeric()) {
MRUtils.Dist t = new MRUtils.Dist().doAll(v);
int mode = ArrayUtils.maxIndex(t.dist());
return (int) t.keys()[mode];
}
double[] dist = new MRUtils.ClassDist(v).doAll(v).dist();
return ArrayUtils.maxIndex(dist);
}
}
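// --- Illustrative sketch (editor's addition, not part of the original source). ---
// mode() above is an argmax over per-level counts; the same idea on a plain array
// of categorical level indices, with hypothetical demo names.
class ModeDemo {
  public static void main(String[] args) {
    int[] levels = {0, 1, 1, 2, 1, 0}; // categorical column as level indices
    int[] hist = new int[3];
    for (int v : levels) hist[v]++;    // per-level counts
    int mode = 0;
    for (int i = 1; i < hist.length; i++)
      if (hist[i] > hist[mode]) mode = i; // argmax of the counts
    System.out.println(mode); // 1 (appears three times)
  }
}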
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstModuloKFold.java
|
package water.rapids.ast.prims.advmath;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
public class AstModuloKFold extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary", "nfolds"};
}
@Override
public int nargs() {
return 1 + 2;
} // (modulo_kfold_column x nfolds)
@Override
public String str() {
return "modulo_kfold_column";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Vec foldVec = stk.track(asts[1].exec(env)).getFrame().anyVec().makeZero();
int nfolds = (int) asts[2].exec(env).getNum();
return new ValFrame(new Frame(AstKFold.moduloKfoldColumn(foldVec, nfolds)));
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstQtile.java
|
package water.rapids.ast.prims.advmath;
import hex.quantile.Quantile;
import hex.quantile.QuantileModel;
import water.DKV;
import water.Job;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.ast.params.AstNumList;
/**
* Quantiles:
 * (quantile %frame [number_list_probs] "string_interpolation_type")
*/
public class AstQtile extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary", "probs", "interpolationMethod", "weights_column"};
}
@Override
public int nargs() {
return 1 + 4;
}
@Override
public String str() {
return "quantile";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
QuantileModel.QuantileParameters parms = new QuantileModel.QuantileParameters();
Frame fr = stk.track(asts[1].exec(env)).getFrame();
Frame fr_wkey = new Frame(fr); // Force a bogus Key for Quantiles ModelBuilder
DKV.put(fr_wkey);
parms._train = fr_wkey._key;
parms._probs = ((AstNumList) asts[2]).expand();
for (double d : parms._probs)
if (d < 0 || d > 1) throw new IllegalArgumentException("Probability must be between 0 and 1: " + d);
String inter = asts[3].exec(env).getStr();
parms._combine_method = QuantileModel.CombineMethod.valueOf(inter.toUpperCase());
parms._weights_column = asts[4].str().equals("_") ? null : asts[4].str();
// Compute Quantiles
Job j = new Quantile(parms).trainModel();
QuantileModel q = (QuantileModel) j.get();
DKV.remove(j._key);
// Remove bogus Key
DKV.remove(fr_wkey._key);
// Reshape all outputs as a Frame, with probs in col 0 and the
// quantiles in cols 1 thru fr.numCols() - except the optional weights vec
int ncols = fr.numCols();
if (parms._weights_column != null) ncols--;
Vec[] vecs = new Vec[1 /*1 more for the probs themselves*/ + ncols];
String[] names = new String[vecs.length];
vecs[0] = Vec.makeCon(null, parms._probs);
names[0] = "Probs";
int w = 0;
for (int i = 0; i < vecs.length - 1; ++i) {
if (fr._names[i].equals(parms._weights_column)) w = 1;
vecs[i + 1] = Vec.makeCon(null, q._output._quantiles[i]);
names[i + 1] = fr._names[w + i] + "Quantiles";
}
q.delete();
return new ValFrame(new Frame(names, vecs));
}
}
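// --- Illustrative sketch (editor's addition, not part of the original source). ---
// Not H2O's Quantile builder: a minimal linear-interpolation quantile on an
// already-sorted array, just to show what a prob in [0,1] maps to. The combine
// methods (INTERPOLATE/AVERAGE/LOW/HIGH) only differ when the target index is
// fractional. Demo names are hypothetical.
class QuantileSketchDemo {
  static double quantile(double[] sorted, double prob) {
    double pos = prob * (sorted.length - 1); // fractional index into the sorted data
    int lo = (int) Math.floor(pos), hi = (int) Math.ceil(pos);
    double frac = pos - lo;
    return sorted[lo] * (1 - frac) + sorted[hi] * frac; // INTERPOLATE-style combine
  }
  public static void main(String[] args) {
    double[] x = {1, 2, 3, 4};
    System.out.println(quantile(x, 0.5)); // 2.5, halfway between the two middle values
  }
}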
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstRunif.java
|
package water.rapids.ast.prims.advmath;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import java.util.Random;
public class AstRunif extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary", "seed"};
}
@Override
public int nargs() {
return 1 + 2;
} // (h2o.runif frame seed)
@Override
public String str() {
return "h2o.runif";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Frame fr = stk.track(asts[1].exec(env)).getFrame();
long seed = (long) asts[2].exec(env).getNum();
if (seed == -1) seed = new Random().nextLong();
return new ValFrame(new Frame(new String[]{"rnd"}, new Vec[]{fr.anyVec().makeRand(seed)}));
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstSkewness.java
|
package water.rapids.ast.prims.advmath;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValNums;
public class AstSkewness extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary", "na_rm"};
}
@Override
public String str() {
return "skewness";
}
@Override
public int nargs() {
return 1 + 2;
} // (skewness ary na.rm)
@Override
public ValNums apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Frame fr = stk.track(asts[1].exec(env)).getFrame();
boolean narm = asts[2].exec(env).getNum() == 1;
double[] ds = new double[fr.numCols()];
Vec[] vecs = fr.vecs();
for (int i = 0; i < fr.numCols(); i++)
ds[i] = skewness(vecs[i], narm);
return new ValNums(ds);
}
public static double skewness(Vec v, boolean narm) {
return !v.isNumeric() || v.length() == 0 || (!narm && v.naCnt() > 0) ?
Double.NaN : AstHist.third_moment(v);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstStratifiedKFold.java
|
package water.rapids.ast.prims.advmath;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import java.util.Random;
public class AstStratifiedKFold extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary", "nfolds", "seed"};
}
@Override
public int nargs() {
return 1 + 3;
} // (stratified_kfold_column x nfolds seed)
@Override
public String str() {
return "stratified_kfold_column";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Vec foldVec = stk.track(asts[1].exec(env)).getFrame().anyVec().makeZero();
int nfolds = (int) asts[2].exec(env).getNum();
long seed = (long) asts[3].exec(env).getNum();
return new ValFrame(new Frame(AstKFold.stratifiedKFoldColumn(foldVec, nfolds, seed == -1 ? new Random().nextLong() : seed)));
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstStratifiedSplit.java
|
package water.rapids.ast.prims.advmath;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.util.VecUtils;
import java.util.*;
import static water.util.RandomUtils.getRNG;
public class AstStratifiedSplit extends AstPrimitive {
public static final String OUTPUT_COLUMN_NAME = "test_train_split";
public static final String[] OUTPUT_COLUMN_DOMAIN = new String[]{"train", "test"};
@Override
public String[] args() {
return new String[]{"ary", "test_frac", "seed"};
}
@Override
public int nargs() {
return 1 + 3;
} // (h2o.random_stratified_split y test_frac seed)
@Override
public String str() {
return "h2o.random_stratified_split";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Frame frame = stk.track(asts[1].exec(env)).getFrame();
final double testFrac = asts[2].exec(env).getNum();
long seed = (long) asts[3].exec(env).getNum();
// It is just a single column
if (frame.numCols() != 1)
throw new IllegalArgumentException("Must give a single column to stratify against. Got: " + frame.numCols() + " columns.");
Vec stratifyingColumn = frame.anyVec();
Frame result = new Frame(Key.<Frame>make(),
new String[] {OUTPUT_COLUMN_NAME},
new Vec[] { split(stratifyingColumn, testFrac, seed, OUTPUT_COLUMN_DOMAIN)}
);
return new ValFrame(result);
}
public static Vec split(Vec stratifyingColumn, double splittingFraction, long randomizationSeed, String[] splittingDom) {
checkIfCanStratifyBy(stratifyingColumn);
randomizationSeed = randomizationSeed == -1 ? new Random().nextLong() : randomizationSeed;
// Collect input vector domain
final long[] classes = new VecUtils.CollectIntegerDomain().doAll(stratifyingColumn).domain();
// Number of output classes
final int numClasses = classes.length;
// Make a new column based on input column - this needs to follow layout of input vector!
// Save vector into DKV
Vec outputVec = stratifyingColumn.makeCon(0.0, Vec.T_CAT);
outputVec.setDomain(splittingDom);
DKV.put(outputVec);
// Collect index frame
    // FIXME: This is in fact collecting an inverse index: class -> {row indices}
ClassIdxTask finTask = new ClassIdxTask(numClasses,classes).doAll(stratifyingColumn);
// Loop through each class in the input column
HashSet<Long> usedIdxs = new HashSet<>();
for (int classLabel = 0; classLabel < numClasses; classLabel++) {
// extract frame with index locations of the minority class
// calculate target number of this class to go to test
final LongAry indexAry = finTask._indexes[classLabel];
long tnum = Math.max(Math.round(indexAry.size() * splittingFraction), 1);
HashSet<Long> tmpIdxs = new HashSet<>();
// randomly select the target number of indexes
int generated = 0;
int count = 0;
while (generated < tnum) {
int i = (int) (getRNG(count+ randomizationSeed).nextDouble() * indexAry.size());
        if (tmpIdxs.contains(indexAry.get(i))) { count += 1; continue; }
tmpIdxs.add(indexAry.get(i));
generated += 1;
count += 1;
}
usedIdxs.addAll(tmpIdxs);
}
// Update class assignments
new ClassAssignMRTask(usedIdxs).doAll(outputVec);
return outputVec;
}
static void checkIfCanStratifyBy(Vec vec) {
if (!(vec.isCategorical() || (vec.isNumeric() && vec.isInt())))
throw new IllegalArgumentException("Stratification only applies to integer and categorical columns. Got: " + vec.get_type_str());
if (vec.length() > Integer.MAX_VALUE) {
throw new IllegalArgumentException("Cannot stratified the frame because it is too long: nrows=" + vec.length());
}
}
public static class ClassAssignMRTask extends MRTask<AstStratifiedSplit.ClassAssignMRTask> {
HashSet<Long> _idx;
ClassAssignMRTask(HashSet<Long> idx) {
_idx = idx;
}
@Override
public void map(Chunk ck) {
for (int i = 0; i<ck.len(); i++) {
if (_idx.contains(ck.start() + i)) {
ck.set(i,1.0);
}
}
_idx = null; // Do not send it back
}
}
public static class ClassIdxTask extends MRTask<AstStratifiedSplit.ClassIdxTask> {
LongAry[] _indexes;
private final int _nclasses;
private long[] _classes;
private transient HashMap<Long, Integer> _classMap;
public ClassIdxTask(int nclasses, long[] classes) {
_nclasses = nclasses;
_classes = classes;
}
@Override
protected void setupLocal() {
_classMap = new HashMap<>(2*_classes.length);
for (int i = 0; i < _classes.length; i++) {
_classMap.put(_classes[i], i);
}
}
@Override
public void map(Chunk[] ck) {
_indexes = new LongAry[_nclasses];
for (int i = 0; i < _nclasses; i++) { _indexes[i] = new LongAry(); }
for (int i = 0; i < ck[0].len(); i++) {
long clas = ck[0].at8(i);
Integer clas_idx = _classMap.get(clas);
if (clas_idx != null) _indexes[clas_idx].add(ck[0].start() + i);
}
_classes = null;
}
@Override
public void reduce(AstStratifiedSplit.ClassIdxTask c) {
for (int i = 0; i < c._indexes.length; i++) {
for (int j = 0; j < c._indexes[i].size(); j++) {
_indexes[i].add(c._indexes[i].get(j));
}
}
}
}
public static class LongAry extends Iced<AstStratifiedSplit.LongAry> {
public LongAry(long ...vals){_ary = vals; _sz = vals.length;}
long [] _ary = new long[4];
int _sz;
public void add(long i){
if (_sz == _ary.length)
_ary = Arrays.copyOf(_ary, Math.max(4, _ary.length * 2));
_ary[_sz++] = i;
}
public long get(int i){
if(i >= _sz) throw new ArrayIndexOutOfBoundsException(i);
return _ary[i];
}
public int size(){return _sz;}
public long[] toArray(){return Arrays.copyOf(_ary,_sz);}
public void clear() {_sz = 0;}
}
}
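// --- Illustrative sketch (editor's addition, not part of the original source). ---
// split() above reserves round(classCount * testFrac) rows per class (at least one)
// for the test set, so every class is represented. A minimal version of that
// per-class quota, with hypothetical demo names.
class StratifiedQuotaDemo {
  public static void main(String[] args) {
    int[] labels = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1}; // imbalanced: 8 of class 0, 2 of class 1
    double testFrac = 0.25;
    java.util.Map<Integer, Integer> counts = new java.util.HashMap<>();
    for (int y : labels) counts.merge(y, 1, Integer::sum); // class -> row count
    for (java.util.Map.Entry<Integer, Integer> e : counts.entrySet()) {
      long quota = Math.max(Math.round(e.getValue() * testFrac), 1); // >= 1 per class
      System.out.println("class " + e.getKey() + ": " + quota + " of " + e.getValue() + " rows to test");
    }
    // class 0: 2 of 8 rows to test; class 1: 1 of 2 rows to test
  }
}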
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstTable.java
|
package water.rapids.ast.prims.advmath;
import water.AutoBuffer;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.nbhm.NonBlockingHashMapLong;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.util.ArrayUtils;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicLong;
/**
* Frequency counts of the values (or value combinations) in one or two columns of a frame
* TODO: Define "table" in terms of "groupby"
* TODO: keep dense format for two-column comparison (like in previous version of Rapids)
* (table X Y) ==>
* (groupby (cbind X Y) [X Y] nrow TRUE)
*/
public class AstTable extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"X", "Y", "dense"};
}
@Override
public int nargs() {
return -1;
} // (table X dense) or (table X Y dense)
@Override
public String str() {
return "table";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Frame fr1 = stk.track(asts[1].exec(env)).getFrame();
final boolean dense = asts[asts.length - 1].exec(env).getNum() == 1;
Frame fr2 = asts.length == 4 ? stk.track(asts[2].exec(env)).getFrame() : null;
int ncols = fr1.numCols() + (fr2 == null ? 0 : fr2.numCols());
Vec vec1 = fr1.vec(0);
ValFrame res = fast_table(vec1, ncols, fr1._names[0]);
if (res != null) return res;
if (!(asts.length == 3 || asts.length == 4) || ncols > 2)
throw new IllegalArgumentException("table expects one or two columns");
Vec vec2 = fr1.numCols() == 2 ? fr1.vec(1) : fr2 != null ? fr2.vec(0) : null;
int sz = fr1._names.length + (fr2 != null ? fr2._names.length : 0);
String[] colnames = new String[sz];
int i = 0;
for (String name : fr1._names) colnames[i++] = name;
if (fr2 != null) for (String name : fr2._names) colnames[i++] = name;
return slow_table(vec1, vec2, colnames, dense);
}
// -------------------------------------------------------------------------
// Fast-path for 1 integer column
private ValFrame fast_table(Vec v1, int ncols, String colname) {
if (ncols != 1 || !v1.isInt()) return null;
long spanl = (long) v1.max() - (long) v1.min() + 1;
if (spanl > 1000000) return null; // Cap at decent array size, for performance
// First fast-pass counting
AstTable.FastCnt fastCnt = new AstTable.FastCnt((long) v1.min(), (int) spanl).doAll(v1);
final long cnts[] = fastCnt._cnts;
final long minVal = fastCnt._min;
// Second pass to build the result frame, skipping zeros
Vec dataLayoutVec = Vec.makeCon(0, cnts.length);
Frame fr = new MRTask() {
@Override
public void map(Chunk cs[], NewChunk nc0, NewChunk nc1) {
final Chunk c = cs[0];
for (int i = 0; i < c._len; ++i) {
int idx = (int) (i + c.start());
if (cnts[idx] > 0) {
nc0.addNum(idx + minVal);
nc1.addNum(cnts[idx]);
}
}
}
}.doAll(new byte[]{Vec.T_NUM, Vec.T_NUM}, dataLayoutVec).outputFrame(new String[]{colname, "Count"},
new String[][]{v1.domain(), null});
dataLayoutVec.remove();
return new ValFrame(fr);
}
// Fast-pass for counting unique integers in a span
private static class FastCnt extends MRTask<AstTable.FastCnt> {
final long _min;
final int _span;
long _cnts[];
FastCnt(long min, int span) {
_min = min;
_span = span;
}
@Override
public void map(Chunk c) {
_cnts = new long[_span];
for (int i = 0; i < c._len; i++)
if (!c.isNA(i))
_cnts[(int) (c.at8(i) - _min)]++;
}
@Override
public void reduce(AstTable.FastCnt fc) {
ArrayUtils.add(_cnts, fc._cnts);
}
}
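// --- Editor's note (illustrative sketch, not original code): FastCnt above is
// a dense-array histogram. For integer values v in a small span [min, max],
//   long[] cnts = new long[(int) (max - min + 1)];
//   for (long v : values) cnts[(int) (v - min)]++;
// turns each count into a single array index instead of a hash lookup, which
// is why fast_table bails out once the span exceeds 1,000,000.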
// -------------------------------------------------------------------------
// Count unique combos in 1 or 2 columns, where the values are not integers,
// or cover a very large span.
private ValFrame slow_table(Vec v1, Vec v2, String[] colnames, boolean dense) {
// For simplicity, repeat v1 if v2 is missing; this will end up filling in
// only the diagonal of a 2-D array (in what is otherwise a 1-D array).
// This should be nearly the same cost as a 1-D array, since everything is
// sparsely filled in.
// If this is the 1-column case (all counts on the diagonals), just build a
// 1-d result.
if (v2 == null) {
// Slow-pass group counting, very sparse hashtables. The same vec is passed
// twice here, so every count lands on the diagonal of the (otherwise 2-D)
// structure.
AstTable.SlowCnt sc = new AstTable.SlowCnt().doAll(v1, v1);
// Get the column headers as sorted doubles
double dcols[] = collectDomain(sc._col0s);
Frame res = new Frame();
Vec rowlabel = Vec.makeVec(dcols, Vec.VectorGroup.VG_LEN1.addVec());
rowlabel.setDomain(v1.domain());
res.add(colnames[0], rowlabel);
long cnts[] = new long[dcols.length];
for (int col = 0; col < dcols.length; col++) {
long lkey = Double.doubleToRawLongBits(dcols[col]);
NonBlockingHashMapLong<AtomicLong> colx = sc._col0s.get(lkey);
AtomicLong al = colx.get(lkey);
cnts[col] = al.get();
}
Vec vec = Vec.makeVec(cnts, null, Vec.VectorGroup.VG_LEN1.addVec());
res.add("Counts", vec);
return new ValFrame(res);
}
// 2-d table result.
Frame res = new Frame();
if (!dense) {
// Slow-pass group counting, very sparse hashtables. Note that Vec v2 is
// used as the left-most arg, or OUTER dimension - which will be columns in
// the final result.
AstTable.SlowCnt sc = new AstTable.SlowCnt().doAll(v2, v1);
// Get the column headers as sorted doubles
double dcols[] = collectDomain(sc._col0s);
// Need the row headers as sorted doubles also, but these are scattered
// throughout the nested tables. Fold 'em into 1 table.
NonBlockingHashMapLong<AtomicLong> rows = new NonBlockingHashMapLong<>();
for (NonBlockingHashMapLong.IteratorLong i = iter(sc._col0s); i.hasNext(); )
rows.putAll(sc._col0s.get(i.nextLong()));
double drows[] = collectDomain(rows);
// Now walk the columns one by one, building a Vec per column, building a
// Frame result. Rowlabel for first column.
Vec rowlabel = Vec.makeVec(drows, Vec.VectorGroup.VG_LEN1.addVec());
rowlabel.setDomain(v1.domain());
res.add(colnames[0], rowlabel);
long cnts[] = new long[drows.length];
for (int col = 0; col < dcols.length; col++) {
NonBlockingHashMapLong<AtomicLong> colx = sc._col0s.get(Double.doubleToRawLongBits(dcols[col]));
for (int row = 0; row < drows.length; row++) {
AtomicLong al = colx.get(Double.doubleToRawLongBits(drows[row]));
cnts[row] = al == null ? 0 : al.get();
}
Vec vec = Vec.makeVec(cnts, null, Vec.VectorGroup.VG_LEN1.addVec());
res.add(v2.isCategorical() ? v2.domain()[col] : Double.toString(dcols[col]), vec);
}
} else {
AstTable.SlowCnt sc = new AstTable.SlowCnt().doAll(v1, v2);
double dcols[] = collectDomain(sc._col0s);
NonBlockingHashMapLong<AtomicLong> rows = new NonBlockingHashMapLong<>();
for (NonBlockingHashMapLong.IteratorLong i = iter(sc._col0s); i.hasNext(); )
rows.putAll(sc._col0s.get(i.nextLong()));
double drows[] = collectDomain(rows);
int x = 0;
int sz = 0;
for (NonBlockingHashMapLong.IteratorLong i = iter(sc._col0s); i.hasNext(); ) {
sz += sc._col0s.get(i.nextLong()).size();
}
long cnts[] = new long[sz];
double[] left_categ = new double[sz];
double[] right_categ = new double[sz];
for (double dcol : dcols) {
NonBlockingHashMapLong<AtomicLong> colx = sc._col0s.get(Double.doubleToRawLongBits(dcol));
for (double drow : drows) {
AtomicLong al = colx.get(Double.doubleToRawLongBits(drow));
if (al != null) {
left_categ[x] = dcol;
right_categ[x] = drow;
cnts[x] = al.get();
x++;
}
}
}
Vec vec = Vec.makeVec(left_categ, Vec.VectorGroup.VG_LEN1.addVec());
if (v1.isCategorical()) vec.setDomain(v1.domain());
res.add(colnames[0], vec);
vec = Vec.makeVec(right_categ, Vec.VectorGroup.VG_LEN1.addVec());
if (v2.isCategorical()) vec.setDomain(v2.domain());
res.add(colnames[1], vec);
vec = Vec.makeVec(cnts, null, Vec.VectorGroup.VG_LEN1.addVec());
res.add("Counts", vec);
}
return new ValFrame(res);
}
// Collect the unique longs from this NBHML, convert to doubles and return
// them as a sorted double[].
private static double[] collectDomain(NonBlockingHashMapLong ls) {
int sz = ls.size(); // Uniques
double ds[] = new double[sz];
int x = 0;
for (NonBlockingHashMapLong.IteratorLong i = iter(ls); i.hasNext(); )
ds[x++] = Double.longBitsToDouble(i.nextLong());
Arrays.sort(ds);
return ds;
}
private static NonBlockingHashMapLong.IteratorLong iter(NonBlockingHashMapLong nbhml) {
return (NonBlockingHashMapLong.IteratorLong) nbhml.keySet().iterator();
}
// Implementation is a double-dimension NBHML. Each dimension key is the raw
// long bits of the double column. Bottoms out in an AtomicLong.
private static class SlowCnt extends MRTask<AstTable.SlowCnt> {
transient NonBlockingHashMapLong<NonBlockingHashMapLong<AtomicLong>> _col0s;
@Override
public void setupLocal() {
_col0s = new NonBlockingHashMapLong<>();
}
@Override
public void map(Chunk c0, Chunk c1) {
for (int i = 0; i < c0._len; i++) {
double d0 = c0.atd(i);
if (Double.isNaN(d0)) continue;
long l0 = Double.doubleToRawLongBits(d0);
double d1 = c1.atd(i);
if (Double.isNaN(d1)) continue;
long l1 = Double.doubleToRawLongBits(d1);
// Atomically fetch/create nested NBHM
NonBlockingHashMapLong<AtomicLong> col1s = _col0s.get(l0);
if (col1s == null) { // Fast path: entry usually exists already; only allocate on a miss
col1s = new NonBlockingHashMapLong<>();
NonBlockingHashMapLong<AtomicLong> old = _col0s.putIfAbsent(l0, col1s);
if (old != null) col1s = old; // Lost race, use old value
}
// Atomically fetch/create nested AtomicLong
AtomicLong cnt = col1s.get(l1);
if (cnt == null) { // Fast path: entry usually exists already; only allocate on a miss
cnt = new AtomicLong();
AtomicLong old = col1s.putIfAbsent(l1, cnt);
if (old != null) cnt = old; // Lost race, use old value
}
// Atomically bump counter
cnt.incrementAndGet();
}
}
@Override
public void reduce(AstTable.SlowCnt sc) {
if (_col0s == sc._col0s) return;
throw water.H2O.unimpl();
}
public final AutoBuffer write_impl(AutoBuffer ab) {
if (_col0s == null) return ab.put8(0);
ab.put8(_col0s.size());
for (long col0 : _col0s.keySetLong()) {
ab.put8(col0);
NonBlockingHashMapLong<AtomicLong> col1s = _col0s.get(col0);
ab.put8(col1s.size());
for (long col1 : col1s.keySetLong()) {
ab.put8(col1);
ab.put8(col1s.get(col1).get());
}
}
return ab;
}
public final AstTable.SlowCnt read_impl(AutoBuffer ab) {
long len0 = ab.get8();
if (len0 == 0) return this;
_col0s = new NonBlockingHashMapLong<>();
for (long i = 0; i < len0; i++) {
NonBlockingHashMapLong<AtomicLong> col1s = new NonBlockingHashMapLong<>();
_col0s.put(ab.get8(), col1s);
long len1 = ab.get8();
for (long j = 0; j < len1; j++)
col1s.put(ab.get8(), new AtomicLong(ab.get8()));
}
return this;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (NonBlockingHashMapLong.IteratorLong i = iter(_col0s); i.hasNext(); ) {
long l = i.nextLong();
double d = Double.longBitsToDouble(l);
sb.append(d).append(": {");
NonBlockingHashMapLong<AtomicLong> col1s = _col0s.get(l);
for (NonBlockingHashMapLong.IteratorLong j = iter(col1s); j.hasNext(); ) {
long l2 = j.nextLong();
double d2 = Double.longBitsToDouble(l2);
AtomicLong al = col1s.get(l2);
sb.append(d2).append(": ").append(al.get()).append(", ");
}
sb.append("}\n");
}
return sb.toString();
}
}
}
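// --- Editor's sketch (illustrative, not part of the original source): the
// two-level atomic counting pattern of SlowCnt above, reduced to plain JDK
// concurrent types. Keys are the raw long bits of each double, exactly as in
// SlowCnt; computeIfAbsent plays the role of SlowCnt's putIfAbsent race handling.
class TwoLevelCountSketch {
  final java.util.concurrent.ConcurrentHashMap<Long, java.util.concurrent.ConcurrentHashMap<Long, AtomicLong>> _counts =
      new java.util.concurrent.ConcurrentHashMap<>();
  void bump(double d0, double d1) {
    long l0 = Double.doubleToRawLongBits(d0), l1 = Double.doubleToRawLongBits(d1);
    _counts.computeIfAbsent(l0, k -> new java.util.concurrent.ConcurrentHashMap<>())
           .computeIfAbsent(l1, k -> new AtomicLong())
           .incrementAndGet(); // atomic counter bump, safe across threads
  }
}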
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstTfIdf.java
|
package water.rapids.ast.prims.advmath;
import hex.tfidf.DocumentFrequencyTask;
import hex.tfidf.InverseDocumentFrequencyTask;
import hex.tfidf.TermFrequencyTask;
import hex.tfidf.TfIdfPreprocessorTask;
import org.apache.log4j.Logger;
import water.Key;
import water.MRTask;
import water.Scope;
import water.fvec.*;
import water.rapids.Env;
import water.rapids.Merge;
import water.rapids.Rapids;
import water.rapids.Val;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.ast.prims.string.AstToLower;
import water.rapids.vals.ValFrame;
import water.util.ArrayUtils;
/**
* Primitive AST operation to compute TF-IDF values for given document corpus.<br>
*
* <br>
* <b>Parameters:</b>
* <p><ul>
* <li><code>frame</code> - Input frame containing the data for which TF-IDF values should be computed
* <li><code>doc_id_idx</code> - Index of a column containing document IDs
* <li><code>text_idx</code> - Index of a column containing words/documents (depending on the <code>preprocess</code> parameter)
* <li><code>preprocess</code> - Flag indicating whether input should be pre-processed or not
* <li><code>case_sensitive</code> - Flag indicating whether the input should be treated as case-sensitive data
* </ul><p>
*
* <br>
* <b>Content of the column with index <code>text_idx</code>:</b>
* <p><ul>
* <li>See {@link TfIdfPreprocessorTask} - (default) when pre-processing is enabled
* <li><code>word</code> - when pre-processing is disabled
* </ul><p>
*/
public class AstTfIdf extends AstPrimitive<AstTfIdf> {
/**
* Name to be used for a column containing Inverse Document Frequency values in the output frame of this operation.
*/
private static final String IDF_COL_NAME = "IDF";
/**
* Name to be used for a column containing TF-IDF values in the output frame of this operation.
*/
private static final String TF_IDF_COL_NAME = "TF-IDF";
/**
* Column names to be used for preprocessed frame.
*/
private static final String[] PREPROCESSED_FRAME_COL_NAMES = new String[] { "DocID", "Words" };
/**
* Class logger.
*/
private static Logger log = Logger.getLogger(AstTfIdf.class);
@Override
public int nargs() {
return 1 + 5; // (tf-idf input_frame_name doc_id_col_idx text_col_idx preprocess case_sensitive)
}
@Override
public String[] args() {
return new String[]{ "frame", "doc_id_idx", "text_idx", "preprocess", "case_sensitive"};
}
@Override
public Val apply(Env env, Env.StackHelp stk, AstRoot[] asts) {
Frame inputFrame = stk.track(asts[1].exec(env).getFrame());
final int docIdIdx = (int) asts[2].exec(env).getNum();
final int contentIdx = (int) asts[3].exec(env).getNum();
final boolean preprocess = asts[4].exec(env).getBool();
final boolean caseSensitive = asts[5].exec(env).getBool();
if (inputFrame.anyVec().length() <= 0)
throw new IllegalArgumentException("Empty input frame provided.");
Scope.enter();
Frame tfIdfFrame = null;
try {
// Input checks
int inputFrameColsCnt = inputFrame.numCols();
if (docIdIdx >= inputFrameColsCnt || contentIdx >= inputFrameColsCnt)
throw new IllegalArgumentException("Provided column index is out of bounds. Number of columns in the input frame: "
+ inputFrameColsCnt);
Vec docIdVec = inputFrame.vec(docIdIdx);
Vec contentVec = inputFrame.vec(contentIdx);
if (!docIdVec.isNumeric() || !contentVec.isString())
throw new IllegalArgumentException("Incorrect format of input frame." +
"Following row format is expected: (numeric) documentID, (string) "
+ (preprocess ? "documentContent." : "words. " +
"Got "+docIdVec.get_type_str() + " and " + contentVec.get_type_str()
+" instead."));
// Case sensitivity
if (!caseSensitive) {
Scope.track(inputFrame.replace(contentIdx, AstToLower.toLowerStringCol(inputFrame.vec(contentIdx))));
}
// Pre-processing
Frame wordFrame;
long documentsCnt;
if (preprocess) {
byte[] outputTypes = new byte[]{ Vec.T_NUM, Vec.T_STR };
wordFrame = new TfIdfPreprocessorTask(docIdIdx, contentIdx).doAll(outputTypes, inputFrame)
.outputFrame(PREPROCESSED_FRAME_COL_NAMES, null);
documentsCnt = inputFrame.numRows();
} else {
String[] columnsNames = ArrayUtils.select(inputFrame.names(), new int[]{ docIdIdx, contentIdx });
wordFrame = inputFrame.subframe(columnsNames);
String countDocumentsRapid = "(unique (cols " + asts[1].toString() + " [" + docIdIdx + "]) false)";
documentsCnt = Rapids.exec(countDocumentsRapid).getFrame().anyVec().length();
}
Scope.track(wordFrame);
// TF
Frame tfOutFrame = TermFrequencyTask.compute(wordFrame);
Scope.track(tfOutFrame);
// DF
Frame dfOutFrame = DocumentFrequencyTask.compute(tfOutFrame);
Scope.track(dfOutFrame);
// IDF
InverseDocumentFrequencyTask idf = new InverseDocumentFrequencyTask(documentsCnt);
Vec idfValues = idf.doAll(new byte[]{ Vec.T_NUM }, dfOutFrame.lastVec()).outputFrame().anyVec();
Scope.track(idfValues);
// Replace DF column with IDF column
Vec removedCol = dfOutFrame.remove(dfOutFrame.numCols() - 1);
Scope.track(removedCol);
dfOutFrame.add(IDF_COL_NAME, idfValues);
// Intermediate frame containing both TF and IDF values
Scope.track(tfOutFrame.replace(1, tfOutFrame.vecs()[1].toCategoricalVec()));
Scope.track(dfOutFrame.replace(0, dfOutFrame.vecs()[0].toCategoricalVec()));
int[][] levelMaps = {
CategoricalWrappedVec.computeMap(tfOutFrame.vec(1).domain(), dfOutFrame.vec(0).domain())
};
Frame tfIdfIntermediate = Merge.merge(tfOutFrame, dfOutFrame, new int[]{1}, new int[]{0}, false, levelMaps);
Scope.track(tfIdfIntermediate.replace(1, tfIdfIntermediate.vecs()[1].toStringVec()));
// TF-IDF
int tfOutFrameColCnt = tfIdfIntermediate.numCols();
TfIdfTask tfIdfTask = new TfIdfTask(tfOutFrameColCnt - 2, tfOutFrameColCnt - 1);
Vec tfIdfValues = tfIdfTask.doAll(new byte[]{Vec.T_NUM}, tfIdfIntermediate).outputFrame().anyVec();
Scope.track(tfIdfValues);
// Construct final frame containing TF, IDF and TF-IDF values
tfIdfIntermediate.add(TF_IDF_COL_NAME, tfIdfValues);
tfIdfIntermediate._key = Key.make();
if (log.isDebugEnabled())
log.debug(tfIdfIntermediate.toTwoDimTable().toString());
tfIdfFrame = tfIdfIntermediate;
} finally {
Key[] keysToKeep = tfIdfFrame != null ? tfIdfFrame.keys() : new Key[]{};
Scope.exit(keysToKeep);
}
return new ValFrame(tfIdfFrame);
}
@Override
public String str() {
return "tf-idf";
}
/**
* Final TF-IDF Map-Reduce task used to combine TF and IDF values together.
*/
private static class TfIdfTask extends MRTask<TfIdfTask> {
// IN
/**
* Index of a column containing Term Frequency values in the input frame of this task.
*/
private final int _tfColIndex;
/**
* Index of a column containing Inverse Document Frequency values in the input frame of this task.
*/
private final int _idfColIndex;
private TfIdfTask(int tfColIndex, int idfColIndex) {
_tfColIndex = tfColIndex;
_idfColIndex = idfColIndex;
}
@Override
public void map(Chunk[] cs, NewChunk nc) {
Chunk tfValues = cs[_tfColIndex];
Chunk idfValues = cs[_idfColIndex];
for (int row = 0; row < tfValues._len; row++) {
nc.addNum(tfValues.at8(row) * idfValues.atd(row));
}
}
}
}
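// --- Editor's sketch (illustrative only): TfIdfTask above multiplies a term
// frequency by a precomputed IDF value. The exact smoothing used by
// InverseDocumentFrequencyTask is not visible in this file; the variant below
// is a common one and is an assumption, not necessarily H2O's formula.
class TfIdfSketch {
  static double tfIdf(long tf, long df, long documentsCnt) {
    double idf = Math.log((double) documentsCnt / (1 + df)); // hypothetical smoothing
    return tf * idf; // same combination step as TfIdfTask.map
  }
}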
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstUnique.java
|
package water.rapids.ast.prims.advmath;
import water.DKV;
import water.H2O;
import water.fvec.Frame;
import water.fvec.Vec;
import water.fvec.task.UniqOldTask;
import water.fvec.task.UniqTask;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
import water.util.Log;
import water.util.VecUtils;
public class AstUnique extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary"};
}
@Override
public int nargs() {
return 2 + 1;
} // (unique col)
@Override
public String str() {
return "unique";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
final Frame fr = stk.track(asts[1].exec(env)).getFrame();
final boolean includeNAs = asts[2].exec(env).getBool();
return new ValFrame(uniqueValuesBy(fr,0, includeNAs));
}
/** return a frame with unique values from the specified column */
public static Frame uniqueValuesBy(final Frame fr, final int columnIndex, final boolean includeNAs) {
final Vec vec0 = fr.vec(columnIndex);
final Vec v;
if (vec0.isCategorical()) {
// Vector domain might contain levels not actually present in the vector - collection of actual values is required.
final String[] actualVecDomain = VecUtils.collectDomainFast(vec0);
final boolean contributeNAs = vec0.naCnt() > 0 && includeNAs;
final long uniqueVecLength = contributeNAs ? actualVecDomain.length + 1 : actualVecDomain.length;
v = Vec.makeSeq(0, uniqueVecLength, true);
if(contributeNAs) {
v.setNA(uniqueVecLength - 1);
}
v.setDomain(actualVecDomain);
DKV.put(v);
} else {
long start = System.currentTimeMillis();
String uniqImpl = H2O.getSysProperty("rapids.unique.impl", "IcedDouble");
switch (uniqImpl) {
case "IcedDouble":
v = new UniqTask().doAll(vec0).toVec();
break;
case "GroupBy":
v = new UniqOldTask().doAll(vec0).toVec();
break;
default:
throw new UnsupportedOperationException("Unknown unique implementation: " + uniqImpl);
}
Log.info("Unique on a numerical Vec (len=" + vec0.length() + ") took " +
(System.currentTimeMillis() - start) + "ms and returned " + v.length() + " unique values (impl: " + uniqImpl + ").");
}
return new Frame(v);
}
}
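// --- Editor's sketch (illustrative, not part of the original source): the
// numeric path above collects uniques with a distributed task; on a plain
// in-memory array the same result is a sorted set with NAs skipped:
class UniqueSketch {
  static double[] unique(double[] xs) {
    java.util.TreeSet<Double> set = new java.util.TreeSet<>();
    for (double x : xs)
      if (!Double.isNaN(x)) set.add(x); // NAs are not counted as a value here
    double[] out = new double[set.size()];
    int i = 0;
    for (double x : set) out[i++] = x;
    return out;
  }
}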
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/AstVariance.java
|
package water.rapids.ast.prims.advmath;
import water.Key;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.vals.ValNum;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.util.ArrayUtils;
import java.util.Arrays;
/**
* Variance between columns of a frame
*/
public class AstVariance extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary", "x", "y", "use", "symmetric"};
}
private enum Mode {Everything, AllObs, CompleteObs}
@Override
public int nargs() {
return 1 + 4; /* (var X Y use symmetric) */
}
@Override
public String str() {
return "var";
}
@Override
public Val apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Frame frx = stk.track(asts[1].exec(env)).getFrame();
Frame fry = stk.track(asts[2].exec(env)).getFrame();
if (frx.numRows() != fry.numRows())
throw new IllegalArgumentException("Frames must have the same number of rows, found " + frx.numRows() + " and " + fry.numRows());
String use = stk.track(asts[3].exec(env)).getStr();
boolean symmetric = asts[4].exec(env).getNum() == 1;
Mode mode;
switch (use) {
case "everything":
mode = Mode.Everything;
break;
case "all.obs":
mode = Mode.AllObs;
break;
case "complete.obs":
mode = Mode.CompleteObs;
break;
default:
throw new IllegalArgumentException("unknown use mode: " + use);
}
return fry.numRows() == 1 ? scalar(frx, fry, mode) : array(frx, fry, mode, symmetric);
}
// Scalar covariance for 1 row
private ValNum scalar(Frame frx, Frame fry, Mode mode) {
if (frx.numCols() != fry.numCols())
throw new IllegalArgumentException("Single rows must have the same number of columns, found " + frx.numCols() + " and " + fry.numCols());
Vec vecxs[] = frx.vecs();
Vec vecys[] = fry.vecs();
double xmean = 0, ymean = 0, ncols = frx.numCols(), NACount = 0, xval, yval, ss = 0;
for (int r = 0; r < ncols; r++) {
xval = vecxs[r].at(0);
yval = vecys[r].at(0);
if (Double.isNaN(xval) || Double.isNaN(yval))
NACount++;
else {
xmean += xval;
ymean += yval;
}
}
xmean /= (ncols - NACount);
ymean /= (ncols - NACount);
if (NACount != 0) {
if (mode.equals(Mode.AllObs)) throw new IllegalArgumentException("Mode is 'all.obs' but NAs are present");
if (mode.equals(Mode.Everything)) return new ValNum(Double.NaN);
}
for (int r = 0; r < ncols; r++) {
xval = vecxs[r].at(0);
yval = vecys[r].at(0);
if (!(Double.isNaN(xval) || Double.isNaN(yval)))
ss += (vecxs[r].at(0) - xmean) * (vecys[r].at(0) - ymean);
}
return new ValNum(ss / (ncols - NACount - 1));
}
// Matrix covariance. Compute covariance between all columns from each Frame
// against each other. Return a matrix of covariances which is frx.numCols
// wide and fry.numCols tall.
private Val array(Frame frx, Frame fry, Mode mode, boolean symmetric) {
Vec[] vecxs = frx.vecs();
int ncolx = vecxs.length;
Vec[] vecys = fry.vecs();
int ncoly = vecys.length;
if (mode.equals(Mode.Everything) || mode.equals(Mode.AllObs)) {
if (mode.equals(Mode.AllObs)) {
for (Vec v : vecxs)
if (v.naCnt() != 0)
throw new IllegalArgumentException("Mode is 'all.obs' but NAs are present");
if (!symmetric)
for (Vec v : vecys)
if (v.naCnt() != 0)
throw new IllegalArgumentException("Mode is 'all.obs' but NAs are present");
}
CoVarTaskEverything[] cvs = new CoVarTaskEverything[ncoly];
double[] xmeans = new double[ncolx];
for (int x = 0; x < ncolx; x++)
xmeans[x] = vecxs[x].mean();
if (symmetric) {
//1-col returns scalar
if (ncoly == 1)
return new ValNum(vecys[0].naCnt() == 0 ? vecys[0].sigma() * vecys[0].sigma() : Double.NaN);
int[] idx = new int[ncoly];
for (int y = 1; y < ncoly; y++) idx[y] = y;
int[] first_index = new int[]{0};
//compute covariances between column_i and column_i+1, column_i+2, ...
Frame reduced_fr;
for (int y = 0; y < ncoly - 1; y++) {
idx = ArrayUtils.removeIds(idx, first_index);
reduced_fr = new Frame(frx.vecs(idx));
cvs[y] = new CoVarTaskEverything(vecys[y].mean(), xmeans).dfork(new Frame(vecys[y]).add(reduced_fr));
}
double[][] res_array = new double[ncoly][ncoly];
//fill in the diagonals (variances) using sigma from rollupstats
for (int y = 0; y < ncoly; y++)
res_array[y][y] = vecys[y].naCnt() == 0 ? vecys[y].sigma() * vecys[y].sigma() : Double.NaN;
//arrange the results into the top right of res_array. each successive cvs is 1 smaller in length
for (int y = 0; y < ncoly - 1; y++)
System.arraycopy(ArrayUtils.div(cvs[y].getResult()._covs, (fry.numRows() - 1)), 0, res_array[y], y + 1, ncoly - y - 1);
//copy over the top right of res_array to its bottom left
for (int y = 0; y < ncoly - 1; y++) {
for (int x = y + 1; x < ncoly; x++) {
res_array[x][y] = res_array[y][x];
}
}
//set Frame
Vec[] res = new Vec[ncoly];
Key<Vec>[] keys = Vec.VectorGroup.VG_LEN1.addVecs(ncoly);
for (int y = 0; y < ncoly; y++) {
res[y] = Vec.makeVec(res_array[y], keys[y]);
}
return new ValFrame(new Frame(fry._names, res));
}
// Launch tasks; each does all Xs vs one Y
for (int y = 0; y < ncoly; y++)
cvs[y] = new CoVarTaskEverything(vecys[y].mean(), xmeans).dfork(new Frame(vecys[y]).add(frx));
// 1-col returns scalar
if (ncolx == 1 && ncoly == 1) {
return new ValNum(cvs[0].getResult()._covs[0] / (fry.numRows() - 1));
}
// Gather all the Xs-vs-Y covariance arrays; divide by rows
Vec[] res = new Vec[ncoly];
Key<Vec>[] keys = Vec.VectorGroup.VG_LEN1.addVecs(ncoly);
for (int y = 0; y < ncoly; y++)
res[y] = Vec.makeVec(ArrayUtils.div(cvs[y].getResult()._covs, (fry.numRows() - 1)), keys[y]);
return new ValFrame(new Frame(fry._names, res));
} else { //if (mode.equals(Mode.CompleteObs)) {
//two-pass algorithm for computation of variance for numerical stability
if (symmetric) {
if (ncoly == 1)
return new ValNum(vecys[0].sigma() * vecys[0].sigma());
CoVarTaskCompleteObsMeanSym taskCompleteObsMeanSym = new CoVarTaskCompleteObsMeanSym().doAll(fry);
long NACount = taskCompleteObsMeanSym._NACount;
double[] ymeans = ArrayUtils.div(taskCompleteObsMeanSym._ysum, fry.numRows() - NACount);
// 1 task with all Ys
CoVarTaskCompleteObsSym cvs = new CoVarTaskCompleteObsSym(ymeans).doAll(new Frame(fry));
double[][] res_array = new double[ncoly][ncoly];
for (int y = 0; y < ncoly; y++) {
System.arraycopy(ArrayUtils.div(cvs._covs[y], (fry.numRows() - 1 - NACount)), y, res_array[y], y, ncoly - y);
}
//copy over the top right of res_array to its bottom left
for (int y = 0; y < ncoly - 1; y++) {
for (int x = y + 1; x < ncoly; x++) {
res_array[x][y] = res_array[y][x];
}
}
//set Frame
Vec[] res = new Vec[ncoly];
Key<Vec>[] keys = Vec.VectorGroup.VG_LEN1.addVecs(ncoly);
for (int y = 0; y < ncoly; y++) {
res[y] = Vec.makeVec(res_array[y], keys[y]);
}
return new ValFrame(new Frame(fry._names, res));
}
CoVarTaskCompleteObsMean taskCompleteObsMean = new CoVarTaskCompleteObsMean(ncoly, ncolx).doAll(new Frame(fry).add(frx));
long NACount = taskCompleteObsMean._NACount;
double[] ymeans = ArrayUtils.div(taskCompleteObsMean._ysum, fry.numRows() - NACount);
double[] xmeans = ArrayUtils.div(taskCompleteObsMean._xsum, fry.numRows() - NACount);
// 1 task with all Xs and Ys
CoVarTaskCompleteObs cvs = new CoVarTaskCompleteObs(ymeans, xmeans).doAll(new Frame(fry).add(frx));
// 1-col returns scalar
if (ncolx == 1 && ncoly == 1) {
return new ValNum(cvs._covs[0][0] / (fry.numRows() - 1 - NACount));
}
// Gather all the Xs-vs-Y covariance arrays; divide by rows
Vec[] res = new Vec[ncoly];
Key<Vec>[] keys = Vec.VectorGroup.VG_LEN1.addVecs(ncoly);
for (int y = 0; y < ncoly; y++)
res[y] = Vec.makeVec(ArrayUtils.div(cvs._covs[y], (fry.numRows() - 1 - NACount)), keys[y]);
return new ValFrame(new Frame(fry._names, res));
}
}
private static class CoVarTaskEverything extends MRTask<CoVarTaskEverything> {
double[] _covs;
final double _xmeans[], _ymean;
CoVarTaskEverything(double ymean, double[] xmeans) {
_ymean = ymean;
_xmeans = xmeans;
}
@Override
public void map(Chunk cs[]) {
final int ncolsx = cs.length - 1;
final Chunk cy = cs[0];
final int len = cy._len;
_covs = new double[ncolsx];
double sum;
for (int x = 0; x < ncolsx; x++) {
sum = 0;
final Chunk cx = cs[x + 1];
final double xmean = _xmeans[x];
for (int row = 0; row < len; row++)
sum += (cx.atd(row) - xmean) * (cy.atd(row) - _ymean);
_covs[x] = sum;
}
}
@Override
public void reduce(CoVarTaskEverything cvt) {
ArrayUtils.add(_covs, cvt._covs);
}
}
private static class CoVarTaskCompleteObsMean extends MRTask<CoVarTaskCompleteObsMean> {
double[] _xsum, _ysum;
long _NACount;
int _ncolx, _ncoly;
CoVarTaskCompleteObsMean(int ncoly, int ncolx) {
_ncolx = ncolx;
_ncoly = ncoly;
}
@Override
public void map(Chunk cs[]) {
_xsum = new double[_ncolx];
_ysum = new double[_ncoly];
double[] xvals = new double[_ncolx];
double[] yvals = new double[_ncoly];
double xval, yval;
boolean add;
int len = cs[0]._len;
for (int row = 0; row < len; row++) {
add = true;
//reset existing arrays to 0 rather than initializing new ones to save on garbage collection
Arrays.fill(xvals, 0);
Arrays.fill(yvals, 0);
for (int y = 0; y < _ncoly; y++) {
final Chunk cy = cs[y];
yval = cy.atd(row);
//if any yval along a row is NA, discard the entire row
if (Double.isNaN(yval)) {
_NACount++;
add = false;
break;
}
yvals[y] = yval;
}
if (add) {
for (int x = 0; x < _ncolx; x++) {
final Chunk cx = cs[x + _ncoly];
xval = cx.atd(row);
//if any xval along a row is NA, discard the entire row
if (Double.isNaN(xval)) {
_NACount++;
add = false;
break;
}
xvals[x] = xval;
}
}
//add is true iff row has been traversed and found no NAs among yvals and xvals
if (add) {
ArrayUtils.add(_xsum, xvals);
ArrayUtils.add(_ysum, yvals);
}
}
}
@Override
public void reduce(CoVarTaskCompleteObsMean cvt) {
ArrayUtils.add(_xsum, cvt._xsum);
ArrayUtils.add(_ysum, cvt._ysum);
_NACount += cvt._NACount;
}
}
private static class CoVarTaskCompleteObs extends MRTask<CoVarTaskCompleteObs> {
double[][] _covs;
final double _xmeans[], _ymeans[];
CoVarTaskCompleteObs(double[] ymeans, double[] xmeans) {
_ymeans = ymeans;
_xmeans = xmeans;
}
@Override
public void map(Chunk cs[]) {
int ncolx = _xmeans.length;
int ncoly = _ymeans.length;
double[] xvals = new double[ncolx];
double[] yvals = new double[ncoly];
_covs = new double[ncoly][ncolx];
double[] _covs_y;
double xval, yval, ymean;
boolean add;
int len = cs[0]._len;
for (int row = 0; row < len; row++) {
add = true;
//reset existing arrays to 0 rather than initializing new ones to save on garbage collection
Arrays.fill(xvals, 0);
Arrays.fill(yvals, 0);
for (int y = 0; y < ncoly; y++) {
final Chunk cy = cs[y];
yval = cy.atd(row);
//if any yval along a row is NA, discard the entire row
if (Double.isNaN(yval)) {
add = false;
break;
}
yvals[y] = yval;
}
if (add) {
for (int x = 0; x < ncolx; x++) {
final Chunk cx = cs[x + ncoly];
xval = cx.atd(row);
//if any xval along a row is NA, discard the entire row
if (Double.isNaN(xval)) {
add = false;
break;
}
xvals[x] = xval;
}
}
//add is true iff row has been traversed and found no NAs among yvals and xvals
if (add) {
for (int y = 0; y < ncoly; y++) {
_covs_y = _covs[y];
yval = yvals[y];
ymean = _ymeans[y];
for (int x = 0; x < ncolx; x++)
_covs_y[x] += (xvals[x] - _xmeans[x]) * (yval - ymean);
}
}
}
}
@Override
public void reduce(CoVarTaskCompleteObs cvt) {
ArrayUtils.add(_covs, cvt._covs);
}
}
private static class CoVarTaskCompleteObsMeanSym extends MRTask<CoVarTaskCompleteObsMeanSym> {
double[] _ysum;
long _NACount;
@Override
public void map(Chunk cs[]) {
int ncoly = cs.length;
_ysum = new double[ncoly];
double[] yvals = new double[ncoly];
double yval;
boolean add;
int len = cs[0]._len;
for (int row = 0; row < len; row++) {
add = true;
Arrays.fill(yvals, 0);
for (int y = 0; y < ncoly; y++) {
final Chunk cy = cs[y];
yval = cy.atd(row);
//if any yval along a row is NA, discard the entire row
if (Double.isNaN(yval)) {
_NACount++;
add = false;
break;
}
yvals[y] = yval;
}
if (add) {
ArrayUtils.add(_ysum, yvals);
}
}
}
@Override
public void reduce(CoVarTaskCompleteObsMeanSym cvt) {
ArrayUtils.add(_ysum, cvt._ysum);
_NACount += cvt._NACount;
}
}
private static class CoVarTaskCompleteObsSym extends MRTask<CoVarTaskCompleteObsSym> {
double[][] _covs;
final double _ymeans[];
CoVarTaskCompleteObsSym(double[] ymeans) {
_ymeans = ymeans;
}
@Override
public void map(Chunk cs[]) {
int ncoly = _ymeans.length;
double[] yvals = new double[ncoly];
_covs = new double[ncoly][ncoly];
double[] _covs_y;
double yval, ymean;
boolean add;
int len = cs[0]._len;
for (int row = 0; row < len; row++) {
add = true;
//reset existing arrays to 0 rather than initializing new ones to save on garbage collection
Arrays.fill(yvals, 0);
for (int y = 0; y < ncoly; y++) {
final Chunk cy = cs[y];
yval = cy.atd(row);
//if any yval along a row is NA, discard the entire row
if (Double.isNaN(yval)) {
add = false;
break;
}
yvals[y] = yval;
}
//add is true iff row has been traversed and found no NAs among yvals
if (add) {
for (int y = 0; y < ncoly; y++) {
_covs_y = _covs[y];
yval = yvals[y];
ymean = _ymeans[y];
for (int x = y; x < ncoly; x++)
_covs_y[x] += (yvals[x] - _ymeans[x]) * (yval - ymean);
}
}
}
}
@Override
public void reduce(CoVarTaskCompleteObsSym cvt) {
ArrayUtils.add(_covs, cvt._covs);
}
}
public static double getVar(Vec v) {
return v.naCnt() == 0 ? v.sigma() * v.sigma() : Double.NaN;
}
}
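// --- Editor's sketch (illustrative, not part of the original source): the
// quantity the tasks above accumulate and then divide by (n - 1) is the
// classic sample covariance. On two plain arrays of equal length n:
class CovSketch {
  static double cov(double[] x, double[] y) {
    int n = x.length;
    double mx = 0, my = 0;
    for (int i = 0; i < n; i++) { mx += x[i]; my += y[i]; }
    mx /= n; my /= n; // column means, as computed by the *Mean tasks
    double ss = 0;
    for (int i = 0; i < n; i++) ss += (x[i] - mx) * (y[i] - my);
    return ss / (n - 1); // matches the (numRows() - 1) divisor used above
  }
}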
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/advmath/SpearmanCorrelation.java
|
package water.rapids.ast.prims.advmath;
import water.Key;
import water.MRTask;
import water.Scope;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Merge;
import water.util.FrameUtils;
import java.math.BigDecimal;
import java.math.MathContext;
import java.util.Objects;
public class SpearmanCorrelation {
public static Frame calculate(final Frame frameX, Frame frameY, final AstCorrelation.Mode mode) {
Objects.requireNonNull(frameX);
Objects.requireNonNull(frameY);
checkCorrelationDoable(frameX, frameY, mode);
final Frame correlationMatrix = createCorrelationMatrix(frameX, frameY);
// If the two frames contain the same vectors (key-wise), the diagonal of the correlation matrix can automatically be filled with 1s,
// unless the mode is "Everything", which forces NaN correlation values whenever a NaN observation is present.
final boolean framesAreEqual = !AstCorrelation.Mode.Everything.equals(mode) && framesContainSameVecs(frameX, frameY);
for (int vecIdX = 0; vecIdX < frameX.numCols(); vecIdX++) {
for (int vecIdY = 0; vecIdY < frameY.numCols(); vecIdY++) {
Scope.enter();
try {
if (framesAreEqual && vecIdX == vecIdY) {
// If the correlation is calculated within the same frame, comparing a vec with itself always results in a
// correlation coefficient of 1.0, so there is no need to calculate it.
correlationMatrix.vec(vecIdX)
.set(vecIdY, 1d);
} else if (isNaNCorrelation(frameX.vec(vecIdX), frameY.vec(vecIdY), mode)) {
correlationMatrix.vec(vecIdX)
.set(vecIdY, Double.NaN);
} else {
// Actual SCC calculation
final SpearmanRankedVectors rankedVectors = rankedVectors(frameX, frameY, vecIdX, vecIdY, mode);
// Means must be calculated separately - those are not calculated for categorical columns in rollup stats.
final double[] means = calculateMeans(rankedVectors._x, rankedVectors._y);
final SpearmanCorrelationCoefficientTask spearman = new SpearmanCorrelationCoefficientTask(means[0], means[1])
.doAll(rankedVectors._x, rankedVectors._y);
correlationMatrix.vec(vecIdX)
.set(vecIdY, spearman.getSpearmanCorrelationCoefficient());
}
} finally {
Scope.exit();
}
}
}
return correlationMatrix;
}
/**
* Compares two frames for their vecs being the same key-wise.
*
* @param frameX An instance of {@link Frame} to compare
* @param frameY A second instance of {@link Frame} to compare
* @return True if and only if the frames both contain the same vectors in the same order and quantity. Otherwise false.
*/
private static boolean framesContainSameVecs(final Frame frameX, final Frame frameY) {
final Vec[] vecsX = frameX.vecs();
final Vec[] vecsY = frameY.vecs();
if (vecsX.length != vecsY.length) return false;
for (int i = 0; i < vecsX.length; i++) {
if (!vecsX[i]._key.equals(vecsY[i]._key)) return false;
}
return true;
}
/**
* @param frameX Frame X candidate for SCC calculation
* @param frameY Frame Y candidate for SCC calculation
* @param mode An instance of {@link AstCorrelation.Mode}
* @throws IllegalArgumentException When the {@link AstCorrelation.Mode} is set to AllObs and any of the vectors contains NaN,
*                                  or when any of the frames has no columns
*/
private static void checkCorrelationDoable(final Frame frameX, final Frame frameY, final AstCorrelation.Mode mode)
throws IllegalArgumentException {
if (!AstCorrelation.Mode.AllObs.equals(mode)) return;
// Those frames are not guaranteed to have a key - Frames generated by rapids are keyless
if (frameX.numCols() == 0)
throw new IllegalArgumentException("First given frame for Spearman calculation has no columnns.");
if (frameY.numCols() == 0)
throw new IllegalArgumentException("Second given frame for Spearman calculation has no columnns.");
final Vec[] vecsX = frameX.vecs();
final Vec[] vecsY = frameY.vecs();
for (int i = 0; i < vecsX.length; i++) {
if (vecsX[i].naCnt() != 0 || vecsY[i].naCnt() != 0) {
throw new IllegalArgumentException("Mode is 'AllObs' but NAs are present");
}
}
}
/**
* @param vecX Vec X candidate for SCC calculation
* @param vecY Vec Y candidate for SCC calculation
* @param mode An instance of {@link AstCorrelation.Mode}
* @return True if AstCorrelation.Mode is set to EVERYTHING and any of the vectors compared contains NaNs, otherwise False.
*/
private static boolean isNaNCorrelation(final Vec vecX, final Vec vecY, final AstCorrelation.Mode mode) {
return AstCorrelation.Mode.Everything.equals(mode) && (vecX.naCnt() > 0 || vecY.naCnt() > 0);
}
private static Frame createCorrelationMatrix(final Frame frameX, final Frame frameY) {
final Vec[] correlationVecs = new Vec[frameX.numCols()];
final int height = frameY.numCols();
for (int width = 0; width < frameX.numCols(); width++) {
correlationVecs[width] = Vec.makeCon(Double.NaN, height);
}
return new Frame(Key.make(), correlationVecs);
}
/**
* Sorts and ranks the vectors of which SCC is calculated. Original Frame is not modified.
*
* @param frameX Original frame containing the first compared vector
* @param frameY Original frame containing the second compared vector
* @param vecIdX Index of the first compared vector in frameX
* @param vecIdY Index of the second compared vector in frameY
* @param mode   An instance of {@link AstCorrelation.Mode}
* @return An instance of {@link SpearmanRankedVectors}, holding two new vectors with row ranks.
*/
private static SpearmanRankedVectors rankedVectors(final Frame frameX, final Frame frameY, final int vecIdX, final int vecIdY,
final AstCorrelation.Mode mode) {
Frame comparedVecsWithNas = new Frame(frameX.vec(vecIdX).makeCopy(),
frameY.vec(vecIdY).makeCopy());
Frame unsortedVecs;
if (AstCorrelation.Mode.CompleteObs.equals(mode)) {
unsortedVecs = comparedVecsWithNas;
} else {
unsortedVecs = new Merge.RemoveNAsTask(0, 1)
.doAll(comparedVecsWithNas.types(), comparedVecsWithNas)
.outputFrame(comparedVecsWithNas.names(), comparedVecsWithNas.domains());
}
Frame sortedX = new Frame(unsortedVecs.vec(0).makeCopy());
Scope.track(sortedX);
Frame sortedY = new Frame(unsortedVecs.vec(1).makeCopy());
Scope.track(sortedY);
final boolean xIsOrdered = needsOrdering(sortedX.vec(0));
final boolean yIsOrdered = needsOrdering(sortedY.vec(0));
if (xIsOrdered) {
FrameUtils.labelRows(sortedX, "label");
sortedX = sortedX.sort(new int[]{0});
Scope.track(sortedX);
}
if (yIsOrdered) {
FrameUtils.labelRows(sortedY, "label");
sortedY = sortedY.sort(new int[]{0});
Scope.track(sortedY);
}
assert sortedX.numRows() == sortedY.numRows();
final Vec orderX = needsOrdering(sortedX.vec(0)) ? Vec.makeZero(sortedX.numRows()) : frameX.vec(vecIdX);
final Vec orderY = needsOrdering(sortedY.vec(0)) ? Vec.makeZero(sortedY.numRows()) : frameY.vec(vecIdY);
final Vec xLabel = sortedX.vec("label") == null ? sortedX.vec(0) : sortedX.vec("label");
final Vec xValue = sortedX.vec(0);
final Vec yLabel = sortedY.vec("label") == null ? sortedY.vec(0) : sortedY.vec("label");
final Vec yValue = sortedY.vec(0);
Scope.track(xLabel);
Scope.track(yLabel);
final Vec.Writer orderXWriter = orderX.open();
final Vec.Writer orderYWriter = orderY.open();
final Vec.Reader xValueReader = xValue.new Reader();
final Vec.Reader yValueReader = yValue.new Reader();
final Vec.Reader xLabelReader = xLabel.new Reader();
final Vec.Reader yLabelReader = yLabel.new Reader();
// Put the actual rank into the vectors with ranks. Ensure equal values share the same rank.
double lastX = Double.NaN;
double lastY = Double.NaN;
long skippedX = 0;
long skippedY = 0;
for (int i = 0; i < orderX.length(); i++) {
if (xIsOrdered) {
if (lastX == xValueReader.at(i)) {
skippedX++;
} else {
skippedX = 0;
}
lastX = xValueReader.at(i);
orderXWriter.set(xLabelReader.at8(i) - 1, i - skippedX);
}
if (yIsOrdered) {
if (lastY == yValueReader.at(i)) {
skippedY++;
} else {
skippedY = 0;
}
lastY = yValueReader.at(i);
orderYWriter.set(yLabelReader.at8(i) - 1, i - skippedY);
}
}
orderXWriter.close();
orderYWriter.close();
// Ensure chunk layout is the same by adding the vectors into a new Frame.
final Frame sameChunkLayoutFrame = new Frame(new String[]{"X"}, new Vec[]{orderX});
sameChunkLayoutFrame.add("Y", orderY);
// Return the vectors with ranks - the ones with same chunk layout from inside the frame
return new SpearmanRankedVectors(sameChunkLayoutFrame.vec("X"), sameChunkLayoutFrame.vec("Y"));
}
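// --- Editor's note (illustrative, not original code): the tie handling above
// assigns equal values the rank of their first occurrence. On a sorted column
// [3, 7, 7, 9] the written ranks are [0, 1, 1, 3]: the second 7 reuses rank 1
// via the skipped counter, while 9 keeps its positional index 3.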
/**
* Ranked vectors prepared to calculate Spearman's correlation coefficient
*/
private static class SpearmanRankedVectors {
private final Vec _x;
private final Vec _y;
public SpearmanRankedVectors(Vec x, Vec y) {
this._x = x;
this._y = y;
}
}
private static boolean needsOrdering(final Vec vec) {
return !vec.isCategorical();
}
/**
* A task to calculate Spearman's correlation coefficient. Not using the "approximation equation", but the
* fully-fledged equation, which is resistant to noise from repeated values.
* The intermediate calculations required for the standard deviation of both columns could be done by existing code;
* however, the point is to perform the calculations in a single pass over the data.
*
* @see {@link water.rapids.ast.prims.advmath.AstVariance}
*/
private static class SpearmanCorrelationCoefficientTask extends MRTask<SpearmanCorrelationCoefficientTask> {
// Arguments obtained externally
private final double _xMean;
private final double _yMean;
private double spearmanCorrelationCoefficient;
// Required to later finish calculation of standard deviation
private double _xDiffSquared = 0;
private double _yDiffSquared = 0;
private double _xyMul = 0;
// If at least one of the vectors contains NaN, such line is skipped
private long _linesVisited;
/**
* @param xMean Mean value of the first 'x' vector, with NaNs skipped
* @param yMean Mean value of the second 'y' vector, with NaNs skipped
*/
private SpearmanCorrelationCoefficientTask(final double xMean, final double yMean) {
this._xMean = xMean;
this._yMean = yMean;
}
@Override
public void map(Chunk[] chunks) {
assert chunks.length == 2; // Correlation is only computed between two vectors at a time
final Chunk xChunk = chunks[0];
final Chunk yChunk = chunks[1];
for (int row = 0; row < chunks[0].len(); row++) {
final double x = xChunk.atd(row);
final double y = yChunk.atd(row);
_linesVisited++;
_xyMul += x * y;
final double xDiffFromMean = x - _xMean;
final double yDiffFromMean = y - _yMean;
_xDiffSquared += Math.pow(xDiffFromMean, 2);
_yDiffSquared += Math.pow(yDiffFromMean, 2);
}
}
@Override
public void reduce(final SpearmanCorrelationCoefficientTask mrt) {
// The intermediate results are addable. The final calculations are done afterwards.
this._xDiffSquared += mrt._xDiffSquared;
this._yDiffSquared += mrt._yDiffSquared;
this._linesVisited += mrt._linesVisited;
this._xyMul += mrt._xyMul;
}
@Override
protected void postGlobal() {
final double xStdDev = Math.sqrt(_xDiffSquared / _linesVisited);
final double yStdDev = Math.sqrt(_yDiffSquared / _linesVisited);
spearmanCorrelationCoefficient = (_xyMul - (_linesVisited * _xMean * _yMean))
/ (_linesVisited * xStdDev * yStdDev);
}
public double getSpearmanCorrelationCoefficient() {
return spearmanCorrelationCoefficient;
}
}
/**
* Calculates means of the given numerical Vectors. If there is a NaN on a row in any of the given vectors,
* the row is skipped and not involved in the mean calculation.
*
* @param vecs An array of {@link Vec}, must not be empty, nor null. All vectors must be of same length.
* @return An array of doubles with means for given vectors in the order they were given as arguments.
* @throws IllegalArgumentException Zero vectors provided, vectors of a wrong type, or vectors of unequal length.
*/
private static double[] calculateMeans(final Vec... vecs) throws IllegalArgumentException {
if (vecs.length < 1) {
throw new IllegalArgumentException("There are no vectors to calculate means from.");
}
final long referenceVectorLength = vecs[0].length();
for (int i = 0; i < vecs.length; i++) {
if (!vecs[i].isCategorical() && !vecs[i].isNumeric()) {
throw new IllegalArgumentException(String.format("Given vector '%s' is not numerical or categorical.",
vecs[i]._key.toString()));
}
if (referenceVectorLength != vecs[i].length()) {
throw new IllegalArgumentException("Vectors to calculate means from do not have the same length." +
String.format(" Vector '%s' is of length '%d'", vecs[i]._key.toString(), vecs[i].length()));
}
}
return new MeanTask()
.doAll(vecs)._means;
}
/**
* Calculates means of the given numerical Vectors. If there is a NaN on a row in any of the given vectors,
* the row is skipped and not involved in the mean calculation.
*/
private static class MeanTask extends MRTask<MeanTask> {
private double[] _means;
private long _linesVisited = 0;
@Override
public void map(Chunk[] cs) {
// Sums might get big, BigDecimal ensures local accuracy and no overflow
final BigDecimal[] averages = new BigDecimal[cs.length];
for (int i = 0; i < averages.length; i++) {
averages[i] = new BigDecimal(0, MathContext.DECIMAL128);
}
row:
for (int row = 0; row < cs[0].len(); row++) {
final double[] values = new double[cs.length];
for (int col = 0; col < cs.length; col++) {
values[col] = cs[col].atd(row);
if (Double.isNaN(values[col])) continue row; // If a NaN is detected in any of the columns, just skip this row
}
_linesVisited++;
for (int i = 0; i < values.length; i++) {
averages[i] = averages[i].add(new BigDecimal(values[i], MathContext.DECIMAL128), MathContext.DECIMAL128);
}
}
this._means = new double[cs.length];
for (int i = 0; i < averages.length; i++) {
this._means[i] = averages[i].divide(new BigDecimal(_linesVisited), MathContext.DECIMAL64).doubleValue();
}
}
@Override
public void reduce(MeanTask mrt) {
final int numCols = _means.length;
for (int i = 0; i < numCols; i++) {
_means[i] = (_means[i] * _linesVisited + mrt._means[i] * mrt._linesVisited) / (_linesVisited + mrt._linesVisited);
}
_linesVisited += mrt._linesVisited;
}
}
}
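// --- Editor's sketch (illustrative, not part of the original source):
// SpearmanCorrelationCoefficientTask computes Pearson's r over rank vectors,
// r = (sum(x*y) - n*mx*my) / (n * sx * sy), with population standard
// deviations, mirroring postGlobal() above on plain arrays:
class SpearmanSketch {
  static double pearsonOnRanks(double[] x, double[] y, double mx, double my) {
    int n = x.length;
    double xy = 0, xd2 = 0, yd2 = 0;
    for (int i = 0; i < n; i++) {
      xy += x[i] * y[i];
      xd2 += (x[i] - mx) * (x[i] - mx);
      yd2 += (y[i] - my) * (y[i] - my);
    }
    double sx = Math.sqrt(xd2 / n), sy = Math.sqrt(yd2 / n); // population std dev
    return (xy - (double) n * mx * my) / (n * sx * sy);
  }
}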
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/assign/AstAppend.java
|
package water.rapids.ast.prims.assign;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
/**
* Attach a named column(s) to a destination frame.
*
* Syntax: destinationFrame (sourceFrame columnName)+
*/
public class AstAppend extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"dst (src colName)+"};
}
@Override
public int nargs() {
return -1;
} // (append dst src "colName")
@Override
public String str() {
return "append";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
assert asts.length >= 1 /* append */ + 3 /* args */: "Append needs at least 3 parameters";
assert (asts.length & 1) == 0 : "Wrong number of parameters";
Frame dst = stk.track(asts[1].exec(env)).getFrame();
dst = new Frame(dst._names.clone(), dst.vecs().clone());
for (int i = 2; i < asts.length; i+=2) {
Val vsrc = stk.track(asts[i].exec(env));
String newColName = asts[i+1].exec(env).getStr();
Vec vec = dst.anyVec();
switch (vsrc.type()) {
case Val.NUM:
vec = vec.makeCon(vsrc.getNum());
break;
case Val.STR:
vec = vec.makeCon(vsrc.getStr());
break;
case Val.FRM:
if (vsrc.getFrame().numCols() != 1)
throw new IllegalArgumentException("Can only append one column");
vec = vsrc.getFrame().anyVec();
break;
default:
throw new IllegalArgumentException(
"Source must be a Frame, String or Number, but found a " + vsrc.getClass());
}
dst.add(newColName, vec);
}
return new ValFrame(dst);
}
}
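// --- Editor's usage sketch (hypothetical Rapids expressions, not original
// code): per apply() above, a frame source contributes its single column and
// a scalar source broadcasts into a constant column, e.g.
//   (append dst src "newCol")   -- appends src's only column as "newCol"
//   (append dst 0 "zeros")      -- appends a constant numeric column "zeros"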
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/assign/AstAssign.java
|
package water.rapids.ast.prims.assign;
import water.*;
import water.fvec.Frame;
import water.rapids.Env;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
/**
* Assign a whole frame over a global. Copy-On-Write optimizations make this cheap.
*/
public class AstAssign extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"id", "frame"};
}
@Override
public int nargs() {
return 1 + 2;
} // (assign id frame)
@Override
public String str() {
return "assign";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Key<Frame> id = Key.make(asts[1].str());
Frame src = stk.track(asts[2].exec(env)).getFrame();
return new ValFrame(env._ses.assign(id, src)); // New global Frame over shared Vecs
}
}
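// --- Editor's usage sketch (hypothetical Rapids expression, not original
// code): per nargs() and apply() above,
//   (assign new_frame_id some_frame)
// binds some_frame to the global key "new_frame_id"; copy-on-write keeps this
// cheap because the underlying Vecs are shared, not copied.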
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/assign/AstRecAsgnHelper.java
|
package water.rapids.ast.prims.assign;
import water.Iced;
import water.fvec.Chunk;
import water.fvec.Vec;
import java.util.UUID;
public class AstRecAsgnHelper {
/**
* Generic abstraction over Chunk setter methods.
*/
public static abstract class ValueSetter extends Iced<ValueSetter> {
/**
* Sets a value (possibly a constant) to a position of the Chunk.
* @param idx Chunk-local index
*/
public abstract void setValue(Chunk chk, int idx);
/**
* Sets a value (possibly a constant) to a given index of a Vec.
* @param vec Vec
* @param idx absolute index
*/
public abstract void setValue(Vec vec, long idx);
}
/**
* Create an instance of ValueSetter for a given scalar value.
* It creates setter of the appropriate type based on the type of the underlying Vec.
* @param v Vec
* @param value scalar value
* @return instance of ValueSetter
*/
public static ValueSetter createValueSetter(Vec v, Object value) {
if (value == null) {
return new NAValueSetter();
}
switch (v.get_type()) {
case Vec.T_CAT:
return new CatValueSetter(v.domain(), value);
case Vec.T_NUM:
case Vec.T_TIME:
return new NumValueSetter(value);
case Vec.T_STR:
return new StrValueSetter(value);
case Vec.T_UUID:
return new UUIDValueSetter(value);
default:
throw new IllegalArgumentException("Cannot create ValueSetter for a Vec of type = " + v.get_type_str());
}
}
private static class NAValueSetter extends ValueSetter {
public NAValueSetter() {} // for Externalizable
@Override
public void setValue(Chunk chk, int idx) { chk.setNA(idx); }
@Override
public void setValue(Vec vec, long idx) { vec.setNA(idx); }
}
private static class CatValueSetter extends ValueSetter {
private int _val;
public CatValueSetter() {} // for Externalizable
private CatValueSetter(String[] domain, Object val) {
if (! (val instanceof String)) {
throw new IllegalArgumentException("Value needs to be categorical, value = " + val);
}
int factorIdx = -1;
for (int i = 0; i < domain.length; i++)
if (val.equals(domain[i])) {
factorIdx = i;
break;
}
if (factorIdx == -1)
throw new IllegalArgumentException("Value is not in the domain of the Vec, value = " + val);
_val = factorIdx;
}
@Override
public void setValue(Chunk chk, int idx) { chk.set(idx, _val); }
@Override
public void setValue(Vec vec, long idx) { vec.set(idx, (double) _val); }
}
private static class NumValueSetter extends ValueSetter {
private double _val;
public NumValueSetter() {} // for Externalizable
private NumValueSetter(Object val) {
if (! (val instanceof Number)) {
throw new IllegalArgumentException("Value needs to be numeric, value = " + val);
}
_val = ((Number) val).doubleValue();
}
@Override
public void setValue(Chunk chk, int idx) { chk.set(idx, _val); }
@Override
public void setValue(Vec vec, long idx) { vec.set(idx, _val); }
}
private static class StrValueSetter extends ValueSetter {
private String _val;
public StrValueSetter() {} // for Externalizable
private StrValueSetter(Object val) {
if (! (val instanceof String)) {
throw new IllegalArgumentException("Value needs to be string, value = " + val);
}
_val = (String) val;
}
@Override
public void setValue(Chunk chk, int idx) { chk.set(idx, _val); }
@Override
public void setValue(Vec vec, long idx) { vec.set(idx, _val); }
}
private static class UUIDValueSetter extends ValueSetter {
private UUID _val;
public UUIDValueSetter() {} // for Externalizable
private UUIDValueSetter(Object val) {
if (val instanceof String) {
val = UUID.fromString((String) val);
} else if (! (val instanceof UUID)) {
throw new IllegalArgumentException("Value needs to be an UUID, value = " + val);
}
_val = (UUID) val;
}
@Override
public void setValue(Chunk chk, int idx) { chk.set(idx, _val); }
@Override
public void setValue(Vec vec, long idx) { vec.set(idx, _val); }
}
}
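// --- Editor's usage sketch (hypothetical names, not original code): the
// factory above dispatches on the Vec's type, e.g. for a numeric Vec:
//   AstRecAsgnHelper.ValueSetter s = AstRecAsgnHelper.createValueSetter(numericVec, 42); // NumValueSetter
//   s.setValue(numericVec, 0L); // writes 42.0 at absolute row 0
// Passing value == null yields NAValueSetter, which writes NAs instead.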
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/assign/AstRectangleAssign.java
|
package water.rapids.ast.prims.assign;
import water.DKV;
import water.H2O;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.parser.BufferedString;
import water.rapids.*;
import water.rapids.ast.AstParameter;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.ast.params.AstNum;
import water.rapids.ast.params.AstNumList;
import water.rapids.ast.prims.mungers.AstColSlice;
import water.rapids.vals.ValFrame;
import water.util.ArrayUtils;
import java.util.Arrays;
import static water.rapids.ast.prims.assign.AstRecAsgnHelper.*;
/**
* Rectangular assign into a row and column slice. The destination must
* already exist. The output is conceptually a new copy of the data, with a
* fresh Frame. Copy-On-Write optimizations lower the cost to be proportional
* to the over-written sections.
*/
public class AstRectangleAssign extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"dst", "src", "col_expr", "row_expr"};
}
@Override
public int nargs() {
return 5;
} // (:= dst src col_expr row_expr)
@Override
public String str() {
return ":=";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot[] asts) {
Frame dst = stk.track(asts[1].exec(env)).getFrame();
Val vsrc = stk.track(asts[2].exec(env));
AstParameter col_list = (AstParameter) asts[3];
// Column selection
AstNumList cols_numlist = new AstNumList(col_list.columns(dst.names()));
// Special for AstAssign: "empty" really means "all"
if (cols_numlist.isEmpty()) cols_numlist = new AstNumList(0, dst.numCols());
// Allow R-like number list expansion: negative column numbers mean exclusion
int[] cols = AstColSlice.col_select(dst.names(), cols_numlist);
// Any COW optimized path changes Vecs in dst._vecs, and so needs a
// defensive copy. Any update-in-place path updates Chunks instead of
// dst._vecs, and does not need a defensive copy. To make life easier,
// just make the copy now.
dst = new Frame(dst._names, dst.vecs().clone());
// Assign over the column slice
if (asts[4] instanceof AstNum || asts[4] instanceof AstNumList) { // Explicitly named row assignment
AstNumList rows = (asts[4] instanceof AstNum)
? new AstNumList(((AstNum) asts[4]).getNum())
: ((AstNumList) asts[4]);
if (rows.isEmpty()) rows = new AstNumList(0, dst.numRows()); // Empty rows is really: all rows
switch (vsrc.type()) {
case Val.NUM:
assign_frame_scalar(dst, cols, rows, nanToNull(vsrc.getNum()), env._ses);
break;
case Val.STR:
assign_frame_scalar(dst, cols, rows, vsrc.getStr(), env._ses);
break;
case Val.FRM:
assign_frame_frame(dst, cols, rows, vsrc.getFrame(), env._ses);
break;
default:
throw new IllegalArgumentException("Source must be a Frame or Number, but found a " + vsrc.getClass());
}
} else { // Boolean assignment selection?
Frame rows = stk.track(asts[4].exec(env)).getFrame();
switch (vsrc.type()) {
case Val.NUM:
assign_frame_scalar(dst, cols, rows, nanToNull(vsrc.getNum()), env._ses);
break;
case Val.STR:
assign_frame_scalar(dst, cols, rows, vsrc.getStr(), env._ses);
break;
case Val.FRM:
throw H2O.unimpl();
default:
throw new IllegalArgumentException("Source must be a Frame or Number, but found a " + vsrc.getClass());
}
}
return new ValFrame(dst);
}
// Rectangular array copy from src into dst
private void assign_frame_frame(Frame dst, int[] cols, AstNumList rows, Frame src, Session ses) {
// Sanity check
if (cols.length != src.numCols())
throw new IllegalArgumentException("Source and destination frames must have the same count of columns");
long nrows = rows.cnt();
if (src.numRows() != nrows)
throw new IllegalArgumentException("Requires same count of rows in the number-list (" + nrows + ") as in the source (" + src.numRows() + ")");
// Whole-column assignment? Directly reuse columns: Copy-On-Write
// optimization happens here on the apply() exit.
if (dst.numRows() == nrows && rows.isDense()) {
for (int i = 0; i < cols.length; i++)
dst.replace(cols[i], src.vecs()[i]);
if (dst._key != null) DKV.put(dst);
return;
}
// Partial update; needs to preserve type, and may need to copy to support
// copy-on-write
Vec[] dvecs = dst.vecs();
final Vec[] svecs = src.vecs();
for (int col = 0; col < cols.length; col++) {
int dtype = dvecs[cols[col]].get_type();
if (dtype != svecs[col].get_type())
throw new IllegalArgumentException("Columns must be the same type; " +
"column " + col + ", \'" + dst._names[cols[col]] + "\', is of type " + dvecs[cols[col]].get_type_str() +
" and the source is " + svecs[col].get_type_str());
if ((dtype == Vec.T_CAT) && (! Arrays.equals(dvecs[cols[col]].domain(), svecs[col].domain())))
throw new IllegalArgumentException("Cannot assign to a categorical column with a different domain; " +
"source column " + src._names[col] + ", target column " + dst._names[cols[col]]);
}
// Frame fill
// Handle fast small case
if (nrows <= 1 || (cols.length * nrows) <= 1000) { // Stay serial for up to 1000 scattered updates; go parallel beyond that
// Copy dst columns as-needed to allow update-in-place
dvecs = ses.copyOnWrite(dst, cols); // Update dst columns
long[] rownums = rows.expand8(); // Just these rows
for (int col = 0; col < svecs.length; col++)
if (svecs[col].get_type() == Vec.T_STR) {
BufferedString bStr = new BufferedString();
for (int ridx = 0; ridx < rownums.length; ridx++) {
BufferedString s = svecs[col].atStr(bStr, ridx);
dvecs[cols[col]].set(rownums[ridx], s != null ? s.toString() : null);
}
} else {
for (int ridx = 0; ridx < rownums.length; ridx++)
dvecs[cols[col]].set(rownums[ridx], svecs[col].at(ridx));
}
return;
}
// Handle large case
Vec[] vecs = ses.copyOnWrite(dst, cols);
Vec[] vecs2 = new Vec[cols.length]; // Just the selected columns get updated
for (int i = 0; i < cols.length; i++)
vecs2[i] = vecs[cols[i]];
rows.sort(); // Side-effect internal sort; needed for fast row lookup
new AssignFrameFrameTask(rows, svecs).doAll(vecs2);
}
private static class AssignFrameFrameTask extends RowSliceTask {
private Vec[] _svecs;
private AssignFrameFrameTask(AstNumList rows, Vec[] svecs) {
super(rows);
_svecs = svecs;
}
@Override
void mapChunkSlice(Chunk[] cs, int chkOffset) {
long start = cs[0].start();
Chunk[] scs = null;
for (int i = chkOffset; i < cs[0]._len; ++i) {
long idx = _rows.index(start + i);
if (idx < 0) continue;
if ((scs == null) || (idx < scs[0].start()) || (idx >= scs[0].start() + scs[0].len())) {
int sChkIdx = _svecs[0].elem2ChunkIdx(idx);
scs = new Chunk[_svecs.length];
for (int j = 0; j < _svecs.length; j++) {
scs[j] = _svecs[j].chunkForChunkIdx(sChkIdx);
}
}
BufferedString bStr = new BufferedString();
int si = (int) (idx - scs[0].start());
for (int j = 0; j < cs.length; j++) {
Chunk chk = cs[j];
Chunk schk = scs[j];
if (_svecs[j].get_type() == Vec.T_STR) {
BufferedString s = schk.atStr(bStr, si);
chk.set(i, s != null ? s.toString() : null); // null writes an NA
} else {
chk.set(i, schk.atd(si));
}
}
}
}
}
// Assign a SCALAR over some dst rows; optimize for all rows
private void assign_frame_scalar(Frame dst, int[] cols, AstNumList rows, Object src, Session ses) {
long nrows = rows.cnt();
// Bulk assign a numeric constant (probably zero) over a frame. Directly set
// columns: Copy-On-Write optimization happens here on the apply() exit.
// Note: this skips "scalar to Vec" compatibility check because the whole Vec is overwritten
if (dst.numRows() == nrows && rows.isDense() && (src instanceof Number)) {
Vec anyVec = dst.anyVec();
assert anyVec != null; // if anyVec was null, then dst.numRows() would have been 0
Vec vsrc = anyVec.makeCon((double) src);
for (int col : cols)
dst.replace(col, vsrc);
if (dst._key != null) DKV.put(dst);
return;
}
// Make sure the scalar value is compatible with the target vector
for (int col: cols) {
if (! isScalarCompatible(src, dst.vec(col))) {
throw new IllegalArgumentException("Cannot assign value " + src + " into a vector of type " + dst.vec(col).get_type_str() + ".");
}
}
// Handle fast small case
if (nrows == 1) {
Vec[] vecs = ses.copyOnWrite(dst, cols);
long drow = (long) rows._bases[0];
for (int col : cols)
createValueSetter(vecs[col], src).setValue(vecs[col], drow);
return;
}
// Handle large case
Vec[] vecs = ses.copyOnWrite(dst, cols);
Vec[] vecs2 = new Vec[cols.length]; // Just the selected columns get updated
for (int i = 0; i < cols.length; i++)
vecs2[i] = vecs[cols[i]];
rows.sort(); // Side-effect internal sort; needed for fast row lookup
AssignFrameScalarTask.doAssign(rows, vecs2, src);
}
private static class AssignFrameScalarTask extends RowSliceTask {
final ValueSetter[] _setters;
AssignFrameScalarTask(AstNumList rows, Vec[] vecs, Object value) {
super(rows);
_setters = new ValueSetter[vecs.length];
for (int i = 0; i < _setters.length; i++)
_setters[i] = createValueSetter(vecs[i], value);
}
@Override
void mapChunkSlice(Chunk[] cs, int chkOffset) {
long start = cs[0].start();
for (int i = chkOffset; i < cs[0]._len; ++i)
if (_rows.has(start + i))
for (int col = 0; col < cs.length; col++)
_setters[col].setValue(cs[col], i);
}
/**
* Assigns a given value to the specified rows of the given Vecs.
* @param rows row specification
* @param dst target Vecs
* @param src source Value
*/
static void doAssign(AstNumList rows, Vec[] dst, Object src) {
new AssignFrameScalarTask(rows, dst, src).doAll(dst);
}
}
private boolean isScalarCompatible(Object scalar, Vec v) {
if (scalar == null)
return true;
else if (scalar instanceof Number)
return v.get_type() == Vec.T_NUM || v.get_type() == Vec.T_TIME;
else if (scalar instanceof String) {
if (v.get_type() == Vec.T_CAT) {
return ArrayUtils.contains(v.domain(), (String) scalar);
} else
return v.get_type() == Vec.T_STR || (v.get_type() == Vec.T_UUID);
} else
return false;
}
private static Double nanToNull(double value) {
return Double.isNaN(value) ? null : value;
}
// Boolean assignment with a scalar
private void assign_frame_scalar(Frame dst, int[] cols, Frame rows, Object src, Session ses) {
Vec bool = rows.vec(0);
if (dst.numRows() != rows.numRows()) {
throw new IllegalArgumentException("Frame " + dst._key + " has different number of rows than frame " + rows._key +
" (" + dst.numRows() + " vs " + rows.numRows() + ").");
}
// Bulk assign a numeric constant over a frame. Directly set columns without checking target type
// assuming the user just wants to overwrite everything: Copy-On-Write optimization happens here on the apply() exit.
// Note: this skips "scalar to Vec" compatibility check because the whole Vec is overwritten
if (bool.isConst() && ((int) bool.min() == 1) && (src instanceof Number)) {
Vec anyVec = dst.anyVec();
assert anyVec != null;
Vec vsrc = anyVec.makeCon((double) src);
for (int col : cols)
dst.replace(col, vsrc);
if (dst._key != null) DKV.put(dst);
return;
}
// Make sure the scalar value is compatible with the target vector
for (int col: cols) {
if (! isScalarCompatible(src, dst.vec(col))) {
throw new IllegalArgumentException("Cannot assign value " + src + " into a vector of type " + dst.vec(col).get_type_str() + ".");
}
}
Vec[] vecs = ses.copyOnWrite(dst, cols);
Vec[] vecs2 = new Vec[cols.length]; // Just the selected columns get updated
for (int i = 0; i < cols.length; i++)
vecs2[i] = vecs[cols[i]];
ConditionalAssignTask.doAssign(vecs2, src, rows.vec(0));
}
private static class ConditionalAssignTask extends MRTask<ConditionalAssignTask> {
final ValueSetter[] _setters;
ConditionalAssignTask(Vec[] vecs, Object value) {
_setters = new ValueSetter[vecs.length];
for (int i = 0; i < _setters.length; i++) _setters[i] = AstRecAsgnHelper.createValueSetter(vecs[i], value);
}
@Override
public void map(Chunk[] cs) {
Chunk bool = cs[cs.length - 1];
for (int row = 0; row < cs[0]._len; row++) {
if (bool.at8(row) == 1)
for (int col = 0; col < cs.length - 1; col++) _setters[col].setValue(cs[col], row);
}
}
/**
* Sets a given value to all cells where given predicateVec is true.
* @param dst target Vecs
* @param src source Value
* @param predicateVec predicate Vec
*/
static void doAssign(Vec[] dst, Object src, Vec predicateVec) {
Vec[] vecs = new Vec[dst.length + 1];
System.arraycopy(dst, 0, vecs, 0, dst.length);
vecs[vecs.length - 1] = predicateVec;
new ConditionalAssignTask(dst, src).doAll(vecs);
}
}
private static abstract class RowSliceTask extends MRTask<RowSliceTask> {
final AstNumList _rows;
RowSliceTask(AstNumList rows) { _rows = rows; }
@Override
public void map(Chunk[] cs) {
long start = cs[0].start();
long end = start + cs[0]._len;
long min = (long) _rows.min(), max = (long) _rows.max() - 1; // exclusive max to inclusive max when stride == 1
// [ start, ..., end ] the chunk
//1 [] rows out left: rows.max() < start
//2 [] rows out rite: rows.min() > end
//3 [ rows ] rows run left: rows.min() < start && rows.max() <= end
//4 [ rows ] rows run in : start <= rows.min() && rows.max() <= end
//5 [ rows ] rows run rite: start <= rows.min() && end < rows.max()
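// Worked example (illustrative numbers only): a chunk with start=100 and _len=100 covers
// global rows 100..199; for a row list spanning [150, 250) this is situation 5, so
// startOffset = max(150, 100) = 150 and chkOffset = 50, i.e. mapChunkSlice begins at local row 50.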
if (!(max < start || min > end)) { // not situation 1 or 2 above
long startOffset = min > start ? min : start; // situation 4 and 5 => min > start;
int chkOffset = (int) (startOffset - start);
mapChunkSlice(cs, chkOffset);
}
}
abstract void mapChunkSlice(Chunk[] cs, int chkOffset);
}
}
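// Hedged usage sketch (illustrative helper, not part of the original file): exercising the
// ":=" primitive through the Rapids parser. Assumes a running H2O node and that Frames with
// the hypothetical ids "dst_frame" and "src_frame" are already in the DKV, src_frame having a
// single column type-compatible with dst_frame's column 0; "out" is a fresh temp id.
class RectangleAssignExample {
static water.fvec.Frame overwriteSlice() {
water.rapids.Val result = water.rapids.Rapids.exec("(tmp= out (:= dst_frame src_frame [0] [0:10]))");
return result.getFrame(); // rows 0..9 of column 0 now carry src_frame's values
}
}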
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/assign/AstRm.java
|
package water.rapids.ast.prims.assign;
import water.DKV;
import water.Key;
import water.Keyed;
import water.Value;
import water.fvec.Frame;
import water.rapids.Env;
import water.rapids.vals.ValNum;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
/**
* Remove by ID. Removing a Frame updates refcnts. Returns 1 if the id was removed, 0 if the id does not exist.
*/
public class AstRm extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"id"};
}
@Override
public int nargs() {
return 1 + 1;
} // (rm id)
@Override
public String str() {
return "rm";
}
@Override
public ValNum apply(Env env, Env.StackHelp stk, AstRoot[] asts) {
Key id = Key.make(env.expand(asts[1].str()));
Value val = DKV.get(id);
if (val == null) return new ValNum(0);
if (val.isFrame())
env._ses.remove(val.<Frame>get()); // Remove unshared Vecs
else
Keyed.remove(id); // Normal (e.g. Model) remove
return new ValNum(1);
}
}
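// Hedged usage sketch (illustrative helper, not part of the original file). Assumes a running
// H2O node; "some_frame_id" is a hypothetical DKV key.
class RmExample {
static double removeById() {
return water.rapids.Rapids.exec("(rm some_frame_id)").getNum(); // 1 if removed, 0 if the id did not exist
}
}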
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/assign/AstTmpAssign.java
|
package water.rapids.ast.prims.assign;
import water.DKV;
import water.Key;
import water.Value;
import water.fvec.Frame;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
/**
* Assign a temp. All such assignments are final (they cannot be changed), but the temp can be deleted. The temp is
* returned for immediate use, and is also installed in the DKV; its id must be globally unique in the DKV.
*/
public class AstTmpAssign extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"id", "frame"};
}
@Override
public int nargs() {
return 1 + 2;
} // (tmp= id frame)
@Override
public String str() {
return "tmp=";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot[] asts) {
// Note: non-standard evaluation of the first argument! Instead of being
// executed, it is stringified. This, for example, allows us to write an
// expression as
// (tmp= newid (* frame 3))
// instead of
// (tmp= "newid" (* frame 3))
// On the other hand, this makes us unable to create dynamic identifiers
// in Rapids, for example this is invalid:
// (tmp= (+ "id" 3) (* frame 3))
// Right now there is no need for dynamically generated identifiers, since
// we don't even have proper variables or loops or control structures yet.
//
Key<Frame> id = Key.make(env.expand(asts[1].str()));
Val srcVal = stk.track(asts[2].exec(env));
Frame srcFrame = srcVal.getFrame();
Value v = DKV.get(id);
if (v != null) {
if (v.get().equals(srcFrame))
return (ValFrame) srcVal;
else
throw new IllegalArgumentException("Temp ID " + id + " already exists");
}
Frame dst = new Frame(id, srcFrame._names, srcFrame.vecs());
return new ValFrame(env._ses.track_tmp(dst)); // Track new session-wide ID
}
}
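// Hedged usage sketch (illustrative helper, not part of the original file). Note the
// non-standard evaluation described above: the temp id is written unquoted. "scaled" is a
// hypothetical, previously unused id; "my_frame" must already exist in the DKV.
class TmpAssignExample {
static water.fvec.Frame makeTemp() {
return water.rapids.Rapids.exec("(tmp= scaled (* my_frame 3))").getFrame();
}
}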
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/filters
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/filters/dropduplicates/AstDropDuplicates.java
|
package water.rapids.ast.prims.filters.dropduplicates;
import water.fvec.Frame;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
import water.util.EnumUtils;
import java.util.Arrays;
/**
* Removes duplicated rows, leaving only the first or last observed duplicate in place.
*/
public class AstDropDuplicates extends AstPrimitive<AstDropDuplicates> {
@Override
public int nargs() {
return 1 + 3;
}
@Override
public String[] args() {
return new String[]{"ary", "frame", "cols", "droporder"};
}
@Override
public Val apply(Env env, Env.StackHelp stk, AstRoot[] asts) {
final Frame deduplicatedFrame = stk.track(asts[1].exec(env)).getFrame();
final int[] comparedColumnsIndices = ColumnIndicesParser.parseAndCheckComparedColumnIndices(deduplicatedFrame,
stk.track(asts[2].exec(env)));
final String dropOrderString = asts[3].str();
final KeepOrder keepOrder = EnumUtils.valueOfIgnoreCase(KeepOrder.class, dropOrderString)
.orElseThrow(() -> new IllegalArgumentException(String.format("Unknown drop order: '%s'. Known types: %s",
dropOrderString, Arrays.toString(KeepOrder.values()))));
final DropDuplicateRows dropDuplicateRows = new DropDuplicateRows(deduplicatedFrame, comparedColumnsIndices, keepOrder);
final Frame outputFrame = dropDuplicateRows.dropDuplicates();
return new ValFrame(outputFrame);
}
@Override
public String str() {
return "dropdup";
}
}
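// Hedged usage sketch (illustrative helper, not part of the original file): deduplicating the
// hypothetical DKV frame "my_frame" on columns 0 and 1, keeping the first occurrence. The
// droporder string is matched case-insensitively against the KeepOrder enum.
class DropDupExample {
static water.fvec.Frame dropDups() {
return water.rapids.Rapids.exec("(dropdup my_frame [0 1] \"first\")").getFrame();
}
}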
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/filters
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/filters/dropduplicates/CollectChunkBorderValuesTask.java
|
package water.rapids.ast.prims.filters.dropduplicates;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.NewChunk;
/**
* Collects border values - the last row of each chunk.
*/
public class CollectChunkBorderValuesTask extends MRTask<CollectChunkBorderValuesTask> {
@Override
public void map(final Chunk[] oldChunks, NewChunk[] newChunks) {
for (int columnIndex = 0; columnIndex < oldChunks.length; columnIndex++) {
oldChunks[columnIndex].extractRows(newChunks[columnIndex], oldChunks[columnIndex].len() - 1);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/filters
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/filters/dropduplicates/ColumnIndicesParser.java
|
package water.rapids.ast.prims.filters.dropduplicates;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Val;
public class ColumnIndicesParser {
/**
* @param deduplicatedFrame Deduplicated frame to look for vectors in
* @param comparedColumns A {@link Val} instance of columns to be compared during row de-duplication process.
* Accepts {@link water.rapids.ast.params.AstStr}, {@link water.rapids.ast.params.AstStrList},
* and {@link water.rapids.ast.params.AstNumList}.
* @return An array of primitive integers with the indices of the selected vectors
* @throws IllegalArgumentException If comparedColumns is not of an accepted type, if any given column is not
* found in deduplicatedFrame, or if any compared column has an unsupported type.
*/
public static int[] parseAndCheckComparedColumnIndices(final Frame deduplicatedFrame, final Val comparedColumns)
throws IllegalArgumentException {
final int[] columnIndices;
if (comparedColumns.isStr()) {
final String columnName = comparedColumns.getStr();
final int columnIndex = deduplicatedFrame.find(columnName);
if (columnIndex == -1) {
throw new IllegalArgumentException(String.format("Unknown column name: '%s'", columnName));
}
columnIndices = new int[]{columnIndex};
} else if (comparedColumns.isStrs()) {
final String[] columnNames = comparedColumns.getStrs();
columnIndices = new int[columnNames.length];
for (int i = 0; i < columnNames.length; i++) {
final String columnName = columnNames[i];
final int columnIndex = deduplicatedFrame.find(columnName);
if (columnIndex == -1) {
throw new IllegalArgumentException(String.format("Unknown column name: '%s'", columnName));
} else if (isUnsupportedVecType(deduplicatedFrame.types()[columnIndex])) {
throw new IllegalArgumentException(String.format("Column '%s' is of unsupported type %s for row de-duplication.",
columnName, Vec.TYPE_STR[deduplicatedFrame.types()[columnIndex]]));
}
columnIndices[i] = columnIndex;
}
} else if (comparedColumns.isNums()) {
final double[] columnRangeDouble = comparedColumns.getNums();
columnIndices = new int[columnRangeDouble.length];
for (int i = 0; i < columnRangeDouble.length; i++) {
columnIndices[i] = (int) columnRangeDouble[i];
if (columnIndices[i] < 0 || columnIndices[i] > deduplicatedFrame.numCols() - 1) {
throw new IllegalArgumentException(String.format("No such column index: '%d', frame has %d columns," +
"maximum index is %d. ", columnIndices[i], deduplicatedFrame.numCols(), deduplicatedFrame.numCols() - 1));
} else if (isUnsupportedVecType(deduplicatedFrame.types()[columnIndices[i]])) {
throw new IllegalArgumentException(String.format("Column '%s' is of unsupported type %s for row de-duplication.",
deduplicatedFrame.name(columnIndices[i]),
Vec.TYPE_STR[deduplicatedFrame.types()[columnIndices[i]]]));
}
}
} else {
throw new IllegalArgumentException(String.format("Column range for deduplication must either be a set of columns, or a " +
"column range. Given type: %s", comparedColumns.type()));
}
return columnIndices;
}
private static boolean isUnsupportedVecType(final byte vecType) {
return vecType == Vec.T_STR || vecType == Vec.T_BAD || vecType == Vec.T_UUID;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/filters
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/filters/dropduplicates/DropDuplicateRows.java
|
package water.rapids.ast.prims.filters.dropduplicates;
import water.Scope;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Merge;
import water.util.ArrayUtils;
import java.util.Arrays;
/**
* Drops duplicated rows of a Frame
*/
public class DropDuplicateRows {
private static final String LABEL_COLUMN_NAME = "label";
final Frame sourceFrame;
final int[] comparedColumnIndices;
final KeepOrder keepOrder;
/**
* @param sourceFrame Frame to perform the row de-duplication on
* @param comparedColumnIndices Indices of columns to consider during the comparison
* @param keepOrder Which rows to keep.
*/
public DropDuplicateRows(final Frame sourceFrame, final int[] comparedColumnIndices, final KeepOrder keepOrder) {
this.sourceFrame = sourceFrame;
this.comparedColumnIndices = comparedColumnIndices;
this.keepOrder = keepOrder;
}
public Frame dropDuplicates() {
Frame outputFrame = null;
try {
Scope.enter();
final Vec labelVec = Scope.track(Vec.makeSeq(1, sourceFrame.numRows()));
final Frame fr = new Frame(sourceFrame);
fr.add(LABEL_COLUMN_NAME, labelVec);
final Frame sortedFrame = Scope.track(sortByComparedColumns(fr));
final Frame chunkBoundaries = Scope.track(new CollectChunkBorderValuesTask()
.doAll(sortedFrame.types(), sortedFrame)
.outputFrame(null, sortedFrame.names(), sortedFrame.domains()));
final Frame deDuplicatedFrame = Scope.track(new DropDuplicateRowsTask(chunkBoundaries, comparedColumnIndices)
.doAll(sortedFrame.types(), sortedFrame)
.outputFrame(null, sortedFrame.names(), sortedFrame.domains())); // Removing duplicates, domains remain the same
// Before the final sorted, deduplicated frame is created, remove the unused intermediate frames to free some space early
chunkBoundaries.remove();
sortedFrame.remove();
outputFrame = Scope.track(Merge.sort(deDuplicatedFrame, deDuplicatedFrame.numCols() - 1));
outputFrame.remove(outputFrame.numCols() - 1).remove();
return outputFrame;
} finally {
if (outputFrame != null) {
Scope.exit(outputFrame.keys());
} else {
Scope.exit(); // Clean up in case of any exception/error.
}
}
}
/**
* Creates a copy of the original dataset, sorted by all compared columns.
* The sort is done with respect to {@link KeepOrder} value.
*
* @return A new Frame sorted by all compared columns.
*/
private Frame sortByComparedColumns(Frame fr) {
final int labelColumnIndex = fr.find(LABEL_COLUMN_NAME);
final int[] sortByColumns = ArrayUtils.append(comparedColumnIndices, labelColumnIndex);
final boolean ascendingSort = KeepOrder.First == keepOrder;
final int[] sortOrder = new int[sortByColumns.length];
// Compared columns are always sorted in the same order
Arrays.fill(sortOrder, 0, sortOrder.length - 1, Merge.ASCENDING);
// Label column is sorted differently based on DropOrder
sortOrder[sortOrder.length - 1] = ascendingSort ? Merge.ASCENDING : Merge.DESCENDING;
return Merge.sort(fr, sortByColumns, sortOrder);
}
}
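// Hedged Java-level sketch (illustrative helper, not part of the original file): calling
// DropDuplicateRows directly instead of going through Rapids. Assumes fr is an in-DKV Frame
// whose columns 0 and 1 have supported (non-string, non-UUID) types.
class DropDuplicateRowsExample {
static water.fvec.Frame dedupe(water.fvec.Frame fr) {
return new DropDuplicateRows(fr, new int[]{0, 1}, KeepOrder.First).dropDuplicates();
}
}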
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/filters
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/filters/dropduplicates/DropDuplicateRowsTask.java
|
package water.rapids.ast.prims.filters.dropduplicates;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
/**
* Performs the row de-duplication itself.
*/
public class DropDuplicateRowsTask extends MRTask<DropDuplicateRowsTask> {
final Frame chunkBoundaries;
private final int[] comparedColumnIndices;
/**
* @param chunkBoundaries Frame with border values of each chunk
* @param comparedColumnIndices Columns indices to include in row deduplication comparison
*/
public DropDuplicateRowsTask(final Frame chunkBoundaries, final int[] comparedColumnIndices) {
this.chunkBoundaries = chunkBoundaries;
this.comparedColumnIndices = comparedColumnIndices;
}
@Override
public void map(Chunk[] chunks, NewChunk[] newChunks) {
final int chunkLength = chunks[0].len();
final int chunkId = chunks[0].cidx();
for (int row = 0; row < chunkLength; row++) {
final boolean equal;
if (chunkId == 0 && row == 0) {
for (int columnIndex = 0; columnIndex < chunks.length; columnIndex++) {
chunks[columnIndex].extractRows(newChunks[columnIndex], row);
}
continue;
} else if (chunkId != 0 && row == 0) {
equal = compareFirstRowWithPreviousChunk(chunks, row, chunkId);
} else {
equal = compareRows(chunks, row, chunks, row - 1);
}
if (!equal) {
for (int columnIndex = 0; columnIndex < chunks.length; columnIndex++) {
chunks[columnIndex].extractRows(newChunks[columnIndex], row);
}
}
}
}
private boolean compareFirstRowWithPreviousChunk(final Chunk[] chunks, final int row, final int chunkId) {
final Chunk[] previousRowChunks = new Chunk[chunkBoundaries.numCols()];
for (int column = 0; column < chunkBoundaries.numCols(); column++) {
previousRowChunks[column] = chunkBoundaries.vec(column).chunkForChunkIdx(chunkId - 1);
}
return compareRows(chunks, row, previousRowChunks, 0);
}
private boolean compareRows(final Chunk[] chunksA, final int rowA, final Chunk[] chunksB, final int rowB) {
for (final int column : comparedColumnIndices) {
final boolean isValueANA = chunksA[column].isNA(rowA);
final boolean isValueBNA = chunksB[column].isNA(rowB);
if (isValueANA || isValueBNA) {
return isValueANA && isValueBNA; // rows are equal only if both values are NA
}
switch (chunksA[column].vec().get_type()) {
case Vec.T_NUM:
final double doubleValueA = chunksA[column].atd(rowA);
final double doubleValueB = chunksB[column].atd(rowB);
if (doubleValueA != doubleValueB) return false;
break;
case Vec.T_CAT:
case Vec.T_TIME:
final long longValueA = chunksA[column].at8(rowA); // categorical levels and timestamps both compare as longs
final long longValueB = chunksB[column].at8(rowB);
if (longValueA != longValueB) return false;
break;
default:
throw new IllegalStateException("Unexpected value: " + chunksA[column].vec().get_type());
}
}
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/filters
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/filters/dropduplicates/KeepOrder.java
|
package water.rapids.ast.prims.filters.dropduplicates;
/**
* Determines which duplicated row is kept during row de-duplication process.
*/
public enum KeepOrder {
First, // Retain first, drop rest
Last // Retain last, drop rest
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/internal/AstRunTool.java
|
package water.rapids.ast.prims.internal;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValStr;
import java.lang.reflect.Method;
public class AstRunTool extends AstPrimitive<AstRunTool> {
private static final String TOOLS_PACKAGE = "water.tools.";
@Override
public String[] args() {
return new String[]{"tool_class", "tool_parameters"};
}
@Override
public int nargs() {
return 1 + 2;
} // (run_tool tool_class tool_parameters)
@Override
public String str() {
return "run_tool";
}
@Override
public ValStr apply(Env env, Env.StackHelp stk, AstRoot[] asts) {
String toolClassName = stk.track(asts[1].exec(env)).getStr();
String[] args = stk.track(asts[2].exec(env)).getStrs();
try {
// only allow to run approved tools (from our package), not just anything on classpath
Class<?> clazz = Class.forName(TOOLS_PACKAGE + toolClassName);
Method mainMethod = clazz.getDeclaredMethod("mainInternal", String[].class);
mainMethod.invoke(null, new Object[]{args});
} catch (Exception e) {
Throwable cause = e.getCause() != null ? e.getCause() : e; // reflective calls wrap the real cause; others (e.g. ClassNotFoundException) have none
RuntimeException shorterException = new RuntimeException(cause.getMessage());
shorterException.setStackTrace(new StackTraceElement[0]);
throw shorterException;
}
return new ValStr("OK");
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstAbs.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstAbs extends AstUniOp {
@Override
public String str() {
return "abs";
}
@Override
public double op(double d) {
return Math.abs(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstAcos.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstAcos extends AstUniOp {
@Override
public String str() {
return "acos";
}
@Override
public double op(double d) {
return Math.acos(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstAcosh.java
|
package water.rapids.ast.prims.math;
import org.apache.commons.math3.util.FastMath;
/**
*/
public class AstAcosh extends AstUniOp {
@Override
public String str() {
return "acosh";
}
@Override
public double op(double d) {
return FastMath.acosh(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstAsin.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstAsin extends AstUniOp {
@Override
public String str() {
return "asin";
}
@Override
public double op(double d) {
return Math.asin(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstAsinh.java
|
package water.rapids.ast.prims.math;
import org.apache.commons.math3.util.FastMath;
/**
*/
public class AstAsinh extends AstUniOp {
@Override
public String str() {
return "asinh";
}
@Override
public double op(double d) {
return FastMath.asinh(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstAtan.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstAtan extends AstUniOp {
@Override
public String str() {
return "atan";
}
@Override
public double op(double d) {
return Math.atan(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstAtanh.java
|
package water.rapids.ast.prims.math;
import org.apache.commons.math3.util.FastMath;
/**
*/
public class AstAtanh extends AstUniOp {
@Override
public String str() {
return "atanh";
}
@Override
public double op(double d) {
return FastMath.atanh(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstCeiling.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstCeiling extends AstUniOp {
@Override
public String str() {
return "ceiling";
}
@Override
public double op(double d) {
return Math.ceil(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstCos.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstCos extends AstUniOp {
@Override
public String str() {
return "cos";
}
@Override
public double op(double d) {
return Math.cos(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstCosPi.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstCosPi extends AstUniOp {
@Override
public String str() {
return "cospi";
}
@Override
public double op(double d) {
return Math.cos(Math.PI * d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstCosh.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstCosh extends AstUniOp {
@Override
public String str() {
return "cosh";
}
@Override
public double op(double d) {
return Math.cosh(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstDiGamma.java
|
package water.rapids.ast.prims.math;
import org.apache.commons.math3.special.Gamma;
/**
*/
public class AstDiGamma extends AstUniOp {
@Override
public String str() {
return "digamma";
}
@Override
public double op(double d) {
return Double.isNaN(d) ? Double.NaN : Gamma.digamma(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstExp.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstExp extends AstUniOp {
@Override
public String str() {
return "exp";
}
@Override
public double op(double d) {
return Math.exp(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstExpm1.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstExpm1 extends AstUniOp {
@Override
public String str() {
return "expm1";
}
@Override
public double op(double d) {
return Math.expm1(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstFloor.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstFloor extends AstUniOp {
@Override
public String str() {
return "floor";
}
@Override
public double op(double d) {
return Math.floor(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstGamma.java
|
package water.rapids.ast.prims.math;
import org.apache.commons.math3.special.Gamma;
/**
*/
public class AstGamma extends AstUniOp {
@Override
public String str() {
return "gamma";
}
@Override
public double op(double d) {
return Gamma.gamma(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstLGamma.java
|
package water.rapids.ast.prims.math;
import org.apache.commons.math3.special.Gamma;
/**
*/
public class AstLGamma extends AstUniOp {
@Override
public String str() {
return "lgamma";
}
@Override
public double op(double d) {
return Gamma.logGamma(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstLog.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstLog extends AstUniOp {
@Override
public String str() {
return "log";
}
@Override
public double op(double d) {
return Math.log(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstLog10.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstLog10 extends AstUniOp {
@Override
public String str() {
return "log10";
}
@Override
public double op(double d) {
return Math.log10(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstLog1P.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstLog1P extends AstUniOp {
@Override
public String str() {
return "log1p";
}
@Override
public double op(double d) {
return Math.log1p(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstLog2.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstLog2 extends AstUniOp {
@Override
public String str() {
return "log2";
}
@Override
public double op(double d) {
return Math.log(d) / Math.log(2);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstNoOp.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstNoOp extends AstUniOp {
@Override
public String str() {
return "none";
}
@Override
public double op(double d) {
return d;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstNot.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstNot extends AstUniOp {
@Override
public String str() {
return "not";
}
@Override
public double op(double d) {
return Double.isNaN(d) ? Double.NaN : d == 0 ? 1 : 0;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstRound.java
|
package water.rapids.ast.prims.math;
import water.rapids.ast.prims.operators.AstBinOp;
/**
*/
public class AstRound extends AstBinOp {
@Override
public String str() {
return "round";
}
@Override
public double op(double x, double digits) {
// e.g.: floor(2.676*100 + 0.5) / 100 => 2.68
if (Double.isNaN(x)) return x;
double sgn = x < 0 ? -1 : 1;
x = Math.abs(x);
if ((int) digits != digits) digits = Math.round(digits);
double power_of_10 = (int) Math.pow(10, (int) digits);
return sgn * (digits == 0
// go to the even digit
? (x % 1 > 0.5 || (x % 1 == 0.5 && !(Math.floor(x) % 2 == 0)))
? Math.ceil(x)
: Math.floor(x)
: Math.floor(x * power_of_10 + 0.5) / power_of_10);
}
}
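// Worked examples of the rounding above (illustrative only): at digits == 0 the half-to-even
// rule applies, so op(2.5, 0) -> 2.0 (floor(2.5) is even) while op(3.5, 0) -> 4.0 (floor(3.5)
// is odd); with digits, op(2.676, 2) -> floor(2.676 * 100 + 0.5) / 100 = 2.68.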
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstSgn.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstSgn extends AstUniOp {
@Override
public String str() {
return "sign";
}
@Override
public double op(double d) {
return Math.signum(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstSignif.java
|
package water.rapids.ast.prims.math;
import water.rapids.ast.prims.operators.AstBinOp;
/**
*/
public class AstSignif extends AstBinOp {
@Override
public String str() {
return "signif";
}
@Override
public double op(double x, double digits) {
if (Double.isNaN(x)) return x;
if (digits < 1) digits = 1; //mimic R's base::signif
if ((int) digits != digits) digits = Math.round(digits);
java.math.BigDecimal bd = new java.math.BigDecimal(x);
bd = bd.round(new java.math.MathContext((int) digits, java.math.RoundingMode.HALF_EVEN));
return bd.doubleValue();
}
}
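// Worked example (illustrative only): op(123.456, 2) keeps two significant digits under
// HALF_EVEN rounding, giving 120.0; op(0.00123456, 3) gives 0.00123.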
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstSin.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstSin extends AstUniOp {
@Override
public String str() {
return "sin";
}
@Override
public double op(double d) {
return Math.sin(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstSinPi.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstSinPi extends AstUniOp {
@Override
public String str() {
return "sinpi";
}
@Override
public double op(double d) {
return Math.sin(Math.PI * d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstSinh.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstSinh extends AstUniOp {
@Override
public String str() {
return "sinh";
}
@Override
public double op(double d) {
return Math.sinh(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstSqrt.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstSqrt extends AstUniOp {
@Override
public String str() {
return "sqrt";
}
@Override
public double op(double d) {
return Math.sqrt(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstTan.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstTan extends AstUniOp {
@Override
public String str() {
return "tan";
}
@Override
public double op(double d) {
return Math.tan(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstTanPi.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstTanPi extends AstUniOp {
@Override
public String str() {
return "tanpi";
}
@Override
public double op(double d) {
return Math.tan(Math.PI * d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstTanh.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstTanh extends AstUniOp {
@Override
public String str() {
return "tanh";
}
@Override
public double op(double d) {
return Math.tanh(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstTriGamma.java
|
package water.rapids.ast.prims.math;
import org.apache.commons.math3.special.Gamma;
/**
*/
public class AstTriGamma extends AstUniOp {
@Override
public String str() {
return "trigamma";
}
@Override
public double op(double d) {
return Double.isNaN(d) ? Double.NaN : Gamma.trigamma(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstTrunc.java
|
package water.rapids.ast.prims.math;
/**
*/
public class AstTrunc extends AstUniOp {
@Override
public String str() {
return "trunc";
}
@Override
public double op(double d) {
return d >= 0 ? Math.floor(d) : Math.ceil(d);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/math/AstUniOp.java
|
package water.rapids.ast.prims.math;
import water.H2O;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.rapids.Val;
import water.rapids.ast.AstBuiltin;
import water.rapids.vals.ValFrame;
import water.rapids.vals.ValNum;
import water.rapids.vals.ValRow;
/**
* Subclasses auto-widen between scalars and Frames, and have exactly one argument
*/
public abstract class AstUniOp<T extends AstUniOp<T>> extends AstBuiltin<T> {
@Override
public String[] args() {
return new String[]{"ary"};
}
@Override
public int nargs() {
return 1 + 1;
}
@Override
public Val exec(Val... args) {
Val val = args[1];
switch (val.type()) {
case Val.NUM:
return new ValNum(op(val.getNum()));
case Val.FRM:
Frame fr = val.getFrame();
for (int i = 0; i < fr.numCols(); i++)
if (!fr.vec(i).isNumeric())
throw new IllegalArgumentException(
"Operator " + str() + "() cannot be applied to non-numeric column " + fr.name(i));
// Build the new column names by wrapping each input name in the op: for example, a column
// named "income" under a log transformation is renamed to `log(income)`.
String[] newNames = new String[fr.numCols()];
for (int i = 0; i < newNames.length; i++) {
newNames[i] = str() + "(" + fr.name(i) + ")";
}
return new ValFrame(new MRTask() {
@Override
public void map(Chunk cs[], NewChunk ncs[]) {
for (int col = 0; col < cs.length; col++) {
Chunk c = cs[col];
NewChunk nc = ncs[col];
for (int i = 0; i < c._len; i++)
nc.addNum(op(c.atd(i)));
}
}
}.doAll(fr.numCols(), Vec.T_NUM, fr).outputFrame(newNames, null));
case Val.ROW:
double[] ds = new double[val.getRow().length];
for (int i = 0; i < ds.length; ++i)
ds[i] = op(val.getRow()[i]);
String[] names = ((ValRow) val).getNames().clone();
return new ValRow(ds, names);
default:
throw H2O.unimpl("unop unimpl: " + val.getClass());
}
}
public abstract double op(double d);
}
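// Hedged sketch (illustrative, not part of the original file): the minimal surface a new
// unary op needs. "cbrt" is a hypothetical op name; registering it with the Rapids op table
// is out of scope here.
class AstCbrtExample extends AstUniOp<AstCbrtExample> {
@Override
public String str() {
return "cbrt";
}
@Override
public double op(double d) {
return Math.cbrt(d);
}
}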
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/matrix/AstMMult.java
|
package water.rapids.ast.prims.matrix;
import hex.DMatrix;
import water.fvec.Frame;
import water.rapids.Env;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
/**
* Matrix multiplication
*/
public class AstMMult extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary", "ary2"};
}
@Override
public int nargs() {
return 1 + 2;
} // (x X1 X2)
@Override
public String str() {
return "x";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Frame X1 = stk.track(asts[1].exec(env)).getFrame();
Frame X2 = stk.track(asts[2].exec(env)).getFrame();
return new ValFrame(DMatrix.mmul(X1, X2));
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/matrix/AstTranspose.java
|
package water.rapids.ast.prims.matrix;
import hex.DMatrix;
import water.fvec.Frame;
import water.rapids.Env;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
/**
* Matrix transposition
*/
public class AstTranspose extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"ary"};
}
@Override
public int nargs() {
return 1 + 1;
} // (t X)
@Override
public String str() {
return "t";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Frame f = stk.track(asts[1].exec(env)).getFrame();
return new ValFrame(DMatrix.transpose(f));
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/misc/AstComma.java
|
package water.rapids.ast.prims.misc;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValNum;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
/**
* Evaluate any number of expressions, returning the last one
*/
public class AstComma extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"..."};
}
@Override
public int nargs() {
return -1;
} // variable args
@Override
public String str() {
return ",";
}
@Override
public Val apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Val val = new ValNum(0);
for (int i = 1; i < asts.length; i++)
val = stk.track(asts[i].exec(env)); // Evaluate all expressions for side-effects
return val; // Return the last one
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/misc/AstLs.java
|
package water.rapids.ast.prims.misc;
import water.Futures;
import water.Key;
import water.KeySnapshot;
import water.fvec.AppendableVec;
import water.fvec.Frame;
import water.fvec.NewChunk;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.vals.ValFrame;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import java.util.ArrayList;
/**
* R 'ls' command.
* <p/>
* This method is purely for the console right now: it prints into the string buffer, and the
* JSON response is not configured at all.
*/
public class AstLs extends AstPrimitive {
@Override
public String[] args() {
return null;
}
@Override
public int nargs() {
return 1;
}
@Override
public String str() {
return "ls";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
ArrayList<String> domain = new ArrayList<>();
Futures fs = new Futures();
AppendableVec av = new AppendableVec(Vec.VectorGroup.VG_LEN1.addVec(), Vec.T_CAT);
NewChunk keys = new NewChunk(av, 0);
int r = 0;
for (Key key : KeySnapshot.globalSnapshot().keys()) {
keys.addCategorical(r++);
domain.add(key.toString());
}
String[] key_domain = domain.toArray(new String[domain.size()]);
av.setDomain(key_domain);
keys.close(fs);
Vec c0 = av.layout_and_close(fs); // c0 holds the categorical column of key names
fs.blockForPending();
return new ValFrame(new Frame(Key.<Frame>make("h2o_ls"), new String[]{"key"}, new Vec[]{c0}));
}
}
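// Hedged usage sketch (illustrative helper, not part of the original file). Assumes a running
// H2O node; returns a one-column categorical Frame named "key" listing all DKV keys.
class LsExample {
static water.fvec.Frame listKeys() {
return water.rapids.Rapids.exec("(ls)").getFrame();
}
}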
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/misc/AstSetProperty.java
|
package water.rapids.ast.prims.misc;
import water.MRTask;
import water.rapids.Val;
import water.rapids.ast.AstBuiltin;
import water.rapids.vals.ValStr;
import water.util.ArrayUtils;
import water.util.Log;
import water.util.StringUtils;
/**
* Internal operator that lets user set a given system property on all nodes of H2O cluster.
* It is meant for debugging of running clusters and it is not meant to be directly exposed to users.
*/
public class AstSetProperty extends AstBuiltin<AstSetProperty> {
@Override
public String[] args() {
return new String[]{"property", "value"};
}
@Override
public int nargs() {
return 1 + 2;
} // (setproperty property value)
@Override
public String str() {
return "setproperty";
}
@Override
protected ValStr exec(Val[] args) {
String property = args[1].getStr();
String value = args[2].getStr();
String debugMessage = setClusterProperty(property, value);
return new ValStr(debugMessage);
}
private static String setClusterProperty(String property, String value) {
String[] oldValues = new SetClusterPropertyTask(property, value).doAllNodes()._oldValues;
return "Old values of " + property + " (per node): " + StringUtils.join(",", oldValues);
}
private static class SetClusterPropertyTask extends MRTask<SetClusterPropertyTask> {
private String _property;
private String _value;
private String[] _oldValues;
private SetClusterPropertyTask(String property, String value) {
_property = property;
_value = value;
_oldValues = new String[0];
}
@Override
protected void setupLocal() {
_oldValues = ArrayUtils.append(_oldValues, String.valueOf(System.getProperty(_property)));
Log.info("Setting property: " + _property + "=" + _value);
System.setProperty(_property, _value);
}
@Override
public void reduce(SetClusterPropertyTask mrt) {
_oldValues = ArrayUtils.append(_oldValues, mrt._oldValues);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/models/AstFairnessMetrics.java
|
package water.rapids.ast.prims.models;
import hex.AUC2;
import hex.Model;
import org.apache.commons.math3.distribution.HypergeometricDistribution;
import org.apache.commons.math3.stat.inference.GTest;
import water.DKV;
import water.Key;
import water.MRTask;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValMapFrame;
import water.util.ArrayUtils;
import water.util.TwoDimTable;
import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.IntStream;
public class AstFairnessMetrics extends AstPrimitive {
static public class FairnessMetrics {
double tp;
double fp;
double tn;
double fn;
double total;
double relativeSize;
double accuracy;
double precision;
double f1;
double tpr;
double tnr;
double fpr;
double fnr;
double auc;
double aucpr;
double gini;
double selected;
double selectedRatio;
double logloss;
public FairnessMetrics(double tp, double tn, double fp, double fn, double logLossSum, AUC2.AUCBuilder aucBuilder, double nrows) {
this.tp = tp;
this.tn = tn;
this.fp = fp;
this.fn = fn;
total = tp + fp + tn + fn;
logloss = logLossSum / total;
relativeSize = total / nrows;
accuracy = (tp + tn) / total;
precision = tp / (fp + tp);
f1 = (2 * tp) / (2 * tp + fp + fn);
tpr = tp / (tp + fn);
tnr = tn / (tn + fp);
fpr = fp / (fp + tn);
fnr = fn / (fn + tp);
if (aucBuilder != null) {
AUC2 auc2 = new AUC2(aucBuilder);
auc = auc2._auc;
aucpr = auc2._pr_auc;
gini = auc2._gini;
} else {
auc = Double.NaN;
aucpr = Double.NaN;
gini = Double.NaN;
}
selected = tp + fp;
selectedRatio = (tp + fp) / total;
}
}
public static class FairnessMRTask extends MRTask {
// Threshold to switch from Fisher's exact test to G-test.
// Fisher's exact test gets slower with increasing population size;
// fortunately, the G-test approximates it very well for bigger populations.
// The recommendation is to use Fisher's test if the population size is lower than 1000
// (http://www.biostathandbook.com/small.html), but nowadays even 10000 is computed in the blink of an eye.
public static final int GTEST_THRESHOLD = 10000;
// The magic constant relErr = 1 + 1e-7 is taken from the R implementation of fisher.test in order to make
// the results the same => easier to test
public static final double FISHER_TEST_REL_ERROR = (1 + 1e-7);
int[] protectedColsIdx;
int[] cardinalities;
int responseIdx;
int predictionIdx;
final int tpIdx = 0;
final int tnIdx = 1;
final int fpIdx = 2;
final int fnIdx = 3;
final int llsIdx = 4; // Log Loss Sum
final int essentialMetrics = 5;
final int maxIndex;
final int favourableClass;
double[] _results; // tp/tn/fp/fn counts plus the log-loss sum; double so the log-loss accumulates without truncation
AUC2.AUCBuilder[] _aucs;
public FairnessMRTask(int[] protectedColsIdx, int[] cardinalities, int responseIdx, int predictionIdx, int favourableClass) {
super();
this.protectedColsIdx = protectedColsIdx;
this.cardinalities = cardinalities;
this.responseIdx = responseIdx;
this.predictionIdx = predictionIdx;
this.favourableClass = favourableClass;
double maxIndexDbl = Arrays.stream(cardinalities).asDoubleStream().reduce((a, b) -> a * b).getAsDouble();
if (maxIndexDbl > Integer.MAX_VALUE)
throw new RuntimeException("Too many combinations of categories! Maximum number of category combinations is " + Integer.MAX_VALUE + "!");
this.maxIndex = (int) maxIndexDbl;
}
private int pColsToKey(Chunk[] cs, int row) {
int[] indices = new int[protectedColsIdx.length];
for (int i = 0; i < protectedColsIdx.length; i++) {
if (cs[protectedColsIdx[i]].isNA(row))
indices[i] = (cardinalities[i] - 1);
else
indices[i] += cs[protectedColsIdx[i]].at8(row);
}
return pColsToKey(indices);
}
public int pColsToKey(int[] indices) {
int result = 0;
int base = 1;
for (int i = 0; i < protectedColsIdx.length; i++) {
result += indices[i] * base;
base *= cardinalities[i];
}
return result;
}
private double[] keyToPCols(int value) {
double[] result = new double[cardinalities.length];
for (int i = 0; i < cardinalities.length; i++) {
final int tmp = value % cardinalities[i];
value /= cardinalities[i];
if (tmp == cardinalities[i] - 1)
result[i] = Double.NaN;
else
result[i] = tmp;
}
return result;
}
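// Worked example of the mixed-radix encoding above (illustrative numbers only): with
// cardinalities {3, 4}, indices {1, 2} encode to key = 1 * 1 + 2 * 3 = 7, and keyToPCols(7)
// decodes back to {7 % 3, (7 / 3) % 4} = {1, 2}. The top slot of each column
// (cardinality - 1) is reserved for NAs and decodes to Double.NaN.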
protected String keyToString(int value, Frame fr) {
double[] pcolIdx = keyToPCols(value);
StringBuilder result = new StringBuilder();
for (int i = 0; i < protectedColsIdx.length; i++) {
if (i > 0) result.append(",");
if (Double.isFinite(pcolIdx[i])) {
result.append(fr.vec(protectedColsIdx[i]).domain()[(int) pcolIdx[i]]);
} else {
result.append("NaN");
}
}
return result.toString().replaceAll("[^A-Za-z0-9,]", "_");
}
@Override
public void map(Chunk[] cs) {
assert _results == null;
_results = new double[maxIndex * essentialMetrics];
_aucs = new AUC2.AUCBuilder[maxIndex];
for (int i = 0; i < cs[0]._len; i++) {
final int key = pColsToKey(cs, i);
final long response = favourableClass == 1 ? cs[responseIdx].at8(i) : 1 - cs[responseIdx].at8(i);
final long prediction = favourableClass == 1 ? cs[predictionIdx].at8(i) : 1 - cs[predictionIdx].at8(i);
final double predictionProb = favourableClass == 1 ? cs[predictionIdx + 2].atd(i) : cs[predictionIdx + 1].atd(i);
if (response == prediction) {
if (response == 1)
_results[essentialMetrics * key + tpIdx]++;
else
_results[essentialMetrics * key + tnIdx]++;
} else {
if (prediction == 1)
_results[essentialMetrics * key + fpIdx]++;
else
_results[essentialMetrics * key + fnIdx]++;
}
_results[essentialMetrics * key + llsIdx] += -(response * Math.log(predictionProb) + (1 - response) * Math.log(1 - predictionProb));
if (_aucs[key] == null)
_aucs[key] = new AUC2.AUCBuilder(400);
_aucs[key].perRow(predictionProb, (int) response, 1);
}
}
@Override
public void reduce(MRTask mrt) {
FairnessMRTask other = (FairnessMRTask) mrt;
if (this._results == other._results) return;
for (int i = 0; i < _results.length; i++) {
_results[i] += other._results[i];
}
for (int i = 0; i < maxIndex; i++) {
if (_aucs[i] == null)
_aucs[i] = other._aucs[i];
else if (other._aucs[i] != null)
_aucs[i].reduce(other._aucs[i]);
}
}
public Frame getMetrics(String[] protectedCols, Frame fr, Model model, String[] reference, final String frName) {
// Calculate additional metrics
FairnessMetrics[] results = new FairnessMetrics[maxIndex];
final long nrows = fr.numRows();
for (int i = 0; i < maxIndex; i++) {
results[i] = new FairnessMetrics(
_results[i * essentialMetrics + tpIdx],
_results[i * essentialMetrics + tnIdx],
_results[i * essentialMetrics + fpIdx],
_results[i * essentialMetrics + fnIdx],
_results[i * essentialMetrics + llsIdx],
_aucs[i],
nrows
);
}
// Get reference - If not specified, use the biggest group as a reference.
int referenceIdx = 0;
if (reference != null) {
int[] indices = new int[protectedCols.length];
for (int i = 0; i < protectedCols.length; i++) {
indices[i] = ArrayUtils.find(fr.vec(protectedCols[i]).domain(), reference[i]);
}
referenceIdx = pColsToKey(indices);
} else {
double max = 0;
for (int key = 0; key < maxIndex; key++) {
if (results[key].total > max) {
max = results[key].total;
referenceIdx = key;
}
}
}
int emptyResults = 0;
for (FairnessMetrics fm : results)
emptyResults += fm.total == 0 ? 1 : 0;
// Fill in a frame
final String[] skipAIR = new String[]{"total", "relativeSize"};
Field[] metrics = FairnessMetrics.class.getDeclaredFields();
final int protectedColsCnt = protectedCols.length;
final int metricsCount = metrics.length + (metrics.length - skipAIR.length) + 1/*p-value*/;
double[][] resultCols = new double[protectedColsCnt + metricsCount][results.length - emptyResults];
FairnessMetrics ref = results[referenceIdx];
int nonEmptyKey = 0;
for (int key = 0; key < maxIndex; key++) {
if (results[key].total == 0) continue;
int counter = 0;
double[] decodedKey = keyToPCols(key);
for (int i = 0; i < protectedCols.length; i++) {
resultCols[i][nonEmptyKey] = decodedKey[i];
}
for (int i = 0; i < metrics.length; i++) {
try {
resultCols[protectedColsCnt + i][nonEmptyKey] = metrics[i].getDouble(results[key]);
if (!ArrayUtils.contains(skipAIR, metrics[i].getName())) {
final double air = metrics[i].getDouble(results[key]) / metrics[i].getDouble(ref);
resultCols[protectedColsCnt + metrics.length + i - counter][nonEmptyKey] = air;
} else
counter++;
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
try {
resultCols[resultCols.length - 1][nonEmptyKey] = getPValue(ref, results[key]);
} catch (Exception e) {
resultCols[resultCols.length - 1][nonEmptyKey] = Double.NaN;
}
nonEmptyKey++;
}
String[] colNames = new String[protectedColsCnt + metricsCount];
System.arraycopy(protectedCols, 0, colNames, 0, protectedCols.length);
int counter = 0;
for (int i = 0; i < metrics.length; i++) {
colNames[protectedColsCnt + i] = metrics[i].getName();
if (!ArrayUtils.contains(skipAIR, metrics[i].getName())) {
colNames[protectedColsCnt + metrics.length + i - counter] = "AIR_" + metrics[i].getName();
} else
counter++;
}
colNames[colNames.length - 1] = "p.value";
Vec[] vecs = new Vec[protectedColsCnt + metricsCount];
for (int i = 0; i < protectedColsCnt; i++) {
vecs[i] = Vec.makeVec(resultCols[i], fr.domains()[protectedColsIdx[i]], Vec.newKey());
}
for (int i = 0; i < metricsCount; i++) {
vecs[protectedColsCnt + i] = Vec.makeVec(resultCols[protectedColsCnt + i], Vec.newKey());
}
return new Frame(Key.make("fairness_metrics_" + frName + "_for_model_" + model._key), colNames, vecs);
}
public Map<String, Frame> getROCInfo(Model model, Frame fr, final String frName) {
Map<String, Frame> result = new HashMap<>();
for (int id = 0; id < maxIndex; id++) {
if (_aucs[id] == null) continue;
AUC2 auc = new AUC2(_aucs[id]);
// Fill TwoDimTable
String[] thresholds = new String[auc._nBins];
for (int i = 0; i < auc._nBins; i++)
thresholds[i] = Double.toString(auc._ths[i]);
AUC2.ThresholdCriterion crits[] = AUC2.ThresholdCriterion.VALUES;
String[] colHeaders = new String[crits.length + 2];
String[] types = new String[crits.length + 2];
String[] formats = new String[crits.length + 2];
colHeaders[0] = "Threshold";
types[0] = "double";
formats[0] = "%f";
int i;
for (i = 0; i < crits.length; i++) {
colHeaders[i + 1] = crits[i].toString();
types[i + 1] = crits[i]._isInt ? "long" : "double";
formats[i + 1] = crits[i]._isInt ? "%d" : "%f";
}
colHeaders[i + 1] = "idx";
types[i + 1] = "int";
formats[i + 1] = "%d";
TwoDimTable thresholdsByMetrics = new TwoDimTable("Metrics for Thresholds", "Binomial metrics as a function of classification thresholds", new String[auc._nBins], colHeaders, types, formats, null);
for (i = 0; i < auc._nBins; i++) {
int j = 0;
thresholdsByMetrics.set(i, j, Double.valueOf(thresholds[i]));
for (j = 0; j < crits.length; j++) {
          double d = crits[j].exec(auc, i);
          thresholdsByMetrics.set(i, 1 + j, crits[j]._isInt ? (Object) ((long) d) : d); // Note: the cast to Object is NOT redundant
}
thresholdsByMetrics.set(i, 1 + j, i);
}
String groupName = keyToString(id, fr);
Frame f = thresholdsByMetrics.asFrame(Key.make("thresholds_and_metrics_" + groupName + "_for_model_" + model._key + "_for_frame_" + frName));
DKV.put(f);
result.put("thresholds_and_metrics_" + groupName, f);
}
return result;
}
/**
* Calculate p-value using Fisher's exact test
* <p>
* | | Protected Group | Reference |
* |--------------+-----------------+-----------|
* | Selected | a | b |
* | Not Selected | c | d |
*
   * @param a number of selected individuals in the protected group
   * @param b number of selected individuals in the reference group
   * @param c number of not-selected individuals in the protected group
   * @param d number of not-selected individuals in the reference group
   * @return two-sided p-value of Fisher's exact test
*/
private static double fishersTest(long a, long b, long c, long d) {
long popSize = a + b + c + d;
if (popSize > Integer.MAX_VALUE) return Double.NaN; // Make sure we don't get stuck on p-value computation
HypergeometricDistribution hgd = new HypergeometricDistribution((int) popSize, (int) (a + b), (int) (a + c));
double p = hgd.probability((int) a);
double pValue = 0;
    // Two-sided test: like R's fisher.test, sum the probabilities of all tables that are at most as
    // probable as the observed one (within a small relative tolerance).
for (int i = (int) Math.max(a - d, 0); i <= Math.min(a + b, a + c); i++) {
final double proposal = hgd.probability(i);
if (proposal <= p * FISHER_TEST_REL_ERROR) pValue += proposal;
}
return pValue;
}
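  // Worked example for fishersTest (illustrative numbers): for a=3, b=1, c=1, d=3 we have N=8 and
  // margins a+b=4, a+c=4, so the support of `a` is [max(0, a-d), min(a+b, a+c)] = [0, 4] and
  // P(X=k) = C(4,k)*C(4,4-k)/C(8,4), i.e. 1/70, 16/70, 36/70, 16/70, 1/70 for k=0..4. The observed
  // table has P(X=3)=16/70; tables at most as probable are k in {0, 1, 3, 4}, so p = 34/70 ~ 0.486,
  // matching R's fisher.test on the same 2x2 table.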
/**
* Calculate p-value. If there is a high number of instances use G-test as approximation of Fisher's exact test,
* otherwise use Fisher's exact test.
*
* @param ref Metrics for the reference group
* @param results Metrics for a protected group
* @return p-value
*/
private static double getPValue(FairnessMetrics ref, FairnessMetrics results) {
long a = (long) results.selected;
long b = (long) ref.selected;
long c = (long) (results.total - results.selected);
long d = (long) (ref.total - ref.selected);
if ((ref.total < GTEST_THRESHOLD && results.total < GTEST_THRESHOLD) || a == 0 || b == 0 || c == 0 || d == 0) {
// fisher's exact test
return fishersTest(a, b, c, d);
} else {
return new GTest().gTestDataSetsComparison(
new long[]{a, c},
new long[]{b, d}
);
}
}
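  // Example mapping for getPValue (illustrative numbers): ref = {selected=80, total=200} and
  // results = {selected=20, total=100} yield the 2x2 table a=20, b=80, c=80, d=120; totals below
  // GTEST_THRESHOLD (or any zero cell) route the computation to Fisher's exact test instead of the G-test.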
}
@Override
public String[] args() {
return new String[]{"model", "test_frame", "protected_columns", "reference", "favourable_class"};
}
@Override
public int nargs() {
return 1 + 5;
}
@Override
public String str() {
return "fairnessMetrics";
}
@Override
public ValMapFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Model model = stk.track(asts[1].exec(env)).getModel();
Frame fr = stk.track((asts[2].exec(env)).getFrame());
String[] protectedCols = stk.track(asts[3].exec(env)).getStrs();
String[] reference = stk.track(asts[4].exec(env)).getStrs();
String favourableClass = stk.track(asts[5].exec(env)).getStr();
final String frameName = asts[2].str(); // Used only for naming the derived metrics
final int responseIdx = fr.find(model._parms._response_column);
if (!model._output.isBinomialClassifier()) {
throw new H2OIllegalArgumentException("Model has to be a binomial model!");
}
for (String pc : protectedCols) {
if (fr.find(pc) == -1)
throw new RuntimeException(pc + " was not found in the frame!");
if (!fr.vec(pc).isCategorical())
throw new H2OIllegalArgumentException(pc + " has to be a categorical column!");
}
if (reference.length != protectedCols.length)
reference = null;
else
for (int i = 0; i < protectedCols.length; i++) {
if (!ArrayUtils.contains(fr.vec(protectedCols[i]).domain(), reference[i])) {
throw new RuntimeException("Reference group is not present in the protected column");
}
}
if (!ArrayUtils.contains(fr.vec(responseIdx).domain(), favourableClass))
throw new RuntimeException("Favourable class is not present in the response!");
final int favorableClassId = ArrayUtils.find(fr.vec(responseIdx).domain(), favourableClass);
final int[] protectedColsIdx = fr.find(protectedCols);
final int[] cardinalities = IntStream.of(protectedColsIdx).map(colId -> fr.vec(colId).cardinality() + 1).toArray(); // +1 for missing value
    // Sanity check - the number of subgroups grows multiplicatively with each protected column and very
    // high counts are practically uninterpretable, but we don't want to limit the user too much
if (Arrays.stream(cardinalities).asDoubleStream().reduce((a, b) -> a * b).orElse(Double.MAX_VALUE) > 1e6)
throw new RuntimeException("Too many combinations of categories! Maximum number of category combinations is 1e6.");
Frame predictions = new Frame(fr).add(model.score(fr));
DKV.put(predictions);
try {
FairnessMRTask fairnessMRTask = (FairnessMRTask) new FairnessMRTask(
protectedColsIdx,
cardinalities,
responseIdx,
fr.numCols(),
favorableClassId
).doAll(predictions);
Frame metrics = fairnessMRTask.getMetrics(protectedCols, fr, model, reference, frameName);
Map<String, Frame> results = fairnessMRTask.getROCInfo(model, fr, frameName);
DKV.put(metrics);
results.put("overview", metrics);
return new ValMapFrame(results);
} finally {
DKV.remove(predictions.getKey());
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/models/AstMakeLeaderboard.java
|
package water.rapids.ast.prims.models;
import hex.Model;
import hex.ModelContainer;
import hex.leaderboard.*;
import water.DKV;
import water.Key;
import water.fvec.Frame;
import water.logging.Logger;
import water.logging.LoggerFactory;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
import water.util.Log;
import java.util.Arrays;
import java.util.stream.Stream;
/**
* Compute Leaderboard
*/
public class AstMakeLeaderboard extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"models", "leaderboardFrame", "sortMetric", "extensions", "scoringData"};
}
@Override
public int nargs() {
return 1 + 5;
  } // (makeLeaderboard models leaderboardFrame sortMetric extensions scoringData)
@Override
public String str() {
return "makeLeaderboard";
}
private static LeaderboardExtensionsProvider createLeaderboardExtensionProvider(Frame leaderboardFrame) {
return new LeaderboardExtensionsProvider() {
@Override
public LeaderboardCell[] createExtensions(Model model) {
return new LeaderboardCell[]{
new TrainingTime(model),
new ScoringTimePerRow(model, leaderboardFrame),
new AlgoName(model),
};
}
};
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot[] asts) {
Key[] models = Arrays.stream(stk.track(asts[1].exec(env)).getStrs())
.flatMap(model_id -> {
Object obj = DKV.getGet(model_id);
if (obj instanceof Model) {
return Stream.of(Key.make(model_id));
} else if (obj instanceof ModelContainer) {
ModelContainer mc = (ModelContainer) obj;
return Stream.of(mc.getModelKeys());
} else {
throw new RuntimeException("Unsupported model/grid id: " + model_id + "!");
}
}).toArray(Key[]::new);
    // getFrame() clones the Frame and the clone is key-less; since we only do read-only ops here, we can use the original frame
String leaderboardFrameKey = stk.track(asts[2].exec(env)).getStr();
Frame leaderboardFrame = null;
if (!leaderboardFrameKey.isEmpty())
leaderboardFrame = DKV.getGet(leaderboardFrameKey);
else
leaderboardFrameKey = null;
String sortMetric = stk.track(asts[3].exec(env)).getStr();
String[] extensions = stk.track(asts[4].exec(env)).getStrs();
String scoringData = stk.track(asts[5].exec(env)).getStr().toLowerCase();
String projectName = leaderboardFrameKey + "_" + Arrays.deepHashCode(models) + "_" + scoringData;
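    // projectName is a deterministic function of the frame key, the model set and the scoring data,
    // so repeated calls with the same inputs reuse the same Leaderboard via getOrMake below.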
Arrays.stream(models).forEach(DKV::prefetch);
if (sortMetric.equalsIgnoreCase("auto")) sortMetric = null;
final boolean oneTrainingFrame = Arrays.stream(models).map(m -> ((Model) DKV.getGet(m))._parms._train).distinct().count() == 1;
final boolean oneValidationFrame = Arrays.stream(models).map(m -> ((Model) DKV.getGet(m))._parms._valid).distinct().count() == 1;
final boolean oneNFoldsSetting = Arrays.stream(models)
.map(m -> ((Model) DKV.getGet(m))._parms)
.filter(parms -> !parms.algoName().equalsIgnoreCase("stackedensemble"))
.map(parameters -> parameters._nfolds)
.distinct()
.count() == 1;
final boolean allCV = Arrays.stream(models).allMatch(m -> ((Model) DKV.getGet(m))._output._cross_validation_metrics != null);
final boolean allHasValid = Arrays.stream(models).allMatch(m -> ((Model) DKV.getGet(m))._output._validation_metrics != null);
boolean warnAboutTrain = false;
boolean warnAboutValid = false;
boolean warnAboutNFolds = false;
boolean warnAboutLeaderboard = false;
if (scoringData.equals("auto") && leaderboardFrame == null) {
warnAboutTrain = true;
warnAboutNFolds = true;
scoringData = "xval";
}
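    // Note: the blocks below intentionally use plain if (not else-if) so that an unsatisfiable choice
    // cascades to the next scoring-data source: auto -> xval -> valid -> train.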
if (scoringData.equals("xval")) {
warnAboutTrain = true;
warnAboutNFolds = true;
warnAboutLeaderboard = true;
if (!allCV)
scoringData = "valid";
}
if (scoringData.equals("valid")) {
warnAboutTrain = false;
warnAboutValid = true;
warnAboutLeaderboard = true;
if (!allHasValid)
scoringData = "train";
}
if (scoringData.equals("train")) {
warnAboutTrain = true;
warnAboutValid = false;
warnAboutLeaderboard = true;
}
    // The one-training-frame check can be a false positive if models from two different AutoML runs are used.
if (warnAboutTrain && !oneTrainingFrame)
Log.warn("More than one training frame was used amongst the models provided to the leaderboard.");
if (warnAboutValid && !oneValidationFrame)
Log.warn("More than one validation frame was used amongst the models provided to the leaderboard.");
    if (warnAboutNFolds && !oneNFoldsSetting)
      Log.warn("More than one n-folds setting is present."); // stacked ensembles are excluded from this check
if (warnAboutLeaderboard && leaderboardFrame != null)
Log.warn("Leaderboard frame present but scoring data are set to " + scoringData +
". Using scores from " + scoringData + ".");
final Logger logger = LoggerFactory.getLogger(Leaderboard.class);
Leaderboard ldb = Leaderboard.getOrMake(projectName, logger, leaderboardFrame, sortMetric, Leaderboard.ScoreData.valueOf(scoringData));
ldb.setExtensionsProvider(createLeaderboardExtensionProvider(leaderboardFrame));
ldb.addModels(models);
ldb.ensureSorted();
Frame leaderboard = ldb.toTwoDimTable(extensions).asFrame(Key.make());
return new ValFrame(leaderboard);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/models/AstModelResetThreshold.java
|
package water.rapids.ast.prims.models;
import hex.Model;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
/**
* Reset a model threshold and return the old one.
*/
public class AstModelResetThreshold extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"model", "threshold"};
}
@Override
public int nargs() {
return 1 + 2;
  } // (model.reset.threshold model threshold)
@Override
public String str() {
return "model.reset.threshold";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Model model = stk.track(asts[1].exec(env)).getModel();
double oldThreshold = model._output.defaultThreshold();
double newThreshold = stk.track(asts[2].exec(env)).getNum();
model.resetThreshold(newThreshold);
return ValFrame.fromRow(oldThreshold);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/models/AstPerfectAUC.java
|
package water.rapids.ast.prims.models;
import hex.AUC2;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.Val;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
/**
* Calculates a "perfect" (= not approximated) AUC
*/
public class AstPerfectAUC extends AstPrimitive {
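  // An exact AUC equals the normalized Mann-Whitney U statistic:
  // AUC = P(prob of a random positive > prob of a random negative), with ties counted as 1/2.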
@Override
public String[] args() {
return new String[]{"probs", "acts"};
}
@Override
public int nargs() {
return 1 + 2;
} // (perfectAUC probs acts)
@Override
public String str() {
return "perfectAUC";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Vec probs = getSingleVec(stk.track(asts[1].exec(env)), "probabilities");
Vec acts = getSingleVec(stk.track(asts[2].exec(env)), "actuals");
double auc = AUC2.perfectAUC(probs, acts);
return ValFrame.fromRow(auc);
}
private static Vec getSingleVec(Val v, String what) {
Frame f = v.getFrame();
if (f == null || f.numCols() != 1) {
throw new IllegalArgumentException("Expected a frame containing a single vector of " + what +
". Instead got " + String.valueOf(f));
}
return f.vec(0);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/models/AstPermutationVarImp.java
|
package water.rapids.ast.prims.models;
import hex.Model;
import water.Key;
import water.Scope;
import water.fvec.Frame;
import water.fvec.Vec;
import water.rapids.Env;
import water.rapids.PermutationVarImp;
import water.rapids.Val;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
import water.util.ArrayUtils;
import water.util.TwoDimTable;
import java.util.Arrays;
import java.util.Locale;
/**
* Ast class for passing the model, frame and metric to calculate Permutation Variable Importance
*/
public class AstPermutationVarImp extends AstPrimitive {
@Override
public int nargs() { return 1 + 7; }
@Override
public String[] args() { return new String[]{"model", "frame", "metric", "n_samples", "n_repeats", "features", "seed"}; }
@Override
public String str() { return "PermutationVarImp"; }
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Model model = stk.track(asts[1].exec(env)).getModel();
Frame fr = stk.track(asts[2].exec(env)).getFrame();
String metric = stk.track(asts[3].exec(env)).getStr().toLowerCase();
long n_samples = (long) stk.track(asts[4].exec(env)).getNum();
int n_repeats = (int) stk.track(asts[5].exec(env)).getNum();
String[] features = null;
Val featuresVal = stk.track(asts[6].exec(env));
    if (!featuresVal.isEmpty()) // an empty string list is parsed as a numeric Val, so read strings only when non-empty
features = featuresVal.getStrs();
long seed = (long) stk.track(asts[7].exec(env)).getNum();
    if (n_samples < -1 || n_samples == 0 || n_samples == 1 || n_samples > fr.numRows()) {
      throw new IllegalArgumentException("Argument n_samples has to be either -1 to use the whole frame " +
              "or at least 2 and at most the number of rows of the provided frame!");
}
if (n_repeats < 1) {
throw new IllegalArgumentException("Argument n_repeats must be greater than 0!");
}
if (features != null) {
String[] notInFrame = Arrays.stream(features).filter((f) ->
!ArrayUtils.contains(fr.names(), f)).toArray(String[]::new);
if (notInFrame.length > 0) {
throw new IllegalArgumentException("Features " + String.join(", ", notInFrame) + " are not present in the provided frame!");
}
String[] notUsedInModel = Arrays.stream(features).filter((f) ->
!ArrayUtils.contains(model._output._origNames == null ? model._output._names : model._output._origNames, f)).toArray(String[]::new);
if (notUsedInModel.length > 0) {
throw new IllegalArgumentException("Features " + String.join(", ", notInFrame) + " weren't used for training!");
}
}
Scope.enter();
Frame pviFr = null;
try {
// Calculate Permutation Variable Importance
PermutationVarImp pvi = new PermutationVarImp(model, fr);
TwoDimTable varImpTable = null;
if (n_repeats > 1) {
varImpTable = pvi.getRepeatedPermutationVarImp(metric, n_samples, n_repeats, features, seed);
} else {
varImpTable = pvi.getPermutationVarImp(metric, n_samples, features, seed);
}
// Create Frame from TwoDimTable
pviFr = varimpToFrame(varImpTable, Key.make(model._key + "permutationVarImp"));
Scope.track(pviFr);
} finally {
Key[] keysToKeep = pviFr != null ? pviFr.keys() : new Key[]{};
Scope.exit(keysToKeep);
}
return new ValFrame(pviFr);
}
private static Frame varimpToFrame(TwoDimTable twoDimTable, Key frameKey) {
String[] colNames = new String[twoDimTable.getColDim() + 1];
colNames[0] = "Variable";
System.arraycopy(twoDimTable.getColHeaders(),0, colNames, 1, twoDimTable.getColDim());
Vec[] vecs = new Vec[colNames.length];
vecs[0] = Vec.makeVec(twoDimTable.getRowHeaders(), Vec.newKey());
double[] tmpRow = new double[twoDimTable.getRowDim()];
for (int j = 0; j < twoDimTable.getColDim(); j++) {
for (int i = 0; i < twoDimTable.getRowDim(); i++) {
tmpRow[i] = (double) twoDimTable.get(i, j);
}
vecs[j + 1] = Vec.makeVec(tmpRow, Vec.newKey());
}
Frame fr = new Frame(frameKey, colNames, vecs);
return fr;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/models/AstResultFrame.java
|
package water.rapids.ast.prims.models;
import hex.Model;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
public class AstResultFrame extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"model key"};
}
@Override
public int nargs() {
return 1 + 1;
  } // (result model)
@Override
public String str() {
return "result";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
Model model = stk.track(asts[1].exec(env)).getModel();
return new ValFrame(model.result());
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/ast/prims/models/AstSegmentModelsAsFrame.java
|
package water.rapids.ast.prims.models;
import hex.segments.SegmentModels;
import water.rapids.Env;
import water.rapids.ast.AstPrimitive;
import water.rapids.ast.AstRoot;
import water.rapids.vals.ValFrame;
public class AstSegmentModelsAsFrame extends AstPrimitive {
@Override
public String[] args() {
return new String[]{"segment_models"};
}
@Override
public int nargs() {
return 1 + 1;
} // (segment_models_as_frame segment_models_id)
@Override
public String str() {
return "segment_models_as_frame";
}
@Override
public ValFrame apply(Env env, Env.StackHelp stk, AstRoot asts[]) {
SegmentModels models = (SegmentModels) stk.track(asts[1].exec(env)).getKeyed();
return new ValFrame(models.toFrame());
}
}
|