index: int64
repo_id: string
file_path: string
content: string
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/NFSFileVec.java
package water.fvec; import java.io.File; import java.io.IOException; import water.*; import water.persist.PersistNFS; import water.util.FileUtils; /** A NFS distributed file-backed Vector * <p> * Vec will be lazily loaded from the NFS file on-demand. Each machine is * expected to have the <b>same</b> filesystem view onto a file with the same * byte contents. Each machine will lazily load only the sections of the file * that are assigned to that machine. Basically, the file starts striped * across some globally visible file system (e.g. NFS, or just replicated on * local disk) and is loaded into memory - again striped across the machines - * without any network traffic or data-motion. * <p> * Useful to "memory map" into RAM large datafiles, often pure text files. */ public class NFSFileVec extends FileVec { /** Make a new NFSFileVec key which holds the filename implicitly. This name * is used by the Chunks to load data on-demand. Blocking * @return A NFSFileVec mapped to this file. */ public static NFSFileVec make(File f) { Futures fs = new Futures(); NFSFileVec nfs = make(f, fs); fs.blockForPending(); return nfs; } public static NFSFileVec make(String fname) throws IOException { File f = FileUtils.getFile(fname); return NFSFileVec.make(f); } /** Make a new NFSFileVec key which holds the filename implicitly. This name * is used by the Chunks to load data on-demand. * @return A NFSFileVec mapped to this file. */ public static NFSFileVec make(File f, Futures fs) { if( !f.exists() ) throw new IllegalArgumentException("File not found: "+f.toString()); long size = f.length(); Key k = Vec.newKey(PersistNFS.decodeFile(f)); // Insert the top-level FileVec key into the store NFSFileVec nfs = new NFSFileVec(k,size); DKV.put(k,nfs,fs); return nfs; } private NFSFileVec(Key key, long len) {super(key,len,Value.NFS);} }
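The make() factories above are the whole public surface of NFSFileVec. A minimal, hypothetical usage sketch follows; it assumes an H2O node is already running (e.g. started via water.H2O.main) and uses a placeholder path that every node in the cloud can see.

import java.io.File;
import java.io.IOException;

import water.Futures;
import water.fvec.NFSFileVec;

// Hypothetical sketch: map an NFS-visible file into a lazily loaded Vec.
// The path below is a placeholder; no bytes are read until chunks are touched.
public class NFSFileVecExample {
  public static void main(String[] args) throws IOException {
    // Blocking variant: resolves the file, puts the Vec into the DKV, waits for pending futures.
    NFSFileVec nfs = NFSFileVec.make("/shared/data/train.csv");
    System.out.println("Mapped " + nfs.length() + " bytes, nothing loaded yet.");

    // Non-blocking variant: the caller owns the Futures and decides when to block.
    Futures fs = new Futures();
    NFSFileVec nfs2 = NFSFileVec.make(new File("/shared/data/train.csv"), fs);
    fs.blockForPending();
  }
}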
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/NewChunk.java
package water.fvec; import water.AutoBuffer; import water.Futures; import water.H2O; import water.MemoryManager; import water.parser.BufferedString; import water.util.PrettyPrint; import water.util.StringUtils; import water.util.UnsafeUtils; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.util.Arrays; import java.util.BitSet; import java.util.HashMap; import java.util.UUID; import static water.H2OConstants.MAX_STR_LEN; // An uncompressed chunk of data, supporting an append operation public class NewChunk extends Chunk { private static final int[] EXP10s = new int[Double.MAX_EXPONENT - Double.MIN_EXPONENT + 1]; private static final double[] INV_POW10s = new double[EXP10s.length]; static { for (int i = 0; i < EXP10s.length; i++) { EXP10s[i] = (int) Math.log10(Math.pow(2, i + Double.MIN_EXPONENT)); INV_POW10s[i] = Math.pow(10, -EXP10s[i]); } } public void alloc_mantissa(int sparseLen) {_ms = new Mantissas(sparseLen);} public void alloc_exponent(int sparseLen) {_xs = new Exponents(sparseLen);} public int is(int i) { return _is[i];} public void set_is(int i, int val) {_is[i] = val;} public void alloc_nums(int len) { _ms = new Mantissas(len); _xs = new Exponents(len);} /** * Wrapper around exponent, stores values (only if there are non-zero exponents) in bytes or ints. */ public static class Exponents { int _len; public Exponents(int cap){_len = cap;} byte [] _vals1; int [] _vals4; private void alloc_data(int val){ byte b = (byte)val; if(b == val && b != CATEGORICAL_1) _vals1 = MemoryManager.malloc1(_len); else _vals4 = MemoryManager.malloc4(_len); } public void set(int idx, int x) { if(_vals1 == null && _vals4 == null) { if(x == 0) return; alloc_data(x); } if(_vals1 != null){ byte b = (byte)x; if(x == b && b > Byte.MIN_VALUE-1) { _vals1[idx] = b; } else { // need to switch to 4 byte values int len = _vals1.length; _vals4 = MemoryManager.malloc4(len); for (int i = 0; i < _vals1.length; ++i) _vals4[i] = (_vals1[i] == CATEGORICAL_1)?CATEGORICAL_2:_vals1[i]; _vals1 = null; _vals4[idx] = x; } } else _vals4[idx] = x; } public int get(int id){ if(_vals1 == null && null == _vals4) return 0; if(_vals1 != null) { int x = _vals1[id]; if(x == CATEGORICAL_1) x = CATEGORICAL_2; return x; } return _vals4[id]; } public boolean isCategorical(int i) { return _vals1 != null && _vals1[i] == CATEGORICAL_1 || _vals4 != null && _vals4[i] == CATEGORICAL_2;} private static byte CATEGORICAL_1 = Byte.MIN_VALUE; private static int CATEGORICAL_2 = Integer.MIN_VALUE; public void setCategorical(int idx) { if(_vals1 == null && _vals4 == null) alloc_data(0); if(_vals1 != null) _vals1[idx] = CATEGORICAL_1; else _vals4[idx] = CATEGORICAL_2; } public void move(int to, int from) { if(_vals1 == null && null == _vals4) return; if(_vals1 != null) _vals1[to] = _vals1[from]; else _vals4[to] = _vals4[from]; } public void resize(int len) { if (_vals1 != null) _vals1 = Arrays.copyOf(_vals1, len); else if (_vals4 != null) _vals4 = Arrays.copyOf(_vals4, len); _len = len; } public long byteSize() { long mem = 4; if (_vals1 != null) { mem += _vals1.length; } if (_vals4 != null) { mem += _vals4.length * 4; } return mem; } } /** * Class wrapping around mantissa. * Stores values in bytes, ints or longs, if data fits. * Sets and gets done in longs. 
*/ public static class Mantissas { byte [] _vals1; int [] _vals4; long [] _vals8; int _nzs; public Mantissas(int cap) {_vals1 = MemoryManager.malloc1(cap);} public void set(int idx, long l) { long old; if(_vals1 != null) { // check if we fit withing single byte byte b = (byte)l; if(b == l) { old = _vals1[idx]; _vals1[idx] = b; } else { int i = (int)l; if(i == l) { switchToInts(); old = _vals4[idx]; _vals4[idx] = i; } else { switchToLongs(); old = _vals8[idx]; _vals8[idx] = l; } } } else if(_vals4 != null) { int i = (int)l; if(i != l) { switchToLongs(); old = _vals8[idx]; _vals8[idx] = l; } else { old = _vals4[idx]; _vals4[idx] = i; } } else { old = _vals8[idx]; _vals8[idx] = l; } if (old != l) { if (old == 0) ++_nzs; else if(l == 0) --_nzs; } } public long get(int id) { if(_vals1 != null) return _vals1[id]; if(_vals4 != null) return _vals4[id]; return _vals8[id]; } public void switchToInts() { int len = _vals1.length; _vals4 = MemoryManager.malloc4(len); for(int i = 0; i < _vals1.length; ++i) _vals4[i] = _vals1[i]; _vals1 = null; } public void switchToLongs() { int len = Math.max(_vals1 == null?0:_vals1.length,_vals4 == null?0:_vals4.length); int newlen = len; _vals8 = MemoryManager.malloc8(newlen); if(_vals1 != null) for(int i = 0; i < _vals1.length; ++i) _vals8[i] = _vals1[i]; else if(_vals4 != null) { for(int i = 0; i < _vals4.length; ++i) _vals8[i] = _vals4[i]; } _vals1 = null; _vals4 = null; } public void move(int to, int from) { if(to != from) { if (_vals1 != null) { _vals1[to] = _vals1[from]; _vals1[from] = 0; } else if (_vals4 != null) { _vals4[to] = _vals4[from]; _vals4[from] = 0; } else { _vals8[to] = _vals8[from]; _vals8[from] = 0; } } } public int len() { return _vals1 != null?_vals1.length:_vals4 != null?_vals4.length:_vals8.length; } public void resize(int len) { if(_vals1 != null) _vals1 = Arrays.copyOf(_vals1,len); else if(_vals4 != null) _vals4 = Arrays.copyOf(_vals4,len); else if(_vals8 != null) _vals8 = Arrays.copyOf(_vals8,len); } public long byteSize() { long mem = 4; if (_vals1 != null) { mem += _vals1.length; } if (_vals4 != null) { mem += _vals4.length * 4; } if (_vals8 != null) { mem += _vals8.length * 8; } return mem; } } public final int _cidx; // We can record the following (mixed) data types: // 1- doubles, in _ds including NaN for NA & 0; _ls==_xs==null // 2- scaled decimals from parsing, in _ls & _xs; _ds==null // 3- zero: requires _ls==0 && _xs==0 // 4- NA: _ls==Long.MAX_VALUE && _xs==Integer.MIN_VALUE || _ds=NaN // 5- Categorical: _xs==(Integer.MIN_VALUE+1) && _ds==null // 6- Str: _ss holds appended string bytes (with trailing 0), _is[] holds offsets into _ss[] // Chunk._len is the count of elements appended // Sparse: if _sparseLen != _len, then _ls/_ds are compressed to non-zero's only, // and _xs is the row number. Still _len is count of elements including // zeros, and _sparseLen is count of non-zeros. protected transient Mantissas _ms; // Mantissa protected transient BitSet _missing; protected transient Exponents _xs; // Exponent, or if _ls==0, NA or Categorical or Rows private transient int _id[]; // Indices (row numbers) of stored values, used for sparse private transient double _ds[]; // Doubles, for inflating via doubles public transient byte[] _ss; // Bytes of appended strings, including trailing 0 private transient int _is[]; // _is[] index of strings - holds offsets into _ss[]. 
_is[i] == -1 means NA/sparse public long byteSize() { long mem = 0; if (_ms != null) { mem += _ms.byteSize(); } if (_missing != null) { mem += _missing.size() / 8; // size() returns number of bits, convert to bytes, approx } if (_xs != null) { mem += _xs.byteSize(); } if (_id != null) { mem += _id.length * 4; } if (_ds != null) { mem += _ds.length * 8; } if (_ss != null) { mem += _ss.length; } if (_is != null) { mem += _is.length * 8; } return mem; } int [] alloc_indices(int l) { return _id = MemoryManager.malloc4(l); } public double[] alloc_doubles(int l) { _ms = null; _xs = null; _missing = null; return _ds = MemoryManager.malloc8d(l); } int [] alloc_str_indices(int l) { _ms = null; _xs = null; _missing = null; _ds = null; return _is = MemoryManager.malloc4(l); } final protected int [] indices() { return _id; } final protected double[] doubles() { return _ds; } @Override public boolean isSparseZero() { return sparseZero(); } public boolean _sparseNA = false; @Override public boolean isSparseNA() {return sparseNA();} void setSparseNA() {_sparseNA = true;} public int _sslen; // Next offset into _ss for placing next String public int _sparseLen; int set_sparseLen(int l) { return this._sparseLen = l; } @Override public int sparseLenZero() { return _sparseNA ? _len : _sparseLen;} @Override public int sparseLenNA() { return _sparseNA ? _sparseLen : _len; } private int _naCnt=-1; // Count of NA's appended protected int naCnt() { return _naCnt; } // Count of NA's appended private int _catCnt; // Count of Categorical's appended private int _strCnt; // Count of string's appended private int _nzCnt; // Count of non-zero's appended private int _uuidCnt; // Count of UUIDs public int _timCnt = 0; protected static final int MIN_SPARSE_RATIO = 8; private int _sparseRatio = MIN_SPARSE_RATIO; public boolean _isAllASCII = true; //For cat/string col, are all characters in chunk ASCII? public NewChunk( Vec vec, int cidx ) { _vec = vec; _cidx = cidx; _ms = new Mantissas(4); _xs = new Exponents(4); } public NewChunk( Vec vec, int cidx, boolean sparse ) { _vec = vec; _cidx = cidx; _ms = new Mantissas(4); _xs = new Exponents(4); if(sparse) _id = new int[4]; } public NewChunk(double [] ds) { _cidx = -1; _vec = null; setDoubles(ds); } public NewChunk( Vec vec, int cidx, long[] mantissa, int[] exponent, int[] indices, double[] doubles) { _vec = vec; _cidx = cidx; _ms = new Mantissas(mantissa.length); _xs = new Exponents(exponent.length); for(int i = 0; i < mantissa.length; ++i) { _ms.set(i,mantissa[i]); _xs.set(i,exponent[i]); } _id = indices; _ds = doubles; if (_ms != null && _sparseLen==0) set_sparseLen(set_len(mantissa.length)); if (_ds != null && _sparseLen==0) set_sparseLen(set_len(_ds.length)); if (_id != null && _sparseLen==0) set_sparseLen(_id.length); } // Constructor used when inflating a Chunk. public NewChunk( Chunk c ) { this(c._vec, c.cidx()); _start = c._start; } // Constructor used when inflating a Chunk. public NewChunk( Chunk c, double [] vals) { _vec = c._vec; _cidx = c.cidx(); _start = c._start; _ds = vals; _sparseLen = _len = _ds.length; } // Pre-sized newchunks. 
public NewChunk( Vec vec, int cidx, int len ) { this(vec,cidx); _ds = new double[len]; Arrays.fill(_ds, Double.NaN); set_sparseLen(set_len(len)); } public NewChunk setSparseRatio(int s) { _sparseRatio = s; return this; } public void setDoubles(double[] ds) { _ds = ds; _sparseLen = _len = ds.length; _ms = null; _xs = null; } public void set_vec(Vec vec) { _vec = vec; } public final class Value { int _gId; // row number in dense (ie counting zeros) int _lId; // local array index of this value, equal to _gId if dense public Value(int lid, int gid){_lId = lid; _gId = gid;} public final int rowId0(){return _gId;} public void add2Chunk(NewChunk c){add2Chunk_impl(c,_lId);} } private transient BufferedString _bfstr = new BufferedString(); private void add2Chunk_impl(NewChunk c, int i) { if (isNA2(i)) { c.addNA(); } else if (isUUID()) { c.addUUID(_ms.get(i), Double.doubleToRawLongBits(_ds[i])); } else if(_ms != null) { c.addNum(_ms.get(i), _xs.get(i)); } else if(_ds != null) { c.addNum(_ds[i]); } else if (_ss != null) { int sidx = _is[i]; int nextNotNAIdx = i + 1; // Find next not-NA value (_is[idx] != -1) while (nextNotNAIdx < _is.length && _is[nextNotNAIdx] == -1) nextNotNAIdx++; int send = nextNotNAIdx < _is.length ? _is[nextNotNAIdx]: _sslen; int slen = send - sidx -1 /*account for trailing zero byte*/; assert slen >= 0 : getClass().getSimpleName() + ".add2Chunk_impl: slen=" + slen + ", sidx=" + sidx + ", send=" + send; // null-BufferedString represents NA value BufferedString bStr = sidx == -1 ? null : _bfstr.set(_ss, sidx, slen); c.addStr(bStr); } else throw new IllegalStateException(); } public void add2Chunk(NewChunk c, int i){ if(!isSparseNA() && !isSparseZero()) add2Chunk_impl(c,i); else { int j = Arrays.binarySearch(_id,0,_sparseLen,i); if(j >= 0) add2Chunk_impl(c,j); else if(isSparseNA()) c.addNA(); else c.addNum(0,0); } } // Heuristic to decide the basic type of a column byte type() { if( _naCnt == -1 ) { // No rollups yet? int nas=0, es=0, nzs=0, ss=0; if( _ds != null && _ms != null ) { // UUID? for(int i = 0; i< _sparseLen; i++ ) if( _xs != null && _xs.get(i)==Integer.MIN_VALUE ) nas++; else if( _ds[i] !=0 || _ms.get(i) != 0 ) nzs++; _uuidCnt = _len -nas; } else if( _ds != null ) { // Doubles? assert _xs==null; for(int i = 0; i < _sparseLen; ++i) { if( Double.isNaN(_ds[i]) ) nas++; else if( _ds[i]!=0 ) nzs++; } } else { if( _ms != null && _sparseLen > 0) // Longs and categoricals? for(int i = 0; i< _sparseLen; i++ ) if( isNA2(i) ) nas++; else { if( isCategorical2(i) ) es++; if( _ms.get(i) != 0 ) nzs++; } if( _is != null ) // Strings for(int i = 0; i< _sparseLen; i++ ) if( isNA2(i) ) nas++; else ss++; } if (_sparseNA) nas += (_len - _sparseLen); _nzCnt=nzs; _catCnt =es; _naCnt=nas; _strCnt = ss; } // Now run heuristic for type if(_naCnt == _len) // All NAs ==> NA Chunk return Vec.T_BAD; if(_strCnt > 0) return Vec.T_STR; if(_catCnt > 0 && _catCnt + _naCnt + (isSparseZero()? _len-_sparseLen : 0) == _len) return Vec.T_CAT; // All are Strings+NAs ==> Categorical Chunk // UUIDs? if( _uuidCnt > 0 ) return Vec.T_UUID; // Larger of time & numbers int nums = _len -_naCnt-_timCnt; return _timCnt >= nums ? Vec.T_TIME : Vec.T_NUM; } //what about sparse reps? 
protected final boolean isNA2(int idx) { if (isString()) return _is[idx] == -1; if(isUUID() || _ds == null) return _missing != null && _missing.get(idx); return Double.isNaN(_ds[idx]); } protected final boolean isCategorical2(int idx) { return _xs!=null && _xs.isCategorical(idx); } protected final boolean isCategorical(int idx) { if(_id == null)return isCategorical2(idx); int j = Arrays.binarySearch(_id,0, _sparseLen,idx); return j>=0 && isCategorical2(j); } public void addCategorical(int e) { if(_ms == null || _ms.len() == _sparseLen) append2slow(); if( e != 0 || !isSparseZero() ) { _ms.set(_sparseLen,e); _xs.setCategorical(_sparseLen); if(_id != null) _id[_sparseLen] = _len; ++_sparseLen; } ++_len; } public void addNA() { if(!_sparseNA) { if (isString()) { addStr(null); return; } else if (isUUID()) { if( _ms==null || _ds== null || _sparseLen >= _ms.len() ) append2slowUUID(); if(_missing == null) _missing = new BitSet(); _missing.set(_sparseLen); if (_id != null) _id[_sparseLen] = _len; _ds[_sparseLen] = Double.NaN; ++_sparseLen; } else if (_ds != null) { addNum(Double.NaN); return; } else { if (!_sparseNA && _sparseLen == _ms.len()) append2slow(); if(!_sparseNA) { if(_missing == null) _missing = new BitSet(); _missing.set(_sparseLen); if (_id != null) _id[_sparseLen] = _len; ++_sparseLen; } } } ++_len; } public void addNumDecompose(final double d) { if (isUUID() || isString()) { // not worth trying addNA(); return; } if (_ds != null) { // already using doubles addNum(d); return; } if ((long) d == d) { addNum((long) d, 0); return; } final int expIdx = Math.getExponent(d) - Double.MIN_EXPONENT; if (expIdx == -1) { // zero or subnormal if (d == 0) addNum(0, 0); else // subnormal addNum(d); return; } else if (expIdx == Double.MAX_EXPONENT - Double.MIN_EXPONENT + 1) { // NaN or infinity if (d == Double.NaN) { // NaN addNA(); } else { addNum(d); // infinity } return; } final int sign = d < 0 ? -1 : 1; int exp = EXP10s[expIdx]; double val = sign * d * INV_POW10s[expIdx]; while ((long) val != val) { double x = val * 10; if (x > Long.MAX_VALUE) { addNum(d); return; } val = x; exp--; } addNum(sign * (long) val, exp); } public void addNum (long val, int exp) { if( isUUID() || isString() ) { addNA(); } else if(_ds != null) { assert _ms == null; addNum(PrettyPrint.pow10(val,exp)); } else { if( val == 0 ) exp = 0;// Canonicalize zero if(val != 0 || !isSparseZero()) { if (_ms == null || _ms.len() == _sparseLen) { append2slow(); addNum(val, exp); // sparsity could've changed return; } int len = _ms.len(); int slen = _sparseLen; long t; // Remove extra scaling while (exp < 0 && exp > -9999999 && (t = val / 10) * 10 == val) { val = t; exp++; } _ms.set(_sparseLen, val); _xs.set(_sparseLen, exp); assert _id == null || _id.length == _ms.len() : "id.len = " + _id.length + ", ms.len = " + _ms.len() + ", old ms.len = " + len + ", sparseLen = " + slen; if (_id != null) _id[_sparseLen] = _len; _sparseLen++; } _len++; } } // Fast-path append double data public void addNum(double d) { if( isUUID() || isString() ) { addNA(); return; } boolean predicate = _sparseNA ? 
!Double.isNaN(d) : isSparseZero()?d != 0:true; if(predicate) { if(_ms != null) { if((long)d == d){ addNum((long)d,0); return; } switch_to_doubles(); } //if ds not big enough if(_sparseLen == _ds.length ) { append2slowd(); // call addNum again since append2slowd might have flipped to sparse addNum(d); assert _sparseLen <= _len; return; } if(_id != null)_id[_sparseLen] = _len; _ds[_sparseLen] = d; _sparseLen++; } _len++; assert _sparseLen <= _len; } private void append_ss(String str) { byte[] bytes = str == null ? new byte[0] : StringUtils.bytesOf(str); // Allocate memory if necessary if (_ss == null) _ss = MemoryManager.malloc1((bytes.length+1) * 4); while (_ss.length < (_sslen + bytes.length+1)) _ss = MemoryManager.arrayCopyOf(_ss,_ss.length << 1); // Copy bytes to _ss for (byte b : bytes) _ss[_sslen++] = b; _ss[_sslen++] = (byte)0; // for trailing 0; } private void append_ss(BufferedString str) { int strlen = str.length(); int off = str.getOffset(); byte b[] = str.getBuffer(); if (_ss == null) { int size = (strlen + 1) * 4; if(size < 0 || size > MAX_STR_LEN){ size = MAX_STR_LEN; } _ss = MemoryManager.malloc1(size); } long spaceRequired = _sslen + strlen + 1; if( spaceRequired > MAX_STR_LEN ){ throw new IllegalStateException("Parsed string is too big."); } while (_ss.length < (_sslen + strlen + 1)) { long doubleSize = _ss.length << 1; if(doubleSize > Integer.MAX_VALUE){ doubleSize = MAX_STR_LEN; } _ss = MemoryManager.arrayCopyOf(_ss, (int) doubleSize); } for (int i = off; i < off+strlen; i++) _ss[_sslen++] = b[i]; _ss[_sslen++] = (byte)0; // for trailing 0; } // Append a string, store in _ss & _is // TODO cleanup public void addStr(Object str) { if(_id == null || str != null) { if(_is == null || _sparseLen >= _is.length) { append2slowstr(); addStr(str); assert _sparseLen <= _len; return; } if (str != null) { if(_id != null)_id[_sparseLen] = _len; _is[_sparseLen] = _sslen; _sparseLen++; if (str instanceof BufferedString) append_ss((BufferedString) str); else // this spares some callers from an unneeded conversion to BufferedString first append_ss((String) str); } else if (_id == null) { _is[_sparseLen] = CStrChunk.NA; set_sparseLen(_sparseLen + 1); } } set_len(_len + 1); assert _sparseLen <= _len; } // TODO: FIX isAllASCII test to actually inspect string contents public void addStr(Chunk c, long row) { if( c.isNA_abs(row) ) addNA(); else { addStr(c.atStr_abs(new BufferedString(), row)); _isAllASCII &= ((CStrChunk)c)._isAllASCII; } } public void addStr(Chunk c, int row) { if( c.isNA(row) ) addNA(); else { addStr(c.atStr(new BufferedString(), row)); _isAllASCII &= ((CStrChunk)c)._isAllASCII; } } public void addUUID(UUID uuid) { if (uuid == null) addNA(); else addUUID(uuid.getLeastSignificantBits(), uuid.getMostSignificantBits()); } // Append a UUID, stored in _ls & _ds public void addUUID( long lo, long hi ) { if (C16Chunk.isNA(lo, hi)) { addNA(); return; } if( _ms==null || _ds== null || _sparseLen >= _ms.len() ) append2slowUUID(); _ms.set(_sparseLen,lo); _ds[_sparseLen] = Double.longBitsToDouble(hi); if (_id != null) _id[_sparseLen] = _len; _sparseLen++; _len++; assert _sparseLen <= _len; } public void addUUID( Chunk c, long row ) { if (c.isNA_abs(row)) addNA(); else addUUID(c.at16l_abs(row),c.at16h_abs(row)); } public void addUUID( Chunk c, int row ) { if( c.isNA(row) ) addNA(); else addUUID(c.at16l(row),c.at16h(row)); } public final boolean isUUID(){return _ms != null && _ds != null; } public final boolean isString(){return _is != null; } public final boolean sparseZero(){return _id != 
null && !_sparseNA;} public final boolean sparseNA() {return _id != null && _sparseNA;} public void addZeros(int n){ if(n == 0) return; assert n > 0; while(!sparseZero() && n != 0) { addNum(0, 0); n--; } assert n >= 0; _len += n; } public void addNAs(int n) { if(n == 0) return; while(!sparseNA() && n != 0) { addNA(); n--; } _len += n; } // Append all of 'nc' onto the current NewChunk. Kill nc. public void add( NewChunk nc ) { assert _cidx >= 0; assert _sparseLen <= _len; assert nc._sparseLen <= nc._len :"_sparseLen = " + nc._sparseLen + ", _len = " + nc._len; if( nc._len == 0 ) return; if(_len == 0){ _ms = nc._ms; nc._ms = null; _xs = nc._xs; nc._xs = null; _id = nc._id; nc._id = null; _ds = nc._ds; nc._ds = null; _is = nc._is; nc._is = null; _ss = nc._ss; nc._ss = null; set_sparseLen(nc._sparseLen); set_len(nc._len); return; } if(nc.sparseZero() != sparseZero() || nc.sparseNA() != sparseNA()){ // for now, just make it dense cancel_sparse(); nc.cancel_sparse(); } if( _ds != null ) throw H2O.fail(); for(int i = 0; i < nc._sparseLen; ++i) { _ms.set(_sparseLen+i,nc._ms.get(i)); _xs.set(_sparseLen+i,nc._xs.get(i)); } if(_id != null) { assert nc._id != null; _id = MemoryManager.arrayCopyOf(_id,_sparseLen + nc._sparseLen); System.arraycopy(nc._id,0,_id, _sparseLen, nc._sparseLen); for(int i = _sparseLen; i < _sparseLen + nc._sparseLen; ++i) _id[i] += _len; } else assert nc._id == null; set_sparseLen(_sparseLen + nc._sparseLen); set_len(_len + nc._len); nc._ms = null; nc._xs = null; nc._id = null; nc.set_sparseLen(nc.set_len(0)); assert _sparseLen <= _len; } // Fast-path append long data // void append2( long l, int x ) { // boolean predicate = _sparseNA ? (l != Long.MAX_VALUE || x != Integer.MIN_VALUE): l != 0; // if(_id == null || predicate){ // if(_ms == null || _sparseLen == _ms._c) { // append2slow(); // // again call append2 since calling append2slow might have changed things (eg might have switched to sparse and l could be 0) // append2(l,x); // return; // } // _ls[_sparseLen] = l; // _xs[_sparseLen] = x; // if(_id != null)_id[_sparseLen] = _len; // set_sparseLen(_sparseLen + 1); // } // set_len(_len + 1); // assert _sparseLen <= _len; // } // Slow-path append data private void append2slowd() { assert _ms==null; if(_ds != null && _ds.length > 0){ if(_id == null) { // check for sparseness int nzs = 0; // assume one non-zero for the element currently being stored int nonnas = 0; for(double d:_ds) { if(d != 0)++nzs; if(!Double.isNaN(d))++nonnas; } if((nzs+1)*_sparseRatio < _len) { set_sparse(nzs,Compress.ZERO); assert _sparseLen == 0 || _sparseLen <= _ds.length:"_sparseLen = " + _sparseLen + ", _ds.length = " + _ds.length + ", nzs = " + nzs + ", len = " + _len; assert _id.length == _ds.length; assert _sparseLen <= _len; return; } else if((nonnas+1)*_sparseRatio < _len) { set_sparse(nonnas,Compress.NA); assert _sparseLen == 0 || _sparseLen <= _ds.length:"_sparseLen = " + _sparseLen + ", _ds.length = " + _ds.length + ", nonnas = " + nonnas + ", len = " + _len; assert _id.length == _ds.length; assert _sparseLen <= _len; return; } } else { // verify we're still sufficiently sparse if((_sparseRatio*(_sparseLen) >> 2) > _len) cancel_sparse(); else _id = MemoryManager.arrayCopyOf(_id, _sparseLen << 1); } _ds = MemoryManager.arrayCopyOf(_ds, _sparseLen << 1); } else { alloc_doubles(4); if (_id != null) alloc_indices(4); } assert _sparseLen == 0 || _ds.length > _sparseLen :"_ds.length = " + _ds.length + ", _sparseLen = " + _sparseLen; assert _id == null || _id.length == _ds.length; assert _sparseLen 
<= _len; } // Slow-path append data private void append2slowUUID() { if(_id != null) cancel_sparse(); if( _ds==null && _ms!=null ) { // This can happen for columns with all NAs and then a UUID _xs=null; _ms.switchToLongs(); _ds = MemoryManager.malloc8d(_sparseLen); Arrays.fill(_ms._vals8,C16Chunk._LO_NA); Arrays.fill(_ds,Double.longBitsToDouble(C16Chunk._HI_NA)); } if( _ms != null && _sparseLen > 0 ) { _ds = MemoryManager.arrayCopyOf(_ds, _sparseLen * 2); _ms.resize(_sparseLen*2); if(_id != null) _id = Arrays.copyOf(_id,_sparseLen*2); } else { _ms = new Mantissas(4); _xs = null; _ms.switchToLongs(); _ds = new double[4]; } } // Slow-path append string private void append2slowstr() { // In case of all NAs and then a string, convert NAs to string NAs if (_xs != null) { _xs = null; _ms = null; alloc_str_indices(_sparseLen); Arrays.fill(_is,-1); } if(_is != null && _is.length > 0){ // Check for sparseness if(_id == null){ int nzs = 0; // assume one non-null for the element currently being stored for( int i:_is) if( i != -1 ) ++nzs; if( (nzs+1)*_sparseRatio < _len) set_sparse(nzs, Compress.NA); } else { if((_sparseRatio*(_sparseLen) >> 2) > _len) cancel_sparse(); else _id = MemoryManager.arrayCopyOf(_id,_sparseLen<<1); } _is = MemoryManager.arrayCopyOf(_is, _sparseLen<<1); /* initialize the memory extension with -1s */ for (int i = _sparseLen; i < _is.length; i++) _is[i] = -1; } else { _is = MemoryManager.malloc4 (4); /* initialize everything with -1s */ for (int i = 0; i < _is.length; i++) _is[i] = -1; if (sparseZero()||sparseNA()) alloc_indices(4); } assert _sparseLen == 0 || _is.length > _sparseLen:"_ls.length = " + _is.length + ", _len = " + _sparseLen; } // Slow-path append data private void append2slow( ) { // PUBDEV-2639 - don't die for many rows, few columns -> can be long chunks // if( _sparseLen > FileVec.DFLT_CHUNK_SIZE ) // throw new ArrayIndexOutOfBoundsException(_sparseLen); assert _ds==null; if(_ms != null && _sparseLen > 0){ if(_id == null) { // check for sparseness int nzs = _ms._nzs + (_missing != null?_missing.cardinality():0); int nonnas = _sparseLen - ((_missing != null)?_missing.cardinality():0); if((nonnas+1)*_sparseRatio < _len) { set_sparse(nonnas,Compress.NA); assert _id.length == _ms.len():"id.len = " + _id.length + ", ms.len = " + _ms.len(); assert _sparseLen <= _len; return; } else if((nzs+1)*_sparseRatio < _len) { // note order important here set_sparse(nzs,Compress.ZERO); assert _sparseLen <= _len; assert _sparseLen == nzs; return; } } else { // verify we're still sufficiently sparse if(2*_sparseLen > _len) cancel_sparse(); else _id = MemoryManager.arrayCopyOf(_id, _id.length*2); } _ms.resize(_sparseLen*2); _xs.resize(_sparseLen*2); } else { _ms = new Mantissas(16); _xs = new Exponents(16); if (_id != null) _id = new int[16]; } assert _sparseLen <= _len; } // Do any final actions on a completed NewVector. Mostly: compress it, and // do a DKV put on an appropriate Key. The original NewVector goes dead // (does not live on inside the K/V store). public Chunk new_close() { Chunk chk = compress(); if(_vec instanceof AppendableVec) ((AppendableVec)_vec).closeChunk(_cidx,chk._len); return chk; } public void close(Futures fs) { close(_cidx,fs); } private void switch_to_doubles(){ assert _ds == null; double [] ds = MemoryManager.malloc8d(_sparseLen); for(int i = 0; i < _sparseLen; ++i) ds[i] = getDouble(i); _ms = null; _xs = null; _missing = null; _ds = ds; } public enum Compress {ZERO, NA} //Sparsify. Compressible element can be 0 or NA. 
Store noncompressible elements in _ds OR _ls and _xs OR _is and // their row indices in _id. protected void set_sparse(int num_noncompressibles, Compress sparsity_type) { assert !isUUID():"sparse for uuids is not supported"; if ((sparsity_type == Compress.ZERO && isSparseNA()) || (sparsity_type == Compress.NA && isSparseZero())) cancel_sparse(); if (sparsity_type == Compress.NA) { _sparseNA = true; } if (_id != null && _sparseLen == num_noncompressibles && _len != 0) return; if (_id != null) cancel_sparse(); assert _sparseLen == _len : "_sparseLen = " + _sparseLen + ", _len = " + _len + ", num_noncompressibles = " + num_noncompressibles; int cs = 0; //number of compressibles if (_is != null) { assert num_noncompressibles <= _is.length; _id = MemoryManager.malloc4(_is.length); for (int i = 0; i < _len; i++) { if (_is[i] == -1) cs++; //same condition for NA and 0 else { _is[i - cs] = _is[i]; _id[i - cs] = i; } } } else if (_ds == null) { if (_len == 0) { _ms = new Mantissas(0); _xs = new Exponents(0); _id = new int[0]; set_sparseLen(0); return; } else { assert num_noncompressibles <= _sparseLen; _id = MemoryManager.malloc4(_ms.len()); for (int i = 0; i < _sparseLen; ++i) { if (is_compressible(i)) { ++cs; } else { _ms.move(i - cs, i); _xs.move(i - cs, i); _id[i - cs] = i; if(sparsity_type != Compress.NA && _missing != null){ _missing.set(i-cs,_missing.get(i)); } } } if(_missing != null && _missing.length() > num_noncompressibles) _missing.clear(num_noncompressibles, _missing.length()); } } else { assert num_noncompressibles <= _ds.length; _id = alloc_indices(_ds.length); for (int i = 0; i < _sparseLen; ++i) { if (is_compressible(_ds[i])) ++cs; else { _ds[i - cs] = _ds[i]; _id[i - cs] = i; } } } assert cs == (_sparseLen - num_noncompressibles) : "cs = " + cs + " != " + (_sparseLen - num_noncompressibles) + ", sparsity type = " + sparsity_type; assert (sparsity_type == Compress.NA) == _sparseNA; if(sparsity_type == Compress.NA && _missing != null) _missing.clear(); set_sparseLen(num_noncompressibles); } private boolean is_compressible(double d) { return _sparseNA ? Double.isNaN(d) : d == 0; } private boolean is_compressible(int x) { return isNA2(x)?_sparseNA:!_sparseNA &&_ms.get(x) == 0; } public void cancel_sparse(){ if(_sparseLen != _len){ if(_is != null){ int [] is = MemoryManager.malloc4(_len); Arrays.fill(is, -1); for (int i = 0; i < _sparseLen; i++) is[_id[i]] = _is[i]; _is = is; } else if(_ds == null) { Exponents xs = new Exponents(_len); Mantissas ms = new Mantissas(_len); BitSet missing = new BitSet(); if(_sparseNA) missing.set(0,_len); for (int i = 0; i < _sparseLen; ++i) { xs.set(_id[i], _xs.get(i)); ms.set(_id[i], _ms.get(i)); missing.set(_id[i], _sparseNA || _missing == null?false:_missing.get(i)); } assert _sparseNA || (ms._nzs == _ms._nzs):_ms._nzs + " != " + ms._nzs; ms._nzs = _ms._nzs; _xs = xs; _missing = missing; _ms = ms; } else{ double [] ds = MemoryManager.malloc8d(_len); _missing = new BitSet(); if (_sparseNA) Arrays.fill(ds, Double.NaN); for(int i = 0; i < _sparseLen; ++i) { ds[_id[i]] = _ds[i]; if(_sparseNA)_missing.set(_id[i]); } _ds = ds; } set_sparseLen(_len); } _id = null; _sparseNA = false; } // Study this NewVector and determine an appropriate compression scheme. // Return the data so compressed. 
public Chunk compress() { Chunk res = compress2(); byte type = type(); assert _vec == null || // Various testing scenarios do not set a Vec type == _vec._type || // Equal types // Allow all-bad Chunks in any type of Vec type == Vec.T_BAD || // Specifically allow the NewChunk to be a numeric type (better be all // ints) and the selected Vec type an categorical - whose String mapping // may not be set yet. (type==Vec.T_NUM && _vec._type==Vec.T_CAT) || // Another one: numeric Chunk and Time Vec (which will turn into all longs/zeros/nans Chunks) (type==Vec.T_NUM && _vec._type == Vec.T_TIME && !res.hasFloat()) : "NewChunk has type "+Vec.TYPE_STR[type]+", but the Vec is of type "+_vec.get_type_str(); assert _len == res._len : "NewChunk has length "+_len+", compressed Chunk has "+res._len; // Force everything to null after compress to free up the memory. Seems // like a non-issue in the land of GC, but the NewChunk *should* be dead // after this, but might drag on. The arrays are large, and during a big // Parse there's lots and lots of them... so free early just in case a GC // happens before the drag-time on the NewChunk finishes. _id = null; _xs = null; _ds = null; _ms = null; _is = null; _ss = null; return res; } private static long leRange(long lemin, long lemax){ if(lemin < 0 && lemax >= (Long.MAX_VALUE + lemin)) return Long.MAX_VALUE; // if overflow return 64 as the max possible value long res = lemax - lemin; return res < 0 ? 0 /*happens for rare FP roundoff computation of min & max */: res; } private Chunk compress2() { // Check for basic mode info: all missing or all strings or mixed stuff byte mode = type(); if( mode==Vec.T_BAD ) // ALL NAs, nothing to do return new C0DChunk(Double.NaN, _len); if( mode==Vec.T_STR ) return new CStrChunk(_sslen, _ss, _sparseLen, _len, _id, _is); boolean rerun=false; if(mode == Vec.T_CAT) { for(int i = 0; i< _sparseLen; i++ ) if(isCategorical2(i)) _xs.set(i,0); else if(!isNA2(i)){ setNA_impl2(i); ++_naCnt; } // Smack any mismatched string/numbers } else if( mode == Vec.T_NUM ) { for(int i = 0; i< _sparseLen; i++ ) if(isCategorical2(i)) { setNA_impl2(i); rerun = true; } } if( rerun ) { _naCnt = -1; type(); } // Re-run rollups after dropping all numbers/categoricals boolean sparse = false; boolean na_sparse = false; // sparse? treat as sparse iff fraction of noncompressed elements is less than 1/MIN_SPARSE_RATIO if(_sparseRatio*(_naCnt + _nzCnt) < _len) { set_sparse(_naCnt + _nzCnt, Compress.ZERO); sparse = true; } else if(_sparseRatio*(_len - _naCnt) < _len){ set_sparse(_len - _naCnt, Compress.NA); na_sparse = true; } else if (_id != null) cancel_sparse(); // If the data is UUIDs there's not much compression going on if( _ds != null && _ms != null ) return chunkUUID(); // cut out the easy all NaNs case; takes care of constant na_sparse if(_naCnt == _len) return new C0DChunk(Double.NaN,_len); // If the data was set8 as doubles, we do a quick check to see if it's // plain longs. If not, we give up and use doubles. 
boolean isInteger = true; boolean isFloat = true; if( _ds != null ) { int i; // check if we can flip to ints for (i=0; i < _sparseLen && (isInteger || isFloat); ++i) { if (!Double.isNaN(_ds[i]) && (double) (long) _ds[i] != _ds[i]) isInteger = false; if (!Double.isNaN(_ds[i]) && (double) (float) _ds[i] != _ds[i]) isFloat = false; } boolean isConstant = !(sparse || na_sparse) || _sparseLen == 0; double constVal = 0; if (!(sparse || na_sparse)) { // check the values, sparse with some nonzeros can not be constant - has 0s and (at least 1) nonzero constVal = _ds[0]; for(int j = 1; j < _len; ++j) if(_ds[j] != constVal) { isConstant = false; break; } } if(isConstant) return isInteger? new C0LChunk((long)constVal, _len): new C0DChunk(constVal,_len); if(!isInteger) { return (sparse || na_sparse) ?new CXFChunk(bufD(isFloat?4:8,na_sparse)) :chunkD(); } // Else flip to longs _ms = new Mantissas(_ds.length); _xs = new Exponents(_ds.length); _missing = new BitSet(); double [] ds = _ds; _ds = null; final int naCnt = _naCnt; for(i=0; i< _sparseLen; i++ ) // Inject all doubles into longs if( Double.isNaN(ds[i]) ) { _missing.set(i); } else { _ms.set(i,(long)ds[i]); _xs.set(i,0); } // setNA_impl2 will set _naCnt to -1! // we already know what the naCnt is (it did not change!) so set it back to correct value _naCnt = naCnt; } // IF (_len > _sparseLen) THEN Sparse // Check for compressed *during appends*. Here we know: // - No specials; _xs[]==0. // - No floats; _ds==null // - NZ length in _sparseLen, actual length in _len. // - Huge ratio between _len and _sparseLen, and we do NOT want to inflate to // the larger size; we need to keep it all small all the time. // - Rows in _xs // Data in some fixed-point format, not doubles // See if we can sanely normalize all the data to the same fixed-point. int xmin = Integer.MAX_VALUE; // min exponent found boolean floatOverflow = false; double min = Double.POSITIVE_INFINITY; double max = Double.NEGATIVE_INFINITY; long min_l = Long.MAX_VALUE; long max_l = Long.MIN_VALUE; double longMax = (double) Long.MAX_VALUE; double longMin = (double) Long.MIN_VALUE; int p10iLength = PrettyPrint.powers10i.length; long llo=Long .MAX_VALUE, lhi=Long .MIN_VALUE; int xlo=Integer.MAX_VALUE, xhi=Integer.MIN_VALUE; boolean hasZero = sparse; long ll; for(int i = 0; i< _sparseLen; i++ ) { if( isNA2(i) ) continue; long l = _ms.get(i); int x = _xs.get(i); if( x==Integer.MIN_VALUE) x=0; // Replace categorical flag with no scaling assert l!=0 || x==0:"l == 0 while x = " + x + " ms = " + _ms.toString(); // Exponent of zero is always zero long t; // Remove extra scaling while( l!=0 && (t=l/10)*10==l ) { l=t; x++; } // Compute per-chunk min/max double d = PrettyPrint.pow10(l,x); // WARNING: this is lossy!! if(d == 0) { hasZero = true; continue; } if (isInteger) // once set to false don't want to reset back to true isInteger = (x>=0) && (d<=longMax) && (d>=longMin); if (isInteger) { ll = l*PrettyPrint.pow10i(x); // only perform operation if still fit in Long and still integer if( ll<min_l ) { min = d; min_l=ll; llo=l; xlo=x; } // if( ll>max_l ) { max = d; max_l=ll; lhi=l; xhi=x; } } else { if (d < min) { min = d; llo = l; xlo = x; } if (d > max) { max=d; lhi=l; xhi=x; } } floatOverflow = l < Integer.MIN_VALUE+1 || l > Integer.MAX_VALUE; xmin = Math.min(xmin,x); } boolean hasNonZero = min != Double.POSITIVE_INFINITY && max != Double.NEGATIVE_INFINITY; if(hasZero){ // sparse? 
then compare vs implied 0s if( min > 0 ) { min = 0; llo=0; min_l=0l; } if( max < 0 ) { max = 0; lhi=0; max_l=0l; } } if(!hasNonZero) xlo = xhi = xmin = 0; // Constant column? if( _naCnt==0 && (min_l==max_l) && xmin >=0 && isInteger) { return new C0LChunk(min_l, _len); } if( _naCnt==0 && (min==max) && (xmin<0 || !isInteger) ) { return new C0DChunk(min, _len); } // Compute min & max, as scaled integers in the xmin scale. // Check for overflow along the way boolean overflow = ((xhi-xmin) >= p10iLength) || ((xlo-xmin) >= p10iLength); long lemax=0, lemin=0; if( !overflow ) { // Can at least get the power-of-10 without overflow long pow10 = PrettyPrint.pow10i(xhi-xmin); lemax = lhi*pow10; // Hacker's Delight, Section 2-13, checking overflow. // Note that the power-10 is always positive, so the test devolves this: if( (lemax/pow10) != lhi ) overflow = true; // Note that xlo might be > xmin; e.g. { 101e-49 , 1e-48}. long pow10lo = llo == 0?1:PrettyPrint.pow10i(xlo-xmin); lemin = llo*pow10lo; if( (lemin/pow10lo) != llo ) overflow = true; } final long leRange = leRange(lemin,lemax); // put min_l in xmin scale if( xmin > 0 ) min_l = min_l/PrettyPrint.pow10i(xmin); // Boolean column? if (max == 1 && min == 0 && xmin == 0 && !overflow) { if(sparse || na_sparse) { // Very sparse? return !na_sparse && _naCnt==0 ? new CXIChunk(bufS(_len,_len < 65535?2:4,0,false))// No NAs, can store as sparse bitvector : _len < 65535 && (Short.MIN_VALUE < min && max < Short.MAX_VALUE) ?new CXIChunk(bufS(_len,2,2,na_sparse)) :new CXIChunk(bufS(_len,4,4,na_sparse)); } int bpv = _catCnt +_naCnt > 0 ? 2 : 1; // Bit-vector return bufB(bpv); } final boolean fpoint = xmin < 0 || min < Long.MIN_VALUE || max > Long.MAX_VALUE; if( sparse || na_sparse ) { if(fpoint) { if(_ds == null){ switch_to_doubles(); isFloat = false; } return new CXFChunk(bufD(isFloat?4:8,na_sparse)); } if( Integer.MIN_VALUE <= min && max <= Integer.MAX_VALUE ) { if(_len < 65535 && (Short.MIN_VALUE < min && max < Short.MAX_VALUE)) return new CXIChunk(bufS(_len,2,2, na_sparse)); else return new CXIChunk(bufS(_len, 4,4, na_sparse)); } return new CXIChunk(bufS(_len, 4,8, na_sparse)); } // Exponent scaling: replacing numbers like 1.3 with 13e-1. '13' fits in a // byte and we scale the column by 0.1. A set of numbers like // {1.2,23,0.34} then is normalized to always be represented with 2 digits // to the right: {1.20,23.00,0.34} and we scale by 100: {120,2300,34}. // This set fits in a 2-byte short. // We use exponent-scaling for bytes & shorts only; it's uncommon (and not // worth it) for larger numbers. We need to get the exponents to be // uniform, so we scale up the largest lmax by the largest scale we need // and if that fits in a byte/short - then it's worth compressing. Other // wise we just flip to a float or double representation. if( overflow || (fpoint && floatOverflow) || -35 > xmin || xmin > 35 ) return chunkD(); if( fpoint ) { if( (int)lemin == lemin && (int)lemax == lemax ) { if(leRange < 255) { // Fits in scaled biased byte? return new C1SChunk(bufX(lemin, xmin, CSChunk._OFF, 0), lemin, xmin); } if(leRange < 65535) { // we use signed 2B short, add -32k to the bias! 
long bias = 32767 + lemin; return new C2SChunk(bufX(bias,xmin,CSChunk._OFF,1),bias,xmin); } } if(leRange < 4294967295l) { long bias = 2147483647l + lemin; return new C4SChunk(bufX(bias,xmin,C4SChunk._OFF,2),bias,xmin); } return chunkD(); } // else an integer column // Compress column into a byte if(xmin == 0 && 0<=lemin && lemax <= 255 && ((_naCnt + _catCnt)==0) ) return new C1NChunk( bufX(0,0,C1NChunk._OFF,0)); if( lemin < Integer.MIN_VALUE ) return new C8Chunk( bufX(0,0,0,3)); if( leRange < 255 ) { // Span fits in a byte? if(0 <= min && max < 255 ) // Span fits in an unbiased byte? return new C1Chunk( bufX(0,0,C1Chunk._OFF,0)); return new C1SChunk( bufX(lemin,xmin,C1SChunk._OFF,0),lemin,xmin); } // Compress column into a short if( leRange < 65535 ) { // Span fits in a biased short? if( xmin == 0 && Short.MIN_VALUE < lemin && lemax <= Short.MAX_VALUE ) // Span fits in an unbiased short? return new C2Chunk( bufX(0,0,C2Chunk._OFF,1)); long bias = (lemin-(Short.MIN_VALUE+1)); return new C2SChunk( bufX(bias,xmin,C2SChunk._OFF,1),bias,xmin); } // Compress column into ints if( Integer.MIN_VALUE < min && max <= Integer.MAX_VALUE ) return new C4Chunk( bufX(0,0,0,2)); return new C8Chunk( bufX(0,0,0,3)); } private static long [] NAS = {C1Chunk._NA,C2Chunk._NA,C4Chunk._NA,C8Chunk._NA}; // Compute a sparse integer buffer private byte[] bufS(int len, int id_sz, int val_sz,boolean na_sparse){ long NA = CXIChunk.NA(val_sz); int elem_size = id_sz+val_sz; byte [] res = MemoryManager.malloc1(CXIChunk._OFF + _sparseLen*elem_size); UnsafeUtils.set4(res,0,len); res[4] = (byte)id_sz; res[5] = (byte)val_sz; res[6] = na_sparse?(byte)1:0; if(na_sparse)res[6] = (byte)1; for(int i = 0; i < _sparseLen; ++i){ if(id_sz == 2) UnsafeUtils.set2(res,CXIChunk._OFF+i*elem_size+0,(short)_id[i]); else UnsafeUtils.set4(res,CXIChunk._OFF+i*elem_size+0,_id[i]); long val = isNA2(i)?NA:_ms.get(i); switch(val_sz){ case 0: break; // no value store dfor binary chunks case 2: UnsafeUtils.set2(res,CXIChunk._OFF+i*elem_size+id_sz,(short)val); break; case 4: UnsafeUtils.set4(res,CXIChunk._OFF+i*elem_size+id_sz,(int)val); break; case 8: UnsafeUtils.set8(res,CXIChunk._OFF+i*elem_size+id_sz,val); break; default: throw H2O.unimpl(); } } return res; } // Compute a sparse float buffer private byte[] bufD(final int valsz, boolean na_sparse){ int elem_size = valsz+4; byte [] res = MemoryManager.malloc1(CXIChunk._OFF + _sparseLen*elem_size); UnsafeUtils.set4(res,0,_len); res[4] = (byte)4; res[5] = (byte)valsz; res[6] = na_sparse?(byte)1:0; if(na_sparse)res[6] = (byte)1; for(int i = 0; i < _sparseLen; ++i){ UnsafeUtils.set4(res,CXIChunk._OFF+i*elem_size+0,_id[i]); if(valsz == 4){ UnsafeUtils.set4f(res,CXIChunk._OFF+i*elem_size+4,(float)_ds[i]); } else if(valsz == 8) { UnsafeUtils.set8d(res,CXIChunk._OFF+i*elem_size+4,_ds[i]); } else throw H2O.unimpl(); } return res; } // Compute a compressed integer buffer private byte[] bufX( long bias, int scale, int off, int log ) { byte[] bs = MemoryManager.malloc1((_len <<log)+off); if (log > 0) { return bufX_safe(bs, bias, scale, off, log); } assert bs.length == off + _len; int j = 0; for (int i = off; i < bs.length; i++) { long le = -bias; if(_id == null || _id.length == 0 || (j < _id.length && _id[j] == i-off)){ if( isNA2(j) ) { le = NAS[log]; } else { int x = (_xs.get(j)==Integer.MIN_VALUE+1 ? 0 : _xs.get(j))-scale; le += x >= 0 ? 
_ms.get(j)*PrettyPrint.pow10i( x) : _ms.get(j)/PrettyPrint.pow10i(-x); } ++j; } bs[i] = (byte) le; } assert j == _sparseLen :"j = " + j + ", _sparseLen = " + _sparseLen; return bs; } private byte[] bufX_safe( final byte[] bs, long bias, int scale, int off, int log ) { assert log > 0; ByteBuffer bb = ByteBuffer.wrap(bs, off, bs.length - off).order(ByteOrder.nativeOrder()); int j = 0; for( int i=0; i< _len; i++ ) { long le = -bias; if(_id == null || _id.length == 0 || (j < _id.length && _id[j] == i)){ if( isNA2(j) ) { le = NAS[log]; } else { int x = (_xs.get(j)==Integer.MIN_VALUE+1 ? 0 : _xs.get(j))-scale; le += x >= 0 ? _ms.get(j)*PrettyPrint.pow10i( x) : _ms.get(j)/PrettyPrint.pow10i(-x); } ++j; } switch( log ) { case 1: bb.putShort((short) le); break; case 2: bb.putInt((int) le); break; case 3: bb.putLong(le); break; default: throw H2O.fail(); } } assert j == _sparseLen :"j = " + j + ", _sparseLen = " + _sparseLen; return bs; } private double getDouble(int j){ if(_ds != null) return _ds[j]; if(isNA2(j)|| isCategorical(j)) return Double.NaN; return PrettyPrint.pow10(_ms.get(j),_xs.get(j)); } // Compute a compressed double buffer private Chunk chunkD() { HashMap<Long,Byte> hs = new HashMap<>(CUDChunk.MAX_UNIQUES); Byte dummy = 0; final byte [] bs = MemoryManager.malloc1(_len *8,true); int j = 0; boolean fitsInUnique = true; for(int i = 0; i < _len; ++i){ double d = 0; if(_id == null || _id.length == 0 || (j < _id.length && _id[j] == i)) { d = getDouble(j); ++j; } if (fitsInUnique) { if (hs.size() < CUDChunk.MAX_UNIQUES) //still got space hs.put(Double.doubleToLongBits(d),dummy); //store doubles as longs to avoid NaN comparison issues during extraction else fitsInUnique = (hs.size() == CUDChunk.MAX_UNIQUES) && // full, but might not need more space because of repeats hs.containsKey(Double.doubleToLongBits(d)); } UnsafeUtils.set8d(bs, 8*i, d); } assert j == _sparseLen :"j = " + j + ", _len = " + _sparseLen; if (fitsInUnique && CUDChunk.computeByteSize(hs.size(), len()) < 0.8 * bs.length) return new CUDChunk(bs, hs, len()); else return new C8DChunk(bs); } // Compute a compressed UUID buffer private Chunk chunkUUID() { final byte [] bs = MemoryManager.malloc1(_len *16,true); int j = 0; for( int i = 0; i < _len; ++i ) { long lo = 0, hi=0; if( _id == null || _id.length == 0 || (j < _id.length && _id[j] == i ) ) { if(_missing != null && _missing.get(j)) { lo = C16Chunk._LO_NA; hi = C16Chunk._HI_NA; } else { lo = _ms.get(j); hi = Double.doubleToRawLongBits(_ds[j]); } j++; } UnsafeUtils.set8(bs, 16*i , lo); UnsafeUtils.set8(bs, 16 * i + 8, hi); } assert j == _sparseLen :"j = " + j + ", _sparselen = " + _sparseLen; return new C16Chunk(bs); } // Compute compressed boolean buffer private CBSChunk bufB(int bpv) { CBSChunk chk = new CBSChunk(_len,bpv); for(int i = 0; i < _len; ++i){ if(isNA2(i)) chk.write(i,CBSChunk._NA); else if(_ms.get(i) == 1) chk.write(i, (byte)1); else assert _ms.get(i) == 0; } return chk; } // Set & At on NewChunks are weird: only used after inflating some other // chunk. At this point the NewChunk is full size, no more appends allowed, // and the xs exponent array should be only full of zeros. Accesses must be // in-range and refer to the inflated values of the original Chunk. @Override boolean set_impl(int i, long l) { if( _ds != null ) return set_impl(i,(double)l); if(_sparseLen != _len){ // sparse? 
int idx = Arrays.binarySearch(_id,0, _sparseLen,i); if(idx >= 0)i = idx; else cancel_sparse(); // for now don't bother setting the sparse value } _ms.set(i,l); _xs.set(i,0); if(_missing != null)_missing.clear(i); _naCnt = -1; return true; } @Override public boolean set_impl(int i, double d) { if(_ds == null && (long)d == d) return set_impl(i,(long)d); if(_ds == null) { if (_is == null) { //not a string assert _sparseLen == 0 || _ms != null; switch_to_doubles(); } else { if (_is[i] == -1) return true; //nothing to do: already NA assert(Double.isNaN(d)) : "can only set strings to <NA>, nothing else"; set_impl(i, null); //null encodes a missing string: <NA> return true; } } if(_sparseLen != _len){ // sparse? int idx = Arrays.binarySearch(_id,0, _sparseLen,i); if(idx >= 0)i = idx; else cancel_sparse(); // for now don't bother setting the sparse value } assert i < _sparseLen; _ds[i] = d; _naCnt = -1; return true; } @Override boolean set_impl(int i, float f) { return set_impl(i,(double)f); } @Override boolean set_impl(int i, String str) { if (str == null) { return setNA_impl(i); } if(_is == null && _len > 0) { alloc_str_indices(_len); Arrays.fill(_is,-1); } if(_sparseLen != _len){ // sparse? int idx = Arrays.binarySearch(_id,0, _sparseLen,i); if(idx >= 0)i = idx; else cancel_sparse(); // for now don't bother setting the sparse value } _is[i] = _sslen; append_ss(str); return true; } protected final boolean setNA_impl2(int i) { if(!isUUID() && _ds != null) { _ds[i] = Double.NaN; return true; } if(isString()) { _is[i] = -1; return true; } if(_missing == null) _missing = new BitSet(); _missing.set(i); _ms.set(i,0); // do not double count non-zeros _naCnt = -1; return true; } @Override boolean setNA_impl(int i) { if( isNA_impl(i) ) return true; if(_sparseLen != _len){ int idx = Arrays.binarySearch(_id,0, _sparseLen,i); if(idx >= 0) i = idx; else cancel_sparse(); // todo - do not necessarily cancel sparse here } return setNA_impl2(i); } protected final long at8_impl2(int i) { if(isNA2(i))throw new RuntimeException("Attempting to access NA as integer value."); if( _ms == null ) return (long)_ds[i]; return _ms.get(i)*PrettyPrint.pow10i(_xs.get(i)); } @Override public long at8_impl( int i ) { if( _len != _sparseLen) { int idx = Arrays.binarySearch(_id,0, _sparseLen,i); if(idx >= 0) i = idx; else { if (_sparseNA) throw new RuntimeException("Attempting to access NA as integer value."); return 0; } } return at8_impl2(i); } @Override public double atd_impl( int i ) { if( _len != _sparseLen) { int idx = Arrays.binarySearch(_id,0, _sparseLen,i); if(idx >= 0) i = idx; else return sparseNA() ? Double.NaN : 0; } if (isNA2(i)) return Double.NaN; // if exponent is Integer.MIN_VALUE (for missing value) or >=0, then go the integer path (at8_impl) // negative exponents need to be handled right here if( _ds == null ) return _xs.get(i) >= 0 ? 
at8_impl2(i) : _ms.get(i)*Math.pow(10,_xs.get(i)); assert _xs==null; return _ds[i]; } private long loAt(int idx) { return _ms.get(idx); } private long hiAt(int idx) { return Double.doubleToRawLongBits(_ds[idx]); } @Override protected long at16l_impl(int idx) { long lo = loAt(idx); if(lo == C16Chunk._LO_NA && hiAt(idx) == C16Chunk._HI_NA) { throw new RuntimeException("Attempting to access NA as integer lo value at " + idx); } return _ms.get(idx); } @Override protected long at16h_impl(int idx) { long hi = Double.doubleToRawLongBits(_ds[idx]); if(hi == C16Chunk._HI_NA && loAt(idx) == C16Chunk._LO_NA) { throw new RuntimeException("Attempting to access NA as integer hi value at " + idx); } return hi; } @Override public boolean isNA_impl( int i ) { if (_len != _sparseLen) { int idx = Arrays.binarySearch(_id, 0, _sparseLen, i); if (idx >= 0) i = idx; else return sparseNA(); } return !sparseNA() && isNA2(i); } @Override public BufferedString atStr_impl( BufferedString bStr, int i ) { if( _sparseLen != _len ) { int idx = Arrays.binarySearch(_id,0, _sparseLen,i); if(idx >= 0) i = idx; else return null; } if( _is[i] == CStrChunk.NA ) return null; int len = 0; while( _ss[_is[i] + len] != 0 ) len++; return bStr.set(_ss, _is[i], len); } @Override protected final void initFromBytes () {throw H2O.fail();} @Override public <T extends ChunkVisitor> T processRows(T v, int from, int to) { throw new UnsupportedOperationException("New chunk does not support visitor pattern"); } @Override public <T extends ChunkVisitor> T processRows(T v, int[] ids) { throw new UnsupportedOperationException("New chunk does not support visitor pattern"); } public static AutoBuffer write_impl(NewChunk nc,AutoBuffer bb) { throw H2O.fail(); } @Override public String toString() { return "NewChunk._sparseLen="+ _sparseLen; } @Override public NewChunk extractRows(NewChunk nc, int from, int to) { throw H2O.unimpl("Not expected to be called on NewChunk"); } @Override public NewChunk extractRows(NewChunk nc, int... rows) { throw H2O.unimpl("Not expected to be called on NewChunk"); } // We have to explicitly override cidx implementation since we hide _cidx field with new version @Override public int cidx() { return _cidx; } /** * Converts a float array into a new line in NewChunks. * * @param ncs array of new chunks where the values should be appended to * @param values array */ public static void addNums(NewChunk[] ncs, float[] values) { if (ncs.length != values.length) { throw new IllegalArgumentException("Number of inputs do not match (#chunks=" + ncs.length + ", #values=" + values.length); } for (int i = 0; i < values.length; i++) { ncs[i].addNum(values[i]); } } }
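NewChunk's write path is append-then-compress: values go in through addNum/addNA/addStr/addUUID, and compress() picks the smallest concrete Chunk representation that holds them. A small, hypothetical sketch of that flow is below; it assumes an initialized H2O runtime (the MemoryManager-backed allocations live on the node), and the null backing Vec is only acceptable for this kind of standalone illustration.

import water.fvec.Chunk;
import water.fvec.NewChunk;

// Hypothetical sketch of the append-then-compress lifecycle.
public class NewChunkExample {
  public static void main(String[] args) {
    NewChunk nc = new NewChunk(null, 0);   // no backing Vec, illustration only

    nc.addNum(42, 0);        // fixed-point: mantissa 42, exponent 0
    nc.addNum(13, -1);       // 1.3 kept as 13 * 10^-1, still fixed-point
    nc.addNum(2.5);          // double fast path; the chunk may switch to doubles
    nc.addNA();              // missing value

    Chunk c = nc.compress(); // chooses the tightest concrete Chunk subclass
    System.out.println(c.getClass().getSimpleName() + " holding " + c.len() + " rows");
  }
}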
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/OneChunkVec.java
package water.fvec; import water.DKV; import water.Futures; import water.Key; /* Bare minimal implementation intended to be used only by `SplitApplyCombine`*/ public class OneChunkVec extends WrappedVec { private int _chunkIdx; private transient Chunk _chunk; public OneChunkVec(Key<Vec> key, int rowLayout, Key<Vec> masterVecKey, int chunkIdx) { super(key, rowLayout, masterVecKey.get().domain(), masterVecKey); _chunkIdx = chunkIdx; _type = masterVec()._type; } public static OneChunkVec make(Vec v, int cidx, Futures fs) { OneChunkVec ocv = new OneChunkVec(Vec.newKey(Key.make(v._key+"_oneChunkVec_"+cidx)), -1, v._key, cidx); DKV.put(ocv, fs); return ocv; } @Override public int elem2ChunkIdx(long i) { return 0; } @Override public Vec masterVec() { return super.masterVec(); } @Override public Chunk chunkForChunkIdx(int cidx) { assert cidx==0; if (null == _chunk) { _chunk = masterVec().chunkForChunkIdx(_chunkIdx).clone(); _chunk.setStart(0); _chunk._vec = this; } return _chunk; } @Override public long length() { return chunkLen(0); } @Override public int nChunks() { return 1; } @Override public int chunkLen(int cidx) { assert cidx==0; return chunkForChunkIdx(0).len(); } @Override long chunk2StartElem(int cidx) { return 0; } @Override public String toString() { return"OneChunkVec["+_chunkIdx+"] ("+masterVec().toString()+")"; } }
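OneChunkVec is, per its own comment, a bare-minimum wrapper meant only for SplitApplyCombine, so the sketch below is purely illustrative: it assumes a running H2O node and an already-existing Vec v with at least three chunks (both placeholders here).

import water.Futures;
import water.fvec.Chunk;
import water.fvec.OneChunkVec;
import water.fvec.Vec;

// Hypothetical sketch: expose chunk #2 of an existing Vec as a standalone, single-chunk Vec.
public class OneChunkVecExample {
  static Vec wrapThirdChunk(Vec v) {
    Futures fs = new Futures();
    OneChunkVec ocv = OneChunkVec.make(v, 2, fs); // registers the wrapper in the DKV
    fs.blockForPending();

    Chunk only = ocv.chunkForChunkIdx(0);         // clone of v's chunk #2, re-homed at row 0
    assert ocv.nChunks() == 1 && ocv.length() == only.len();
    return ocv;
  }
}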
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/RawChunk.java
package water.fvec; import water.udf.TypedChunk; /** * Representative of TypedChunk in the world of typeless chunks. * We don't store any data there. We just pretend to be a "regular" chunk, * to satisfy the obsolete API. */ public class RawChunk extends CXIChunk { public RawChunk(TypedChunk<?> base) { super(new byte[CXIChunk._OFF]); _cidx = base.cidx(); _vec = base.vec(); this._start = base.start(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/RebalanceDataSet.java
package water.fvec;

import jsr166y.CountedCompleter;
import water.Futures;
import water.H2O;
import water.Key;
import water.MRTask;

import java.util.Arrays;

/**
 * Created by tomasnykodym on 3/28/14.
 *
 * Utility to rebalance a dataset so that it has the requested number of chunks and
 * each chunk has the same number of rows +/-1.
 *
 * It *does not* guarantee even chunk-node placement. (This cannot currently
 * be done in H2O, since the placement of chunks is governed only by key-hash
 * /vector group/ for Vecs.)
 */
public class RebalanceDataSet extends H2O.H2OCountedCompleter {
  final Frame _in;
  final int _nchunks;
  Key _okey;
  Frame _out;
  final Key _jobKey;
  final transient Vec.VectorGroup _vg;
  transient long[] _espc;

  /**
   * Constructor for make-compatible task.
   * To be used to make a frame compatible with another frame (i.e. make all vecs
   * compatible with the other frame's vector group and rows-per-chunk).
   */
  public RebalanceDataSet(Frame modelFrame, Frame srcFrame, Key<?> dstKey) {
    this(modelFrame.anyVec().espc(), modelFrame.anyVec().group(), srcFrame, dstKey);
  }

  public RebalanceDataSet(long[] espc, Vec.VectorGroup vg, Frame srcFrame, Key<?> dstKey) {
    super(null);
    _in = srcFrame;
    _jobKey = null;
    _okey = dstKey;
    _espc = espc;
    _vg = vg;
    _nchunks = espc.length - 1;
  }

  public RebalanceDataSet(Frame srcFrame, Key dstKey, int nchunks) { this(srcFrame, dstKey, nchunks, null, null); }

  public RebalanceDataSet(Frame srcFrame, Key dstKey, int nchunks, H2O.H2OCountedCompleter cmp, Key jobKey) {
    super(cmp);
    _in = srcFrame;
    _nchunks = nchunks;
    _jobKey = jobKey;
    _okey = dstKey;
    _vg = new Vec.VectorGroup();
  }

  public Frame getResult() { join(); return _out; }

  @Override public void compute2() {
    // Simply create a bogus new vector (don't even put it into KV) with the
    // appropriate number of lines per chunk and then use it as a source to do
    // multiple makeZero calls to create empty vecs, and then call RebalanceTask
    // on each one of them. RebalanceTask will fetch the appropriate training_frame
    // chunks and fetch the data from them.
    long[] espc;
    if (_espc != null) espc = _espc;
    else {
      int rpc = (int) (_in.numRows() / _nchunks);
      int rem = (int) (_in.numRows() % _nchunks);
      espc = new long[_nchunks + 1];
      Arrays.fill(espc, rpc);
      for (int i = 0; i < rem; ++i) ++espc[i];
      long sum = 0;
      for (int i = 0; i < espc.length; ++i) {
        long s = espc[i];
        espc[i] = sum;
        sum += s;
      }
      assert espc[espc.length - 1] == _in.numRows()
          : "unexpected number of rows, expected " + _in.numRows() + ", got " + espc[espc.length - 1];
    }
    final int rowLayout = Vec.ESPC.rowLayout(_vg._key, espc);
    final Vec[] srcVecs = _in.vecs();
    _out = new Frame(_okey, _in.names(),
        new Vec(_vg.addVec(), rowLayout).makeCons(srcVecs.length, 0L, _in.domains(), _in.types()));
    _out.delete_and_lock(_jobKey);
    new RebalanceTask(this, srcVecs).dfork(_out);
  }

  @Override public void onCompletion(CountedCompleter caller) {
    assert _out.numRows() == _in.numRows();
    Vec vec = _out.anyVec();
    assert vec.nChunks() == _nchunks;
    _out.update(_jobKey);
    _out.unlock(_jobKey);
  }

  @Override public boolean onExceptionalCompletion(Throwable t, CountedCompleter caller) {
    t.printStackTrace();
    if( _out != null ) _out.delete(_jobKey, new Futures(), true).blockForPending();
    return true;
  }

  public static class RebalanceTask extends MRTask<RebalanceTask> {
    final Vec[] _srcVecs;
    public RebalanceTask(H2O.H2OCountedCompleter cmp, Vec... srcVecs) { super(cmp); _srcVecs = srcVecs; }

    @Override public boolean logVerbose() { return false; }

    private void rebalanceChunk(int i, Chunk c, NewChunk nc) {
      final Vec srcVec = _srcVecs[i];
      final int N = c._len;
      int len = 0;
      int lastId = -1;
      while (N > len) {
        Chunk srcRaw = srcVec.chunkForRow(c._start + len);
        assert lastId == -1 || lastId == srcRaw.cidx() - 1 || // proceeded to the next chunk
            srcVec.chunk2StartElem(lastId + 1) == srcVec.chunk2StartElem(srcRaw.cidx()); // skipped a bunch of empty chunks
        lastId = srcRaw.cidx();
        int off = (int) ((c._start + len) - srcRaw._start);
        assert off >= 0 && off < srcRaw._len;
        int x = Math.min(N - len, srcRaw._len - off);
        srcRaw.extractRows(nc, off, off + x);
        len += x;
      }
      nc.close(_fs);
    }

    @Override public void map(Chunk[] chks) {
      for (int c = 0; c < chks.length; ++c) {
        rebalanceChunk(c, chks[c], new NewChunk(chks[c]));
      }
    }
  }

  /**
   * Rebalance a (small) frame into a single chunk. This function
   * is useful after filtering/aggregating the frame.
   * @param fr frame to rebalance
   * @return rebalanced frame, installed into DKV with a random key
   */
  public static Frame toSingleChunk(Frame fr) {
    Key<Frame> singleKey = Key.make();
    return toSingleChunk(fr, singleKey);
  }

  /**
   * Rebalance a (small) frame into a single chunk. This function
   * is useful after filtering/aggregating the frame.
   * @param fr frame to rebalance
   * @param destinationKey key for the new single-chunk Frame
   * @return rebalanced frame, keyed using destinationKey
   */
  public static Frame toSingleChunk(Frame fr, Key<Frame> destinationKey) {
    H2O.submitTask(new RebalanceDataSet(fr, destinationKey, 1)).join();
    return destinationKey.get();
  }
}
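A minimal usage sketch of the rebalance utility above, assuming a Frame already loaded into the cluster; the wrapper class name is illustrative and not part of the H2O sources, and the destination key is simply a fresh random key, as in toSingleChunk.

import water.H2O;
import water.Key;
import water.fvec.Frame;
import water.fvec.RebalanceDataSet;

public class RebalanceExample {
  // Copy `fr` into a new Frame with `nChunks` chunks of (nearly) equal row counts.
  public static Frame rebalance(Frame fr, int nChunks) {
    Key<Frame> dstKey = Key.make();                            // random destination key for the copy
    RebalanceDataSet rds = new RebalanceDataSet(fr, dstKey, nChunks);
    H2O.submitTask(rds);                                       // runs compute2() on the fork-join pool
    return rds.getResult();                                    // joins the task and returns the rebalanced Frame
  }
}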
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/RollupStats.java
package water.fvec; import water.Futures; import jsr166y.CountedCompleter; import jsr166y.ForkJoinTask; import water.*; import water.H2O.H2OCallback; import water.H2O.H2OCountedCompleter; import water.exceptions.H2OConcurrentModificationException; import water.nbhm.NonBlockingHashMap; import water.parser.Categorical; import water.parser.BufferedString; import water.util.ArrayUtils; import water.util.Log; import java.util.Arrays; /** A class to compute the rollup stats. These are computed lazily, thrown * away if the Vec is written into, and then recomputed lazily. Error to ask * for them if the Vec is actively being written into. It is common for all * cores to ask for the same Vec rollup at once, so it is crucial that it be * computed once across the cluster. * * Rollups are kept in the K/V store, which also controls who manages the * rollup work and final results. Winner of a DKV CAS/PutIfMatch race gets to * manage the M/R job computing the rollups. Losers block for the same * rollup. Remote requests *always* forward to the Rollup Key's master. */ final class RollupStats extends Iced { /** The count of missing elements.... or -2 if we have active writers and no * rollup info can be computed (because the vector is being rapidly * modified!), or -1 if rollups have not been computed since the last * modification. */ volatile transient ForkJoinTask _tsk; // Computed in 1st pass volatile long _naCnt; //count(isNA(X)) double _mean, _sigma; //mean(X) and sqrt(sum((X-mean(X))^2)) for non-NA values long _rows, //count(X) for non-NA values excluding negative/positive infinities (for numeric Vecs) _nzCnt, //count(X!=0) for non-NA values _size, //byte size _pinfs, //count(+inf) _ninfs; //count(-inf) boolean _isInt=true; double[] _mins, _maxs; long _checksum; // Expensive histogram & percentiles // Computed in a 2nd pass, on-demand, by calling computeHisto private static final int MAX_SIZE = 1000; // Standard bin count; categoricals can have more bins // the choice of MAX_SIZE being a power of 10 (rather than 1024) just aligns-to-the-grid of the common input of fixed decimal // precision numbers. It is still an estimate and makes no difference mathematically. It just gives tidier output in some // simple cases without penalty. 
volatile long[] _bins; // Approximate data value closest to the Xth percentile double[] _pctiles; public boolean hasHisto(){return _bins != null;} // Check for: Vector is mutating and rollups cannot be asked for boolean isMutating() { return _naCnt==-2; } // Check for: Rollups currently being computed private boolean isComputing() { return _naCnt==-1; } // Check for: Rollups available private boolean isReady() { return _naCnt>=0; } private String modeDescription() { if (isMutating()) return "Mutating"; if (isComputing()) return "Computing"; return "Ready"; } private RollupStats(int mode) { _mins = new double[5]; _maxs = new double[5]; Arrays.fill(_mins, Double.MAX_VALUE); Arrays.fill(_maxs,-Double.MAX_VALUE); _pctiles = new double[Vec.PERCENTILES.length]; Arrays.fill(_pctiles, Double.NaN); _mean = _sigma = 0; _size = 0; _naCnt = mode; } private static RollupStats makeComputing() { return new RollupStats(-1); } static RollupStats makeMutating () { return new RollupStats(-2); } private RollupStats map( Chunk c ) { _size = c.byteSize(); boolean isUUID = c._vec.isUUID(); boolean isString = c._vec.isString(); BufferedString tmpStr = new BufferedString(); if (isString) _isInt = false; // Checksum support long checksum = 0; long start = c._start; long l = 81985529216486895L; // Check for popular easy cases: All Constant double min=c.min(), max=c.max(); if( min==max ) { // All constant or all NaN double d = min; // It's the min, it's the max, it's the alpha and omega _checksum = (c.hasFloat()?Double.doubleToRawLongBits(d):(long)d)*c._len; Arrays.fill(_mins, d); Arrays.fill(_maxs, d); if( d == Double.POSITIVE_INFINITY) _pinfs++; else if( d == Double.NEGATIVE_INFINITY) _ninfs++; else { if( Double.isNaN(d)) _naCnt=c._len; else if( d != 0 ) _nzCnt=c._len; _mean = d; _rows=c._len; } _isInt = ((long)d) == d; _sigma = 0; // No variance for constants return this; } //all const NaNs if ((c instanceof C0DChunk && c.isNA_impl(0))) { _sigma=0; //count of non-NAs * variance of non-NAs _mean = 0; //sum of non-NAs (will get turned into mean) _naCnt=c._len; _nzCnt=0; return this; } // Check for popular easy cases: Boolean, possibly sparse, possibly NaN if( min==0 && max==1 ) { int zs = c._len-c.sparseLenZero(); // Easy zeros int nans = 0; // Hard-count sparse-but-zero (weird case of setting a zero over a non-zero) for( int i=c.nextNZ(-1); i< c._len; i=c.nextNZ(i) ) if( c.isNA(i) ) nans++; else if( c.at8(i)==0 ) zs++; int os = c._len-zs-nans; // Ones _nzCnt += os; _naCnt += nans; for( int i=0; i<Math.min(_mins.length,zs); i++ ) { min(0); max(0); } for( int i=0; i<Math.min(_mins.length,os); i++ ) { min(1); max(1); } _rows += zs+os; _mean = (double)os/_rows; _sigma = zs*(0.0-_mean)*(0.0-_mean) + os*(1.0-_mean)*(1.0-_mean); return this; } // Walk the non-zeros if( isUUID ) { // UUID columns do not compute min/max/mean/sigma for( int i=c.nextNZ(-1); i< c._len; i=c.nextNZ(i) ) { if( c.isNA(i) ) _naCnt++; else { long lo = c.at16l(i), hi = c.at16h(i); if (lo != 0 || hi != 0) _nzCnt++; l = lo ^ 37*hi; } if(l != 0) // ignore 0s in checksum to be consistent with sparse chunks checksum ^= (17 * (start+i)) ^ 23*l; } } else if( isString ) { // String columns do not compute min/max/mean/sigma for (int i = c.nextNZ(-1); i < c._len; i = c.nextNZ(i)) { if (c.isNA(i)) _naCnt++; else { _nzCnt++; l = c.atStr(tmpStr, i).hashCode(); } if (l != 0) // ignore 0s in checksum to be consistent with sparse chunks checksum ^= (17 * (start + i)) ^ 23 * l; } } else { // Work off all numeric rows, or only the nonzeros for sparse if (c instanceof 
C1Chunk) checksum=new RollupStatsHelpers(this).numericChunkRollup((C1Chunk) c, start, checksum); else if (c instanceof C1SChunk) checksum=new RollupStatsHelpers(this).numericChunkRollup((C1SChunk) c, start, checksum); else if (c instanceof C1NChunk) checksum=new RollupStatsHelpers(this).numericChunkRollup((C1NChunk) c, start, checksum); else if (c instanceof C2Chunk) checksum=new RollupStatsHelpers(this).numericChunkRollup((C2Chunk) c, start, checksum); else if (c instanceof C2SChunk) checksum=new RollupStatsHelpers(this).numericChunkRollup((C2SChunk) c, start, checksum); else if (c instanceof C4SChunk) checksum=new RollupStatsHelpers(this).numericChunkRollup((C4SChunk) c, start, checksum); else if (c instanceof C4FChunk) checksum=new RollupStatsHelpers(this).numericChunkRollup((C4FChunk) c, start, checksum); else if (c instanceof C4Chunk) checksum=new RollupStatsHelpers(this).numericChunkRollup((C4Chunk) c, start, checksum); else if (c instanceof C8Chunk) checksum=new RollupStatsHelpers(this).numericChunkRollup((C8Chunk) c, start, checksum); else if (c instanceof C8DChunk) checksum=new RollupStatsHelpers(this).numericChunkRollup((C8DChunk) c, start, checksum); else checksum=new RollupStatsHelpers(this).numericChunkRollup(c, start, checksum); // special case for sparse chunks // we need to merge with the mean (0) and variance (0) of the zeros count of 0s of the sparse chunk - which were skipped above // _rows is the count of non-zero rows // _mean is the mean of non-zero rows // _sigma is the mean of non-zero rows // handle the zeros if( c.isSparseZero() ) { int zeros = c._len - c.sparseLenZero(); if (zeros > 0) { for( int i=0; i<Math.min(_mins.length,zeros); i++ ) { min(0); max(0); } double zeromean = 0; double zeroM2 = 0; double delta = _mean - zeromean; _mean = (_mean * _rows + zeromean * zeros) / (_rows + zeros); _sigma += zeroM2 + delta*delta * _rows * zeros / (_rows + zeros); //this is the variance*(N-1), will do sqrt(_sigma/(N-1)) later in postGlobal _rows += zeros; } } else if(c.isSparseNA()){ _naCnt = c._len - c.sparseLenNA(); } } _checksum = checksum; // UUID and String columns do not compute min/max/mean/sigma if( isUUID || isString) { Arrays.fill(_mins,Double.NaN); Arrays.fill(_maxs,Double.NaN); _mean = _sigma = Double.NaN; } return this; } private void reduce( RollupStats rs ) { for( double d : rs._mins ) if (!Double.isNaN(d)) min(d); for( double d : rs._maxs ) if (!Double.isNaN(d)) max(d); _naCnt += rs._naCnt; _nzCnt += rs._nzCnt; _pinfs += rs._pinfs; _ninfs += rs._ninfs; if (_rows == 0) { _mean = rs._mean; _sigma = rs._sigma; } else if(rs._rows != 0){ double delta = _mean - rs._mean; _mean = (_mean * _rows + rs._mean * rs._rows) / (_rows + rs._rows); _sigma += rs._sigma + delta*delta * _rows*rs._rows / (_rows+rs._rows); } _rows += rs._rows; _size += rs._size; _isInt &= rs._isInt; _checksum ^= rs._checksum; } double min( double d ) { assert(!Double.isNaN(d)); for( int i=0; i<_mins.length; i++ ) if( d < _mins[i] ) { double tmp = _mins[i]; _mins[i] = d; d = tmp; } return _mins[_mins.length-1]; } double max( double d ) { assert(!Double.isNaN(d)); for( int i=0; i<_maxs.length; i++ ) if( d > _maxs[i] ) { double tmp = _maxs[i]; _maxs[i] = d; d = tmp; } return _maxs[_maxs.length-1]; } private static class Roll extends MRTask<Roll> { final Key _rskey; RollupStats _rs; @Override protected boolean modifiesVolatileVecs(){return false;} Roll( H2OCountedCompleter cmp, Key rskey ) { super(cmp); _rskey=rskey; } @Override public void map( Chunk c ) { _rs = new RollupStats(0).map(c); } 
@Override public void reduce( Roll roll ) { _rs.reduce(roll._rs); } @Override public void postGlobal() { if( _rs == null ) _rs = new RollupStats(0); else { _rs._sigma = Math.sqrt(_rs._sigma/(_rs._rows-1)); if (_rs._rows == 1) _rs._sigma = 0; if (_rs._rows < 5) for (int i=0; i<5-_rs._rows; i++) { // Fix PUBDEV-150 for files under 5 rows _rs._maxs[4-i] = Double.NaN; _rs._mins[4-i] = Double.NaN; } } // mean & sigma not allowed on more than 2 classes; for 2 classes the assumption is that it's true/false Vec vec = _fr.anyVec(); String[] ss = vec.domain(); if( vec.isCategorical() && ss.length > 2 ) _rs._mean = _rs._sigma = Double.NaN; if( ss != null ) { long dsz = (2/*hdr*/+1/*len*/+ss.length)*8; // Size of base domain array for( String s : vec.domain() ) if( s != null ) dsz += 2*s.length() + (2/*hdr*/+1/*value*/+1/*hash*/+2/*hdr*/+1/*len*/)*8; _rs._size += dsz; // Account for domain size in Vec size // Account for Chunk key size int keysize = (2/*hdr*/+1/*kb*/+1/*hash*/+2/*hdr*/+1/*len*/)*8+ vec._key._kb.length; _rs._size += vec.nChunks()*(keysize*4/*key+value ptr in DKV, plus 50% fill rate*/); } } // Just toooo common to report always. Drowning in multi-megabyte log file writes. @Override public boolean logVerbose() { return false; } /** * Added to avoid deadlocks when running from idea in debug mode (evaluating toSgtring on mr task causes rollups to be computed) * @return */ @Override public String toString(){ return "Roll(" + _fr.anyVec()._key + ")"; } } @Override public String toString() { return "RollupStats{" + "state=" + modeDescription() + '}'; } static void start(final Vec vec, Futures fs, boolean computeHisto) { if( vec instanceof InteractionWrappedVec ) return; if( DKV.get(vec._key)== null ) throw new H2OConcurrentModificationException("Rollups not possible, because Vec was deleted: "+vec._key); if( vec.isString() ) computeHisto = false; // No histogram for string columns final Key rskey = vec.rollupStatsKey(); RollupStats rs = getOrNull(vec,rskey); if(rs == null || (computeHisto && !rs.hasHisto())) fs.add(new RPC(rskey.home_node(),new ComputeRollupsTask(vec,computeHisto)).addCompleter(new H2OCallback() { @Override public void callback(H2OCountedCompleter h2OCountedCompleter) { DKV.get(rskey); // fetch new results via DKV to enable caching of the results. } }).call()); } private static NonBlockingHashMap<Key,RPC> _pendingRollups = new NonBlockingHashMap<>(); static RollupStats get(Vec vec, boolean computeHisto) { if( DKV.get(vec._key)== null ) { throw new H2OConcurrentModificationException("Rollups not possible, because Vec was deleted: " + vec._key); } if( vec.isString() ) { computeHisto = false; // No histogram for string columns } final Key rskey = vec.rollupStatsKey(); RollupStats rs = DKV.getGet(rskey); while(rs == null || (!rs.isReady() || (computeHisto && !rs.hasHisto()))){ if(rs != null && rs.isMutating()) throw new H2OConcurrentModificationException("Can not compute rollup stats while vec is being modified. (1)"); // 1. compute only once try { RPC rpcNew = new RPC(rskey.home_node(),new ComputeRollupsTask(vec, computeHisto)); RPC rpcOld = _pendingRollups.putIfAbsent(rskey, rpcNew); if(rpcOld == null) { // no prior pending task, need to send this one rpcNew.call().get(); _pendingRollups.remove(rskey); } else // rollups computation is already in progress, wait for it to finish rpcOld.get(true); } catch( Throwable t ) { System.err.println("Remote rollups failed with an exception, wrapping and rethrowing: "+t); throw new RuntimeException(t); } // 2. 
fetch - done in two steps to go through standard DKV.get and enable local caching rs = DKV.getGet(rskey); } return rs; } // Allow a bunch of rollups to run in parallel. If Futures is passed in, run // the rollup in the background and do not return. static RollupStats get(Vec vec) { return get(vec,false);} // Fetch if present, but do not compute static RollupStats getOrNull(Vec vec, final Key rskey ) { Value val = DKV.get(rskey); if( val == null ) // No rollup stats present? return vec.length() > 0 ? /*not computed*/null : /*empty vec*/new RollupStats(0); RollupStats rs = val.get(RollupStats.class); return rs.isReady() ? rs : null; } // Histogram base & stride double h_base() { return _mins[0]; } double h_stride() { return h_stride(_bins.length); } private double h_stride(int nbins) { return (_maxs[0]-_mins[0]+(_isInt?1:0))/nbins; } // Compute expensive histogram private static class Histo extends MRTask<Histo> { final double _base, _stride; // Inputs final int _nbins; // Inputs long[] _bins; // Outputs Histo( H2OCountedCompleter cmp, RollupStats rs, int nbins ) { super(cmp);_base = rs.h_base(); _stride = rs.h_stride(nbins); _nbins = nbins; } @Override public void map( Chunk c ) { _bins = new long[_nbins]; for( int i=c.nextNZ(-1); i< c._len; i=c.nextNZ(i) ) { double d = c.atd(i); if( !Double.isNaN(d) ) _bins[idx(d)]++; } // Sparse? We skipped all the zeros; do them now if( c.isSparseZero() ) _bins[idx(0.0)] += (c._len - c.sparseLenZero()); } private int idx( double d ) { int idx = (int)((d-_base)/_stride); return Math.min(idx,_bins.length-1); } @Override public void reduce( Histo h ) { ArrayUtils.add(_bins,h._bins); } // Just toooo common to report always. Drowning in multi-megabyte log file writes. @Override public boolean logVerbose() { return false; } } // Task to compute rollups on its homenode if needed. // Only computes the rollups, does not fetch them, caller should fetch them via DKV store (to preserve caching). // Only comutes the rollups if needed (i.e. are null or do not have histo and histo is required) // If rs computation is already in progress, it will wait for it to finish. // Throws IAE if the Vec is being modified (or removed) while this task is in progress. static final class ComputeRollupsTask extends DTask<ComputeRollupsTask>{ final Key _vecKey; final Key _rsKey; final boolean _computeHisto; public ComputeRollupsTask(Vec v, boolean computeHisto){ super((byte)(Thread.currentThread() instanceof H2O.FJWThr ? currThrPriority()+1 : H2O.MIN_HI_PRIORITY-3)); _vecKey = v._key; _rsKey = v.rollupStatsKey(); _computeHisto = computeHisto; } private Value makeComputing(){ RollupStats newRs = RollupStats.makeComputing(); CountedCompleter cc = getCompleter(); // should be null or RPCCall if(cc != null) assert cc.getCompleter() == null; newRs._tsk = cc == null?this:cc; return new Value(_rsKey,newRs); } private void installResponse(Value nnn, RollupStats rs) { Futures fs = new Futures(); Value old = DKV.DputIfMatch(_rsKey, new Value(_rsKey, rs), nnn, fs); assert rs.isReady(); if(old != nnn) throw new H2OConcurrentModificationException("Can not compute rollup stats while vec is being modified. (2)"); fs.blockForPending(); } @Override public void compute2() { assert _rsKey.home(); final Vec vec = DKV.getGet(_vecKey); while(true) { Value v = DKV.get(_rsKey); RollupStats rs = (v == null) ? 
null : v.<RollupStats>get(); // Fetched current rs from the DKV, rs can be: // a) computed // a.1) has histo or histo not required => do nothing // a.2) no histo and histo is required => only compute histo // b) computing => wait for the task computing it to finish and check again // c) mutating => throw IAE // d) null => compute new rollups if (rs != null) { if (rs.isReady()) { if (_computeHisto && !rs.hasHisto()) { // a.2 => compute rollups CountedCompleter cc = getCompleter(); // should be null or RPCCall if(cc != null) assert cc.getCompleter() == null; // note: if cc == null then onExceptionalCompletion tasks waiting on this may be woken up before exception handling iff exception is thrown. Value nnn = makeComputing(); Futures fs = new Futures(); Value oldv = DKV.DputIfMatch(_rsKey, nnn, v, fs); fs.blockForPending(); if(oldv == v){ // got the lock computeHisto(rs, vec, nnn); break; } // else someone else is modifying the rollups => try again } else break; // a.1 => do nothing } else if (rs.isComputing()) { // b) => wait for current computation to finish rs._tsk.join(); } else if(rs.isMutating()) // c) => throw IAE throw new H2OConcurrentModificationException("Can not compute rollup stats while vec is being modified. (3)"); } else { // d) => compute the rollups final Value nnn = makeComputing(); Futures fs = new Futures(); Value oldv = DKV.DputIfMatch(_rsKey, nnn, v, fs); fs.blockForPending(); if(oldv == v){ // got the lock, compute the rollups try { Roll r = new Roll(null, _rsKey).doAll(vec); // computed the stats, now compute histo if needed and install the response and quit r._rs._checksum ^= vec.length(); if (_computeHisto) computeHisto(r._rs, vec, nnn); else installResponse(nnn, r._rs); break; } catch (Exception e) { Log.err(e); cleanupStats(nnn); throw e; } } // else someone else is modifying the rollups => try again } } tryComplete(); } private boolean cleanupStats(Value current) { Futures fs = new Futures(); Value old = DKV.DputIfMatch(_rsKey, null, current, fs); boolean success = old != current; fs.blockForPending(); return success; } final void computeHisto(final RollupStats rs, Vec vec, final Value nnn) { // All NAs or non-math; histogram has zero bins if (rs._naCnt == vec.length() || vec.isUUID()) { rs._bins = new long[0]; installResponse(nnn, rs); return; } // Constant: use a single bin double span = rs._maxs[0] - rs._mins[0]; final long rows = vec.length() - rs._naCnt; assert rows > 0 : "rows = " + rows + ", vec.len() = " + vec.length() + ", naCnt = " + rs._naCnt; if (span == 0) { rs._bins = new long[]{rows}; installResponse(nnn, rs); return; } // Number of bins: MAX_SIZE by default. For integers, bins for each unique int // - unless the count gets too high; allow a very high count for categoricals. int nbins = MAX_SIZE; if (rs._isInt && span < Integer.MAX_VALUE) { nbins = (int) span + 1; // 1 bin per int int lim = vec.isCategorical() ? 
Categorical.MAX_CATEGORICAL_COUNT : MAX_SIZE; nbins = Math.min(lim, nbins); // Cap nbins at sane levels } Histo histo = new Histo(null, rs, nbins).doAll(vec); long sum = ArrayUtils.sum(histo._bins); assert sum == rows:"expected " + rows + " rows, got " + sum; rs._bins = histo._bins; // Compute percentiles from histogram rs._pctiles = new double[Vec.PERCENTILES.length]; int j = 0; // Histogram bin number int k = 0; // The next non-zero bin after j long hsum = 0; // Rolling histogram sum double base = rs.h_base(); double stride = rs.h_stride(); double lastP = -1.0; // any negative value to pass assert below first time for (int i = 0; i < Vec.PERCENTILES.length; i++) { final double P = Vec.PERCENTILES[i]; assert P >= 0 && P <= 1 && P >= lastP; // rely on increasing percentiles here. If P has dup then strange but accept, hence >= not > lastP = P; double pdouble = 1.0 + P * (rows - 1); // following stats:::quantile.default type 7 long pint = (long) pdouble; // 1-based into bin vector double h = pdouble - pint; // any fraction h to linearly interpolate between? assert P != 1 || (h == 0.0 && pint == rows); // i.e. max while (hsum < pint) hsum += rs._bins[j++]; // j overshot by 1 bin; we added _bins[j-1] and this goes from too low to either exactly right or too big // pint now falls in bin j-1 (the ++ happened even when hsum==pint), so grab that bin value now rs._pctiles[i] = base + stride * (j - 1); if (h > 0 && pint == hsum) { // linearly interpolate between adjacent non-zero bins // i) pint is the last of (j-1)'s bin count (>1 when either duplicates exist in input, or stride makes dups at lower accuracy) // AND ii) h>0 so we do need to find the next non-zero bin if (k < j) k = j; // if j jumped over the k needed for the last P, catch k up to j // Saves potentially winding k forward over the same zero stretch many times while (rs._bins[k] == 0) k++; // find the next non-zero bin rs._pctiles[i] += h * stride * (k - j + 1); } // otherwise either h==0 and we know which bin, or fraction is between two positions that fall in the same bin // this guarantees we are within one bin of the exact answer; i.e. within (max-min)/MAX_SIZE } installResponse(nnn, rs); } } }
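A standalone sketch of the moment bookkeeping RollupStats relies on, written with plain fields instead of H2O classes; the class and field names are illustrative. The reduce() above combines per-chunk means and M2 sums with the usual parallel-variance formula, and postGlobal() turns M2 into sigma.

// Illustrative, self-contained mirror of the mean/M2 merge in RollupStats.reduce()/postGlobal().
public class MomentMergeSketch {
  long rows;    // number of non-NA values seen so far
  double mean;  // running mean of those values
  double m2;    // running sum of squared deviations from the mean

  // Combine with another partial result (same formula as RollupStats.reduce()).
  void reduce(MomentMergeSketch rs) {
    if (rows == 0) { rows = rs.rows; mean = rs.mean; m2 = rs.m2; return; }
    if (rs.rows == 0) return;
    double delta = mean - rs.mean;
    mean = (mean * rows + rs.mean * rs.rows) / (rows + rs.rows);
    m2 += rs.m2 + delta * delta * rows * rs.rows / (double) (rows + rs.rows);
    rows += rs.rows;
  }

  // Final standard deviation, as computed in postGlobal(): sqrt(M2 / (rows - 1)).
  double sigma() { return rows > 1 ? Math.sqrt(m2 / (rows - 1)) : 0; }
}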
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/RollupStatsHelpers.java
package water.fvec; /** * DO NOT CHANGE ANY CODE */ public class RollupStatsHelpers { private final RollupStats _rs; RollupStatsHelpers(RollupStats rs) { _rs = rs; } /** * MASTER TEMPLATE - All methods below are COPY & PASTED from this template, and some optimizations are performed based on the chunk types * * @param c * @param start * @param checksum * @return */ public long numericChunkRollup(Chunk c, long start, long checksum) { long pinfs=0, ninfs=0, naCnt=0, nzCnt=0; // pull (some) members into local variables for speed boolean isInt = _rs._isInt; boolean hasNA = c.hasNA(); boolean hasFloat = c.hasFloat(); double dmin = _rs._mins[_rs._mins.length-1]; double dmax = _rs._maxs[_rs._maxs.length-1]; assert(_rs._pinfs == 0); assert(_rs._ninfs == 0); assert(_rs._naCnt == 0); assert(_rs._nzCnt == 0); assert(dmin == Double.MAX_VALUE); assert(dmax == -Double.MAX_VALUE); long rows = 0; //count of non-NA rows, might be >0 for sparse chunks (all 0s are already processed outside) double mean = 0; //mean of non-NA rows, will be 0 for all 0s of sparse chunks double M2 = 0; //variance of non-NA rows, will be 0 for all 0s of sparse chunks // loop over all values for dense chunks, but only the non-zeros for sparse chunks for (int i = c.nextNZ(-1); i < c._len; i = c.nextNZ(i)) { if (hasNA && c.isNA(i)) naCnt++; else { double x = c.atd(i); long l = hasFloat ? Double.doubleToRawLongBits(x) : c.at8(i); if (l != 0) // ignore 0s in checksum to be consistent with sparse chunks checksum ^= (17 * (start + i)) ^ 23 * l; if (x == Double.POSITIVE_INFINITY) pinfs++; else if (x == Double.NEGATIVE_INFINITY) ninfs++; else { if (x != 0) nzCnt++; if (x < dmin) dmin = _rs.min(x); if (x > dmax) dmax = _rs.max(x); if (isInt) isInt = (long)x == x; rows++; double delta = x - mean; mean += delta / rows; M2 += delta * (x - mean); } } } // write back local variables into members _rs._pinfs = pinfs; _rs._ninfs = ninfs; _rs._naCnt = naCnt; _rs._nzCnt = nzCnt; _rs._rows += rows; // add to pre-filled value for sparse chunks _rs._isInt = isInt; _rs._mean = mean; _rs._sigma = M2; return checksum; } public long numericChunkRollup(C1Chunk c, long start, long checksum) { long pinfs=0, ninfs=0, naCnt=0, nzCnt=0; // pull (some) members into local variables for speed // boolean isInt = _rs._isInt; boolean isInt = true; //boolean hasNA = c.hasNA(); boolean hasNA = true; // boolean hasFloat = c.hasFloat(); double dmin = _rs._mins[_rs._mins.length-1]; double dmax = _rs._maxs[_rs._maxs.length-1]; assert(_rs._pinfs == 0); assert(_rs._ninfs == 0); assert(_rs._naCnt == 0); assert(_rs._nzCnt == 0); assert(dmin == Double.MAX_VALUE); assert(dmax == -Double.MAX_VALUE); long rows = 0; //count of non-NA rows, might be >0 for sparse chunks (all 0s are already processed outside) double mean = 0; //mean of non-NA rows, will be 0 for all 0s of sparse chunks double M2 = 0; //variance of non-NA rows, will be 0 for all 0s of sparse chunks // loop over all values for dense chunks, but only the non-zeros for sparse chunks int len = c._len; for (int i=0; i < len; ++i){ if (hasNA && c.isNA(i)) naCnt++; else { // double x = c.atd(i); long l = c.at8(i); double x = (double)l; // long l = hasFloat ? 
Double.doubleToRawLongBits(x) : c.at8(i); if (l != 0) // ignore 0s in checksum to be consistent with sparse chunks checksum ^= (17 * (start + i)) ^ 23 * l; // if (x == Double.POSITIVE_INFINITY) pinfs++; // else if (x == Double.NEGATIVE_INFINITY) ninfs++; // else { if (x != 0) nzCnt++; if (x < dmin) dmin = _rs.min(x); if (x > dmax) dmax = _rs.max(x); // if (isInt) isInt = (long)x == x; rows++; double delta = x - mean; mean += delta / rows; M2 += delta * (x - mean); } } } // write back local variables into members _rs._pinfs = pinfs; _rs._ninfs = ninfs; _rs._naCnt = naCnt; _rs._nzCnt = nzCnt; _rs._rows += rows; // add to pre-filled value for sparse chunks _rs._isInt = isInt; _rs._mean = mean; _rs._sigma = M2; return checksum; } public long numericChunkRollup(C1NChunk c, long start, long checksum) { long pinfs=0, ninfs=0, naCnt=0, nzCnt=0; // pull (some) members into local variables for speed // boolean isInt = _rs._isInt; boolean isInt = true; // boolean hasNA = c.hasNA(); boolean hasNA = false; // boolean hasFloat = c.hasFloat(); double dmin = _rs._mins[_rs._mins.length-1]; double dmax = _rs._maxs[_rs._maxs.length-1]; assert(_rs._pinfs == 0); assert(_rs._ninfs == 0); assert(_rs._naCnt == 0); assert(_rs._nzCnt == 0); assert(dmin == Double.MAX_VALUE); assert(dmax == -Double.MAX_VALUE); long rows = 0; //count of non-NA rows, might be >0 for sparse chunks (all 0s are already processed outside) double mean = 0; //mean of non-NA rows, will be 0 for all 0s of sparse chunks double M2 = 0; //variance of non-NA rows, will be 0 for all 0s of sparse chunks // loop over all values for dense chunks, but only the non-zeros for sparse chunks int len = c._len; for (int i=0; i < len; ++i){ if (hasNA && c.isNA(i)) naCnt++; else { long l = c.at8(i); double x = (double)l; // double x = c.atd(i); // long l = hasFloat ? Double.doubleToRawLongBits(x) : c.at8(i); if (l != 0) // ignore 0s in checksum to be consistent with sparse chunks checksum ^= (17 * (start + i)) ^ 23 * l; // if (x == Double.POSITIVE_INFINITY) pinfs++; // else if (x == Double.NEGATIVE_INFINITY) ninfs++; // else { if (x != 0) nzCnt++; if (x < dmin) dmin = _rs.min(x); if (x > dmax) dmax = _rs.max(x); // if (isInt) isInt = (long)x == x; rows++; double delta = x - mean; mean += delta / rows; M2 += delta * (x - mean); } } } // write back local variables into members _rs._pinfs = pinfs; _rs._ninfs = ninfs; _rs._naCnt = naCnt; _rs._nzCnt = nzCnt; _rs._rows += rows; // add to pre-filled value for sparse chunks _rs._isInt = isInt; _rs._mean = mean; _rs._sigma = M2; return checksum; } public long numericChunkRollup(C1SChunk c, long start, long checksum) { long pinfs=0, ninfs=0, naCnt=0, nzCnt=0; // pull (some) members into local variables for speed boolean isInt = _rs._isInt; boolean hasNA = c.hasNA(); boolean hasFloat = c.hasFloat(); double dmin = _rs._mins[_rs._mins.length-1]; double dmax = _rs._maxs[_rs._maxs.length-1]; assert(_rs._pinfs == 0); assert(_rs._ninfs == 0); assert(_rs._naCnt == 0); assert(_rs._nzCnt == 0); assert(dmin == Double.MAX_VALUE); assert(dmax == -Double.MAX_VALUE); long rows = 0; //count of non-NA rows, might be >0 for sparse chunks (all 0s are already processed outside) double mean = 0; //mean of non-NA rows, will be 0 for all 0s of sparse chunks double M2 = 0; //variance of non-NA rows, will be 0 for all 0s of sparse chunks // loop over all values for dense chunks, but only the non-zeros for sparse chunks int len = c._len; for (int i=0; i < len; ++i){ if (hasNA && c.isNA(i)) naCnt++; else { double x = c.atd(i); long l = hasFloat ? 
Double.doubleToRawLongBits(x) : c.at8(i); if (l != 0) // ignore 0s in checksum to be consistent with sparse chunks checksum ^= (17 * (start + i)) ^ 23 * l; if (x == Double.POSITIVE_INFINITY) pinfs++; else if (x == Double.NEGATIVE_INFINITY) ninfs++; else { if (x != 0) nzCnt++; if (x < dmin) dmin = _rs.min(x); if (x > dmax) dmax = _rs.max(x); if (isInt) isInt = (long)x == x; rows++; double delta = x - mean; mean += delta / rows; M2 += delta * (x - mean); } } } // write back local variables into members _rs._pinfs = pinfs; _rs._ninfs = ninfs; _rs._naCnt = naCnt; _rs._nzCnt = nzCnt; _rs._rows += rows; // add to pre-filled value for sparse chunks _rs._isInt = isInt; _rs._mean = mean; _rs._sigma = M2; return checksum; } public long numericChunkRollup(C2Chunk c, long start, long checksum) { long pinfs=0, ninfs=0, naCnt=0, nzCnt=0; // pull (some) members into local variables for speed // boolean isInt = _rs._isInt; boolean isInt = true; // boolean hasNA = c.hasNA(); boolean hasNA = true; // boolean hasFloat = c.hasFloat(); double dmin = _rs._mins[_rs._mins.length-1]; double dmax = _rs._maxs[_rs._maxs.length-1]; assert(_rs._pinfs == 0); assert(_rs._ninfs == 0); assert(_rs._naCnt == 0); assert(_rs._nzCnt == 0); assert(dmin == Double.MAX_VALUE); assert(dmax == -Double.MAX_VALUE); long rows = 0; //count of non-NA rows, might be >0 for sparse chunks (all 0s are already processed outside) double mean = 0; //mean of non-NA rows, will be 0 for all 0s of sparse chunks double M2 = 0; //variance of non-NA rows, will be 0 for all 0s of sparse chunks // loop over all values for dense chunks, but only the non-zeros for sparse chunks int len = c._len; for (int i=0; i < len; ++i){ if (hasNA && c.isNA(i)) naCnt++; else { long l = c.at8(i); double x = (double)l; // double x = c.atd(i); // long l = hasFloat ? Double.doubleToRawLongBits(x) : c.at8(i); if (l != 0) // ignore 0s in checksum to be consistent with sparse chunks checksum ^= (17 * (start + i)) ^ 23 * l; // if (x == Double.POSITIVE_INFINITY) pinfs++; // else if (x == Double.NEGATIVE_INFINITY) ninfs++; // else { if (x != 0) nzCnt++; if (x < dmin) dmin = _rs.min(x); if (x > dmax) dmax = _rs.max(x); // if (isInt) isInt = (long)x == x; rows++; double delta = x - mean; mean += delta / rows; M2 += delta * (x - mean); } } } // write back local variables into members _rs._pinfs = pinfs; _rs._ninfs = ninfs; _rs._naCnt = naCnt; _rs._nzCnt = nzCnt; _rs._rows += rows; // add to pre-filled value for sparse chunks _rs._isInt = isInt; _rs._mean = mean; _rs._sigma = M2; return checksum; } public long numericChunkRollup(C2SChunk c, long start, long checksum) { long pinfs=0, ninfs=0, naCnt=0, nzCnt=0; // pull (some) members into local variables for speed boolean isInt = _rs._isInt; boolean hasNA = c.hasNA(); boolean hasFloat = c.hasFloat(); double dmin = _rs._mins[_rs._mins.length-1]; double dmax = _rs._maxs[_rs._maxs.length-1]; assert(_rs._pinfs == 0); assert(_rs._ninfs == 0); assert(_rs._naCnt == 0); assert(_rs._nzCnt == 0); assert(dmin == Double.MAX_VALUE); assert(dmax == -Double.MAX_VALUE); long rows = 0; //count of non-NA rows, might be >0 for sparse chunks (all 0s are already processed outside) double mean = 0; //mean of non-NA rows, will be 0 for all 0s of sparse chunks double M2 = 0; //variance of non-NA rows, will be 0 for all 0s of sparse chunks // loop over all values for dense chunks, but only the non-zeros for sparse chunks int len = c._len; for (int i=0; i < len; ++i){ if (hasNA && c.isNA(i)) naCnt++; else { double x = c.atd(i); long l = hasFloat ? 
Double.doubleToRawLongBits(x) : c.at8(i); if (l != 0) // ignore 0s in checksum to be consistent with sparse chunks checksum ^= (17 * (start + i)) ^ 23 * l; if (x == Double.POSITIVE_INFINITY) pinfs++; else if (x == Double.NEGATIVE_INFINITY) ninfs++; else { if (x != 0) nzCnt++; if (x < dmin) dmin = _rs.min(x); if (x > dmax) dmax = _rs.max(x); if (isInt) isInt = (long)x == x; rows++; double delta = x - mean; mean += delta / rows; M2 += delta * (x - mean); } } } // write back local variables into members _rs._pinfs = pinfs; _rs._ninfs = ninfs; _rs._naCnt = naCnt; _rs._nzCnt = nzCnt; _rs._rows += rows; // add to pre-filled value for sparse chunks _rs._isInt = isInt; _rs._mean = mean; _rs._sigma = M2; return checksum; } public long numericChunkRollup(C4Chunk c, long start, long checksum) { long pinfs=0, ninfs=0, naCnt=0, nzCnt=0; // pull (some) members into local variables for speed // boolean isInt = _rs._isInt; // boolean hasNA = c.hasNA(); boolean isInt = true; boolean hasNA = true; // boolean hasFloat = c.hasFloat(); double dmin = _rs._mins[_rs._mins.length-1]; double dmax = _rs._maxs[_rs._maxs.length-1]; assert(_rs._pinfs == 0); assert(_rs._ninfs == 0); assert(_rs._naCnt == 0); assert(_rs._nzCnt == 0); assert(dmin == Double.MAX_VALUE); assert(dmax == -Double.MAX_VALUE); long rows = 0; //count of non-NA rows, might be >0 for sparse chunks (all 0s are already processed outside) double mean = 0; //mean of non-NA rows, will be 0 for all 0s of sparse chunks double M2 = 0; //variance of non-NA rows, will be 0 for all 0s of sparse chunks // loop over all values for dense chunks, but only the non-zeros for sparse chunks int len = c._len; for (int i=0; i < len; ++i){ if (hasNA && c.isNA(i)) naCnt++; else { long l = c.at8(i); double x = (double)l; // double x = c.atd(i); // long l = hasFloat ? Double.doubleToRawLongBits(x) : c.at8(i); if (l != 0) // ignore 0s in checksum to be consistent with sparse chunks checksum ^= (17 * (start + i)) ^ 23 * l; // if (x == Double.POSITIVE_INFINITY) pinfs++; // else if (x == Double.NEGATIVE_INFINITY) ninfs++; // else { if (x != 0) nzCnt++; if (x < dmin) dmin = _rs.min(x); if (x > dmax) dmax = _rs.max(x); // if (isInt) isInt = (long)x == x; rows++; double delta = x - mean; mean += delta / rows; M2 += delta * (x - mean); } } } // write back local variables into members _rs._pinfs = pinfs; _rs._ninfs = ninfs; _rs._naCnt = naCnt; _rs._nzCnt = nzCnt; _rs._rows += rows; // add to pre-filled value for sparse chunks _rs._isInt = isInt; _rs._mean = mean; _rs._sigma = M2; return checksum; } public long numericChunkRollup(C4FChunk c, long start, long checksum) { long pinfs=0, ninfs=0, naCnt=0, nzCnt=0; // pull (some) members into local variables for speed boolean isInt = _rs._isInt; boolean hasNA = c.hasNA(); boolean hasFloat = c.hasFloat(); double dmin = _rs._mins[_rs._mins.length-1]; double dmax = _rs._maxs[_rs._maxs.length-1]; assert(_rs._pinfs == 0); assert(_rs._ninfs == 0); assert(_rs._naCnt == 0); assert(_rs._nzCnt == 0); assert(dmin == Double.MAX_VALUE); assert(dmax == -Double.MAX_VALUE); long rows = 0; //count of non-NA rows, might be >0 for sparse chunks (all 0s are already processed outside) double mean = 0; //mean of non-NA rows, will be 0 for all 0s of sparse chunks double M2 = 0; //variance of non-NA rows, will be 0 for all 0s of sparse chunks // loop over all values for dense chunks, but only the non-zeros for sparse chunks int len = c._len; for (int i=0; i < len; ++i){ if (hasNA && c.isNA(i)) naCnt++; else { double x = c.atd(i); long l = hasFloat ? 
Double.doubleToRawLongBits(x) : c.at8(i); if (l != 0) // ignore 0s in checksum to be consistent with sparse chunks checksum ^= (17 * (start + i)) ^ 23 * l; if (x == Double.POSITIVE_INFINITY) pinfs++; else if (x == Double.NEGATIVE_INFINITY) ninfs++; else { if (x != 0) nzCnt++; if (x < dmin) dmin = _rs.min(x); if (x > dmax) dmax = _rs.max(x); if (isInt) isInt = (long)x == x; rows++; double delta = x - mean; mean += delta / rows; M2 += delta * (x - mean); } } } // write back local variables into members _rs._pinfs = pinfs; _rs._ninfs = ninfs; _rs._naCnt = naCnt; _rs._nzCnt = nzCnt; _rs._rows += rows; // add to pre-filled value for sparse chunks _rs._isInt = isInt; _rs._mean = mean; _rs._sigma = M2; return checksum; } public long numericChunkRollup(C4SChunk c, long start, long checksum) { long pinfs=0, ninfs=0, naCnt=0, nzCnt=0; // pull (some) members into local variables for speed boolean isInt = _rs._isInt; boolean hasNA = c.hasNA(); boolean hasFloat = c.hasFloat(); double dmin = _rs._mins[_rs._mins.length-1]; double dmax = _rs._maxs[_rs._maxs.length-1]; assert(_rs._pinfs == 0); assert(_rs._ninfs == 0); assert(_rs._naCnt == 0); assert(_rs._nzCnt == 0); assert(dmin == Double.MAX_VALUE); assert(dmax == -Double.MAX_VALUE); long rows = 0; //count of non-NA rows, might be >0 for sparse chunks (all 0s are already processed outside) double mean = 0; //mean of non-NA rows, will be 0 for all 0s of sparse chunks double M2 = 0; //variance of non-NA rows, will be 0 for all 0s of sparse chunks // loop over all values for dense chunks, but only the non-zeros for sparse chunks int len = c._len; for (int i=0; i < len; ++i){ if (hasNA && c.isNA(i)) naCnt++; else { double x = c.atd(i); long l = hasFloat ? Double.doubleToRawLongBits(x) : c.at8(i); if (l != 0) // ignore 0s in checksum to be consistent with sparse chunks checksum ^= (17 * (start + i)) ^ 23 * l; if (x == Double.POSITIVE_INFINITY) pinfs++; else if (x == Double.NEGATIVE_INFINITY) ninfs++; else { if (x != 0) nzCnt++; if (x < dmin) dmin = _rs.min(x); if (x > dmax) dmax = _rs.max(x); if (isInt) isInt = (long)x == x; rows++; double delta = x - mean; mean += delta / rows; M2 += delta * (x - mean); } } } // write back local variables into members _rs._pinfs = pinfs; _rs._ninfs = ninfs; _rs._naCnt = naCnt; _rs._nzCnt = nzCnt; _rs._rows += rows; // add to pre-filled value for sparse chunks _rs._isInt = isInt; _rs._mean = mean; _rs._sigma = M2; return checksum; } public long numericChunkRollup(C8Chunk c, long start, long checksum) { long pinfs=0, ninfs=0, naCnt=0, nzCnt=0; // pull (some) members into local variables for speed // boolean isInt = _rs._isInt; boolean isInt = true; boolean hasNA = c.hasNA(); boolean hasFloat = c.hasFloat(); double dmin = _rs._mins[_rs._mins.length-1]; double dmax = _rs._maxs[_rs._maxs.length-1]; assert(_rs._pinfs == 0); assert(_rs._ninfs == 0); assert(_rs._naCnt == 0); assert(_rs._nzCnt == 0); assert(dmin == Double.MAX_VALUE); assert(dmax == -Double.MAX_VALUE); long rows = 0; //count of non-NA rows, might be >0 for sparse chunks (all 0s are already processed outside) double mean = 0; //mean of non-NA rows, will be 0 for all 0s of sparse chunks double M2 = 0; //variance of non-NA rows, will be 0 for all 0s of sparse chunks // loop over all values for dense chunks, but only the non-zeros for sparse chunks int len = c._len; for (int i=0; i < len; ++i){ if (hasNA && c.isNA(i)) naCnt++; else { long l = c.at8(i); double x = (double)l; // double x = c.atd(i); // long l = hasFloat ? 
Double.doubleToRawLongBits(x) : c.at8(i); if (l != 0) // ignore 0s in checksum to be consistent with sparse chunks checksum ^= (17 * (start + i)) ^ 23 * l; // if (x == Double.POSITIVE_INFINITY) pinfs++; // else if (x == Double.NEGATIVE_INFINITY) ninfs++; // else { if (x != 0) nzCnt++; if (x < dmin) dmin = _rs.min(x); if (x > dmax) dmax = _rs.max(x); // if (isInt) isInt = (long)x == x; rows++; double delta = x - mean; mean += delta / rows; M2 += delta * (x - mean); } } } // write back local variables into members _rs._pinfs = pinfs; _rs._ninfs = ninfs; _rs._naCnt = naCnt; _rs._nzCnt = nzCnt; _rs._rows += rows; // add to pre-filled value for sparse chunks _rs._isInt = isInt; _rs._mean = mean; _rs._sigma = M2; return checksum; } public long numericChunkRollup(C8DChunk c, long start, long checksum) { long pinfs=0, ninfs=0, naCnt=0, nzCnt=0; // pull (some) members into local variables for speed boolean isInt = _rs._isInt; boolean hasNA = c.hasNA(); // boolean hasFloat = c.hasFloat(); boolean hasFloat = true; double dmin = _rs._mins[_rs._mins.length-1]; double dmax = _rs._maxs[_rs._maxs.length-1]; assert(_rs._pinfs == 0); assert(_rs._ninfs == 0); assert(_rs._naCnt == 0); assert(_rs._nzCnt == 0); assert(dmin == Double.MAX_VALUE); assert(dmax == -Double.MAX_VALUE); long rows = 0; //count of non-NA rows, might be >0 for sparse chunks (all 0s are already processed outside) double mean = 0; //mean of non-NA rows, will be 0 for all 0s of sparse chunks double M2 = 0; //variance of non-NA rows, will be 0 for all 0s of sparse chunks // loop over all values for dense chunks, but only the non-zeros for sparse chunks int len = c._len; for (int i=0; i < len; ++i){ if (hasNA && c.isNA(i)) naCnt++; else { double x = c.atd(i); long l = hasFloat ? Double.doubleToRawLongBits(x) : c.at8(i); if (l != 0) // ignore 0s in checksum to be consistent with sparse chunks checksum ^= (17 * (start + i)) ^ 23 * l; if (x == Double.POSITIVE_INFINITY) pinfs++; else if (x == Double.NEGATIVE_INFINITY) ninfs++; else { if (x != 0) nzCnt++; if (x < dmin) dmin = _rs.min(x); if (x > dmax) dmax = _rs.max(x); if (isInt) isInt = (long)x == x; rows++; double delta = x - mean; mean += delta / rows; M2 += delta * (x - mean); } } } // write back local variables into members _rs._pinfs = pinfs; _rs._ninfs = ninfs; _rs._naCnt = naCnt; _rs._nzCnt = nzCnt; _rs._rows += rows; // add to pre-filled value for sparse chunks _rs._isInt = isInt; _rs._mean = mean; _rs._sigma = M2; return checksum; } }
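The per-value update repeated in every numericChunkRollup variant above is the standard single-pass (Welford) mean/M2 recurrence. A compact standalone version, with illustrative names only:

// Single-pass mean and sample variance, mirroring the per-value update in numericChunkRollup.
public class WelfordSketch {
  public static double[] meanAndVariance(double[] xs) {
    long rows = 0;
    double mean = 0, m2 = 0;
    for (double x : xs) {
      rows++;
      double delta = x - mean;
      mean += delta / rows;       // running mean
      m2 += delta * (x - mean);   // running sum of squared deviations
    }
    double variance = rows > 1 ? m2 / (rows - 1) : 0; // RollupStats takes sqrt of M2/(rows-1) to get sigma
    return new double[]{mean, variance};
  }
}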
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/RyuDouble.java
// Copyright 2018 Ulf Adams // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package water.fvec; import java.math.BigInteger; /** * An implementation of Ryu for double. */ public final class RyuDouble { private static final int DOUBLE_MANTISSA_BITS = 52; private static final long DOUBLE_MANTISSA_MASK = (1L << DOUBLE_MANTISSA_BITS) - 1; private static final int DOUBLE_EXPONENT_BITS = 11; private static final int DOUBLE_EXPONENT_MASK = (1 << DOUBLE_EXPONENT_BITS) - 1; private static final int DOUBLE_EXPONENT_BIAS = (1 << (DOUBLE_EXPONENT_BITS - 1)) - 1; private static final int POS_TABLE_SIZE = 326; private static final int NEG_TABLE_SIZE = 291; // Only for debugging. private static final BigInteger[] POW5 = new BigInteger[POS_TABLE_SIZE]; private static final BigInteger[] POW5_INV = new BigInteger[NEG_TABLE_SIZE]; private static final int POW5_BITCOUNT = 121; // max 3*31 = 124 private static final int POW5_QUARTER_BITCOUNT = 31; private static final int[][] POW5_SPLIT = new int[POS_TABLE_SIZE][4]; private static final int POW5_INV_BITCOUNT = 122; // max 3*31 = 124 private static final int POW5_INV_QUARTER_BITCOUNT = 31; private static final int[][] POW5_INV_SPLIT = new int[NEG_TABLE_SIZE][4]; static { BigInteger mask = BigInteger.valueOf(1).shiftLeft(POW5_QUARTER_BITCOUNT).subtract(BigInteger.ONE); BigInteger invMask = BigInteger.valueOf(1).shiftLeft(POW5_INV_QUARTER_BITCOUNT).subtract(BigInteger.ONE); for (int i = 0; i < Math.max(POW5.length, POW5_INV.length); i++) { BigInteger pow = BigInteger.valueOf(5).pow(i); int pow5len = pow.bitLength(); int expectedPow5Bits = pow5bits(i); if (expectedPow5Bits != pow5len) { throw new IllegalStateException(pow5len + " != " + expectedPow5Bits); } if (i < POW5.length) { POW5[i] = pow; } if (i < POW5_SPLIT.length) { for (int j = 0; j < 4; j++) { POW5_SPLIT[i][j] = pow .shiftRight(pow5len - POW5_BITCOUNT + (3 - j) * POW5_QUARTER_BITCOUNT) .and(mask) .intValueExact(); } } if (i < POW5_INV_SPLIT.length) { // We want floor(log_2 5^q) here, which is pow5len - 1. int j = pow5len - 1 + POW5_INV_BITCOUNT; BigInteger inv = BigInteger.ONE.shiftLeft(j).divide(pow).add(BigInteger.ONE); POW5_INV[i] = inv; for (int k = 0; k < 4; k++) { if (k == 0) { POW5_INV_SPLIT[i][k] = inv.shiftRight((3 - k) * POW5_INV_QUARTER_BITCOUNT).intValueExact(); } else { POW5_INV_SPLIT[i][k] = inv.shiftRight((3 - k) * POW5_INV_QUARTER_BITCOUNT).and(invMask).intValueExact(); } } } } } public static String doubleToString(double value) { // Step 1: Decode the floating point number, and unify normalized and subnormal cases. // First, handle all the trivial cases. if (Double.isNaN(value)) return "NaN"; if (value == Double.POSITIVE_INFINITY) return "Infinity"; if (value == Double.NEGATIVE_INFINITY) return "-Infinity"; long bits = Double.doubleToLongBits(value); if (bits == 0) return "0.0"; if (bits == 0x8000000000000000L) return "-0.0"; // Otherwise extract the mantissa and exponent bits and run the full algorithm. 
int ieeeExponent = (int) ((bits >>> DOUBLE_MANTISSA_BITS) & DOUBLE_EXPONENT_MASK); long ieeeMantissa = bits & DOUBLE_MANTISSA_MASK; int e2; long m2; if (ieeeExponent == 0) { // Denormal number - no implicit leading 1, and the exponent is 1, not 0. e2 = 1 - DOUBLE_EXPONENT_BIAS - DOUBLE_MANTISSA_BITS; m2 = ieeeMantissa; } else { // Add implicit leading 1. e2 = ieeeExponent - DOUBLE_EXPONENT_BIAS - DOUBLE_MANTISSA_BITS; m2 = ieeeMantissa | (1L << DOUBLE_MANTISSA_BITS); } boolean sign = bits < 0; // Step 2: Determine the interval of legal decimal representations. boolean even = (m2 & 1) == 0; final long mv = 4 * m2; final long mp = 4 * m2 + 2; final int mmShift = ((m2 != (1L << DOUBLE_MANTISSA_BITS)) || (ieeeExponent <= 1)) ? 1 : 0; final long mm = 4 * m2 - 1 - mmShift; e2 -= 2; // Step 3: Convert to a decimal power base using 128-bit arithmetic. // -1077 = 1 - 1023 - 53 - 2 <= e_2 - 2 <= 2046 - 1023 - 53 - 2 = 968 long dv, dp, dm; final int e10; boolean dmIsTrailingZeros = false, dvIsTrailingZeros = false; if (e2 >= 0) { final int q = Math.max(0, ((e2 * 78913) >>> 18) - 1); // k = constant + floor(log_2(5^q)) final int k = POW5_INV_BITCOUNT + pow5bits(q) - 1; final int i = -e2 + q + k; dv = mulPow5InvDivPow2(mv, q, i); dp = mulPow5InvDivPow2(mp, q, i); dm = mulPow5InvDivPow2(mm, q, i); e10 = q; if (q <= 21) { if (mv % 5 == 0) { dvIsTrailingZeros = multipleOfPowerOf5(mv, q); } else if (even) { dmIsTrailingZeros = multipleOfPowerOf5(mm, q); } else if (multipleOfPowerOf5(mp, q)) { dp--; } } } else { final int q = Math.max(0, ((-e2 * 732923) >>> 20) - 1); final int i = -e2 - q; final int k = pow5bits(i) - POW5_BITCOUNT; final int j = q - k; dv = mulPow5divPow2(mv, i, j); dp = mulPow5divPow2(mp, i, j); dm = mulPow5divPow2(mm, i, j); e10 = q + e2; if (q <= 1) { dvIsTrailingZeros = true; if (even) { dmIsTrailingZeros = mmShift == 1; } else { dp--; } } else if (q < 63) { dvIsTrailingZeros = (mv & ((1L << (q - 1)) - 1)) == 0; } } // Step 4: Find the shortest decimal representation in the interval of legal representations. // // We do some extra work here in order to follow Float/Double.toString semantics. In particular, // that requires printing in scientific format if and only if the exponent is between -3 and 7, // and it requires printing at least two decimal digits. // // Above, we moved the decimal dot all the way to the right, so now we need to count digits to // figure out the correct exponent for scientific notation. final int vplength = decimalLength(dp); int exp = e10 + vplength - 1; // Double.toString semantics requires using scientific notation if and only if outside this range. boolean scientificNotation = !((exp >= -3) && (exp < 7)); int removed = 0; int lastRemovedDigit = 0; long output; if (dmIsTrailingZeros || dvIsTrailingZeros) { while (dp / 10 > dm / 10) { if ((dp < 100) && scientificNotation) { // Double.toString semantics requires printing at least two digits. break; } dmIsTrailingZeros &= dm % 10 == 0; dvIsTrailingZeros &= lastRemovedDigit == 0; lastRemovedDigit = (int) (dv % 10); dp /= 10; dv /= 10; dm /= 10; removed++; } if (dmIsTrailingZeros && even) { while (dm % 10 == 0) { if ((dp < 100) && scientificNotation) { // Double.toString semantics requires printing at least two digits. break; } dvIsTrailingZeros &= lastRemovedDigit == 0; lastRemovedDigit = (int) (dv % 10); dp /= 10; dv /= 10; dm /= 10; removed++; } } if (dvIsTrailingZeros && (lastRemovedDigit == 5) && (dv % 2 == 0)) { // Round even if the exact numbers is .....50..0. 
lastRemovedDigit = 4; } output = dv + ((dv == dm && !(dmIsTrailingZeros && even)) || (lastRemovedDigit >= 5) ? 1 : 0); } else { while (dp / 10 > dm / 10) { if ((dp < 100) && scientificNotation) { // Double.toString semantics requires printing at least two digits. break; } lastRemovedDigit = (int) (dv % 10); dp /= 10; dv /= 10; dm /= 10; removed++; } output = dv + ((dv == dm || (lastRemovedDigit >= 5)) ? 1 : 0); } int olength = vplength - removed; // Step 5: Print the decimal representation. // We follow Double.toString semantics here. byte[] result = new byte[24]; int index = 0; if (sign) { result[index++] = '-'; } // Values in the interval [1E-3, 1E7) are special. if (scientificNotation) { // Print in the format x.xxxxxE-yy. for (int i = 0; i < olength - 1; i++) { int c = (int) (output % 10); output /= 10; result[index + olength - i] = (byte) ('0' + c); } result[index] = (byte) ('0' + output % 10); result[index + 1] = '.'; index += olength + 1; if (olength == 1) { result[index++] = '0'; } // Print 'E', the exponent sign, and the exponent, which has at most three digits. result[index++] = 'E'; if (exp < 0) { result[index++] = '-'; exp = -exp; } if (exp >= 100) { result[index++] = (byte) ('0' + exp / 100); exp %= 100; result[index++] = (byte) ('0' + exp / 10); } else if (exp >= 10) { result[index++] = (byte) ('0' + exp / 10); } result[index++] = (byte) ('0' + exp % 10); return new String(result, 0, index); } else { // Otherwise follow the Java spec for values in the interval [1E-3, 1E7). if (exp < 0) { // Decimal dot is before any of the digits. result[index++] = '0'; result[index++] = '.'; for (int i = -1; i > exp; i--) { result[index++] = '0'; } int current = index; for (int i = 0; i < olength; i++) { result[current + olength - i - 1] = (byte) ('0' + output % 10); output /= 10; index++; } } else if (exp + 1 >= olength) { // Decimal dot is after any of the digits. for (int i = 0; i < olength; i++) { result[index + olength - i - 1] = (byte) ('0' + output % 10); output /= 10; } index += olength; for (int i = olength; i < exp + 1; i++) { result[index++] = '0'; } result[index++] = '.'; result[index++] = '0'; } else { // Decimal dot is somewhere between the digits. int current = index + 1; for (int i = 0; i < olength; i++) { if (olength - i - 1 == exp) { result[current + olength - i - 1] = '.'; current--; } result[current + olength - i - 1] = (byte) ('0' + output % 10); output /= 10; } index += olength + 1; } return new String(result, 0, index); } } private static int pow5bits(int e) { return ((e * 1217359) >>> 19) + 1; } private static int decimalLength(long v) { if (v >= 1000000000000000000L) return 19; if (v >= 100000000000000000L) return 18; if (v >= 10000000000000000L) return 17; if (v >= 1000000000000000L) return 16; if (v >= 100000000000000L) return 15; if (v >= 10000000000000L) return 14; if (v >= 1000000000000L) return 13; if (v >= 100000000000L) return 12; if (v >= 10000000000L) return 11; if (v >= 1000000000L) return 10; if (v >= 100000000L) return 9; if (v >= 10000000L) return 8; if (v >= 1000000L) return 7; if (v >= 100000L) return 6; if (v >= 10000L) return 5; if (v >= 1000L) return 4; if (v >= 100L) return 3; if (v >= 10L) return 2; return 1; } private static boolean multipleOfPowerOf5(long value, int q) { return pow5Factor(value) >= q; } private static int pow5Factor(long value) { // We want to find the largest power of 5 that divides value. 
if ((value % 5) != 0) return 0; if ((value % 25) != 0) return 1; if ((value % 125) != 0) return 2; if ((value % 625) != 0) return 3; int count = 4; value /= 625; while (value > 0) { if (value % 5 != 0) { return count; } value /= 5; count++; } throw new IllegalArgumentException("" + value); } /** * Compute the high digits of m * 5^p / 10^q = m * 5^(p - q) / 2^q = m * 5^i / 2^j, with q chosen * such that m * 5^i / 2^j has sufficiently many decimal digits to represent the original floating * point number. */ private static long mulPow5divPow2(long m, int i, int j) { // m has at most 55 bits. long mHigh = m >>> 31; long mLow = m & 0x7fffffff; long bits13 = mHigh * POW5_SPLIT[i][0]; // 124 long bits03 = mLow * POW5_SPLIT[i][0]; // 93 long bits12 = mHigh * POW5_SPLIT[i][1]; // 93 long bits02 = mLow * POW5_SPLIT[i][1]; // 62 long bits11 = mHigh * POW5_SPLIT[i][2]; // 62 long bits01 = mLow * POW5_SPLIT[i][2]; // 31 long bits10 = mHigh * POW5_SPLIT[i][3]; // 31 long bits00 = mLow * POW5_SPLIT[i][3]; // 0 int actualShift = j - 3 * 31 - 21; if (actualShift < 0) { throw new IllegalArgumentException("" + actualShift); } return (((((( ((bits00 >>> 31) + bits01 + bits10) >>> 31) + bits02 + bits11) >>> 31) + bits03 + bits12) >>> 21) + (bits13 << 10)) >>> actualShift; } /** * Compute the high digits of m / 5^i / 2^j such that the result is accurate to at least 9 * decimal digits. i and j are already chosen appropriately. */ private static long mulPow5InvDivPow2(long m, int i, int j) { // m has at most 55 bits. long mHigh = m >>> 31; long mLow = m & 0x7fffffff; long bits13 = mHigh * POW5_INV_SPLIT[i][0]; long bits03 = mLow * POW5_INV_SPLIT[i][0]; long bits12 = mHigh * POW5_INV_SPLIT[i][1]; long bits02 = mLow * POW5_INV_SPLIT[i][1]; long bits11 = mHigh * POW5_INV_SPLIT[i][2]; long bits01 = mLow * POW5_INV_SPLIT[i][2]; long bits10 = mHigh * POW5_INV_SPLIT[i][3]; long bits00 = mLow * POW5_INV_SPLIT[i][3]; int actualShift = j - 3 * 31 - 21; if (actualShift < 0) { throw new IllegalArgumentException("" + actualShift); } return (((((( ((bits00 >>> 31) + bits01 + bits10) >>> 31) + bits02 + bits11) >>> 31) + bits03 + bits12) >>> 21) + (bits13 << 10)) >>> actualShift; } }
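A small driver for the formatter above, assuming it is called from another package; the demo class name is illustrative. Since the implementation aims to follow Double.toString semantics, the two columns are expected to match for inputs like these.

public class RyuDoubleDemo {
  public static void main(String[] args) {
    double[] samples = {0.0, -0.0, 1.0, 0.3, 1e-3, 1234567.0, 1e7, Double.NaN, Double.POSITIVE_INFINITY};
    for (double d : samples) {
      // Ryu shortest round-trip output vs. the JDK's Double.toString
      System.out.println(water.fvec.RyuDouble.doubleToString(d) + "\t" + Double.toString(d));
    }
  }
}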
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/S3FileVec.java
package water.fvec;

import water.*;

/**
 * Created by tomas on 6/23/16.
 */
public class S3FileVec extends FileVec {
  private S3FileVec(Key key, long len) { super(key, len, Value.S3); }

  public static Key make(String path, long size) {
    Futures fs = new Futures();
    Key key = make(path, size, fs);
    fs.blockForPending();
    return key;
  }

  public static Key make(String path, long size, Futures fs) {
    Key k = Key.make(path);
    Key k2 = Vec.newKey(k);
    new Frame(k).delete_and_lock();
    // Insert the top-level FileVec key into the store
    Vec v = new S3FileVec(k2, size);
    DKV.put(k2, v, fs);
    Frame fr = new Frame(k, new String[]{path}, new Vec[]{v});
    fr.update();
    fr.unlock();
    return k;
  }
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/SubsetChunk.java
package water.fvec;

import water.*;

// A filtered Chunk; passed in the original data and a (chunk-relative) set of
// rows (also in Chunk form, for maximum compression).
public class SubsetChunk extends Chunk {
  final Chunk _data; // All the data
  final Chunk _rows; // The selected rows

  public SubsetChunk( Chunk data, Chunk rows, Vec subset_vec ) {
    _data = data;
    _rows = rows;
    set_len(rows._len);
    _start = rows._start;
    _vec = subset_vec;
    _cidx = rows._cidx;
    _mem = new byte[0];
  }

  @Override public ChunkVisitor processRows(ChunkVisitor nc, int from, int to) {
    int[] rows = _rows.getIntegers(new int[to - from], from, to, -1);
    return _data.processRows(nc, rows);
  }

  @Override public ChunkVisitor processRows(ChunkVisitor nc, int... rows) {
    int[] expandedRows = _rows.getIntegers(new int[rows[rows.length - 1] - rows[0]], rows[0], rows[rows.length - 1], -1);
    int off = rows[0];
    int[] selectedRows = new int[rows.length];
    for (int i = 0; i < rows.length; ++i) selectedRows[i] = expandedRows[rows[i] - off];
    return _data.processRows(nc, selectedRows);
  }

  @Override protected double atd_impl(int idx) { return _data.atd_impl((int) _rows.at8_impl(idx)); }
  @Override protected long at8_impl(int idx) { return _data.at8_impl((int) _rows.at8_impl(idx)); }
  // Returns true if the masterVec is missing, false otherwise
  @Override protected boolean isNA_impl(int idx) { return _data.isNA_impl((int) _rows.at8_impl(idx)); }

  @Override boolean set_impl(int idx, long l)   { return false; }
  @Override boolean set_impl(int idx, double d) { return false; }
  @Override boolean set_impl(int idx, float f)  { return false; }
  @Override boolean setNA_impl(int idx)         { return false; }

  public static AutoBuffer write_impl(SubsetChunk sc, AutoBuffer bb) { throw water.H2O.fail(); }
  @Override protected final void initFromBytes() { throw water.H2O.fail(); }
}
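The read path above is a single level of indirection: a logical index is first resolved through the rows chunk, then looked up in the data chunk. A toy version with plain arrays standing in for Chunks; all names here are illustrative.

// Toy mirror of SubsetChunk.atd_impl: the value at logical index i is data[rows[i]].
public class SubsetReadSketch {
  private final double[] data; // plays the role of the wrapped data Chunk
  private final int[] rows;    // plays the role of the rows Chunk (selected, chunk-relative row numbers)

  public SubsetReadSketch(double[] data, int[] rows) { this.data = data; this.rows = rows; }

  public double atd(int idx) { return data[rows[idx]]; } // mirrors _data.atd_impl((int) _rows.at8_impl(idx))
  public int len() { return rows.length; }               // the subset's length is the number of selected rows
}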
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/SubsetVec.java
package water.fvec; import water.*; /** * A simple wrapper for looking at only a subset of rows */ public class SubsetVec extends WrappedVec { final Key _subsetRowsKey; transient Vec _rows; // Cached copy of the rows-Vec public SubsetVec(Key key, int rowLayout, Key masterVecKey, Key subsetRowsKey) { super(key, rowLayout, masterVecKey); _subsetRowsKey = subsetRowsKey; } public Vec rows() { if( _rows==null ) _rows = DKV.get(_subsetRowsKey).get(); return _rows; } // A subset chunk @Override public Chunk chunkForChunkIdx(int cidx) { Chunk crows = rows().chunkForChunkIdx(cidx); return new SubsetChunk(crows,this,masterVec()); } @Override public Futures remove_impl(Futures fs, boolean cascade) { Keyed.remove(_subsetRowsKey, fs, true); return super.remove_impl(fs, cascade); } /** Write out K/V pairs */ @Override protected AutoBuffer writeAll_impl(AutoBuffer ab) { ab.putKey(_subsetRowsKey); return super.writeAll_impl(ab); } @Override protected Keyed readAll_impl(AutoBuffer ab, Futures fs) { ab.getKey(_subsetRowsKey,fs); return super.readAll_impl(ab,fs); } // static class SubsetChunk extends Chunk { final Chunk _crows; final Vec _masterVec; protected SubsetChunk(Chunk crows, SubsetVec vec, Vec masterVec) { _vec = vec; _masterVec = masterVec; _len = crows._len; _start = crows._start; _crows = crows; _cidx = crows._cidx; } @Override protected double atd_impl(int idx) { long rownum = _crows.at8_impl(idx); return _masterVec.at(rownum); } @Override protected long at8_impl(int idx) { long rownum = _crows.at8_impl(idx); return _masterVec.at8(rownum); } @Override protected boolean isNA_impl(int idx) { long rownum = _crows.at8_impl(idx); return _masterVec.isNA(rownum); } @Override boolean set_impl(int idx, long l) { return false; } @Override boolean set_impl(int idx, double d) { return false; } @Override boolean set_impl(int idx, float f) { return false; } @Override boolean setNA_impl(int idx) { return false; } @Override public ChunkVisitor processRows(ChunkVisitor nc, int from, int to) { throw H2O.unimpl(); } @Override public ChunkVisitor processRows(ChunkVisitor nc, int... rows) { throw H2O.unimpl(); } @Override public boolean hasFloat() { return false; } public static AutoBuffer write_impl(SubsetChunk sc, AutoBuffer bb) { throw H2O.fail(); } @Override protected final void initFromBytes () { throw H2O.fail(); } } }
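SubsetVec adds the Vec-level plumbing: the row indices live in their own Vec (fetched once and cached by rows()), and every element access is forwarded to the master Vec. Its constructor takes internal keys, so the hedged sketch below mimics only the read path with the public Vec API rather than building a SubsetVec directly; the names and values are made up and a running H2O node is assumed:

import water.fvec.Vec;

public class SubsetViewSketch {
  static void demo() {
    // Master data and the selected row numbers, each as an ordinary in-memory Vec.
    Vec master = Vec.makeVec(new double[]{1.5, 2.5, 3.5, 4.5}, Vec.VectorGroup.VG_LEN1.addVec());
    Vec rows   = Vec.makeVec(new double[]{3, 1},               Vec.VectorGroup.VG_LEN1.addVec());
    // Same pattern as the nested SubsetChunk.atd_impl above: master.at(rows.at8(i)).
    for (long i = 0; i < rows.length(); i++)
      System.out.println(master.at(rows.at8(i)));  // prints 4.5 then 2.5
    master.remove();
    rows.remove();
  }
}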
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/TransformWrappedVec.java
package water.fvec; import water.*; import water.rapids.ast.AstPrimitive; import water.rapids.ast.AstRoot; import water.rapids.Env; import water.rapids.ast.params.AstNum; /** * This wrapper pushes a transform down into each chunk so that * transformations will happen on-the-fly. When wrapped and there * are Op instances to be applied, the atd call will supersede the * usual chunk-at retrieval with a "special" atd call. * * Overhead added per element fetch per chunk is another virtual call * per Op per element (per Chunk). As has been noted (see e.g. RollupStats), * virtual calls are expensive, but the memory savings are substantial. * * AutoML can freely transform columns without ramification. * * Each wrapped Vec will track its own transformations, which makes it easy * when generating a POJO. * * A TransformWrappedVec is actually a function of one or more Vec instances. * * This class exists here so that Chunk and NewChunk don't need to become fully public * (since java has no friends). Other packages (not just core H2O) depend on this class! * * * @author spencer */ public class TransformWrappedVec extends WrappedVec { private final Key<Vec>[] _masterVecKeys; private volatile transient Vec[] _masterVecs; private final TransformFactory<?> _tf; public TransformWrappedVec(Key<Vec> key, int rowLayout, TransformFactory<?> fact, Key<Vec>[] masterVecKeys) { super(key, rowLayout, null); _tf = fact; _masterVecKeys = masterVecKeys; DKV.put(this); } @SuppressWarnings("unchecked") public TransformWrappedVec(Vec v, AstPrimitive fun) { this(v.group().addVec(), v._rowLayout, fun, new Key[]{v._key}); } @SuppressWarnings("unchecked") public TransformWrappedVec(Vec[] vecs, TransformFactory<?> fact) { this(vecs[0].group().addVec(), vecs[0]._rowLayout, fact, keys(vecs)); } @SuppressWarnings("unchecked") private static Key<Vec>[] keys(Vec[] vecs) { Key[] keys = new Key[vecs.length]; for (int i = 0; i < vecs.length; i++) keys[i] = vecs[i]._key; return keys; } public TransformWrappedVec(Key<Vec> key, int rowLayout, AstPrimitive fun, Key<Vec>[] masterVecKeys) { this(key, rowLayout, new AstTransformFactory(fun), masterVecKeys); } public Vec makeVec() { Vec v = new MRTask() { @Override public void map(Chunk c, NewChunk nc) { c.extractRows(nc, 0,c._len); } }.doAll(Vec.T_NUM,this).outputFrame().anyVec(); remove(); return v; } @Override public Chunk chunkForChunkIdx(int cidx) { Vec[] masterVecs = _masterVecs; if (masterVecs == null) { masterVecs = new Vec[_masterVecKeys.length]; for (int i = 0; i < masterVecs.length; i++) { DKV.prefetch(_masterVecKeys[i]); } for (int i = 0; i < masterVecs.length; i++) { masterVecs[i] = _masterVecKeys[i].get(); } _masterVecs = masterVecs; // publish fetched Vecs } assert _masterVecs != null; Chunk[] cs = new Chunk[_masterVecs.length]; for (int i = 0; i < cs.length; i++) { assert _masterVecs[i] != null; cs[i] = _masterVecs[i].chunkForChunkIdx(cidx); assert cs[i] != null; } return new TransformWrappedChunk(_tf, this, cs); } @Override public Vec doCopy() { Vec v = new TransformWrappedVec(group().addVec(), _rowLayout, _tf, _masterVecKeys); v.setDomain(domain()==null?null:domain().clone()); return v; } public static class TransformWrappedChunk extends Chunk { public final transient Chunk _c[]; public final transient Transform _t; public final TransformFactory<?> _fact; TransformWrappedChunk(TransformFactory<?> fact, Vec transformWrappedVec, Chunk... 
c) { // set all the chunk fields _c = c; set_len(_c[0]._len); _start = _c[0]._start; _vec = transformWrappedVec; _cidx = _c[0]._cidx; _fact = fact; _t = fact != null ? fact.create(c.length) : null; } @Override public ChunkVisitor processRows(ChunkVisitor nc, int from, int to) { throw H2O.unimpl(); } @Override public ChunkVisitor processRows(ChunkVisitor nc, int... rows) { throw H2O.unimpl(); } // applies the function to a row of doubles @Override public double atd_impl(int idx) { if( null==_fact ) return _c[0].atd(idx); // simple wrapping of 1 vec _t.reset(); for(int i = 0; i < _c.length; i++) _t.setInput(i, _c[i].atd(idx)); return _t.apply(); // Make the call per-row } @Override public long at8_impl(int idx) { throw H2O.unimpl(); } @Override public boolean isNA_impl(int idx) { return Double.isNaN(atd_impl(idx)); } // ouch, not quick! runs thru atd_impl // Returns true if the masterVec is missing, false otherwise @Override public boolean set_impl(int idx, long l) { return false; } @Override public boolean set_impl(int idx, double d) { return false; } @Override public boolean set_impl(int idx, float f) { return false; } @Override public boolean setNA_impl(int idx) { return false; } @Override protected final void initFromBytes () { throw water.H2O.fail(); } public Chunk deepCopy() { return extractRows(new NewChunk(this),0,_len).compress(); } } public interface Transform { void reset(); void setInput(int i, double value); double apply(); } public abstract static class Function1DTransform implements Transform { private double _value; @Override public void reset() { // noop } @Override public void setInput(int i, double value) { assert i == 0; _value = value; } @Override public double apply() { return apply(_value); } protected abstract double apply(double x); } public interface TransformFactory<T extends Freezable> extends Freezable<T> { Transform create(int n_inputs); } private static class AstTransformFactory extends Iced<AstTransformFactory> implements TransformFactory<AstTransformFactory> { private final AstPrimitive _fun; AstTransformFactory(AstPrimitive fun) { _fun = fun; } public AstTransformFactory() { this(null); } @Override public Transform create(int n_inputs) { return new AstTransform(_fun, n_inputs); } } private static class AstTransform implements Transform { private final AstPrimitive _fun; private final AstRoot[] _asts; private final Env _env; AstTransform(AstPrimitive fun, int n) { _fun = fun; _asts = new AstRoot[1 + n]; _asts[0] = _fun; for (int i = 1; i < _asts.length; i++) _asts[i] = new AstNum(0); _env = new Env(null); } @Override public void setInput(int i, double value) { ((AstNum) _asts[i + 1]).setNum(value); } @Override public double apply() { return _fun.apply(_env,_env.stk(),_asts).getNum(); } @Override public void reset() { // no need to do anything } } }
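The TransformFactory / Transform pair above is the extension point: the factory must be Freezable (AstTransformFactory gets this by extending Iced) so it can travel with the wrapped Vec, and each chunk builds its own Transform locally via create(). A hedged sketch, not part of the H2O sources, of a single-input squaring transform materialized into a plain numeric Vec with makeVec(); the class names are made up and a running H2O node is assumed:

import water.Iced;
import water.fvec.TransformWrappedVec;
import water.fvec.Vec;

public class SquareTransformSketch {
  // Extending Iced satisfies the Freezable bound required by TransformFactory.
  public static class SquareFactory extends Iced<SquareFactory>
      implements TransformWrappedVec.TransformFactory<SquareFactory> {
    @Override
    public TransformWrappedVec.Transform create(int n_inputs) {
      // One input column; Function1DTransform handles reset()/setInput() for us.
      return new TransformWrappedVec.Function1DTransform() {
        @Override protected double apply(double x) { return x * x; }
      };
    }
  }

  static Vec squared(Vec v) {
    // Wrap v lazily, then makeVec() runs one pass that writes the squared values
    // into a new numeric Vec and removes the temporary wrapper.
    return new TransformWrappedVec(new Vec[]{v}, new SquareFactory()).makeVec();
  }
}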
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/UploadFileVec.java
package water.fvec; import java.io.IOException; import java.util.Arrays; import java.io.InputStream; import water.*; import water.util.Log; /** Build a Vec by reading from an InputStream */ public class UploadFileVec extends FileVec { int _nchunks; protected UploadFileVec(Key key) { super(key,-1,Value.ICE); } @Override public boolean writable() { return _len==-1; } public void addAndCloseChunk(Chunk c, Futures fs) { assert _len==-1; // Not closed assert (c._vec == null); // Don't try to re-purpose a chunk. c._vec = this; // Attach chunk to this vec. DKV.put(chunkKey(_nchunks++),c,fs,true); // Write updated chunk back into K/V } // Close, and possible replace the prior chunk with a new, larger Chunk public void close(C1NChunk c, int cidx, Futures fs) { assert _len==-1; // Not closed c._vec = this; // Attach chunk to this vec. DKV.put(chunkKey(cidx),c,fs); // Write updated chunk back into K/V long l = _nchunks-1L; _len = l*_chunkSize +c._len; } private boolean checkMissing(int cidx, Value val) { if( val != null ) return true; Log.err("Missing chunk " + cidx + " for " + _key); return false; } @Override public Value chunkIdx( int cidx ) { Value val = DKV.get(chunkKey(cidx)); assert checkMissing(cidx,val); return val; } // --------------------------------------------------------------------------- // Store a file (byte by byte) into a frame. // This file will generally come from a POST through the REST interface. // --------------------------------------------------------------------------- public static class ReadPutStats { public ReadPutStats() {} public long total_chunks; public long total_bytes; } static public Key readPut(String keyname, InputStream is, ReadPutStats stats) throws IOException { return readPut(Key.make(keyname), is, stats); } static public Key readPut(Key k, InputStream is, ReadPutStats stats) throws IOException { return readPut_impl(k, is, stats); } static private Key readPut_impl(Key key, InputStream is, ReadPutStats stats) throws IOException { Log.info("Reading byte InputStream into Frame:"); Log.info(" frameKey: " + key.toString()); Key newVecKey = Vec.newKey(); UploadFileVec uv = null; try { new Frame(key,new String[0],new Vec[0]).delete_and_lock(); uv = new UploadFileVec(newVecKey); assert uv.writable(); Futures fs = new Futures(); byte prev[] = null; byte bytebuf[] = new byte[FileVec.DFLT_CHUNK_SIZE]; int bytesInChunkSoFar = 0; while (true) { int rv = is.read(bytebuf, bytesInChunkSoFar, FileVec.DFLT_CHUNK_SIZE - bytesInChunkSoFar); if (rv < 0) break; bytesInChunkSoFar += rv; if( bytesInChunkSoFar == FileVec.DFLT_CHUNK_SIZE ) { // Write full chunk of size FileVec.CHUNK_SZ. C1NChunk c = new C1NChunk(bytebuf); uv.addAndCloseChunk(c, fs); prev = bytebuf; bytebuf = new byte[FileVec.DFLT_CHUNK_SIZE]; bytesInChunkSoFar = 0; } } if(bytesInChunkSoFar > 0) { // last chunk can be a little smaller byte [] buf2 = Arrays.copyOf(bytebuf,bytesInChunkSoFar); uv.close(new C1NChunk(buf2),uv._nchunks++,fs); } if( stats != null ) { stats.total_chunks = uv.nChunks(); stats.total_bytes = uv.length(); } Log.info(" totalChunks: " + uv.nChunks()); Log.info(" totalBytes: " + uv.length()); DKV.put(newVecKey, uv, fs); fs.blockForPending(); Frame f = new Frame(key,new String[]{"bytes"}, new Vec[]{uv}); f.unlock(); Log.info(" Success."); } catch (IOException e) { // Clean up and do not leak keys. 
Log.err("Exception caught in Frame::readPut; attempting to clean up the new frame and vector", e); Lockable.delete(key); if( uv != null ) uv.remove(newVecKey); Log.err("Frame::readPut cleaned up new frame and vector successfully"); throw e; } return key; } @Override // not supported for now, can do rebalance later public int setChunkSize(Frame fr, int chunkSize) {return _chunkSize;} }
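A hedged usage sketch of readPut above, not part of the H2O sources: it streams an in-memory byte array into the one-column "bytes" Frame that readPut builds. The key name is made up and a running H2O node is assumed:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import water.DKV;
import water.Key;
import water.fvec.Frame;
import water.fvec.UploadFileVec;

public class UploadSketch {
  static Frame upload() throws IOException {
    byte[] payload = "a,b\n1,2\n3,4\n".getBytes(StandardCharsets.UTF_8);
    UploadFileVec.ReadPutStats stats = new UploadFileVec.ReadPutStats();
    Key k = UploadFileVec.readPut("example.bytes", new ByteArrayInputStream(payload), stats); // hypothetical key name
    System.out.println(stats.total_chunks + " chunk(s), " + stats.total_bytes + " byte(s)");
    Frame fr = DKV.get(k).get();
    return fr;
  }
}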
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/Vec.java
package water.fvec; import water.*; import water.nbhm.NonBlockingHashMap; import water.parser.BufferedString; import water.util.*; import java.util.Arrays; import java.util.Random; import java.util.UUID; /** A distributed vector/array/column of uniform data. * * <p>A distributed vector has a count of elements, an element-to-chunk * mapping, a Java-like type (mostly determines rounding on store and * display), and functions to directly load elements without further * indirections. The data is compressed, or backed by disk or both. * * <p>A Vec is a collection of {@link Chunk}s, each of which holds between 1,000 * and 1,000,000 elements. Operations on a Chunk are intended to be * single-threaded; operations on a Vec are intended to be parallel and * distributed on Chunk granularities, with each Chunk being manipulated by a * separate CPU. The standard Map/Reduce ({@link MRTask}) paradigm handles * parallel and distributed Chunk access well. * * <p>Individual elements can be directly accessed like a (very large and * distributed) array - however this is not the fastest way to access the * data. Direct access from Chunks is faster, avoiding several layers of * indirection. In particular accessing a random row from the Vec will force * the containing Chunk data to be cached locally (and network traffic to * bring it local); accessing all rows from a single machine will force all * the Big Data to be pulled local typically resulting in swapping and very * poor performance. The main API is provided for ease of small-data * manipulations and is fairly slow for writing; when touching ALL the data * you are much better off using e.g. {@link MRTask}. * * <p>The main API is {@link #at}, {@link #set}, and {@link #isNA}:<br> * <table class='table table-striped table-bordered' border="1" summary=""> * <tr><th> Returns </th><th> Call </th><th> Missing? </th><th>Notes</th> * <tr><td> {@code double} </td><td>{@link #at} </td><td>{@code NaN} </td><td></td> * <tr><td> {@code long} </td><td>{@link #at8} </td><td> throws </td><td></td> * <tr><td> {@code long} </td><td>{@link #at16l}</td><td> throws </td><td>Low half of 128-bit UUID</td> * <tr><td> {@code long} </td><td>{@link #at16h}</td><td> throws </td><td>High half of 128-bit UUID</td> * <tr><td>{@link BufferedString}</td><td>{@link #atStr}</td><td>{@code null}</td><td>Updates BufferedString in-place and returns it for flow-coding</td> * <tr><td> {@code boolean} </td><td>{@link #isNA} </td><td>{@code true}</td><td></td> * <tr><td> </td><td>{@link #set(long,double)}</td><td>{@code NaN} </td><td></td> * <tr><td> </td><td>{@link #set(long,float)} </td><td>{@code NaN} </td><td>Limited precision takes less memory</td> * <tr><td> </td><td>{@link #set(long,long)} </td><td>Cannot set </td><td></td> * <tr><td> </td><td>{@link #set(long,String)}</td><td>{@code null}</td><td>Convenience wrapper for String</td> * <tr><td> </td<td>{@link #set(long,UUID)}</td><td>{@code null}</td></tr> * <tr><td> </td><td>{@link #setNA(long)} </td><td> </td><td></td> * </table> * * <p>Example manipulating some individual elements:<pre> * double r1 = vec.at(0x123456789L); // Access element 0x1234567889 as a double * double r2 = vec.at(-1); // Throws AIOOBE * long r3 = vec.at8_abs(1); // Element #1, as a long * vec.set(2,r1+r3); // Set element #2, as a double * </pre> * * <p>Vecs have a loosely enforced <em>type</em>: one of numeric, {@link UUID} * or {@link String}. Numeric types are further broken down into integral * ({@code long}) and real ({@code double}) types. 
The {@code categorical} type is * an integral type, with a String mapping side-array. Most of the math * algorithms will treat categoricals as small dense integers, and most categorical * printouts will use the String mapping. Time is another special integral * type: it is represented as milliseconds since the unix epoch, and is mostly * treated as an integral type when doing math but it has special time-based * printout formatting. All types support the notion of a missing element; for * real types this is always NaN. It is an error to attempt to fetch a * missing integral type, and {@link #isNA} must be called first. Integral * types are losslessly compressed. Real types may lose 1 or 2 ULPS due to * compression. * * <p>Reading elements as doubles, or checking for an element missing is * always safe. Reading a missing integral type throws an exception, since * there is no NaN equivalent in the integer domain. <em>Writing</em> to * elements may throw if the backing data is read-only (file backed), and * otherwise is fully supported. * * <p>Note this dangerous scenario: loading a missing value as a double, and * setting it as a long: <pre> * set(row,(long)at(row)); // Danger! *</pre> * The cast from a Double.NaN to a long produces a zero! This code will * silently replace a missing value with a zero. * * <p>Vecs have a lazily computed {@link RollupStats} object and Key. The * RollupStats give fast access to the common metrics: {@link #min}, {@link * #max}, {@link #mean}, {@link #sigma}, the count of missing elements ({@link * #naCnt}) and non-zeros ({@link #nzCnt}), amongst other stats. They are * cleared if the Vec is modified and lazily recomputed after the modified Vec * is closed. Clearing the RollupStats cache is fairly expensive for * individual {@link #set} calls but is easy to amortize over a large count of * writes; i.e., batch writing is efficient. This is normally handled by the * MRTask framework; the {@link Vec.Writer} framework allows * <em>single-threaded</em> efficient batch writing for smaller Vecs. * * <p>Example usage of common stats:<pre> * double mean = vec.mean(); // Vec's mean; first touch computes and caches rollups * double min = vec.min(); // Smallest element; already computed * double max = vec.max(); // Largest element; already computed * double sigma= vec.sigma(); // Standard deviation; already computed * </pre> * * <p>Example: Impute (replace) missing values with the mean. Note that the * use of {@code vec.mean()} in the constructor uses (and computes) the * general RollupStats before the MRTask starts. Setting a value in the Chunk * clears the RollupStats (since setting any value but the mean will change * the mean); they will be recomputed at the next use after the MRTask. * <pre> * new MRTask{} { final double _mean = vec.mean(); * public void map( Chunk chk ) { * for( int row=0; row &lt; chk._len; row++ ) * if( chk.isNA(row) ) chk.set(row,_mean); * } * }.doAll(vec); * </pre> * * <p>Vecs have a {@link Vec.VectorGroup}. Vecs in the same VectorGroup have the * same Chunk and row alignment - that is, Chunks with the same index are * homed to the same Node and have the same count of rows-per-Chunk. {@link * Frame}s are only composed of Vecs of the same VectorGroup (or very small * Vecs) guaranteeing that all elements of each row are homed to the same Node * and set of Chunks - such that a simple {@code for} loop over a set of * Chunks all operates locally. See the example in the {@link Chunk} class. 
* * <p>It is common and cheap to make new Vecs in the same VectorGroup as an * existing Vec and initialized to e.g. zero. Such Vecs are often used as * temps, and usually immediately set to interest values in a later MRTask * pass. * * <p>Example creation of temp Vecs:<pre> * Vec tmp0 = vec.makeZero(); // New Vec with same VectorGroup and layout as vec, filled with zero * Vec tmp1 = vec.makeCon(mean); // Filled with 'mean' * assert tmp1.at(0x123456789)==mean; // All elements filled with 'mean' * for( int i=0; i&lt;100; i++ ) // A little math on the first 100 elements * tmp0.set(i,tmp1.at(i)+17); // ...set into the tmp0 vec * </pre> * * <p>Vec {@link Key}s have a special layout (enforced by the various Vec * constructors) so there is a direct Key mapping from a Vec to a numbered * Chunk and vice-versa. This mapping is crucially used in all sorts of * places, basically representing a global naming scheme across a Vec and the * Chunks that make it up. The basic layout created by {@link #newKey}: * <pre> * byte: 0 1 2 3 4 5 6 7 8 9 10+ * Vec Key layout: Key.VEC -1 vec#grp -1 normal Key bytes; often e.g. a function of original file name * Chunk Key layout: Key.CHK -1 vec#grp chunk# normal Key bytes; often e.g. a function of original file name * RollupStats Key : Key.CHK -1 vec#grp -2 normal Key bytes; often e.g. a function of original file name * Group Key layout: Key.GRP -1 -1 -1 normal Key bytes; often e.g. a function of original file name * ESPC Key layout: Key.GRP -1 -1 -2 normal Key bytes; often e.g. a function of original file name * </pre> * * @author Cliff Click */ public class Vec extends Keyed<Vec> { public interface Holder { Vec vec(); } // Vec internal type: one of T_BAD, T_UUID, T_STR, T_NUM, T_CAT, T_TIME byte _type; // Vec Type /** Element-start per chunk, i.e. the row layout. Defined in the * VectorGroup. This field is dead/ignored in subclasses that are * guaranteed to have fixed-sized chunks such as file-backed Vecs. */ public int _rowLayout; // Carefully set in the constructor and read_impl to be pointer-equals to a // common copy one-per-node. These arrays can get both *very* common // (one-per-Vec at least, sometimes one-per-Chunk), and very large (one per // Chunk, could run into the millions). private transient long _espc[]; // String domain, only for Categorical columns private String[] _domain; // Rollup stats key. Every ask of a rollup stats (e.g. min/mean/max or // bytesize) bounces through the DKV to fetch the latest copy of the Rollups // - lest a Vec.set changes the rollups and we return a stale copy. transient private Key _rollupStatsKey; private boolean _volatile; /** Returns the categorical toString mapping array, or null if not an categorical column. * Not a defensive clone (to expensive to clone; coding error to change the * contents). * @return the categorical / factor / categorical mapping array, or null if not a categorical column */ public String[] domain() { return _domain; } // made no longer final so that InteractionWrappedVec which are _type==T_NUM but have a categorical interaction /** Returns the {@code i}th factor for this categorical column. * @return The {@code i}th factor */ public final String factor( long i ) { return _domain[(int)i]; } /** Set the categorical/factor names. No range-checking on the actual * underlying numeric domain; user is responsible for maintaining a mapping * which is coherent with the Vec contents. 
*/ public final void setDomain(String[] domain) { _domain = domain; if( domain != null ) _type = T_CAT; } /** Returns cardinality for categorical domain or -1 for other types. */ public final int cardinality() { return isCategorical() ? _domain.length : -1; } /** * @return true iff the domain has been truncated (usually during encoding). * @see CreateInteractions#makeDomain(java.util.Map, String[], String[]) */ public final boolean isDomainTruncated(int expectedCardinality) { return cardinality() == expectedCardinality + 1 && CreateInteractions._other.equals(_domain[_domain.length -1]); } // Vec internal type public static final byte T_BAD = 0; // No none-NA rows (triple negative! all NAs or zero rows) public static final byte T_UUID = 1; // UUID public static final byte T_STR = 2; // String public static final byte T_NUM = 3; // Numeric, but not categorical or time public static final byte T_CAT = 4; // Integer, with a categorical/factor String mapping public static final byte T_TIME = 5; // Long msec since the Unix Epoch - with a variety of display/parse options public static final String[] TYPE_STR=new String[] { "BAD", "UUID", "String", "Numeric", "Enum", "Time", "Time", "Time"}; public static final boolean DO_HISTOGRAMS = true; /** True if this is an categorical column. All categorical columns are also * {@link #isInt}, but not vice-versa. * @return true if this is an categorical column. */ public final boolean isCategorical() { assert (_type==T_CAT && _domain!=null) || (_type!=T_CAT && _domain==null) || (_type==T_NUM && this instanceof InteractionWrappedVec && _domain!=null); return _type==T_CAT; } public final double sparseRatio() { return rollupStats()._nzCnt/(double)length(); } /** True if this is a UUID column. * @return true if this is a UUID column. */ public final boolean isUUID (){ return _type==T_UUID; } /** True if this is a String column. * @return true if this is a String column. */ public final boolean isString (){ return _type==T_STR; } /** True if this is a numeric column, excluding categorical and time types. * @return true if this is a numeric column, excluding categorical and time types */ public final boolean isNumeric(){ return _type==T_NUM; } /** True if this is a time column. All time columns are also {@link #isInt}, but * not vice-versa. * @return true if this is a time column. */ public final boolean isTime (){ return _type==T_TIME; } /** Build a numeric-type Vec; the caller understands Chunk layout (via the * {@code espc} array). */ public Vec( Key<Vec> key, int rowLayout) { this(key, rowLayout, null, T_NUM); } /** Build a numeric-type or categorical-type Vec; the caller understands Chunk * layout (via the {@code espc} array); categorical Vecs need to pass the * domain. */ Vec( Key<Vec> key, int rowLayout, String[] domain) { this(key,rowLayout,domain, (domain==null?T_NUM:T_CAT)); } /** Main default constructor; the caller understands Chunk layout (via the * {@code espc} array), plus categorical/factor the {@code domain} (or null for * non-categoricals), and the Vec type. 
*/ public Vec( Key<Vec> key, int rowLayout, String[] domain, byte type ) { super(key); assert key._kb[0]==Key.VEC; assert domain==null || type==T_CAT; assert T_BAD <= type && type <= T_TIME; // Note that T_BAD is allowed for all-NA Vecs _rowLayout = rowLayout; _type = type; _domain = domain; _espc = ESPC.espc(this); } public long[] espc() { if( _espc==null ) _espc = ESPC.espc(this); return _espc; } /** Number of elements in the vector; returned as a {@code long} instead of * an {@code int} because Vecs support more than 2^32 elements. Overridden * by subclasses that compute length in an alternative way, such as * file-backed Vecs. * @return Number of elements in the vector */ public long length() { espc(); return _espc[_espc.length-1]; } /** Number of chunks, returned as an {@code int} - Chunk count is limited by * the max size of a Java {@code long[]}. Overridden by subclasses that * compute chunks in an alternative way, such as file-backed Vecs. * @return Number of chunks */ public int nChunks() { return espc().length-1; } /** * Number of non-empty chunks, also see {@link #nChunks} * @return Number of non-empty chunks */ public int nonEmptyChunks() { int nonEmptyCnt = nChunks(); for (int i = 1; i < _espc.length; i++) if (_espc[i-1] == _espc[i]) nonEmptyCnt--; return nonEmptyCnt; } /** Convert a chunk-index into a starting row #. For constant-sized chunks * this is a little shift-and-add math. For variable-sized chunks this is a * table lookup. */ long chunk2StartElem( int cidx ) { return espc()[cidx]; } /** Number of rows in chunk. Does not fetch chunk content. */ public int chunkLen( int cidx ) { espc(); return (int) (_espc[cidx + 1] - _espc[cidx]); } /** Check that row-layouts are compatible. */ public boolean isCompatibleWith(Vec v) { // Vecs are compatible iff they have same group and same espc (i.e. same length and same chunk-distribution) return (espc() == v.espc() || Arrays.equals(_espc, v._espc)) && (VectorGroup.sameGroup(this, v) || length() < 1e3); } /** Default read/write behavior for Vecs. File-backed Vecs are read-only. */ boolean readable() { return true ; } /** Default read/write behavior for Vecs. AppendableVecs are write-only. */ boolean writable() { return true; } public void setBad() { _type = T_BAD; } /** Get the column type. */ public byte get_type() { return _type; } public String get_type_str() { return TYPE_STR[_type]; } public boolean isBinary(){ RollupStats rs = rollupStats(); return rs._isInt && rs._mins[0] >= 0 && rs._maxs[0] <= 1; } /** * Strict version of binary check * @param strict If true check also -1/1 case and if the vec is constant * @return true if the vector is binary */ public boolean isBinary(boolean strict){ if(strict) { return (isBinary() || isBinaryOnes()) && !isConst(); } return isBinary(); } /** * Check binary vector case -1/1 * @return true if the vector is consist of -1/1 only */ public boolean isBinaryOnes(){ RollupStats rs = rollupStats(); long zeroCount = rs._rows - rs._nzCnt; return rs._isInt && rs._mins[0] >= -1 && rs._maxs[0] <= 1 && zeroCount == 0; } // ======= Create zero/constant Vecs ====== /** Make a new zero-filled vec **/ public static Vec makeZero( long len, boolean redistribute ) { return makeCon(0L,len,redistribute); } /** Make a new zero-filled vector with the given row count. * @return New zero-filled vector with the given row count. 
*/ public static Vec makeZero( long len ) { return makeZero(len, T_NUM); } public static Vec makeOne( long len ) { return makeOne(len, T_NUM); } public static Vec makeOne(long len, byte typeCode) { return makeCon(1.0, len, true, typeCode); } public static Vec makeZero(long len, byte typeCode) { return makeCon(0.0, len, true, typeCode); } /** Make a new constant vector with the given row count, and redistribute the data * evenly around the cluster. * @param x The value with which to fill the Vec. * @param len Number of rows. * @return New cosntant vector with the given len. */ public static Vec makeCon(double x, long len) { return makeCon(x,len,true); } /** * Make a new constant vector of the specified type. * @param x The value with which to fill the Vec. * @param len The number of rows in the produced Vec. * @param type Type of the Vec to construct. */ public static Vec makeCon(double x, long len, byte type) { int log_rows_per_chunk = FileVec.DFLT_LOG2_CHUNK_SIZE; return makeCon(x, len, log_rows_per_chunk, true, type); } /** Make a new constant vector with the given row count. * @return New constant vector with the given row count. */ public static Vec makeCon(double x, long len, boolean redistribute) { return makeCon(x,len,redistribute, T_NUM); } public static Vec makeCon(double x, long len, boolean redistribute, byte typeCode) { int log_rows_per_chunk = FileVec.DFLT_LOG2_CHUNK_SIZE; return makeCon(x,len,log_rows_per_chunk,redistribute, typeCode); } /** Make a new constant vector with the given row count, and redistribute the data evenly * around the cluster. * @return New constant vector with the given row count. */ public static Vec makeCon(double x, long len, int log_rows_per_chunk) { return makeCon(x,len,log_rows_per_chunk,true); } /** * Make a new constant vector with minimal number of chunks. Used for importing SQL tables. * @return New constant vector with the given row count. */ public static Vec makeCon(long totSize, long len) { final int safetyInflationFactor = 8; int nchunks = (int) Math.max(totSize * safetyInflationFactor / Value.MAX, 1); return makeConN(len, nchunks); } /** * Make a new constant vector with fixed number of chunks. * @return New constant vector with the given chunks number. */ public static Vec makeConN(long len, int nchunks) { long[] espc = new long[nchunks+1]; espc[0] = 0; for( int i=1; i<nchunks; i++ ) espc[i] = espc[i-1]+len/nchunks; espc[nchunks] = len; VectorGroup vg = VectorGroup.VG_LEN1; return makeCon(0, vg, ESPC.rowLayout(vg._key, espc), T_NUM); } /** * @return the number of chunks that would be required when creating a Vec with given length and rows */ public static int nChunksFor(long len, int log_rows_per_chunk, boolean redistribute) { int chunks0 = (int)Math.max(1,len>>log_rows_per_chunk); // redistribute = false int chunks1 = (int)Math.min( 4 * H2O.NUMCPUS * H2O.CLOUD.size(), len); // redistribute = true int nchunks = (redistribute && chunks0 < chunks1 && len > 10*chunks1) ? chunks1 : chunks0; return nchunks; } /** Make a new constant vector with the given row count. * @return New constant vector with the given row count. 
*/ public static Vec makeCon(double x, long len, int log_rows_per_chunk, boolean redistribute) { return makeCon(x, len, log_rows_per_chunk, redistribute, T_NUM); } public static Vec makeCon(double x, long len, int log_rows_per_chunk, boolean redistribute, byte type) { final int nchunks = nChunksFor(len, log_rows_per_chunk, redistribute); long[] espc = new long[nchunks+1]; espc[0] = 0; for( int i=1; i<nchunks; i++ ) espc[i] = redistribute ? espc[i-1]+len/nchunks : ((long)i)<<log_rows_per_chunk; espc[nchunks] = len; VectorGroup vg = VectorGroup.VG_LEN1; return makeCon(x, vg, ESPC.rowLayout(vg._key, espc), type); } public Vec [] makeDoubles(int n, double [] values) { Key [] keys = group().addVecs(n); Vec [] res = new Vec[n]; for(int i = 0; i < n; ++i) res[i] = new Vec(keys[i],_rowLayout); fillDoubleChunks(this,res, values); Futures fs = new Futures(); for(Vec v:res) DKV.put(v,fs); fs.blockForPending(); // System.out.println("made vecs " + Arrays.toString(res)); return res; } private static void fillDoubleChunks(Vec v, final Vec[] ds, final double [] values){ new MRTask(){ public void map(Chunk c){ for(int i = 0; i < ds.length; ++i) DKV.put(ds[i].chunkKey(c.cidx()),new C0DChunk(values[i],c._len << 3)); } }.doAll(v); } /** Make a new vector with the same size and data layout as the current one, * and initialized to zero. * @return A new vector with the same size and data layout as the current one, * and initialized to zero. */ public Vec makeZero() { return makeCon(0, null, group(), _rowLayout); } /** A new vector with the same size and data layout as the current one, and * initialized to zero, with the given categorical domain. * @return A new vector with the same size and data layout as the current * one, and initialized to zero, with the given categorical domain. */ public Vec makeZero(String[] domain) { return makeCon(0, domain, group(), _rowLayout); } /** A new vector which is a copy of {@code this} one. * @return a copy of the vector. */ public Vec makeCopy() { return makeCopy(domain()); } /** A new vector which is a copy of {@code this} one. * @return a copy of the vector. */ public Vec makeCopy(String[] domain) { final byte type = _type == T_CAT && domain == null ? T_NUM : _type; // convenience for dropping a domain return makeCopy(domain, type); } public Vec makeCopy(String[] domain, byte type) { if (domain == null && type == T_CAT) { throw new IllegalArgumentException("Desired Vec type is Categorical but not domain provided."); } Vec v = doCopy(); v._domain = domain; v._type = type; DKV.put(v); return v; } public Vec doCopy() { final Vec v = new Vec(group().addVec(),_rowLayout); new MRTask(){ @Override public void map(Chunk c){ Chunk c2 = c.deepCopy(); DKV.put(v.chunkKey(c.cidx()), c2, _fs); } }.doAll(this); return v; } public static Vec makeCon( final long l, String[] domain, VectorGroup group, int rowLayout ) { return makeCon(l, domain, group, rowLayout, domain == null? 
T_NUM : T_CAT); } private static Vec makeCon( final long l, String[] domain, VectorGroup group, int rowLayout, byte type ) { final Vec v0 = new Vec(group.addVec(), rowLayout, domain, type); final int nchunks = v0.nChunks(); new MRTask() { // Body of all zero chunks @Override protected void setupLocal() { for( int i=0; i<nchunks; i++ ) { Key k = v0.chunkKey(i); if( k.home() ) DKV.put(k,new C0LChunk(l,v0.chunkLen(i)),_fs); } } }.doAllNodes(); DKV.put(v0._key, v0); // Header last return v0; } public static Vec makeTimeVec(double[] vals, Key<Vec> vecKey){ if (vecKey == null) vecKey = Vec.VectorGroup.VG_LEN1.addVec(); int layout = ESPC.rowLayout(vecKey, new long[]{0, vals.length}); Vec v = new Vec(vecKey, layout, null, Vec.T_TIME); NewChunk nc = new NewChunk(v, 0); Futures fs = new Futures(); for (double d: vals) nc.addNum(d); nc.close(fs); DKV.put(v._key, v, fs); fs.blockForPending(); return v; } public static Vec makeVec(double [] vals, Key<Vec> vecKey){ Vec v = new Vec(vecKey,ESPC.rowLayout(vecKey,new long[]{0,vals.length})); NewChunk nc = new NewChunk(v,0); Futures fs = new Futures(); for(double d:vals) nc.addNum(d); nc.close(fs); DKV.put(v._key, v, fs); fs.blockForPending(); return v; } public static Vec makeVec(float [] vals, Key<Vec> vecKey){ Vec v = new Vec(vecKey,ESPC.rowLayout(vecKey,new long[]{0,vals.length})); NewChunk nc = new NewChunk(v,0); Futures fs = new Futures(); for(float d:vals) nc.addNum(d); nc.close(fs); DKV.put(v._key, v, fs); fs.blockForPending(); return v; } public static Vec makeVec(String [] vals, Key<Vec> vecKey){ Vec v = new Vec(vecKey,ESPC.rowLayout(vecKey,new long[]{0,vals.length}),null, Vec.T_STR); NewChunk nc = new NewChunk(v,0); Futures fs = new Futures(); for(String s:vals) nc.addStr(s); nc.close(fs); DKV.put(v._key, v, fs); fs.blockForPending(); return v; } // allow missing (NaN) categorical values public static Vec makeVec(float [] vals, String [] domain, Key<Vec> vecKey){ Vec v = new Vec(vecKey,ESPC.rowLayout(vecKey, new long[]{0, vals.length}), domain); NewChunk nc = new NewChunk(v,0); Futures fs = new Futures(); for(float d:vals) { assert(Float.isNaN(d) || (long)d == d); nc.addNum(d); } nc.close(fs); DKV.put(v._key, v, fs); fs.blockForPending(); return v; } // allow missing (NaN) categorical values public static Vec makeVec(double [] vals, String [] domain, Key<Vec> vecKey){ Vec v = new Vec(vecKey,ESPC.rowLayout(vecKey, new long[]{0, vals.length}), domain); NewChunk nc = new NewChunk(v,0); Futures fs = new Futures(); for(double d:vals) { assert(Double.isNaN(d) || (long)d == d); nc.addNum(d); } nc.close(fs); DKV.put(v._key, v, fs); fs.blockForPending(); return v; } // Warning: longs are lossily converted to doubles in nc.addNum(d) public static Vec makeVec(long [] vals, String [] domain, Key<Vec> vecKey){ Vec v = new Vec(vecKey,ESPC.rowLayout(vecKey, new long[]{0, vals.length}), domain); NewChunk nc = new NewChunk(v,0); Futures fs = new Futures(); for(long d:vals) nc.addNum(d); nc.close(fs); DKV.put(v._key, v, fs); fs.blockForPending(); return v; } public static Vec[] makeCons(double x, long len, int n) { Vec[] vecs = new Vec[n]; for( int i=0; i<n; i++ ) vecs[i] = makeCon(x,len,true); return vecs; } /** Make a new vector with the same size and data layout as the current one, * and initialized to the given constant value. * @return A new vector with the same size and data layout as the current one, * and initialized to the given constant value. 
*/ public Vec makeCon(final double d) { return makeCon(d, group(), _rowLayout, T_NUM); } public Vec makeCon(final double d, byte type) { return makeCon(d, group(), _rowLayout, type); } public Vec makeCon(final byte type) { return makeCon(0, null, group(), _rowLayout, type); } private static Vec makeCon( final double d, VectorGroup group, int rowLayout, byte type ) { if( (long)d==d ) return makeCon((long)d, null, group, rowLayout, type); final Vec v0 = new Vec(group.addVec(), rowLayout, null, type); final int nchunks = v0.nChunks(); new MRTask() { // Body of all zero chunks @Override protected void setupLocal() { for( int i=0; i<nchunks; i++ ) { Key k = v0.chunkKey(i); if( k.home() ) DKV.put(k,new C0DChunk(d,v0.chunkLen(i)),_fs); } } }.doAllNodes(); DKV.put(v0._key, v0); // Header last return v0; } public Vec makeCon(final String s){ return makeCon(s, group(), _rowLayout, T_STR);} private static Vec makeCon( final String s, VectorGroup group, int rowLayout, byte type ) { final Vec v0 = new Vec(group.addVec(), rowLayout, null, type); final int nchunks = v0.nChunks(); new MRTask() { // Body of all zero chunks @Override protected void setupLocal() { for( int i=0; i<nchunks; i++ ) { Key k = v0.chunkKey(i); if( k.home() ) { DKV.put(k,new CStrChunk(s,v0.chunkLen(i)),_fs); } } } }.doAllNodes(); DKV.put(v0._key, v0); // Header last return v0; } public Vec [] makeZeros(int n){return makeZeros(n,null,null);} public Vec [] makeOnes(int n){return makeOnes(n,null,null);} /** * Make a temporary work vec of double [] . * Volatile vecs can only be used locally (chunks do not serialize) and are assumed to change frequently(MRTask call preWiting() by default). * Chunks stores as C8DVolatileChunk - expose data directly as double []. * * @param n number of columns * @return */ public Vec [] makeVolatileDoubles(int n){ Vec [] vecs = makeZeros(n); for(Vec v:vecs) { v._volatile = true; DKV.put(v); } new MRTask(){ @Override public void map(Chunk [] cs){ int len = cs[0].len(); for(int i = 0; i < cs.length; ++i) { cs[i].setVolatile(MemoryManager.malloc8d(len)); } } }.doAll(vecs); return vecs; } /** * Make a temporary work vec of int [] . * Volatile vecs can only be used locally (chunks do not serialize) and are assumed to change frequently(MRTask call preWiting() by default). * Chunks stores as C4VolatileChunk - expose data directly as int []. * * @param cons integer array with constant used to fill each column. * @return */ public Vec [] makeVolatileInts(final int [] cons){ Vec [] vecs = makeZeros(cons.length); for(Vec v:vecs) { v._volatile = true; DKV.put(v); } new MRTask(){ @Override public void map(Chunk [] cs){ int len = cs[0].len(); for(int i = 0; i < cs.length; ++i) { int [] vals = MemoryManager.malloc4(len); Arrays.fill(vals,cons[i]); cs[i].setVolatile(vals); } } }.doAll(vecs); return vecs; } public Vec [] makeZeros(int n, String [][] domain, byte[] types){ return makeCons(n, 0, domain, types);} public Vec [] makeOnes(int n, String [][] domain, byte[] types){ return makeCons(n, 1, domain, types);} // Make a bunch of compatible zero Vectors public Vec[] makeCons(int n, final long l, String[][] domains, byte[] types) { final int nchunks = nChunks(); Key<Vec>[] keys = group().addVecs(n); final Vec[] vs = new Vec[keys.length]; for(int i = 0; i < vs.length; ++i) vs[i] = new Vec(keys[i],_rowLayout, domains== null ? null : domains[i], types == null ? 
T_NUM: types[i]); new MRTask() { @Override protected void setupLocal() { for (Vec v1 : vs) { for (int i = 0; i < nchunks; i++) { Key k = v1.chunkKey(i); if (k.home()) DKV.put(k, new C0LChunk(l, chunkLen(i)), _fs); } } for( Vec v : vs ) if( v._key.home() ) DKV.put(v._key,v,_fs); } }.doAllNodes(); return vs; } /** A Vec from an array of doubles * @param rows Data * @return The Vec */ public static Vec makeCon(Key<Vec> k, double ...rows) { k = k==null?Vec.VectorGroup.VG_LEN1.addVec():k; Futures fs = new Futures(); AppendableVec avec = new AppendableVec(k, T_NUM); NewChunk chunk = new NewChunk(avec, 0); for( double r : rows ) chunk.addNum(r); chunk.close(0, fs); Vec vec = avec.layout_and_close(fs); fs.blockForPending(); return vec; } /** Make a new vector initialized to increasing integers, starting with 1. * @return A new vector initialized to increasing integers, starting with 1. */ public static Vec makeSeq( long len, boolean redistribute) { return new MRTask() { @Override public void map(Chunk[] cs) { for( Chunk c : cs ) for( int r = 0; r < c._len; r++ ) c.set(r, r + 1 + c._start); } }.doAll(makeZero(len, redistribute))._fr.vecs()[0]; } /** Make a new vector initialized to increasing integers, starting with `min`. * @return A new vector initialized to increasing integers, starting with `min`. */ public static Vec makeSeq(final long min, long len) { return new MRTask() { @Override public void map(Chunk[] cs) { for (Chunk c : cs) for (int r = 0; r < c._len; r++) c.set(r, r + min + c._start); } }.doAll(makeZero(len))._fr.vecs()[0]; } /** Make a new vector initialized to increasing integers, starting with `min`. * @return A new vector initialized to increasing integers, starting with `min`. */ public static Vec makeSeq(final long min, long len, boolean redistribute) { return new MRTask() { @Override public void map(Chunk[] cs) { for (Chunk c : cs) for (int r = 0; r < c._len; r++) c.set(r, r + min + c._start); } }.doAll(makeZero(len, redistribute))._fr.vecs()[0]; } /** Make a new vector initialized to increasing integers mod {@code repeat}. * @return A new vector initialized to increasing integers mod {@code repeat}. 
*/ public static Vec makeRepSeq( long len, final long repeat ) { return new MRTask() { @Override public void map(Chunk[] cs) { for( Chunk c : cs ) for( int r = 0; r < c._len; r++ ) c.set(r, (r + c._start) % repeat); } }.doAll(makeZero(len))._fr.vecs()[0]; } /** Make a new vector initialized to random numbers with the given seed */ public Vec makeRand( final long seed ) { Vec randVec = makeZero(); new MRTask() { @Override public void map(Chunk c){ Random rng = new RandomUtils.PCGRNG(c._start,1); for(int i = 0; i < c._len; ++i) { rng.setSeed(seed+c._start+i); // Determinstic per-row c.set(i, rng.nextFloat()); } } }.doAll(randVec); return randVec; } // ======= Rollup Stats ====== /** Vec's minimum value * @return Vec's minimum value */ public double min() { return mins()[0]; } /** Vec's 5 smallest values * @return Vec's 5 smallest values */ public double[] mins(){ return rollupStats()._mins; } /** Vec's maximum value * @return Vec's maximum value */ public double max() { return maxs()[0]; } /** Vec's 5 largest values * @return Vec's 5 largeest values */ public double[] maxs(){ return rollupStats()._maxs; } /** True if the column contains only a constant value and it is not full of NAs * @return True if the column is constant */ public final boolean isConst() { return min() == max(); } /** True if the column contains only a constant value and it is not full of NAs * @return True if the column is constant */ public final boolean isConst(boolean includeNAs) { if (! isConst()) return false; return !includeNAs || naCnt() == 0; } /** True if the column contains only NAs * @return True if the column contains only NAs */ public final boolean isBad() { return naCnt()==length(); } /** Vecs's mean * @return Vec's mean */ public double mean() { return rollupStats()._mean; } /** Vecs's standard deviation * @return Vec's standard deviation */ public double sigma(){ return rollupStats()._sigma; } /** Vecs's mode * @return Vec's mode */ public int mode() { if (!isCategorical()) throw H2O.unimpl(); long[] bins = bins(); return ArrayUtils.maxIndex(bins); } /** Count of missing elements * @return Count of missing elements */ public long naCnt() { return rollupStats()._naCnt; } /** Count of non-zero elements * @return Count of non-zero elements */ public long nzCnt() { return rollupStats()._nzCnt; } /** Count of positive infinities * @return Count of positive infinities */ public long pinfs() { return rollupStats()._pinfs; } /** Count of negative infinities * @return Count of negative infinities */ public long ninfs() { return rollupStats()._ninfs; } /** <b>isInt</b> is a property of numeric Vecs and not a type; this * property can be changed by assigning non-integer values into the Vec (or * restored by overwriting non-integer values with integers). This is a * strong type for {@link #isCategorical} and {@link #isTime} Vecs. * @return true if the Vec is all integers */ public boolean isInt(){return rollupStats()._isInt; } /** Size of compressed vector data. */ public long byteSize(){return rollupStats()._size; } /** Default percentiles for approximate (single-pass) quantile computation (histogram-based). */ public static final double PERCENTILES[] = {0.001,0.01,0.1,0.2,0.25,0.3,1.0/3.0,0.4,0.5,0.6,2.0/3.0,0.7,0.75,0.8,0.9,0.99,0.999}; /** A simple and cheap histogram of the Vec, useful for getting a broad * overview of the data. Each bin is row-counts for the bin's range. The * bin's range is computed from {@link #base} and {@link #stride}. The * histogram is computed on first use and cached thereafter. 
* @return A set of histogram bins, or null for String columns */ public long[] bins() { return RollupStats.get(this, true)._bins; } /** Optimistically return the histogram bins, or null if not computed * @return the histogram bins, or null if not computed */ public long[] lazy_bins() { return rollupStats()._bins; } /** The {@code base} for a simple and cheap histogram of the Vec, useful * for getting a broad overview of the data. This returns the base of * {@code bins()[0]}. * @return the base of {@code bins()[0]} */ public double base() { return RollupStats.get(this,true).h_base(); } /** The {@code stride} for a a simple and cheap histogram of the Vec, useful * for getting a broad overview of the data. This returns the stride * between any two bins. * @return the stride between any two bins */ public double stride() { return RollupStats.get(this,true).h_stride(); } /** A simple and cheap percentiles of the Vec, useful for getting a broad * overview of the data. The specific percentiles are take from {@link #PERCENTILES}. * @return A set of percentiles */ public double[] pctiles() { return RollupStats.get(this, true)._pctiles; } /** Compute the roll-up stats as-needed */ private RollupStats rollupStats() { return RollupStats.get(this); } public void startRollupStats(Futures fs) { startRollupStats(fs,false);} /** * Check if we have local cached copy of basic Vec stats (including histogram if requested) and if not start task to compute and fetch them; * useful when launching a bunch of them in parallel to avoid single threaded execution later (e.g. for-loop accessing min/max of every vec in a frame). * * Does *not* guarantee rollup stats will be cached locally when done since there may be racy changes to vec which will flush local caches. * * @param fs Futures allow to wait for this task to finish. * @param doHisto Also compute histogram, requires second pass over data amd is not computed by default. * */ public void startRollupStats(Futures fs, boolean doHisto) { RollupStats.start(this,fs,doHisto); } /** A high-quality 64-bit checksum of the Vec's content, useful for * establishing dataset identity. * @return Checksum of the Vec's content */ @Override protected long checksum_impl() { return rollupStats()._checksum;} public boolean isVolatile() {return _volatile;} private static class SetMutating extends TAtomic<RollupStats> { @Override protected RollupStats atomic(RollupStats rs) { return rs != null && rs.isMutating() ? null : RollupStats.makeMutating(); } } /** Begin writing into this Vec. Immediately clears all the rollup stats * ({@link #min}, {@link #max}, {@link #mean}, etc) since such values are * not meaningful while the Vec is being actively modified. Can be called * repeatedly. Per-chunk row-counts will not be changing, just row * contents. */ public void preWriting( ) { if( !writable() ) throw new IllegalArgumentException("Vector not writable"); setMutating(rollupStatsKey()); } /** * Marks the Vec as mutating. Vec needs to be marked as mutating whenever * it is modified ({@link #preWriting()}) or removed ({@link Keyed#remove_impl(Futures, boolean)}). */ private static void setMutating(Key rskey) { Value val = DKV.get(rskey); if( val != null ) { RollupStats rs = val.get(RollupStats.class); if( rs.isMutating() ) return; // Vector already locked against rollups } // Set rollups to "vector isMutating" atomically. new SetMutating().invoke(rskey); } /** Stop writing into this Vec. Rollup stats will again (lazily) be * computed. 
*/ public Futures postWrite( Futures fs ) { // Get the latest rollups *directly* (do not compute them!). if (writable()) { // skip this for immutable vecs (like FileVec) final Key rskey = rollupStatsKey(); Value val = DKV.get(rollupStatsKey()); if (val != null) { RollupStats rs = val.get(RollupStats.class); if (rs.isMutating()) // Vector was mutating, is now allowed for rollups DKV.remove(rskey, fs);// Removing will cause them to be rebuilt, on demand } } return fs; // Flow-coding } // ======= Key and Chunk Management ====== /** Convert a row# to a chunk#. For constant-sized chunks this is a little * shift-and-add math. For variable-sized chunks this is a binary search, * with a sane API (JDK has an insane API). Overridden by subclasses that * compute chunks in an alternative way, such as file-backed Vecs. */ public int elem2ChunkIdx( long i ) { if( !(0 <= i && i < length()) ) throw new ArrayIndexOutOfBoundsException("0 <= "+i+" < "+length()); long[] espc = espc(); // Preload int lo=0, hi = nChunks(); while( lo < hi-1 ) { int mid = (hi+lo)>>>1; if( i < espc[mid] ) hi = mid; else lo = mid; } while( espc[lo+1] == i ) lo++; return lo; } /** Get a Vec Key from Chunk Key, without loading the Chunk. * @return the Vec Key for the Chunk Key */ public static Key getVecKey( Key chk_key ) { assert chk_key._kb[0]==Key.CHK; byte [] bits = chk_key._kb.clone(); bits[0] = Key.VEC; UnsafeUtils.set4(bits, 6, -1); // chunk# return Key.make(bits); } public transient int [] _cids; // local chunk ids /** Get a Chunk Key from a chunk-index. Basically the index-to-key map. * @return Chunk Key from a chunk-index */ public Key chunkKey(int cidx ) { return chunkKey(_key,cidx); } /** Get a Chunk Key from a chunk-index and a Vec Key, without needing the * actual Vec object. Basically the index-to-key map. * @return Chunk Key from a chunk-index and Vec Key */ public static Key chunkKey(Key veckey, int cidx ) { byte [] bits = veckey._kb.clone(); bits[0] = Key.CHK; UnsafeUtils.set4(bits, 6, cidx); // chunk# return Key.make(bits); } // Filled in lazily and racily... but all writers write the exact identical Key public Key rollupStatsKey() { if( _rollupStatsKey==null ) _rollupStatsKey=chunkKey(-2); return _rollupStatsKey; } /** Get a Chunk's Value by index. Basically the index-to-key map, plus the * {@code DKV.get()}. Warning: this pulls the data locally; using this call * on every Chunk index on the same node will probably trigger an OOM! */ Value chunkIdx( int cidx ) { Value val = DKV.get(chunkKey(cidx)); if (val == null) { boolean vecExists = DKV.get(_key) != null; // does that Vec even (still) exist? String vecInfo = (vecExists ? "is in DKV" : "is not in DKV") + "; home=" + _key.home_node() + "; self=" + H2O.SELF; throw new IllegalStateException("Missing chunk " + cidx + " for vector " + _key + "; Vec info: " + vecInfo); } return val; } /** Return the next Chunk, or null if at end. Mostly useful for parsers or * optimized stencil calculations that want to "roll off the end" of a * Chunk, but in a highly optimized way. */ Chunk nextChunk( Chunk prior ) { int cidx = elem2ChunkIdx(prior._start)+1; return cidx < nChunks() ? chunkForChunkIdx(cidx) : null; } /** Make a new random Key that fits the requirements for a Vec key. * @return A new random Vec Key */ public static Key<Vec> newKey(){return newKey(Key.make());} /** Internally used to help build Vec and Chunk Keys; public to help * PersistNFS build file mappings. Not intended as a public field. 
*/ public static final int KEY_PREFIX_LEN = 4+4+1+1; /** Make a new Key that fits the requirements for a Vec key, based on the * passed-in key. Used to make Vecs that back over e.g. disk files. */ static Key<Vec> newKey(Key k) { byte [] kb = k._kb; byte [] bits = MemoryManager.malloc1(kb.length + KEY_PREFIX_LEN); bits[0] = Key.VEC; bits[1] = -1; // Not homed UnsafeUtils.set4(bits,2,0); // new group, so we're the first vector UnsafeUtils.set4(bits,6,-1); // 0xFFFFFFFF in the chunk# area System.arraycopy(kb, 0, bits, 4 + 4 + 1 + 1, kb.length); return Key.make(bits); } /** Make a ESPC-group key. */ private static Key espcKey(Key key) { byte [] bits = key._kb.clone(); bits[0] = Key.GRP; UnsafeUtils.set4(bits, 2, -1); UnsafeUtils.set4(bits, 6, -2); return Key.make(bits); } /** Make a Vector-group key. */ private Key groupKey(){ byte [] bits = _key._kb.clone(); bits[0] = Key.GRP; UnsafeUtils.set4(bits, 2, -1); UnsafeUtils.set4(bits, 6, -1); return Key.make(bits); } /** Get the group this vector belongs to. In case of a group with only one * vector, the object actually does not exist in KV store. This is the ONLY * place VectorGroups are fetched. * @return VectorGroup this vector belongs to */ public final VectorGroup group() { Key gKey = groupKey(); Value v = DKV.get(gKey); // if no group exists we have to create one return v==null ? new VectorGroup(gKey,1) : (VectorGroup)v.get(); } /** The Chunk for a chunk#. Warning: this pulls the data locally; using this * call on every Chunk index on the same node will probably trigger an OOM! * @return Chunk for a chunk# */ public Chunk chunkForChunkIdx(int cidx) { long start = chunk2StartElem(cidx); // Chunk# to chunk starting element# Value dvec = chunkIdx(cidx); // Chunk# to chunk data Chunk c = dvec.get(); // Chunk data to compression wrapper long cstart = c._start; // Read once, since racily filled in Vec v = c._vec; int tcidx = c._cidx; if( cstart == start && v == this && tcidx == cidx) return c; // Already filled-in c._vec = this; // Fields not filled in by unpacking from Value c._start = start; c._cidx = cidx; return c; } /** The Chunk for a row#. Warning: this pulls the data locally; using this * call on every Chunk index on the same node will probably trigger an OOM! * @return Chunk for a row# */ public final Chunk chunkForRow(long i) { return chunkForChunkIdx(elem2ChunkIdx(i)); } // ======= Direct Data Accessors ====== /** Fetch element the slow way, as a long. Floating point values are * silently rounded to an integer. Throws if the value is missing. * @return {@code i}th element as a long, or throw if missing */ public final long at8( long i ) { return chunkForRow(i).at8_abs(i); } /** Fetch element the slow way, as a double, or Double.NaN is missing. * @return {@code i}th element as a double, or Double.NaN if missing */ public final double at( long i ) { return chunkForRow(i).at_abs(i); } /** Fetch the missing-status the slow way. * @return the missing-status the slow way */ public final boolean isNA(long row){ return chunkForRow(row).isNA_abs(row); } /** Fetch element the slow way, as the low half of a UUID. Throws if the * value is missing or not a UUID. * @return {@code i}th element as a UUID low half, or throw if missing */ public final long at16l( long i ) { return chunkForRow(i).at16l_abs(i); } /** Fetch element the slow way, as the high half of a UUID. Throws if the * value is missing or not a UUID. 
* @return {@code i}th element as a UUID high half, or throw if missing */ public final long at16h( long i ) { return chunkForRow(i).at16h_abs(i); } /** Fetch element the slow way, as a {@link BufferedString} or null if missing. * Throws if the value is not a String. BufferedStrings are String-like * objects than can be reused in-place, which is much more efficient than * constructing Strings. * @return {@code i}th element as {@link BufferedString} or null if missing, or * throw if not a String */ public final BufferedString atStr( BufferedString bStr, long i ) { if (isCategorical()) { //for categorical vecs, return the factor level if (isNA(i)) return null; return bStr.set(_domain[(int)at8(i)]); } else return chunkForRow(i).atStr_abs(bStr, i); } public String stringAt(long i) { return String.valueOf(atStr(new BufferedString(), i)); } /** A more efficient way to read randomly to a Vec - still single-threaded, * but much faster than Vec.at(i). Limited to single-threaded * single-machine reads. * * Usage: * Vec.Reader vr = vec.new Reader(); * x = vr.at(0); * y = vr.at(1); * z = vr.at(2); */ public final class Reader { private Chunk _cache; private Chunk chk(long i) { Chunk c = _cache; return (c != null && c.chk2()==null && c._start <= i && i < c._start+ c._len) ? c : (_cache = chunkForRow(i)); } public final long at8( long i ) { return chk(i). at8_abs(i); } public final double at( long i ) { return chk(i). at_abs(i); } public final boolean isNA(long i ) { return chk(i).isNA_abs(i); } public final BufferedString atStr(BufferedString sb, long i) { return chk(i).atStr_abs(sb, i); } public final long length() { return Vec.this.length(); } } /** Write element the slow way, as a long. There is no way to write a * missing value with this call. Under rare circumstances this can throw: * if the long does not fit in a double (value is larger magnitude than * 2^52), AND float values are stored in Vec. In this case, there is no * common compatible data representation. */ public final void set( long i, long l) { Chunk ck = chunkForRow(i); ck.set_abs(i, l); postWrite(ck.close(ck.cidx(), new Futures())).blockForPending(); } /** Write element the slow way, as a double. Double.NaN will be treated as a * set of a missing element. */ public final void set( long i, double d) { Chunk ck = chunkForRow(i); ck.set_abs(i, d); postWrite(ck.close(ck.cidx(), new Futures())).blockForPending(); } /** Write element the slow way, as a float. Float.NaN will be treated as a * set of a missing element. */ public final void set( long i, float f) { Chunk ck = chunkForRow(i); ck.set_abs(i, f); postWrite(ck.close(ck.cidx(), new Futures())).blockForPending(); } /** Set the element as missing the slow way. */ public final void setNA( long i ) { Chunk ck = chunkForRow(i); ck.setNA_abs(i); postWrite(ck.close(ck.cidx(), new Futures())).blockForPending(); } /** Write element the slow way, as a String. {@code null} will be treated as a * set of a missing element. */ public final void set( long i, String str) { Chunk ck = chunkForRow(i); ck.set_abs(i, str); postWrite(ck.close(ck.cidx(), new Futures())).blockForPending(); } public final void set(long i, UUID uuid) { Chunk ck = chunkForRow(i); ck.set_abs(i, uuid); postWrite(ck.close(ck.cidx(), new Futures())).blockForPending(); } /** A more efficient way to write randomly to a Vec - still single-threaded, * still slow, but much faster than Vec.set(). Limited to single-threaded * single-machine writes. 
* * Usage: * try( Vec.Writer vw = vec.open() ) { * vw.set(0, 3.32); * vw.set(1, 4.32); * vw.set(2, 5.32); * } */ public final class Writer implements java.io.Closeable { private Chunk _cache; private Chunk chk(long i) { Chunk c = _cache; return (c != null && c.chk2()==null && c._start <= i && i < c._start+ c._len) ? c : (_cache = chunkForRow(i)); } private Writer() { preWriting(); } public final void set( long i, long l) { chk(i).set_abs(i, l); } public final void set( long i, double d) { chk(i).set_abs(i, d); } public final void set( long i, float f) { chk(i).set_abs(i, f); } public final void setNA( long i ) { chk(i).setNA_abs(i); } public final void set( long i,String str){ chk(i).set_abs(i, str); } public Futures close(Futures fs) { return postWrite(closeLocal(fs)); } public void close() { close(new Futures()).blockForPending(); } } /** Create a writer for bulk serial writes into this Vec. * @return A Writer for bulk serial writes */ public final Writer open() { return new Writer(); } /** Close all chunks that are local (not just the ones that are homed) * This should only be called from a Writer object */ private Futures closeLocal(Futures fs) { int nc = nChunks(); for( int i=0; i<nc; i++ ) if( H2O.containsKey(chunkKey(i)) ) chunkForChunkIdx(i).close(i, fs); return fs; // Flow-coding } /** Pretty print the Vec: {@code [#elems, min/mean/max]{chunks,...}} * @return Brief string representation of a Vec */ @Override public String toString() { RollupStats rs = RollupStats.getOrNull(this,rollupStatsKey()); String s = "["+length()+(rs == null ? ", {" : ","+rs._mins[0]+"/"+rs._mean+"/"+rs._maxs[0]+", "+PrettyPrint.bytes(rs._size)+", {"); int nc = nChunks(); for( int i=0; i<nc; i++ ) { s += chunkKey(i).home_node()+":"+chunk2StartElem(i)+":"; // CNC: Bad plan to load remote data during a toString... messes up debug printing // Stupidly chunkForChunkIdx loads all data locally // s += chunkForChunkIdx(i).getClass().getSimpleName().replaceAll("Chunk","")+", "; } return s+"}]"; } /** * Turn this Vec into a TwoDimTable of a 1-column Frame * @param off index of first element to include * @param len number of elements to include * @return TwoDimTable that can be toString()'ed, etc. */ public TwoDimTable toTwoDimTable(int off, int len) { return new Frame(this).toTwoDimTable(off, len); } /** * Turn this Vec into a TwoDimTable of a 1-column Frame * @return TwoDimTable that can be toString()'ed, etc. */ public TwoDimTable toTwoDimTable() { int len = (int)Math.min(Integer.MAX_VALUE, length()); return new Frame(this).toTwoDimTable(0,len); } /** * Convenience method for converting to a categorical vector. * @return A categorical vector based on the contents of the original vector. */ public Vec toCategoricalVec() {return VecUtils.toCategoricalVec(this);} public Vec toIntegerVec() {return VecUtils.toIntegerVec(this);} public void asDouble() { assert _type==T_NUM; rollupStats()._isInt=false; } /** * Convenience method for converting to a string vector. * @return A string vector based on the contents of the original vector. */ public Vec toStringVec() {return VecUtils.toStringVec(this);} /** * Convenience method for converting to a numeric vector. * @return A numeric vector based on the contents of the original vector. */ public Vec toNumericVec() {return VecUtils.toNumericVec(this);} /** True if two Vecs are equal. Checks for equal-Keys only (so it is fast) * and not equal-contents. 
* @return True if two Vecs are equal */ @Override public boolean equals( Object o ) { return o instanceof Vec && ((Vec)o)._key.equals(_key); } /** Vec's hashcode, which is just the Vec Key hashcode. * @return Vec's hashcode */ @Override public int hashCode() { return _key.hashCode(); } /** Remove associated Keys when this guy removes. For Vecs, remove all * associated Chunks. * @return Passed in Futures for flow-coding */ @Override public Futures remove_impl(Futures fs, boolean cascade) { bulk_remove(new Key[]{_key}, nChunks()); return fs; } @Override protected Futures remove_self_key_impl(Futures fs) { // nothing to do, Vec removal is handled in a special way in bulk_remove return fs; } public static void bulk_remove(final Key<Vec>[] keys, final int ncs ) { // Need to mark the Vec as mutating to make sure that no running computations of RollupStats will // re-insert the rollups into DKV after they are deleted in bulk_remove(Key, int). Futures fs = new Futures(); for (Key key : keys) fs.add(new SetMutating().fork(chunkKey(key,-2))); fs.blockForPending(); // Bulk dumb local remove - no JMM, no ordering, no safety. // Remove Vecs everywhere first - important! (this should make simultaneously running Rollups to fail) new MRTask() { @Override public void setupLocal() { for( Key k : keys ) if( k != null ) Vec.bulk_remove_vec(k, ncs); } }.doAllNodes(); // Remove RollupStats new MRTask() { @Override public void setupLocal() { for( Key k : keys ) if( k != null ) H2O.raw_remove(chunkKey(k,-2)); } }.doAllNodes(); } // Bulk remove: removes LOCAL keys only, without regard to total visibility. // Must be run in parallel on all nodes to preserve semantics, completely // removing the Vec without any JMM communication. private static void bulk_remove_vec( Key vkey, int ncs ) { for( int i=0; i<ncs; i++ ) { Key kc = chunkKey(vkey,i); H2O.raw_remove(kc); } H2O.raw_remove(vkey); } /** Write out K/V pairs */ @Override protected AutoBuffer writeAll_impl(AutoBuffer ab) { int ncs = nChunks(); for( int i=0; i<ncs; i++ ) { Key ck = chunkKey(i); ab.put(DKV.getGet(ck)); // Pull all Chunks local if( !ck.home() ) H2O.raw_remove(ck); // Remove the non-local ones as you go } return super.writeAll_impl(ab); } @Override protected Keyed readAll_impl(AutoBuffer ab, Futures fs) { int ncs = nChunks(); for( int i=0; i<ncs; i++ ) DKV.put(chunkKey(i),ab.get(Chunk.class),fs,true); // Push chunk remote; do not cache local return super.readAll_impl(ab,fs); } // ======= Whole Vec Transformations ====== /** Always makes a copy of the given vector which shares the same group as * this Vec. This can be expensive operation since it can force copy of * data among nodes. * * @param vec vector which is intended to be copied * @return a copy of vec which shared the same {@link VectorGroup} with this vector */ public Vec align(final Vec vec) { return new Frame(this).makeCompatible(new Frame(vec),true)[0]; } /** Make a Vec adapting this cal vector to the 'to' categorical Vec. The adapted * CategoricalWrappedVec has 'this' as it's masterVec, but returns results in the 'to' * newDomain (or just past it, if 'this' has elements not appearing in the 'to' * newDomain). 
*/ public Vec adaptTo( String[] domain ) { if(!isBad() && isNumeric() && !ArrayUtils.isInt(domain)) { // try to adapt double domain // treat double domain here - return full vector copy instead of mapping double to ints on the fly in a WrappedVec final int oldDomainLen = domain.length; int nan_cnt = 0; int j = 0; double [] double_domain = MemoryManager.malloc8d(domain.length); for (int i = 0; i < double_domain.length; ++i) try { double_domain[j] = Double.parseDouble(domain[i]); j++; } catch(NumberFormatException ex){nan_cnt++;} if(j == double_domain.length) { // only atempt to adapt if we have fully double domain, (to preserve current behavior for ints, could relax this later) if (j < double_domain.length) double_domain = Arrays.copyOf(double_domain, j); double[] new_double_domain = new VecUtils.CollectDoubleDomain(double_domain, 100000).doAll(this).domain(); if (new_double_domain.length > 0) { int n = domain.length; domain = Arrays.copyOf(domain, domain.length + new_double_domain.length); for (int i = 0; i < new_double_domain.length; ++i) domain[n + i] = String.valueOf(new_double_domain[i]); } Vec res = makeZero(domain); double_domain = MemoryManager.malloc8d(domain.length - nan_cnt); j = 0; final int[] indeces = MemoryManager.malloc4(domain.length - nan_cnt); int[] order_indeces = ArrayUtils.seq(0, indeces.length); for (int i = 0; i < domain.length; ++i) { try { double_domain[j] = Double.parseDouble(domain[i]); indeces[j] = i; j++; } catch (NumberFormatException ex) {/*ignore*/} } if (!ArrayUtils.isSorted(double_domain)) ArrayUtils.sort(order_indeces, double_domain); final double[] sorted_domain_vals = ArrayUtils.select(double_domain, order_indeces); final int[] sorted_indeces = ArrayUtils.select(indeces, order_indeces); new MRTask() { @Override public void map(Chunk c0, Chunk c1) { for (int i = 0; i < c0._len; ++i) { double d = c0.atd(i); if (Double.isNaN(d)) c1.setNA(i); else { c1.set(i, sorted_indeces[Arrays.binarySearch(sorted_domain_vals, d)]); } } } }.doAll(new Vec[]{this, res}); return res; } } return new CategoricalWrappedVec(group().addVec(),_rowLayout,domain,this._key); } /** Class representing the group of vectors. * * Vectors from the same group have same distribution of chunks among nodes. * Each vector is member of exactly one group. Default group of one vector * is created for each vector. Group of each vector can be retrieved by * calling group() method; * * The expected mode of operation is that user wants to add new vectors * matching the source. E.g. parse creates several vectors (one for each * column) which are all colocated and are colocated with the original * bytevector. * * To do this, user should first ask for the set of keys for the new vectors * by calling addVecs method on the target group. * * Vectors in the group will have the same keys except for the prefix which * specifies index of the vector inside the group. The only information the * group object carries is its own key and the number of vectors it * contains (deleted vectors still count). * * Because vectors (and chunks) share the same key-pattern with the group, * default group with only one vector does not have to be actually created, * it is implicit. * * @author tomasnykodym */ public static class VectorGroup extends Keyed<VectorGroup> { /** The common shared vector group for very short vectors */ public static final VectorGroup VG_LEN1 = new VectorGroup(); // The number of Vec keys handed out by the this VectorGroup already. // Updated by overwriting in a TAtomic. 
final int _len; // New empty VectorGroup (no Vecs handed out) public VectorGroup() { super(init_key()); _len = 0; } static private Key init_key() { byte[] bits = new byte[26]; bits[0] = Key.GRP; bits[1] = -1; UnsafeUtils.set4(bits, 2, -1); UnsafeUtils.set4(bits, 6, -1); UUID uu = UUID.randomUUID(); UnsafeUtils.set8(bits,10,uu.getLeastSignificantBits()); UnsafeUtils.set8(bits,18,uu. getMostSignificantBits()); return Key.make(bits); } // Clone an old vector group, setting a new len private VectorGroup(Key key, int newlen) { super(key); _len = newlen; } /** Returns Vec Key from Vec id#. Does NOT allocate a Key id# * @return Vec Key from Vec id# */ public Key<Vec> vecKey(int vecId) { byte [] bits = _key._kb.clone(); bits[0] = Key.VEC; UnsafeUtils.set4(bits,2,vecId); return Key.make(bits); } /** Task to atomically add vectors into existing group. * @author tomasnykodym */ private final static class AddVecs2GroupTsk extends TAtomic<VectorGroup> { final Key _key; final int _n; // INPUT: Keys to allocate; int _offset; // OUTPUT: start of run of keys private AddVecs2GroupTsk(Key key, int n){_key = key; _n = n;} @Override protected VectorGroup atomic(VectorGroup old) { // If the old group is missing, assume it is the default group-of-self // (having 1 ID already allocated for self), not a new group with // zero prior vectors. _offset = old==null ? 1 : old._len; // start of allocated key run return new VectorGroup(_key, _offset+_n); } } /** Reserve a range of keys and return index of first new available key * @return Vec id# of a range of Vec keys in this group */ public int reserveKeys(final int n) { AddVecs2GroupTsk tsk = new AddVecs2GroupTsk(_key, n); tsk.invoke(_key); return tsk._offset; } /** Gets the next n keys of this group. * @param n number of keys to make * @return arrays of unique keys belonging to this group. */ public Key<Vec>[] addVecs(final int n) { int nn = reserveKeys(n); Key<Vec>[] res = (Key<Vec>[])new Key[n]; for( int i = 0; i < n; ++i ) res[i] = vecKey(i + nn); return res; } /** Shortcut for {@code addVecs(1)}. * @see #addVecs(int) * @return a new Vec Key in this group */ public Key<Vec> addVec() { return addVecs(1)[0]; } // ------------------------------------------------- static boolean sameGroup(Vec v1, Vec v2) { byte[] bits1 = v1._key._kb; byte[] bits2 = v2._key._kb; if( bits1.length != bits2.length ) return false; int res = 0; for( int i = KEY_PREFIX_LEN; i < bits1.length; i++ ) res |= bits1[i] ^ bits2[i]; return res == 0; } /** Pretty print the VectorGroup * @return String representation of a VectorGroup */ @Override public String toString() { return "VecGrp "+_key.toString()+", next free="+_len; } // Return current VectorGroup index; used for tests public int len() { return _len; } /** True if two VectorGroups are equal * @return True if two VectorGroups are equal */ @Override public boolean equals( Object o ) { return o instanceof VectorGroup && ((VectorGroup)o)._key.equals(_key); } /** VectorGroups's hashcode * @return VectorGroups's hashcode */ @Override public int hashCode() { return _key.hashCode(); } @Override protected long checksum_impl() { throw H2O.fail(); } // Fail to remove a VectorGroup unless you also remove all related Vecs, // Chunks, Rollups (and any Frame that uses them), etc. 
@Override protected Futures remove_impl(Futures fs, boolean cascade) { throw H2O.fail(); } /** Write out K/V pairs */ @Override protected AutoBuffer writeAll_impl(AutoBuffer ab) { throw H2O.fail(); } @Override protected Keyed readAll_impl(AutoBuffer ab, Futures fs) { throw H2O.unimpl(); } } // --------------------------------------- // Unify ESPC arrays on the local node as much as possible. This is a // similar problem to what TypeMap solves: sometimes I have a rowLayout index // and want the matching ESPC array, and sometimes I have the ESPC array and // want an index. The operation is frequent and must be cached locally, but // must be globally consistent. Hence a "miss" in the local cache means we // need to fetch from global state, and sometimes update the global state // before fetching. public static class ESPC extends Keyed<ESPC> { static private NonBlockingHashMap<Key,ESPC> ESPCS = new NonBlockingHashMap<>(); // Array of Row Layouts (Element Start Per Chunk) ever seen by this // VectorGroup. Shared here, amongst all Vecs using the same row layout // (instead of each of 1000's of Vecs having a copy, each of which is // nChunks long - could be millions). // // Element-start per chunk. Always zero for chunk 0. One more entry than // chunks, so the last entry is the total number of rows. public final long[][] _espcs; private ESPC(Key key, long[][] espcs) { super(key); _espcs = espcs;} // Fetch from the local cache private static ESPC getLocal( Key kespc ) { ESPC local = ESPCS.get(kespc); if( local != null ) return local; ESPCS.putIfAbsent(kespc,new ESPC(kespc,new long[0][])); // Racey, not sure if new or old is returned return ESPCS.get(kespc); } // Fetch from remote, and unify as needed private static ESPC getRemote( ESPC local, Key kespc ) { final ESPC remote = DKV.getGet(kespc); if( remote == null || remote == local ) return local; // No change // Something New? If so, we need to unify the sharable arrays with a // "smashing merge". Every time a remote instance of a ESPC is updated // (to add new ESPC layouts), and it is pulled locally, the new copy // brings with it a complete copy of all ESPC arrays - most of which // already exist locally in the old ESPC instance. Since these arrays // are immutable and monotonically growing, it's safe (and much more // efficient!) to make the new copy use the old copies arrays where // possible. long[][] local_espcs = local ._espcs; long[][] remote_espcs= remote._espcs; // Spin attempting to move the larger remote value into the local cache while( true ) { // Is the remote stale, and the local value already larger? Can happen // if the local is racily updated by another thread, after this thread // reads the remote value (which then gets invalidated, and updated to // a new larger value). if( local_espcs.length >= remote_espcs.length ) return local; // Use my (local, older, more heavily shared) ESPCs where possible. // I.e., the standard remote read will create new copies of all ESPC // arrays, but the *local* copies are heavily shared. All copies are // equal, but using the same shared copies cuts down on copies. System.arraycopy(local._espcs, 0, remote._espcs, 0, local._espcs.length); // Here 'remote' is larger than 'local' (but with a shared common prefix). // Attempt to update local cache with the larger value ESPC res = ESPCS.putIfMatch(kespc,remote,local); // Update local copy with larger // if res==local, then update succeeded, table has 'remote' (the larger object). 
if( res == local ) return remote; // if res!=local, then update failed, and returned 'res' is probably // larger than either remote or local local = res; local_espcs = res._espcs; assert remote_espcs== remote._espcs; // unchanging final field } } /** Get the ESPC for a Vec. Called once per new construction or read_impl. */ public static long[] espc( Vec v ) { final int r = v._rowLayout; if( r == -1 ) return null; // Never was any row layout // Check the local cache final Key kespc = espcKey(v._key); ESPC local = getLocal(kespc); if( r < local._espcs.length ) return local._espcs[r]; // Now try to refresh the local cache from the remote cache final ESPC remote = getRemote( local, kespc); if( r < remote._espcs.length ) return remote._espcs[r]; throw H2O.fail("Vec "+v._key+" asked for layout "+r+", but only "+remote._espcs.length+" layouts defined"); } // Check for a prior matching ESPC private static int find_espc( long[] espc, long[][] espcs ) { // Check for a local pointer-hit first: for( int i=0; i<espcs.length; i++ ) if( espc==espcs[i] ) return i; // Check for a local deep equals next: for( int i=0; i<espcs.length; i++ ) if( espc.length==espcs[i].length && Arrays.equals(espc,espcs[i]) ) return i; return -1; // No match } /** Get the shared ESPC index for this layout. Will return an old layout * if one matches, otherwise will atomically update the ESPC to set * a new layout. The expectation is that new layouts are rare: once per * parse, and perhaps from filtering MRTasks, or data-shuffling. */ public static int rowLayout( Key key, final long[] espc ) { Key kespc = espcKey(key); ESPC local = getLocal(kespc); int idx = find_espc(espc,local._espcs); if( idx != -1 ) return idx; // See if the ESPC is in the LOCAL DKV - if not it might have been // invalidated, and a refetch might get a new larger ESPC with the // desired layout. if( !H2O.containsKey(kespc) ) { local = getRemote(local,kespc); // Fetch remote, merge as needed idx = find_espc(espc, local._espcs); // Retry if( idx != -1 ) return idx; } // Send the ESPC over to the ESPC master, and request it get // inserted. new TAtomic<ESPC>() { @Override public ESPC atomic( ESPC old ) { if( old == null ) return new ESPC(_key,new long[][]{espc}); long[][] espcs = old._espcs; int idx = find_espc(espc,espcs); if( idx != -1 ) return null; // Abort transaction, idx exists; client needs to refresh int len = espcs.length; espcs = Arrays.copyOf(espcs,len+1); espcs[len] = espc; // Insert into array return new ESPC(_key,espcs); } }.invoke(kespc); // Refetch from master, try again ESPC reloaded = getRemote(local,kespc); // Fetch remote, merge as needed idx = find_espc(espc,reloaded._espcs); // Retry assert idx != -1; // Must work now (or else the install failed!) return idx; } public static void clear() { ESPCS.clear(); } @Override protected long checksum_impl() { throw H2O.fail(); } } public static Key setChunkIdx(Key k, int cidx){ UnsafeUtils.set4(k._kb, 6, cidx); // chunk# return k; } public boolean isHomedLocally(int cidx){ return chunkKey(cidx).home(); } }
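For reference, a minimal sketch of the bulk read/write pattern described in the Reader and Writer javadoc above. The helper class, the vector `vec`, and the row count `n` are illustrative assumptions, not part of the library.

import water.fvec.Vec;

// Illustrative helper showing Vec.Reader / Vec.Writer bulk access (single-threaded, single-machine).
public class VecBulkAccessSketch {

  // Sum the first n rows, skipping NAs, using the chunk-caching Reader.
  static double sumFirst(Vec vec, int n) {
    Vec.Reader vr = vec.new Reader();
    double sum = 0;
    for (int i = 0; i < n; i++)
      if (!vr.isNA(i)) sum += vr.at(i);
    return sum;
  }

  // Overwrite the first n rows; postWrite() runs when the Writer closes,
  // so rollup stats are invalidated once instead of on every set() call.
  static void fillFirst(Vec vec, int n, double value) {
    try (Vec.Writer vw = vec.open()) {
      for (int i = 0; i < n; i++)
        vw.set(i, value);
    }
  }
}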
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/WrappedVec.java
package water.fvec;

import water.*;

/**
 * A simple wrapper over another Vec. Transforms either data values or rows.
 */
abstract class WrappedVec extends Vec {
  /** A key for the underlying vector whose values are transformed by this vector. */
  final Key<Vec> _masterVecKey;
  /** Cached instance of the underlying vector. */
  transient Vec _masterVec;

  public WrappedVec(Key<Vec> key, int rowLayout, Key<Vec> masterVecKey) {
    this(key, rowLayout, null, masterVecKey);
  }

  public WrappedVec(Key<Vec> key, int rowLayout, String[] domain, Key<Vec> masterVecKey) {
    super(key, rowLayout, domain);
    _masterVecKey = masterVecKey;
  }

  public Vec masterVec() {
    return _masterVec != null ? _masterVec : (_masterVec = _masterVecKey.get());
  }

  /** Map from chunk-index to Chunk. These wrappers make custom Chunks. */
  public abstract Chunk chunkForChunkIdx(int cidx);
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/persist/FramePersist.java
package water.fvec.persist; import jsr166y.CountedCompleter; import water.*; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.Vec; import water.util.FileUtils; import java.net.URI; import java.util.HashSet; import java.util.Set; import static water.fvec.persist.PersistUtils.*; public class FramePersist { static { // make sure FrameMeta is registered in TypeMap TypeMap.onIce(FrameMeta.class.getName()); } private final Frame frame; public FramePersist(Frame frame) { this.frame = frame; } private static class FrameMeta extends Iced<FrameMeta> { Key<Frame> key; String[] names; Vec[] vecs; long[] espc; int numNodes; FrameMeta(Frame f) { key = f._key; names = f.names(); vecs = f.vecs(); espc = f.anyVec().espc(); numNodes = H2O.CLOUD.size(); } } private static URI getMetaUri(Key key, String dest) { return FileUtils.getURI(dest + "/" + key); } private static URI getDataUri(String metaUri, int cidx) { return FileUtils.getURI(metaUri + "_n" + H2O.SELF.index() + "_c" + cidx); } private SaveFrameDriver setupDriver(String uri, boolean overwrite) { URI metaUri = getMetaUri(frame._key, sanitizeUri(uri)); if (exists(metaUri) && !overwrite) { throw new IllegalArgumentException("File already exists at " + metaUri); } FrameMeta frameMeta = new FrameMeta(frame); write(metaUri, ab -> ab.put(frameMeta)); Job<Frame> job = new Job<>(frame._key, "water.fvec.Frame", "Save frame"); return new SaveFrameDriver(job, frame, metaUri); } public Job<Frame> saveTo(String uri, boolean overwrite) { SaveFrameDriver driver = setupDriver(uri, overwrite); return driver.job.start(driver, frame.anyVec().nChunks()); } public String[] saveToAndWait(String uri, boolean overwrite) { SaveFrameDriver driver = setupDriver(uri, overwrite); driver.job.start(driver, frame.anyVec().nChunks()); driver.job.get(); String[] allWrittenFiles = new String[driver.task.writtenFiles.length+1]; allWrittenFiles[0] = driver.metaUri.toString(); System.arraycopy(driver.task.writtenFiles, 0, allWrittenFiles, 1, driver.task.writtenFiles.length); return allWrittenFiles; } public static class SaveFrameDriver extends H2O.H2OCountedCompleter<LoadFrameDriver> { private final Job<Frame> job; private final Frame frame; public final URI metaUri; public final SaveChunksTask task; public SaveFrameDriver( Job<Frame> job, Frame frame, URI metaUri ) { this.job = job; this.frame = frame; this.metaUri = metaUri; this.task = new SaveChunksTask(job, frame, metaUri.toString()); } @Override public void compute2() { frame.read_lock(job._key); task.doAll(frame).join(); tryComplete(); } @Override public void onCompletion(CountedCompleter caller) { frame.unlock(job); } @Override public boolean onExceptionalCompletion(Throwable t, CountedCompleter caller) { frame.unlock(job); return super.onExceptionalCompletion(t, caller); } } static class SaveChunksTask extends MRTask<SaveChunksTask> { private final Job<Frame> job; private final String metaUri; public String[] writtenFiles; SaveChunksTask(Job<Frame> job, Frame frame, String metaUri) { this.job = job; this.metaUri = metaUri; this.writtenFiles = new String[frame.anyVec().nChunks()]; } @Override public void map(Chunk[] cs) { URI dataUri = getDataUri(metaUri, cs[0].cidx()); writtenFiles[cs[0].cidx()] = dataUri.toString(); PersistUtils.write(dataUri, ab -> writeChunks(ab, cs)); job.update(1); } private void writeChunks(AutoBuffer autoBuffer, Chunk[] chunks) { for (Chunk c : chunks) { autoBuffer.put(c); } } @Override public void reduce(SaveChunksTask mrt) { for (int i = 0; i < writtenFiles.length; i++) { if 
(mrt.writtenFiles[i] != null) { assert writtenFiles[i] == null || writtenFiles[i].equals(mrt.writtenFiles[i]) : "When merging written files expecting " + writtenFiles[i] + " to be null or equal to " + mrt.writtenFiles[i]; writtenFiles[i] = mrt.writtenFiles[i]; } } } } public static Job<Frame> loadFrom(Key<Frame> key, String uri) { URI metaUri = getMetaUri(key, sanitizeUri(uri)); FrameMeta meta = read(metaUri, AutoBuffer::get); if (meta.numNodes != H2O.CLOUD.size()) { throw new IllegalArgumentException("To load this frame a cluster with " + meta.numNodes + " nodes is needed."); } Job<Frame> job = new Job<>(meta.key, "water.fvec.Frame", "Load frame"); return job.start(new LoadFrameDriver(job, metaUri.toString(), meta), meta.espc.length-1); } public static class LoadFrameDriver extends H2O.H2OCountedCompleter<LoadFrameDriver> { private final Job<Frame> job; private final String metaUri; private final FrameMeta meta; public LoadFrameDriver( Job<Frame> job, String metaUri, FrameMeta meta ) { this.job = job; this.metaUri = metaUri; this.meta = meta; } @Override public void compute2() { Vec con = null; Key<Vec>[] vecKeys = new Vec.VectorGroup().addVecs(meta.vecs.length); try { long nrow = meta.espc[meta.espc.length-1]; int nchunk = meta.espc.length-1; con = Vec.makeConN(nrow, nchunk); new LoadChunksTask(job, metaUri, vecKeys).doAll(con).join(); } finally { if (con != null) con.remove(); } int rowLayout = Vec.ESPC.rowLayout(vecKeys[0], meta.espc); Futures fs = new Futures(); for (int i = 0; i < meta.vecs.length; i++) { Vec v = meta.vecs[i]; v._rowLayout = rowLayout; v._key = vecKeys[i]; DKV.put(v, fs); } fs.blockForPending(); Frame frame = new Frame(meta.key, meta.names, meta.vecs); DKV.put(frame); tryComplete(); } } static class LoadChunksTask extends MRTask<LoadChunksTask> { private final Job<Frame> job; private final String metaUri; private final Key[] vecKeys; LoadChunksTask(Job<Frame> job, String metaUri, Key[] vecKeys) { this.job = job; this.metaUri = metaUri; this.vecKeys = vecKeys; } @Override public void map(Chunk c) { PersistUtils.read(getDataUri(metaUri, c.cidx()), ab -> readChunks(ab, c.cidx())); job.update(1); } @SuppressWarnings("rawtypes") private int readChunks(AutoBuffer autoBuffer, int cidx) { for (Key k : vecKeys) { Key chunkKey = Vec.chunkKey(k, cidx); Chunk chunk = autoBuffer.get(); DKV.put(chunkKey, new Value(chunkKey, chunk)); } return vecKeys.length; } } }
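A hedged usage sketch of the FramePersist save/load API above. The directory URI and the helper class are assumptions; as the code enforces, loading requires a cluster with the same node count that wrote the frame, and is typically done in a fresh cluster.

import water.Job;
import water.Key;
import water.fvec.Frame;
import water.fvec.persist.FramePersist;

// Illustrative use of FramePersist: save a Frame, then load it back under its original key.
public class FramePersistSketch {

  static String[] save(Frame fr, String dir) {
    // Returns the meta file URI followed by one data file per chunk.
    return new FramePersist(fr).saveToAndWait(dir, true /*overwrite*/);
  }

  static Frame load(Key<Frame> key, String dir) {
    Job<Frame> job = FramePersist.loadFrom(key, dir);
    return job.get();   // blocks until all chunks are read and the Frame is back in the DKV
  }
}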
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/persist/PersistUtils.java
package water.fvec.persist;

import water.AutoBuffer;
import water.H2O;
import water.persist.Persist;

import java.io.*;
import java.net.URI;

public class PersistUtils {

  public static String sanitizeUri(String uri) {
    if (uri.endsWith("/")) {
      return uri.substring(0, uri.length() - 1);
    } else {
      return uri;
    }
  }

  public static <T> T read(URI uri, Reader<T> r) {
    final Persist persist = H2O.getPM().getPersistForURI(uri);
    try (final InputStream inputStream = persist.open(uri.toString())) {
      final AutoBuffer autoBuffer = new AutoBuffer(inputStream);
      T res = r.read(autoBuffer);
      autoBuffer.close();
      return res;
    } catch (IOException e) {
      throw new RuntimeException("Failed to read from " + uri, e);
    }
  }

  public static <T> T readStream(URI uri, StreamReader<T> r) {
    final Persist persist = H2O.getPM().getPersistForURI(uri);
    try (
        final InputStream inputStream = persist.open(uri.toString());
        final InputStreamReader reader = new InputStreamReader(inputStream)
    ) {
      return r.read(reader);
    } catch (IOException e) {
      throw new RuntimeException("Failed to read from " + uri, e);
    }
  }

  public static void write(URI uri, Writer w) {
    final Persist persist = H2O.getPM().getPersistForURI(uri);
    try (final OutputStream outputStream = persist.create(uri.toString(), true)) {
      final AutoBuffer autoBuffer = new AutoBuffer(outputStream, true);
      w.write(autoBuffer);
      autoBuffer.close();
    } catch (IOException e) {
      throw new RuntimeException("Failed to write to " + uri, e);
    }
  }

  public static void writeStream(URI uri, StreamWriter w) {
    final Persist persist = H2O.getPM().getPersistForURI(uri);
    try (
        final OutputStream outputStream = persist.create(uri.toString(), true);
        final OutputStreamWriter writer = new OutputStreamWriter(outputStream)
    ) {
      w.write(writer);
    } catch (IOException e) {
      throw new RuntimeException("Failed to write to " + uri, e);
    }
  }

  public static boolean exists(URI uri) {
    final Persist persist = H2O.getPM().getPersistForURI(uri);
    return persist.exists(uri.toString());
  }

  public interface Reader<T> {
    T read(AutoBuffer ab);
  }

  public interface Writer {
    void write(AutoBuffer ab);
  }

  public interface StreamWriter {
    void write(OutputStreamWriter w) throws IOException;
  }

  public interface StreamReader<T> {
    T read(InputStreamReader r) throws IOException;
  }
}
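A small sketch of the lambda-based read/write helpers above. The file URI and the integer payload are illustrative assumptions; any Persist-backed URI scheme should behave the same way.

import java.net.URI;
import water.fvec.persist.PersistUtils;

// Illustrative write/read round-trip through an AutoBuffer-backed Persist stream.
public class PersistUtilsSketch {
  static int roundTrip() {
    URI uri = URI.create("file:///tmp/h2o-example.bin");   // assumed local, writable path
    if (!PersistUtils.exists(uri)) {
      PersistUtils.write(uri, ab -> ab.put4(42));          // Writer lambda
    }
    return PersistUtils.read(uri, ab -> ab.get4());        // Reader<Integer> lambda
  }
}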
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/task/FillNAWithDoubleValueTask.java
package water.fvec.task;

import water.MRTask;
import water.fvec.Chunk;

public class FillNAWithDoubleValueTask extends MRTask<FillNAWithDoubleValueTask> {

  private int _columnIdx;
  private double _valueToImpute;

  public FillNAWithDoubleValueTask(int columnIdx, double valueToImpute) {
    _columnIdx = columnIdx;
    _valueToImpute = valueToImpute;
  }

  @Override
  public void map(Chunk[] cs) {
    Chunk num = cs[_columnIdx];
    for (int i = 0; i < num._len; i++) {
      if (num.isNA(i)) {
        num.set(i, _valueToImpute);
      }
    }
  }
}
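A hedged example of running the task above over a frame. The frame `fr`, the column index, and the replacement value are assumptions.

import water.fvec.Frame;
import water.fvec.task.FillNAWithDoubleValueTask;

// Replace NAs in column 2 of an existing Frame with 0.5, mutating the chunks in place.
public class FillNADoubleSketch {
  static void impute(Frame fr) {
    new FillNAWithDoubleValueTask(2, 0.5).doAll(fr);
  }
}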
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/task/FillNAWithLongValueTask.java
package water.fvec.task;

import water.MRTask;
import water.fvec.Chunk;

public class FillNAWithLongValueTask extends MRTask<FillNAWithLongValueTask> {

  private int _columnIdx;
  private long _intValue;

  public boolean _imputationHappened;

  public FillNAWithLongValueTask(int columnIdx, long intValue) {
    _columnIdx = columnIdx;
    _intValue = intValue;
    _imputationHappened = false;
  }

  @Override
  public void map(Chunk[] cs) {
    Chunk num = cs[_columnIdx];
    for (int i = 0; i < num._len; i++) {
      if (num.isNA(i)) {
        num.set(i, _intValue);
        _imputationHappened = true;
      }
    }
  }

  @Override
  public void reduce(FillNAWithLongValueTask mrt) {
    _imputationHappened = _imputationHappened || mrt._imputationHappened;
  }
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/task/FilterByValueTask.java
package water.fvec.task;

import water.MRTask;
import water.fvec.Chunk;
import water.fvec.NewChunk;

public class FilterByValueTask extends MRTask<FilterByValueTask> {

  private double _value;
  private boolean _isInverted;

  public FilterByValueTask(double value, boolean isInverted) {
    _value = value;
    _isInverted = isInverted;
  }

  @Override
  public void map(Chunk[] cs, NewChunk[] ncs) {
    for (int col = 0; col < cs.length; col++) {
      Chunk c = cs[col];
      NewChunk nc = ncs[col];
      for (int i = 0; i < c._len; i++) {
        double currentValue = c.atd(i);
        if (_isInverted)
          nc.addNum(_value == currentValue ? 0 : 1);
        else
          nc.addNum(_value == currentValue ? 1 : 0);
      }
    }
  }
}
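A hedged sketch of driving the task above to produce an indicator frame. The frame `fr`, the matched value 3.0, and the use of the nouts/type doAll overload are assumptions based on the usual MRTask output pattern.

import water.fvec.Frame;
import water.fvec.Vec;
import water.fvec.task.FilterByValueTask;

// Build a same-shape 0/1 indicator frame marking cells equal to 3.0.
public class FilterByValueSketch {
  static Frame indicator(Frame fr) {
    return new FilterByValueTask(3.0, false /*isInverted*/)
        .doAll(fr.numCols(), Vec.T_NUM, fr)
        .outputFrame();
  }
}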
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/task/IsNotNaTask.java
package water.fvec.task;

import water.MRTask;
import water.fvec.Chunk;
import water.fvec.NewChunk;

public class IsNotNaTask extends MRTask<IsNotNaTask> {

  @Override
  public void map(Chunk[] cs, NewChunk[] ncs) {
    for (int col = 0; col < cs.length; col++) {
      Chunk c = cs[col];
      NewChunk nc = ncs[col];
      for (int i = 0; i < c._len; i++)
        nc.addNum(c.isNA(i) ? 0 : 1);
    }
  }
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/task/UniqOldTask.java
package water.fvec.task; import water.MRTask; import water.fvec.Chunk; import water.fvec.Vec; import water.rapids.ast.prims.mungers.AstGroup; import water.util.IcedHashMap; @Deprecated // in favor of UniqTask - kept for experimenting only public class UniqOldTask extends MRTask<UniqOldTask> { public IcedHashMap<AstGroup.G, String> _uniq; @Override public void map(Chunk[] c) { _uniq = new IcedHashMap<>(); AstGroup.G g = new AstGroup.G(1, null); for (int i = 0; i < c[0]._len; ++i) { g.fill(i, c, new int[]{0}); String s_old = _uniq.putIfAbsent(g, ""); if (s_old == null) g = new AstGroup.G(1, null); } } @Override public void reduce(UniqOldTask t) { if (_uniq != t._uniq) { IcedHashMap<AstGroup.G, String> l = _uniq; IcedHashMap<AstGroup.G, String> r = t._uniq; if (l.size() < r.size()) { l = r; r = _uniq; } // larger on the left for (AstGroup.G rg : r.keySet()) l.putIfAbsent(rg, ""); // loop over smaller set _uniq = l; t._uniq = null; } } public Vec toVec() { final int nUniq = _uniq.size(); final AstGroup.G[] uniq = _uniq.keySet().toArray(new AstGroup.G[nUniq]); Vec v = Vec.makeZero(nUniq, _fr.vec(0).get_type()); new MRTask() { @Override public void map(Chunk c) { int start = (int) c.start(); for (int i = 0; i < c._len; ++i) c.set(i, uniq[i + start]._gs[0]); } }.doAll(v); return v; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/task/UniqTask.java
package water.fvec.task; import water.MRTask; import water.fvec.Chunk; import water.fvec.Vec; import water.util.IcedDouble; import water.util.IcedHashSet; public class UniqTask extends MRTask<UniqTask> { private IcedHashSet<IcedDouble> _uniq; private boolean _na; @Override public void map(Chunk[] c) { _uniq = new IcedHashSet<>(); double prev = Double.NaN; for (int i = 0; i < c[0]._len; ++i) { final double val = c[0].atd(i); if (Double.isNaN(val)) { _na = true; continue; } if (val == prev) // helps with sparse data and continuous runs of single values continue; prev = val; _uniq.addIfAbsent(new IcedDouble(val)); } } @Override public void reduce(UniqTask t) { IcedHashSet<IcedDouble> l = _uniq; IcedHashSet<IcedDouble> r = t._uniq; if (l.size() < r.size()) { l = r; r = _uniq; } // larger on the left for (IcedDouble rg : r) l.addIfAbsent(rg); // loop over smaller set _uniq = l; _na = _na || t._na; t._uniq = null; } public double[] toArray() { final int size = _uniq.size(); double[] res = new double[size + (_na ? 1 : 0)]; int i = 0; if (_na) res[i++] = Double.NaN; for (IcedDouble d : _uniq) res[i++] = d._val; assert i == res.length; return res; } public Vec toVec() { double[] uniq = toArray(); Vec v = Vec.makeZero(uniq.length, _fr.vec(0).get_type()); new MRTask() { @Override public void map(Chunk c) { int start = (int) c.start(); for (int i = 0; i < c._len; ++i) c.set(i, uniq[i + start]); } }.doAll(v); return v; } }
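A hedged example of collecting distinct values with UniqTask above. The vector `v` and the helper class are assumptions; per toArray(), NaN leads the result if any NAs were seen.

import water.fvec.Frame;
import water.fvec.Vec;
import water.fvec.task.UniqTask;

// Collect the distinct values of a single numeric Vec.
public class UniqSketch {
  static double[] distinct(Vec v) {
    return new UniqTask().doAll(new Frame(v)).toArray();
  }
}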
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/init/AbstractBuildVersion.java
package water.init; import water.util.PojoUtils; import water.util.ReflectionUtils; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; import java.util.regex.Pattern; abstract public class AbstractBuildVersion { // A date format use for compiledOn field static String DATE_FORMAT = "yyyy-MM-dd HH:mm:ss"; // Threshold to identify too old version static int TOO_OLD_THRESHOLD = 100 /* days */; // A link to the latest version identifier static String LATEST_STABLE_URL = " http://h2o-release.s3.amazonaws.com/h2o/latest_stable"; // Pattern to extract version from URL static Pattern VERSION_EXTRACT_PATTERN = Pattern.compile(".*h2o-(.*).zip"); // Devel version has a specific patch number X.Y.Z.99999 public static String DEVEL_VERSION_PATCH_NUMBER = "99999"; abstract public String branchName(); abstract public String lastCommitHash(); abstract public String describe(); abstract public String projectVersion(); abstract public String compiledOn(); abstract public String compiledBy(); @Override public String toString() { return "H2O v"+projectVersion()+ " ("+branchName()+" - "+lastCommitHash()+")"; } public String buildNumber() { String pv = projectVersion(); if (pv.equals(UNKNOWN_VERSION_MARKER)) { return UNKNOWN_VERSION_MARKER; } String[] split_pv = pv.split("\\."); String bn = split_pv[split_pv.length-1]; return(bn); } /** Returns compile date for this H2O version or null. */ public final Date compiledOnDate() { SimpleDateFormat dateFormat = new SimpleDateFormat(DATE_FORMAT); try { return dateFormat.parse(compiledOn()); } catch (ParseException e) { return null; } } public final boolean isTooOld() { Date compileTime = compiledOnDate(); if (compileTime == null) return false; long timeDiff = System.currentTimeMillis() - compileTime.getTime(); long days = timeDiff / (24*60*60*1000L) /* msec per day */; return days > TOO_OLD_THRESHOLD; } public boolean isDevVersion() { return projectVersion().equals(UNKNOWN_VERSION_MARKER) || projectVersion().endsWith(DEVEL_VERSION_PATCH_NUMBER); } /** Dummy version of H2O. 
*/ private static final String UNKNOWN_VERSION_MARKER = "(unknown)"; public static final AbstractBuildVersion UNKNOWN_VERSION = new AbstractBuildVersion() { @Override public String projectVersion() { return UNKNOWN_VERSION_MARKER; } @Override public String lastCommitHash() { return UNKNOWN_VERSION_MARKER; } @Override public String describe() { return UNKNOWN_VERSION_MARKER; } @Override public String compiledOn() { return UNKNOWN_VERSION_MARKER; } @Override public String compiledBy() { return UNKNOWN_VERSION_MARKER; } @Override public String branchName() { return UNKNOWN_VERSION_MARKER; } }; private String getValue(String name) { switch (name) { case "projectVersion": return projectVersion(); case "lastCommitHash": return lastCommitHash(); case "describe": return describe(); case "compiledOn": return compiledOn(); case "compiledBy": return compiledBy(); case "branchName": return branchName(); default: return null; } } public static AbstractBuildVersion getBuildVersion() { AbstractBuildVersion abv = AbstractBuildVersion.UNKNOWN_VERSION; try { Class klass = Class.forName("water.init.BuildVersion"); java.lang.reflect.Constructor constructor = klass.getConstructor(); abv = (AbstractBuildVersion) constructor.newInstance(); } catch (Exception ignore) { } return abv; } public static void main(String[] args) { if (args.length == 0) { args = new String[]{"projectVersion"}; } AbstractBuildVersion buildVersion = getBuildVersion(); System.out.print(buildVersion.getValue(args[0])); for (int i = 1; i < args.length; i++) { System.out.print(' '); System.out.print(buildVersion.getValue(args[i])); } System.out.println(); } }
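A short, hedged example of reading the build metadata exposed by the class above; the printed values depend on the actual build, and UNKNOWN_VERSION is returned when no water.init.BuildVersion class is on the classpath.

import water.init.AbstractBuildVersion;

// Query build metadata of the running H2O binary.
public class BuildInfoSketch {
  public static void main(String[] args) {
    AbstractBuildVersion abv = AbstractBuildVersion.getBuildVersion();
    System.out.println(abv);                                   // e.g. "H2O v3.46.0.7 (rel-3.46.0 - <hash>)"
    System.out.println("build number: " + abv.buildNumber());
    System.out.println("older than threshold: " + abv.isTooOld());
  }
}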
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/init/AbstractEmbeddedH2OConfig.java
package water.init;

import hex.faulttolerance.Recovery;
import water.H2O;

import java.net.InetAddress;
import java.util.Optional;

/**
 * This class is a small shim between a main java program (such as a
 * Hadoop mapper) and an embedded full-capability H2O.
 */
public abstract class AbstractEmbeddedH2OConfig {

  /**
   * Tell the embedding software that H2O has started an embedded
   * web server on an IP and port.
   * This may be nonblocking.
   *
   * @param ip IP address this H2O can be reached at.
   * @param port Port this H2O can be reached at (for REST API and browser).
   */
  public abstract void notifyAboutEmbeddedWebServerIpPort(InetAddress ip, int port);

  /**
   * Whether H2O gets a flatfile config from this config object.
   * @return true if H2O should query the config object for a flatfile, false otherwise.
   */
  public abstract boolean providesFlatfile();

  /**
   * If providesFlatfile() returns true, get the flatfile. This may incur a blocking network call.
   * This must be called after notifyAboutEmbeddedWebServerIpPort() or the behavior
   * will be undefined.
   *
   * This method includes its own address, because the config may be building up
   * and managing a directory of H2O nodes.
   *
   * If this method throws any kind of exception, the node failed to get its config,
   * and this H2O is hosed and should exit gracefully.
   *
   * @return A string with the multi-line flatfile text.
   */
  public abstract String fetchFlatfile() throws Exception;

  /**
   * @deprecated please override the version of this method that has leader information.
   */
  @Deprecated
  public void notifyAboutCloudSize(InetAddress ip, int port, int size) {
    throw new IllegalStateException("Please override the new version of the method notifyAboutCloudSize(..).");
  }

  /**
   * Tell the embedding software that this H2O instance belongs to
   * a cloud of a certain size.
   * This may be nonblocking.
   *
   * Note: this method will be made abstract in a future stable release. Users are expected
   * to override this method in their implementation.
   *
   * @param ip IP address this H2O can be reached at.
   * @param port Port this H2O can be reached at (for REST API and browser).
   * @param leaderIp IP address of the leader H2O node of the cloud.
   * @param leaderPort Port of the leader H2O node (for REST API and browser).
   * @param size Number of H2O instances in the cloud.
   */
  public void notifyAboutCloudSize(InetAddress ip, int port, InetAddress leaderIp, int leaderPort, int size) {
    notifyAboutCloudSize(ip, port, size);
  }

  /**
   * Must be called by the subclass when clouding is finished.
   */
  protected final void cloudingFinished() {
    if (H2O.SELF.isLeaderNode()) {
      Recovery.autoRecover(Optional.ofNullable(H2O.ARGS.auto_recovery_dir));
    }
  }

  /**
   * Tell the embedding software that H2O wants the process to exit.
   * This should not return. The embedding software should do any
   * required cleanup and then call exit with the status.
   *
   * @param status Process-level exit status
   */
  public abstract void exit(int status);

  /**
   * Print debug information.
   */
  public abstract void print();

  /**
   * Indicates whether we should disable REST API access on non-leader nodes.
   * @return false by default == all nodes will be accessible
   */
  public boolean disableNonLeaderNodeAccess() {
    return false;
  }
}
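A minimal, illustrative subclass of the config shim above, assuming the embedding software provides no flatfile and only logs cloud events. This is a sketch, not H2O's own Hadoop or Kubernetes implementation; the class name and log messages are assumptions.

import java.net.InetAddress;
import water.init.AbstractEmbeddedH2OConfig;

// Hypothetical embedded config that logs lifecycle events and never supplies a flatfile.
public class LoggingEmbeddedConfig extends AbstractEmbeddedH2OConfig {

  @Override public void notifyAboutEmbeddedWebServerIpPort(InetAddress ip, int port) {
    System.out.println("H2O web server at " + ip.getHostAddress() + ":" + port);
  }

  @Override public boolean providesFlatfile() { return false; }

  @Override public String fetchFlatfile() { return null; }   // never called: providesFlatfile() is false

  @Override public void notifyAboutCloudSize(InetAddress ip, int port,
                                             InetAddress leaderIp, int leaderPort, int size) {
    System.out.println("Clouded up: " + size + " nodes, leader " + leaderIp + ":" + leaderPort);
  }

  @Override public void exit(int status) { System.exit(status); }

  @Override public void print() { System.out.println("LoggingEmbeddedConfig (sketch)"); }
}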
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/init/BuildVersion.java
package water.init;

public class BuildVersion extends AbstractBuildVersion {
  public String branchName()     { return "rel-3.46.0"; }
  public String lastCommitHash() { return "479065fe4d09bf8f364b6cab45b54f9324ee1c1d"; }
  public String describe()       { return "479065fe4d"; }
  public String projectVersion() { return "3.46.0.7"; }
  public String compiledOn()     { return "2025-03-27 16:00:19"; }
  public String compiledBy()     { return "root"; }
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/init/EmbeddedConfigProvider.java
package water.init;

public interface EmbeddedConfigProvider {

  default String getName() {
    return getClass().getName();
  }

  /**
   * Provider initialization. Guaranteed to be called before any other method is called,
   * including the `isActive` method.
   */
  void init();

  /**
   * Whether the provider is active and should be used by H2O.
   *
   * @return True if H2O should use this {@link EmbeddedConfigProvider}, otherwise false.
   */
  default boolean isActive() {
    return false;
  }

  /**
   * @return An instance of {@link AbstractEmbeddedH2OConfig} configuration. Never null.
   */
  AbstractEmbeddedH2OConfig getConfig();
}
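An illustrative provider implementing the interface above. The activation switch (an environment variable) and the reuse of the LoggingEmbeddedConfig sketch from the previous example are assumptions; how providers are registered and discovered is outside this file and not shown here.

import water.init.AbstractEmbeddedH2OConfig;
import water.init.EmbeddedConfigProvider;

// Hypothetical provider that activates only when MY_EMBEDDED_H2O is set in the environment.
public class EnvEmbeddedConfigProvider implements EmbeddedConfigProvider {

  private AbstractEmbeddedH2OConfig config;

  @Override public void init() {
    if (isActive()) config = new LoggingEmbeddedConfig();   // sketch class from the previous example
  }

  @Override public boolean isActive() {
    return System.getenv("MY_EMBEDDED_H2O") != null;        // assumed activation switch
  }

  @Override public AbstractEmbeddedH2OConfig getConfig() {
    return config;
  }
}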
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/init/HostnameGuesser.java
package water.init; import water.util.Log; import water.util.NetworkUtils; import water.util.OSUtils; import water.util.StringUtils; import java.net.*; import java.util.*; import java.util.regex.Matcher; import java.util.regex.Pattern; public class HostnameGuesser { /** * Finds InetAddress for a specified IP, guesses the address if parameter is not specified * (uses network as a hint if provided). * * @param ip ip address (optional) * @param network network (optional) * @return inet address for this node. */ public static InetAddress findInetAddressForSelf(String ip, String network) throws Error { if ((ip != null) && (network != null)) { throw new HostnameGuessingException("ip and network options must not be used together"); } ArrayList<CIDRBlock> networkList = calcArrayList(network); // Get a list of all valid IPs on this machine. ArrayList<InetAddress> ias = calcPrioritizedInetAddressList(); InetAddress local; // My final choice // Check for an "-ip xxxx" option and accept a valid user choice; required // if there are multiple valid IP addresses. if (ip != null) { local = getInetAddress(ip, ias); } else if (networkList.size() > 0) { // Return the first match from the list, if any. // If there are no matches, then exit. Log.info("Network list was specified by the user. Searching for a match..."); for( InetAddress ia : ias ) { Log.info(" Considering " + ia.getHostAddress() + " ..."); for (CIDRBlock n : networkList) { if (n.isInetAddressOnNetwork(ia)) { Log.info(" Matched " + ia.getHostAddress()); return ia; } } } throw new HostnameGuessingException("No interface matches the network list from the -network option. Exiting."); } else { // No user-specified IP address. Attempt auto-discovery. Roll through // all the network choices on looking for a single non-local address. // Right now the loop up order is: site local address > link local address > fallback loopback ArrayList<InetAddress> globalIps = new ArrayList<>(); ArrayList<InetAddress> siteLocalIps = new ArrayList<>(); ArrayList<InetAddress> linkLocalIps = new ArrayList<>(); boolean isIPv6Preferred = NetworkUtils.isIPv6Preferred(); boolean isIPv4Preferred = NetworkUtils.isIPv4Preferred(); for( InetAddress ia : ias ) { // Make sure the given IP address can be found here if(!(ia.isLoopbackAddress() || ia.isAnyLocalAddress())) { // Always prefer IPv4 if (isIPv6Preferred && !isIPv4Preferred && ia instanceof Inet4Address) continue; if (isIPv4Preferred && ia instanceof Inet6Address) continue; if (ia.isSiteLocalAddress()) siteLocalIps.add(ia); if (ia.isLinkLocalAddress()) linkLocalIps.add(ia); globalIps.add(ia); } } // The ips were already sorted in priority based way, so use it // There is only a single global or site local address, use it if (globalIps.size() == 1) { local = globalIps.get(0); } else if (siteLocalIps.size() == 1) { local = siteLocalIps.get(0); } else if (linkLocalIps.size() > 0) { // Always use link local address on IPv6 local = linkLocalIps.get(0); } else { local = guessInetAddress(siteLocalIps); } } // The above fails with no network connection, in that case go for a truly // local host. if( local == null ) { try { Log.warn("Failed to determine IP, falling back to localhost."); // set default ip address to be 127.0.0.1 /localhost local = NetworkUtils.isIPv6Preferred() && ! NetworkUtils.isIPv4Preferred() ? 
InetAddress.getByName("::1") // IPv6 localhost : InetAddress.getByName("127.0.0.1"); } catch (UnknownHostException e) { Log.throwErr(e); } } return local; } /** * Return a list of interfaces sorted by importance (most important first). * This is the order we want to test for matches when selecting an interface. */ private static ArrayList<NetworkInterface> calcPrioritizedInterfaceList() { ArrayList<NetworkInterface> networkInterfaceList = null; try { Enumeration<NetworkInterface> nis = NetworkInterface.getNetworkInterfaces(); ArrayList<NetworkInterface> tmpList = Collections.list(nis); Comparator<NetworkInterface> c = new Comparator<NetworkInterface>() { @Override public int compare(NetworkInterface lhs, NetworkInterface rhs) { // Handle null inputs. if ((lhs == null) && (rhs == null)) { return 0; } if (lhs == null) { return 1; } if (rhs == null) { return -1; } // If the names are equal, then they are equal. if (lhs.getName().equals (rhs.getName())) { return 0; } // If both are bond drivers, choose a precedence. if (lhs.getName().startsWith("bond") && (rhs.getName().startsWith("bond"))) { Integer li = lhs.getName().length(); Integer ri = rhs.getName().length(); // Bond with most number of characters is always highest priority. if (li.compareTo(ri) != 0) { return li.compareTo(ri); } // Otherwise, sort lexicographically by name. return lhs.getName().compareTo(rhs.getName()); } // If only one is a bond driver, give that precedence. if (lhs.getName().startsWith("bond")) { return -1; } if (rhs.getName().startsWith("bond")) { return 1; } // Everything that isn't a bond driver is equal. return 0; } }; Collections.sort(tmpList, c); networkInterfaceList = tmpList; } catch( SocketException e ) { Log.err(e); } return networkInterfaceList; } /** * Return a list of internet addresses sorted by importance (most important first). * This is the order we want to test for matches when selecting an internet address. */ private static ArrayList<java.net.InetAddress> calcPrioritizedInetAddressList() { ArrayList<java.net.InetAddress> ips = new ArrayList<>(); ArrayList<NetworkInterface> networkInterfaceList = calcPrioritizedInterfaceList(); boolean isWindows = OSUtils.isWindows(); boolean isWsl = OSUtils.isWsl(); int localIpTimeout = NetworkUtils.getLocalIpPingTimeout(); for (NetworkInterface nIface : networkInterfaceList) { Enumeration<InetAddress> ias = nIface.getInetAddresses(); if (NetworkUtils.isUp(nIface)) { while (ias.hasMoreElements()) { InetAddress ia = ias.nextElement(); // Windows specific code, since isReachable was not able to detect live IPs on Windows8.1 machines if (isWindows || isWsl || NetworkUtils.isReachable(null, ia, localIpTimeout /* ms */)) { ips.add(ia); Log.info("Possible IP Address: ", nIface.getName(), " (", nIface.getDisplayName(), "), ", ia.getHostAddress()); } else { Log.info("Network address/interface is not reachable in 150ms: ", ia, "/", nIface); } } } else { Log.info("Network interface is down: ", nIface); } } return ips; } private static InetAddress guessInetAddress(List<InetAddress> ips) { String m = "Multiple local IPs detected:\n"; for(InetAddress ip : ips) m+=" " + ip; m += "\nAttempting to determine correct address...\n"; Socket s = null; try { // using google's DNS server as an external IP to find s = NetworkUtils.isIPv6Preferred() && !NetworkUtils.isIPv4Preferred() ? 
new Socket(InetAddress.getByAddress(NetworkUtils.GOOGLE_DNS_IPV6), 53) : new Socket(InetAddress.getByAddress(NetworkUtils.GOOGLE_DNS_IPV4), 53); m += "Using " + s.getLocalAddress() + "\n"; return s.getLocalAddress(); } catch( java.net.SocketException se ) { return null; // No network at all? (Laptop w/wifi turned off?) } catch( Throwable t ) { Log.err(t); return null; } finally { Log.warn(m); if( s != null ) try { s.close(); } catch( java.io.IOException ie ) { } } } /** * Get address for given IP. * @param ip textual representation of IP (host) * @param allowedIps range of allowed IPs on this machine * @return IPv4 or IPv6 address which matches given IP and is in specified range */ private static InetAddress getInetAddress(String ip, List<InetAddress> allowedIps) { InetAddress addr = null; if (ip != null) { try { addr = InetAddress.getByName(ip); } catch (UnknownHostException e) { throw new HostnameGuessingException(e); } if (allowedIps != null) { if (!allowedIps.contains(addr)) { throw new HostnameGuessingException("IP address not found on this machine"); } } } return addr; } /** * Parses specification of subnets and returns their CIDRBlock representation. * @param networkOpt comma separated list of subnets * @return a list of subnets, possibly empty. Never returns null. * @throws HostnameGuessingException if one of the subnet specification is invalid (it cannot be parsed) */ static ArrayList<CIDRBlock> calcArrayList(String networkOpt) throws HostnameGuessingException { ArrayList<CIDRBlock> networkList = new ArrayList<>(); if (networkOpt == null) { return networkList; } String[] networks = networkOpt.split(","); for (String n : networks) { CIDRBlock usn = CIDRBlock.parse(n); if (usn == null || !usn.valid()) { Log.err("Network invalid: " + n); throw new HostnameGuessingException("Invalid subnet specification: " + n + " (full '-network' argument: " + networkOpt + ")."); } networkList.add(usn); } return networkList; } /** Representation of a single CIDR block (subnet). */ public static class CIDRBlock { /** Patterns to recognize IPv4 CIDR selector (network routing prefix */ private static Pattern NETWORK_IPV4_CIDR_PATTERN = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+)/(\\d+)"); /** Patterns to recognize IPv6 CIDR selector (network routing prefix * Warning: the pattern recognize full IPv6 specification and does not support short specification via :: replacing block of 0s. * * From wikipedia: An IPv6 address is represented as eight groups of four hexadecimal digits * https://en.wikipedia.org/wiki/IPv6_address#Presentation */ private static Pattern NETWORK_IPV6_CIDR_PATTERN = Pattern.compile("([a-fA-F\\d]+):([a-fA-F\\d]+):([a-fA-F\\d]+):([a-fA-F\\d]+):([a-fA-F\\d]+):([a-fA-F\\d]+):([a-fA-F\\d]+):([a-fA-F\\d]+)/(\\d+)"); final int[] ip; final int bits; public static CIDRBlock parse(String cidrBlock) { boolean isIPV4 = cidrBlock.contains("."); Matcher m = isIPV4 ? NETWORK_IPV4_CIDR_PATTERN.matcher(cidrBlock) : NETWORK_IPV6_CIDR_PATTERN.matcher(cidrBlock); boolean b = m.matches(); if (!b) { return null; } assert (isIPV4 && m.groupCount() == 5 || m.groupCount() == 9); int len = isIPV4 ? 4 : 8; int[] ipBytes = new int[len]; for(int i = 0; i < len; i++) { ipBytes[i] = isIPV4 ? Integer.parseInt(m.group(i + 1)) : Integer.parseInt(m.group(i + 1), 16); } // Active bits in CIDR specification int bits = Integer.parseInt(m.group(len + 1)); CIDRBlock usn = isIPV4 ? CIDRBlock.createIPv4(ipBytes, bits) : CIDRBlock.createIPv6(ipBytes, bits); return usn.valid() ? 
usn : null; } public static CIDRBlock createIPv4(int[] ip, int bits) { assert ip.length == 4; return new CIDRBlock(ip, bits); } public static CIDRBlock createIPv6(int[] ip, int bits) { assert ip.length == 8; // Expand 8 double octets into 16 octets int[] ipLong = new int[16]; for (int i = 0; i < ip.length; i++) { ipLong[2*i + 0] = (ip[i] >> 8) & 0xff; ipLong[2*i + 1] = ip[i] & 0xff; } return new CIDRBlock(ipLong, bits); } /** * Create object from user specified data. * * @param ip Array of octets specifying IP (4 for IPv4, 16 for IPv6) * @param bits Bits specifying active part of IP */ private CIDRBlock(int[] ip, int bits) { assert ip.length == 4 || ip.length == 16 : "Wrong number of bytes to construct IP: " + ip.length; this.ip = ip; this.bits = bits; } private boolean validOctet(int o) { return 0 <= o && o <= 255; } private boolean valid() { for (int i = 0; i < ip.length; i++) { if (!validOctet(ip[i])) return false; } return 0 <= bits && bits <= ip.length * 8; } /** * Test if an internet address lives on this user specified network. * * @param ia Address to test. * @return true if the address is on the network; false otherwise. */ public boolean isInetAddressOnNetwork(InetAddress ia) { byte[] ipBytes = ia.getAddress(); return isInetAddressOnNetwork(ipBytes); } boolean isInetAddressOnNetwork(byte[] ipBytes) { // Compare common byte prefix int i = 0; for (i = 0; i < bits/8; i++) { if (((int) ipBytes[i] & 0xff) != ip[i]) return false; } // Compare remaining bit-prefix int remaining = 0; if ((remaining = 8-(bits % 8)) < 8) { int mask = ~((1 << remaining) - 1) & 0xff; // Remaining 3bits for comparison: 1110 0000 return (((int) ipBytes[i] & 0xff) & mask) == (ip[i] & mask); } return true; } } static class HostnameGuessingException extends RuntimeException { private HostnameGuessingException(String message) { super(message); } private HostnameGuessingException(Exception e) { super(e); } } public static String localAddressToHostname(InetAddress address) { String hostname = address.getHostName(); if (! address.getHostAddress().equals(hostname)) { return hostname; } // we don't want to return IP address (because of a security policy of a particular customer, see PUBDEV-5680) hostname = System.getenv("HOSTNAME"); if (!StringUtils.isNullOrEmpty(hostname)) { Log.info("Machine hostname determined using environment variable HOSTNAME='" + hostname + "'."); return hostname; } else { Log.warn("Machine hostname cannot be determined. Using `localhost` as a fallback."); return "localhost"; } } }
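Illustrative sketch (not part of the H2O sources) showing how the CIDRBlock subnet matching defined above could be exercised. It assumes the enclosing class is water.init.HostnameGuesser, which the unqualified references in NetworkInit later in this dump imply; the class name and IP literals are otherwise arbitrary.

import java.net.InetAddress;
import water.init.HostnameGuesser.CIDRBlock;

public class CidrMatchSketch {
  public static void main(String[] args) throws Exception {
    // parse() returns null when the CIDR specification cannot be recognized or is invalid.
    CIDRBlock subnet = CIDRBlock.parse("10.10.0.0/16");
    InetAddress inside  = InetAddress.getByName("10.10.65.108");
    InetAddress outside = InetAddress.getByName("192.168.1.1");
    System.out.println(subnet.isInetAddressOnNetwork(inside));   // true  - first 16 bits match
    System.out.println(subnet.isInetAddressOnNetwork(outside));  // false - prefix differs
  }
}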
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/init/JarHash.java
package water.init; import water.util.Log; import java.io.*; import java.net.URL; import java.net.URLDecoder; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; import java.util.Enumeration; import java.util.HashSet; import java.util.Set; import java.util.List; import java.util.jar.JarEntry; import java.util.jar.JarFile; /** Self-jar file MD5 hash, to help make sure clusters are made from the same jar. */ public abstract class JarHash { static final String JARPATH; // Path to self-jar, or NULL if we cannot find it static public final byte[] JARHASH; // MD5 hash of self-jar, or 0xFF's if we cannot figure it out static { JARPATH = cl_init_jarpath(); JARHASH = cl_init_md5(JARPATH); } private static String cl_init_jarpath() { try { final String ownJar = JarHash.class.getProtectionDomain().getCodeSource().getLocation().getPath(); if( ownJar.endsWith(".jar") ) // Path if run from a Jar return URLDecoder.decode(ownJar, "UTF-8"); if( !ownJar.endsWith(".jar/") ) return null; // Not a Jar? // Some hadoop versions (like Hortonworks) will unpack the jar file on their own. String stem = "h2o.jar"; File f = new File(ownJar + stem); if( !f.exists() ) return null; // Still not a jar return URLDecoder.decode(ownJar + stem, "UTF-8"); } catch( IOException ie ) { return null; } } private static byte[] cl_init_md5(String jarpath) { byte[] ffHash = new byte[16]; Arrays.fill(ffHash, (byte)0xFF); // The default non-MD5 if( jarpath==null ) return ffHash; // Ok, pop Jar open & make MD5 InputStream is = null; try { is = new FileInputStream(jarpath); MessageDigest md5 = MessageDigest.getInstance("MD5"); byte[] buf = new byte[4096]; int pos; while( (pos = is.read(buf)) > 0 ) md5.update(buf, 0, pos); return md5.digest(); // haz md5! } catch( IOException | NoSuchAlgorithmException e ) { Log.err(e); // No MD5 algo handy??? } finally { try { if( is != null ) is.close(); } catch( IOException ignore ) { } } return ffHash; } private static final ArrayList<File> RESOURCE_FILES = new ArrayList<>(); public static void registerResourceRoot(File f) { if (f.exists()) { RESOURCE_FILES.add(f); } } // Look for resources (JS files, PNG's, etc) from the self-jar first, then // from a possible local dev build. public static InputStream getResource2(String uri) { try { // If -Dwebdev=1 is set in VM args, we're in front end dev mode, so skip the class loader. // This is to allow the front end scripts/styles/templates to be loaded from the build // directory during development. // Try all registered locations for( File f : RESOURCE_FILES ) { File f2 = new File(f,uri); if( f2.exists() ) return new FileInputStream(f2); } // Fall through to jar file mode. 
ClassLoader cl = ClassLoader.getSystemClassLoader(); InputStream is = loadResource(uri, cl); if (is == null && (cl=Thread.currentThread().getContextClassLoader())!=null) { is = loadResource(uri, cl); } if (is == null && (cl=JarHash.class.getClassLoader())!=null) { is = loadResource(uri, cl); } if (is != null) return is; } catch (FileNotFoundException ignore) {} Log.warn("Resource not found: " + uri); return null; } private static InputStream loadResource(String uri, ClassLoader cl) { Log.trace("Trying to load resource " + uri + " via classloader " + cl); InputStream is = cl.getResourceAsStream("resources/www" + uri); if( is != null ) return is; is = cl.getResourceAsStream("resources/main/www" + uri); if( is != null ) return is; // This is the right file location of resource inside jar bundled by gradle is = cl.getResourceAsStream("www" + uri); return is; } /** * Given a path name (without preceding and appending "/"), * return the names of all file and directory names contained * in the path location (not recursive). * * @param path - name of resource path * @return - list of resource names at that path */ public static List<String> getResourcesList(String path) { Set<String> resList = new HashSet<>(); // subdirectories can cause duplicate entries try { // Java doesn't allow simple exploration of resources as directories // when the resources are inside a jar file. This searches the contents // of the jar to get the list URL classUrl = JarHash.class.getResource("/water/H2O.class"); if (classUrl != null && classUrl.getProtocol().equals("jar")) { // extract jarPath from classUrl string String jarPath = classUrl.getPath().substring(5, classUrl.getPath().indexOf("!")); JarFile jar = new JarFile(URLDecoder.decode(jarPath, "UTF-8")); Enumeration<JarEntry> files = jar.entries(); // look for all entries within the supplied resource path while (files.hasMoreElements()) { String fName = files.nextElement().getName(); if (fName.startsWith(path + "/")) { String resourceName = fName.substring((path + "/").length()); int checkSubdir = resourceName.indexOf("/"); if (checkSubdir >= 0) // subdir, trim to subdir name resourceName = resourceName.substring(0, checkSubdir); if (resourceName.length() > 0) resList.add(resourceName); } } } else { // not a jar, retrieve resource from file system String resourceName; BufferedReader resources = new BufferedReader(new InputStreamReader(JarHash.class.getResourceAsStream("/gaid"))); if (resources != null) { while ((resourceName = resources.readLine()) != null) if (resourceName.length() > 0) resList.add(resourceName); } } }catch(Exception ignore){ Log.debug("Failed in reading gaid resources."); } return new ArrayList<>(resList); } }
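A self-contained sketch of the streaming MD5 idiom that cl_init_md5 uses above; the class name and the command-line argument are illustrative only.

import java.io.FileInputStream;
import java.io.InputStream;
import java.security.MessageDigest;

public class Md5Sketch {
  // Stream a file through an MD5 digest, the same pattern cl_init_md5 applies to the self-jar.
  static byte[] md5Of(String path) throws Exception {
    MessageDigest md5 = MessageDigest.getInstance("MD5");
    try (InputStream is = new FileInputStream(path)) {
      byte[] buf = new byte[4096];
      int n;
      while ((n = is.read(buf)) > 0) md5.update(buf, 0, n);
    }
    return md5.digest();   // 16 bytes, the same length as JARHASH
  }

  public static void main(String[] args) throws Exception {
    StringBuilder hex = new StringBuilder();
    for (byte b : md5Of(args[0])) hex.append(String.format("%02x", b));
    System.out.println(hex);
  }
}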
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/init/Linpack.java
package water.init; /* Modified 7/12/14 by Arno E. Candel arno.candel@gmail.com Added support for repeating the main loop to improve timing. Added support for warming up the JIT. Added support for nanosecond timer. Added support for multi-threading. Modified 3/3/97 by David M. Doolin (dmd) doolin@cs.utk.edu Fixed error in matgen() method. Added some comments. Modified 1/22/97 by Paul McMahan mcmahan@cs.utk.edu Added more MacOS options to form. Optimized by Jonathan Hardwick (jch@cs.cmu.edu), 3/28/96 Compare to Linkpack.java. Optimizations performed: - added "final" modifier to performance-critical methods. - changed lines of the form "a[i] = a[i] + x" to "a[i] += x". - minimized array references using common subexpression elimination. - eliminated unused variables. - undid an unrolled loop. - added temporary 1D arrays to hold frequently-used columns of 2D arrays. - wrote my own abs() method See http://www.cs.cmu.edu/~jch/java/linpack.html for more details. Ported to Java by Reed Wade (wade@cs.utk.edu) 2/96 built using JDK 1.0 on solaris using "javac -O Linpack.java" Translated to C by Bonnie Toy 5/88 (modified on 2/25/94 to fix a problem with daxpy for unequal increments or equal increments not equal to 1. Jack Dongarra) */ import water.H2ORuntime; import water.util.ArrayUtils; import water.util.Log; import water.util.Timer; public class Linpack { public static void main(String[] args) { int num_threads = H2ORuntime.availableProcessors(); double sumgflops = run(num_threads); Log.info("CPU speed (" + num_threads + " cores) : " + sumgflops + " Gflops."); } /** * Compute system CPU speed in Gflops */ public static double run(int num_threads) { final double gflops[] = new double[num_threads]; Thread[] threads = new Thread[num_threads]; for (int t=0;t<num_threads;++t) { final int thread_num = t; threads[t] = new Thread() { public void run() { Linpack l = new Linpack(); gflops[thread_num] = l.run_benchmark(); } }; } for (int t=0;t<num_threads;++t) { threads[t].start(); } for (int t=0;t<num_threads;++t) { try { threads[t].join(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } return ArrayUtils.sum(gflops); } final double abs (double d) { return (d >= 0) ? d : -d; } double second_orig = -1; double second() { if (second_orig==-1) { second_orig = System.currentTimeMillis(); } return (System.currentTimeMillis() - second_orig)/1000; } public double run_benchmark() { double gflops_result = 0.0; double residn_result = 0.0; double time_result = 0.0; double eps_result = 0.0; double a[][] = new double[200][201]; double b[] = new double[200]; double x[] = new double[200]; double cray,ops,total,norma,normx; double resid,time; double kf; int n,i,ntimes,info,lda,ldaa,kflops; int ipvt[] = new int[200]; //double gflops_result; //double residn_result; //double time_result; //double eps_result; lda = 201; ldaa = 200; cray = .056; n = 200; ops = (2.0e0*(n*n*n))/3.0 + 2.0*(n*n); norma = matgen(a,lda,n,b); int repeats = 200; //warmup JIT for (int r=0; r<10; ++r) { info = dgefa(a, lda, n, ipvt); dgesl(a, lda, n, ipvt, b, 0); } //actual run Timer timer = new Timer(); //ms for (int r=0; r<repeats; ++r) { info = dgefa(a, lda, n, ipvt); dgesl(a, lda, n, ipvt, b, 0); } total = (double)timer.time()/1000.; for (i = 0; i < n; i++) { x[i] = b[i]; } norma = matgen(a,lda,n,b); for (i = 0; i < n; i++) { b[i] = -b[i]; } dmxpy(n,b,n,lda,x,a); resid = 0.0; normx = 0.0; for (i = 0; i < n; i++) { resid = (resid > abs(b[i])) ? resid : abs(b[i]); normx = (normx > abs(x[i])) ? 
normx : abs(x[i]); } eps_result = epslon(1.0); /* residn_result = resid/( n*norma*normx*eps_result ); time_result = total; gflops_result = ops/(1.0e6*total); return ("Mflops/s: " + gflops_result + " Time: " + time_result + " secs" + " Norm Res: " + residn_result + " Precision: " + eps_result); */ residn_result = resid/( n*norma*normx*eps_result ); residn_result += 0.005; // for rounding residn_result = (int)(residn_result*100); residn_result /= 100; time_result = total; time_result += 0.005; // for rounding time_result = (int)(time_result*100); time_result /= 100; gflops_result = ops/(1.0e9*total)*repeats; gflops_result += 0.0005; // for rounding gflops_result = (int)(gflops_result*1000); gflops_result /= 1000; // System.out.println("Gflops/s: " + gflops_result + // " Time: " + time_result + " secs" + // " Norm Res: " + residn_result + // " Precision: " + eps_result); return gflops_result; } final double matgen (double a[][], int lda, int n, double b[]) { double norma; int init, i, j; init = 1325; norma = 0.0; /* Next two for() statements switched. Solver wants matrix in column order. --dmd 3/3/97 */ for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { init = 3125*init % 65536; a[j][i] = (init - 32768.0)/16384.0; norma = (a[j][i] > norma) ? a[j][i] : norma; } } for (i = 0; i < n; i++) { b[i] = 0.0; } for (j = 0; j < n; j++) { for (i = 0; i < n; i++) { b[i] += a[j][i]; } } return norma; } /* dgefa factors a double precision matrix by gaussian elimination. dgefa is usually called by dgeco, but it can be called directly with a saving in time if rcond is not needed. (time for dgeco) = (1 + 9/n)*(time for dgefa) . on entry a double precision[n][lda] the matrix to be factored. lda integer the leading dimension of the array a . n integer the order of the matrix a . on return a an upper triangular matrix and the multipliers which were used to obtain it. the factorization can be written a = l*u where l is a product of permutation and unit lower triangular matrices and u is upper triangular. ipvt integer[n] an integer vector of pivot indices. info integer = 0 normal value. = k if u[k][k] .eq. 0.0 . this is not an error condition for this subroutine, but it does indicate that dgesl or dgedi will divide by zero if called. use rcond in dgeco for a reliable indication of singularity. linpack. this version dated 08/14/78. cleve moler, university of new mexico, argonne national lab. functions blas daxpy,dscal,idamax */ final int dgefa( double a[][], int lda, int n, int ipvt[]) { double[] col_k, col_j; double t; int j,k,kp1,l,nm1; int info; // gaussian elimination with partial pivoting info = 0; nm1 = n - 1; if (nm1 >= 0) { for (k = 0; k < nm1; k++) { col_k = a[k]; kp1 = k + 1; // find l = pivot index l = idamax(n-k,col_k,k,1) + k; ipvt[k] = l; // zero pivot implies this column already triangularized if (col_k[l] != 0) { // interchange if necessary if (l != k) { t = col_k[l]; col_k[l] = col_k[k]; col_k[k] = t; } // compute multipliers t = -1.0/col_k[k]; dscal(n-(kp1),t,col_k,kp1,1); // row elimination with column indexing for (j = kp1; j < n; j++) { col_j = a[j]; t = col_j[l]; if (l != k) { col_j[l] = col_j[k]; col_j[k] = t; } daxpy(n-(kp1),t,col_k,kp1,1, col_j,kp1,1); } } else { info = k; } } } ipvt[n-1] = n-1; if (a[(n-1)][(n-1)] == 0) info = n-1; return info; } /* dgesl solves the double precision system a * x = b or trans(a) * x = b using the factors computed by dgeco or dgefa. on entry a double precision[n][lda] the output from dgeco or dgefa. lda integer the leading dimension of the array a . 
n integer the order of the matrix a . ipvt integer[n] the pivot vector from dgeco or dgefa. b double precision[n] the right hand side vector. job integer = 0 to solve a*x = b , = nonzero to solve trans(a)*x = b where trans(a) is the transpose. on return b the solution vector x . error condition a division by zero will occur if the input factor contains a zero on the diagonal. technically this indicates singularity but it is often caused by improper arguments or improper setting of lda . it will not occur if the subroutines are called correctly and if dgeco has set rcond .gt. 0.0 or dgefa has set info .eq. 0 . to compute inverse(a) * c where c is a matrix with p columns dgeco(a,lda,n,ipvt,rcond,z) if (!rcond is too small){ for (j=0,j<p,j++) dgesl(a,lda,n,ipvt,c[j][0],0); } linpack. this version dated 08/14/78 . cleve moler, university of new mexico, argonne national lab. functions blas daxpy,ddot */ final void dgesl( double a[][], int lda, int n, int ipvt[], double b[], int job) { double t; int k,kb,l,nm1,kp1; nm1 = n - 1; if (job == 0) { // job = 0 , solve a * x = b. first solve l*y = b if (nm1 >= 1) { for (k = 0; k < nm1; k++) { l = ipvt[k]; t = b[l]; if (l != k){ b[l] = b[k]; b[k] = t; } kp1 = k + 1; daxpy(n-(kp1),t,a[k],kp1,1,b,kp1,1); } } // now solve u*x = y for (kb = 0; kb < n; kb++) { k = n - (kb + 1); b[k] /= a[k][k]; t = -b[k]; daxpy(k,t,a[k],0,1,b,0,1); } } else { // job = nonzero, solve trans(a) * x = b. first solve trans(u)*y = b for (k = 0; k < n; k++) { t = ddot(k,a[k],0,1,b,0,1); b[k] = (b[k] - t)/a[k][k]; } // now solve trans(l)*x = y if (nm1 >= 1) { for (kb = 1; kb < nm1; kb++) { k = n - (kb+1); kp1 = k + 1; b[k] += ddot(n-(kp1),a[k],kp1,1,b,kp1,1); l = ipvt[k]; if (l != k) { t = b[l]; b[l] = b[k]; b[k] = t; } } } } } /* constant times a vector plus a vector. jack dongarra, linpack, 3/11/78. */ final void daxpy( int n, double da, double dx[], int dx_off, int incx, double dy[], int dy_off, int incy) { int i,ix,iy; if ((n > 0) && (da != 0)) { if (incx != 1 || incy != 1) { // code for unequal increments or equal increments not equal to 1 ix = 0; iy = 0; if (incx < 0) ix = (-n+1)*incx; if (incy < 0) iy = (-n+1)*incy; for (i = 0;i < n; i++) { dy[iy +dy_off] += da*dx[ix +dx_off]; ix += incx; iy += incy; } return; } else { // code for both increments equal to 1 for (i=0; i < n; i++) dy[i +dy_off] += da*dx[i +dx_off]; } } } /* forms the dot product of two vectors. jack dongarra, linpack, 3/11/78. */ final double ddot( int n, double dx[], int dx_off, int incx, double dy[], int dy_off, int incy) { double dtemp; int i,ix,iy; dtemp = 0; if (n > 0) { if (incx != 1 || incy != 1) { // code for unequal increments or equal increments not equal to 1 ix = 0; iy = 0; if (incx < 0) ix = (-n+1)*incx; if (incy < 0) iy = (-n+1)*incy; for (i = 0;i < n; i++) { dtemp += dx[ix +dx_off]*dy[iy +dy_off]; ix += incx; iy += incy; } } else { // code for both increments equal to 1 for (i=0;i < n; i++) dtemp += dx[i +dx_off]*dy[i +dy_off]; } } return(dtemp); } /* scales a vector by a constant. jack dongarra, linpack, 3/11/78. */ final void dscal( int n, double da, double dx[], int dx_off, int incx) { int i,nincx; if (n > 0) { if (incx != 1) { // code for increment not equal to 1 nincx = n*incx; for (i = 0; i < nincx; i += incx) dx[i +dx_off] *= da; } else { // code for increment equal to 1 for (i = 0; i < n; i++) dx[i +dx_off] *= da; } } } /* finds the index of element having max. absolute value. jack dongarra, linpack, 3/11/78. 
*/ final int idamax( int n, double dx[], int dx_off, int incx) { double dmax, dtemp; int i, ix, itemp=0; if (n < 1) { itemp = -1; } else if (n ==1) { itemp = 0; } else if (incx != 1) { // code for increment not equal to 1 dmax = abs(dx[0 +dx_off]); ix = 1 + incx; for (i = 1; i < n; i++) { dtemp = abs(dx[ix + dx_off]); if (dtemp > dmax) { itemp = i; dmax = dtemp; } ix += incx; } } else { // code for increment equal to 1 itemp = 0; dmax = abs(dx[0 +dx_off]); for (i = 1; i < n; i++) { dtemp = abs(dx[i + dx_off]); if (dtemp > dmax) { itemp = i; dmax = dtemp; } } } return (itemp); } /* estimate unit roundoff in quantities of size x. this program should function properly on all systems satisfying the following two assumptions, 1. the base used in representing dfloating point numbers is not a power of three. 2. the quantity a in statement 10 is represented to the accuracy used in dfloating point variables that are stored in memory. the statement number 10 and the go to 10 are intended to force optimizing compilers to generate code satisfying assumption 2. under these assumptions, it should be true that, a is not exactly equal to four-thirds, b has a zero for its last bit or digit, c is not exactly equal to one, eps measures the separation of 1.0 from the next larger dfloating point number. the developers of eispack would appreciate being informed about any systems where these assumptions do not hold. ***************************************************************** this routine is one of the auxiliary routines used by eispack iii to avoid machine dependencies. ***************************************************************** this version dated 4/6/83. */ final double epslon (double x) { double a,b,c,eps; a = 4.0e0/3.0e0; eps = 0; while (eps == 0) { b = a - 1.0; c = b + b + b; eps = abs(c-1.0); } return(eps*abs(x)); } /* purpose: multiply matrix m times vector x and add the result to vector y. parameters: n1 integer, number of elements in vector y, and number of rows in matrix m y double [n1], vector of length n1 to which is added the product m*x n2 integer, number of elements in vector x, and number of columns in matrix m ldm integer, leading dimension of array m x double [n2], vector of length n2 m double [ldm][n2], matrix of n1 rows and n2 columns */ final void dmxpy ( int n1, double y[], int n2, int ldm, double x[], double m[][]) { int j,i; // cleanup odd vector for (j = 0; j < n2; j++) { for (i = 0; i < n1; i++) { y[i] += x[j]*m[j][i]; } } } }
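A small driver sketch for the Linpack benchmark above. It only calls the public Linpack.run entry point and restates the flop-count formula from run_benchmark as a comment; the class name is illustrative.

import water.H2ORuntime;
import water.init.Linpack;

public class LinpackDriver {
  public static void main(String[] args) {
    int threads = H2ORuntime.availableProcessors();
    // run_benchmark() factors and solves a 200x200 system, so ops = 2*n^3/3 + 2*n^2 per iteration.
    double gflops = Linpack.run(threads);   // sums the per-thread Gflops results
    System.out.println("CPU speed (" + threads + " threads): " + gflops + " Gflops");
  }
}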
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/init/MemoryBandwidth.java
package water.init; import water.H2ORuntime; import water.util.ArrayUtils; import water.util.Log; import water.util.Timer; public class MemoryBandwidth { public static void main(String[] args) { int num_threads = H2ORuntime.availableProcessors(); double membw = run(num_threads); Log.info("Memory bandwidth (" + num_threads + " cores) : " + membw + " GB/s."); } /** * Compute memory bandwidth in bytes / second */ public static double run(int num_threads) { final double membw[] = new double[num_threads]; Thread[] threads = new Thread[num_threads]; for (int t=0;t<num_threads;++t) { final int thread_num = t; threads[t] = new Thread() { public void run() { MemoryBandwidth l = new MemoryBandwidth(); membw[thread_num] = l.run_benchmark(); } }; } for (int t=0;t<num_threads;++t) { threads[t].start(); } for (int t=0;t<num_threads;++t) { try { threads[t].join(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } return ArrayUtils.sum(membw); } // memory bandwidth in bytes / second double run_benchmark() { // use the lesser of 40MB or 10% of Heap final long M = Math.min(10000000l, Runtime.getRuntime().maxMemory()/10); int[] vals = water.MemoryManager.malloc4((int)M); double total; int repeats = 20; Timer timer = new Timer(); //ms long sum = 0; // write repeats * M ints // read repeats * M ints for (int l=repeats-1; l>=0; --l) { for (int i=0; i<M; ++i) { vals[i] = i + l; } sum = 0; for (int i=0; i<M; ++i) { sum += vals[i]; } } total = (double)timer.time()/1000./repeats; //use the sum in a way that doesn't affect the result (don't want the compiler to optimize it away) double time = total + ((M*(M-1)/2) - sum); // == total return (double)2*M*4/time; //(read+write) * 4 bytes } }
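A matching driver sketch for the memory-bandwidth benchmark above; it simply aggregates the per-thread results via the public run method, as main() does. The class name is illustrative.

import water.H2ORuntime;
import water.init.MemoryBandwidth;

public class MemoryBandwidthDriver {
  public static void main(String[] args) {
    int threads = H2ORuntime.availableProcessors();
    // Each thread writes and then reads ~40 MB of ints (less on small heaps);
    // run() sums 2*M*4/time over all threads.
    double bandwidth = MemoryBandwidth.run(threads);
    System.out.println("Aggregate memory bandwidth over " + threads + " threads: " + bandwidth);
  }
}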
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/init/NetworkBench.java
package water.init; import jsr166y.CountedCompleter; import water.*; import water.H2O.H2OCountedCompleter; import water.util.Log; import water.util.TwoDimTable; import java.util.Random; /** * Created by tomasnykodym on 7/28/15. */ public class NetworkBench extends Iced { // public static int [] MSG_SZS = new int[]{1, 64, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304}; public static int [] MSG_SZS = new int[]{1,1,1,1,1}; public static int [] MSG_CNT = new int[]{500000,500000,500000,500000,500000}; // public static int [] MSG_CNT = new int[]{500000, 25000, 12500, 6000, 781, 391, 195, 98, 49, 25}; public static class NetworkBenchResults { final int _msgSz; final int _msgCnt; final long [] _mrtTimes; final long [][] _all2AllTimes; public NetworkBenchResults(int msgSz, int msgCnt, long [][] all2all, long [] mrts) { _msgSz = msgSz; _msgCnt = msgCnt; _mrtTimes = mrts; _all2AllTimes = all2all; } public TwoDimTable to2dTable(){ // public TwoDimTable(String tableHeader, String tableDescription, String[] rowHeaders, String[] colHeaders, String[] colTypes, // String[] colFormats, String colHeaderForRowHeaders) { String title = "Network Bench, sz = " + _msgSz + "B, cnt = " + _msgCnt + ", total sz = " + 0.01*((int)(100*_msgSz*_msgCnt/(1024.0*1024))) + "MB"; String [] rowHeaders = new String[H2O.CLOUD.size() + 1]; rowHeaders[H2O.CLOUD.size()] = "MrTasks"; String [] colHeaders = new String[H2O.CLOUD.size()]; String [] colTypes = new String[H2O.CLOUD.size()]; String [] colFormats = new String[H2O.CLOUD.size()]; for(int i = 0; i < H2O.CLOUD.size(); ++i) { rowHeaders[i] = colHeaders[i] = H2O.CLOUD._memary[i].toString(); colTypes[i] = "double"; colFormats[i] = "%2f"; } TwoDimTable td = new TwoDimTable(title, "Network benchmark results, round-trip bandwidth in MB/s", rowHeaders, colHeaders, colTypes, colFormats, ""); for(int i = 0 ; i < _all2AllTimes.length; ++i) { for (int j = 0; j < _all2AllTimes.length; ++j) td.set(i, j, 0.01 * ((int) (_msgSz * _msgCnt / (_all2AllTimes[i][j] * 0.00001)))); td.set(H2O.CLOUD.size(),i, 0.01 * ((int) (_msgSz * _msgCnt / (_mrtTimes[i] * 0.00001)))); } return td; } } public NetworkBenchResults [] _results; public NetworkBench doTest(){ long t1 = System.currentTimeMillis(); H2O.submitTask(new H2OCountedCompleter() { @Override public void compute2() { _results = new NetworkBenchResults[MSG_SZS.length]; for(int i = 0; i < MSG_SZS.length; ++i) { long t2 = System.currentTimeMillis(); long [] mrts = new long[H2O.CLOUD.size()]; Log.info("Network Bench, running All2All, message size = " + MSG_SZS[i] + ", message count = " + MSG_CNT[i]); long[][] all2all = new TestAll2All(MSG_SZS[i], MSG_CNT[i]).doAllNodes()._time; Log.info("All2All test done in " + ((System.currentTimeMillis()-t2)*0.001) + "s"); // for(int j = 0; j < H2O.CLOUD.size(); ++j) { // Log.info("Network Bench, running MRTask test at node " + j + ", message size = " + MSG_SZS[i] + ", message count = " + MSG_CNT[i]); // mrts[j] = RPC.call(H2O.CLOUD._memary[j], new TestMRTasks(MSG_SZS[i],MSG_CNT[i])).get()._time; // } _results[i] = new NetworkBenchResults(MSG_SZS[i],MSG_CNT[i],all2all,mrts); } tryComplete(); } }).join(); for(NetworkBenchResults r:_results) { System.out.println("===================================== MSG SZ = " + r._msgSz + ", CNT = " + r._msgCnt + " ========================================="); System.out.println(r.to2dTable()); System.out.println(); } Log.info("Newtork test done in " + ((System.currentTimeMillis()-t1)*0.001) + "s"); return this; } private static class TestAll2All extends 
MRTask<TestAll2All> { final int _msgSz; // in final int _msgCnt; // in long [][] _time; // out public TestAll2All(int msgSz, int msgCnt) { _msgSz = msgSz; _msgCnt = msgCnt; } private static class SendRandomBytesTsk extends DTask{ final byte [] dd; public SendRandomBytesTsk(int sz) { dd = new byte[sz]; new Random().nextBytes(dd); } @Override public void compute2() {tryComplete();} } @Override public void setupLocal(){ _time = new long[H2O.CLOUD.size()][]; final int myId = H2O.SELF.index(); _time[myId] = new long[H2O.CLOUD.size()]; addToPendingCount(H2O.CLOUD.size()-1); for (int i = 0; i < H2O.CLOUD.size(); ++i) { if (i != myId) { final int fi = i; H2O.submitTask(new H2OCountedCompleter(this) { long t1; @Override public void compute2() { t1 = System.currentTimeMillis(); addToPendingCount(_msgCnt - 1); for (int j = 0; j < _msgCnt; ++j) new RPC(H2O.CLOUD._memary[fi], new SendRandomBytesTsk(_msgSz)).addCompleter(this).call(); } @Override public void onCompletion(CountedCompleter cc) { long t2 = System.currentTimeMillis(); _time[myId][fi] = (t2 - t1); } }); } } } @Override public void reduce(TestAll2All tst) { for(int i = 0; i < _time.length; ++i) if(_time[i] == null) _time[i] = tst._time[i]; else assert tst._time[i] == null; } } private static class TestMRTasks extends DTask<TestMRTasks> { final int _msgSz; // in final int _msgCnt; // in public TestMRTasks(int msgSz, int msgCnt) { _msgSz = msgSz; _msgCnt = msgCnt; } long _time; // out @Override public void compute2() { Futures fs = new Futures(); _time = System.currentTimeMillis(); addToPendingCount(_msgCnt-1); final byte [] data = new byte[_msgSz]; new Random().nextBytes(data); for(int i = 0; i < _msgCnt; ++i) new MRTask(this){ byte [] dd = data; @Override public void setupLocal(){ dd = null; } }.asyncExecOnAllNodes(); } @Override public void onCompletion(CountedCompleter cc) { _time = System.currentTimeMillis() - _time; } } }
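An invocation sketch for the all-to-all benchmark above. It assumes an H2O cloud has already been formed in this JVM (doTest() fans RPCs out to every cloud member), a precondition not shown here; the driver class name is illustrative.

import water.init.NetworkBench;

public class NetworkBenchDriver {
  public static void main(String[] args) {
    // Precondition (assumed): the H2O cloud is up, e.g. this code runs inside a started H2O node.
    NetworkBench bench = new NetworkBench().doTest();
    for (NetworkBench.NetworkBenchResults r : bench._results)
      System.out.println(r.to2dTable());   // round-trip bandwidth per node pair and message size
  }
}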
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/init/NetworkInit.java
package water.init; import water.H2O; import water.H2ONode; import water.TCPReceiverThread; import water.util.Log; import water.util.NetworkUtils; import water.util.StringUtils; import water.webserver.H2OHttpViewImpl; import water.webserver.iface.H2OHttpConfig; import water.webserver.iface.HttpServerLoader; import water.webserver.iface.LoginType; import java.io.BufferedReader; import java.io.ByteArrayInputStream; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.net.*; import java.nio.ByteBuffer; import java.nio.channels.ServerSocketChannel; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; /** * Data structure for holding network info specified by the user on the command line. */ public class NetworkInit { private static ServerSocketChannel _tcpSocket; public static H2OHttpViewImpl h2oHttpView; public static InetAddress findInetAddressForSelf() throws Error { if (H2O.SELF_ADDRESS != null) return H2O.SELF_ADDRESS; else { if (H2O.ARGS.disable_web && H2O.ARGS.disable_net) { // if we don't need an address just use loopback as a filler, SELF_ADDRESS always needs to be defined return InetAddress.getLoopbackAddress(); } try { return HostnameGuesser.findInetAddressForSelf(H2O.ARGS.ip, H2O.ARGS.network); } catch (HostnameGuesser.HostnameGuessingException e) { if (e.getCause() != null) Log.err(e.getCause()); else Log.err(e.getMessage()); H2O.clusterInitializationFailed(); } } assert false; // should never be reached return null; } /** * Parse arguments and set cloud name in any case. Strip out "-name NAME" * and "-flatfile <filename>". Ignore the rest. Set multi-cast port as a hash * function of the name. Parse node ip addresses from the filename. * @todo this method introduces mutual dependency between classes {@link H2O} and {@link NetworkInit} ! Move it out! */ public static void initializeNetworkSockets( ) { // Assign initial ports H2O.API_PORT = H2O.ARGS.port == 0 ? H2O.ARGS.baseport : H2O.ARGS.port; // Late instantiation of web server, if needed. if (H2O.getWebServer() == null && !H2O.ARGS.disable_web) { final H2OHttpConfig config = webServerConfig(H2O.ARGS); h2oHttpView = new H2OHttpViewImpl(config); H2O.setWebServer(HttpServerLoader.INSTANCE.createWebServer(h2oHttpView)); } // API socket is only used to find opened port on given ip. ServerSocket apiSocket = null; // At this point we would like to allocate 2 consecutive ports - by default (if `port_offset` is not specified). // If `port_offset` is specified we are trying to allocate a pair (port, port + port_offset). // while (true) { H2O.H2O_PORT = H2O.API_PORT + H2O.ARGS.port_offset; try { // kbn. seems like we need to set SO_REUSEADDR before binding? // http://www.javadocexamples.com/java/net/java.net.ServerSocket.html#setReuseAddress:boolean // When a TCP connection is closed the connection may remain in a timeout state // for a period of time after the connection is closed (typically known as the // TIME_WAIT state or 2MSL wait state). For applications using a well known socket address // or port it may not be possible to bind a socket to the required SocketAddress // if there is a connection in the timeout state involving the socket address or port. // Enabling SO_REUSEADDR prior to binding the socket using bind(SocketAddress) // allows the socket to be bound even though a previous connection is in a timeout state. // cnc: this is busted on windows. Back to the old code. 
if (!H2O.ARGS.disable_web) { apiSocket = H2O.ARGS.web_ip == null // Listen to any interface ? new ServerSocket(H2O.API_PORT) : new ServerSocket(H2O.API_PORT, -1, getInetAddress(H2O.ARGS.web_ip)); apiSocket.setReuseAddress(true); } if (!H2O.ARGS.disable_net) { InetSocketAddress isa = new InetSocketAddress(H2O.SELF_ADDRESS, H2O.H2O_PORT); // Bind to the TCP socket also _tcpSocket = ServerSocketChannel.open(); _tcpSocket.socket().setReceiveBufferSize(water.AutoBuffer.TCP_BUF_SIZ); _tcpSocket.socket().bind(isa); } // Warning: There is a ip:port race between socket close and starting Jetty if (!H2O.ARGS.disable_web) { apiSocket.close(); H2O.getWebServer().start(H2O.ARGS.web_ip, H2O.API_PORT); } break; } catch (IOException e) { Log.trace("Cannot allocate API port " + H2O.API_PORT + " because of following exception: ", e); if( apiSocket != null ) try { apiSocket.close(); } catch( IOException ohwell ) { Log.err(ohwell); } if( _tcpSocket != null ) try { _tcpSocket.close(); } catch( IOException ie ) { } apiSocket = null; _tcpSocket = null; if( H2O.ARGS.port != 0 ) H2O.die("On " + H2O.SELF_ADDRESS + " some of the required ports " + H2O.ARGS.port + ", " + (H2O.ARGS.port+H2O.ARGS.port_offset) + " are not available, change -port PORT and try again."); } // Try next available port to bound H2O.API_PORT += (H2O.ARGS.port_offset == 1) ? 2 : 1; if (H2O.API_PORT > (1<<16)) { Log.err("Cannot find free port for " + H2O.SELF_ADDRESS + " from baseport = " + H2O.ARGS.baseport); H2O.exit(-1); } } Log.notifyAboutNetworkingInitialized(); boolean isIPv6 = H2O.SELF_ADDRESS instanceof Inet6Address; // Is IPv6 address was assigned to this node H2O.SELF = H2ONode.self(H2O.SELF_ADDRESS); if (!H2O.ARGS.disable_web) { Log.info("Internal communication uses port: ", H2O.H2O_PORT, "\n" + "Listening for HTTP and REST traffic on " + H2O.getURL(h2oHttpView.getScheme()) + "/"); } try { Log.debug("Interface MTU: ", (NetworkInterface.getByInetAddress(H2O.SELF_ADDRESS)).getMTU()); } catch (SocketException se) { Log.debug("No MTU due to SocketException. " + se.toString()); } String embeddedConfigFlatfile = null; AbstractEmbeddedH2OConfig ec = H2O.getEmbeddedH2OConfig(); if (ec != null) { ec.notifyAboutEmbeddedWebServerIpPort(H2O.SELF_ADDRESS, H2O.API_PORT); if (ec.providesFlatfile()) { try { embeddedConfigFlatfile = ec.fetchFlatfile(); } catch (Exception e) { Log.err("Failed to get embedded config flatfile"); Log.err(e); H2O.exit(1); } } } // Read a flatfile of allowed nodes if (embeddedConfigFlatfile != null) H2O.setFlatfile(parseFlatFileFromString(embeddedConfigFlatfile)); else H2O.setFlatfile(parseFlatFile(H2O.ARGS.flatfile)); // All the machines has to agree on the same multicast address (i.e., multicast group) // Hence use the cloud name to generate multicast address // Note: if necessary we should permit configuration of multicast address manually // Note: // - IPv4 Multicast IPs are in the range E1.00.00.00 to EF.FF.FF.FF // - IPv6 Multicast IPs are in the range defined in NetworkUtils int hash = H2O.ARGS.name.hashCode(); try { H2O.CLOUD_MULTICAST_GROUP = isIPv6 ? 
NetworkUtils.getIPv6MulticastGroup(hash, NetworkUtils.getIPv6Scope(H2O.SELF_ADDRESS)) : NetworkUtils.getIPv4MulticastGroup(hash); } catch (UnknownHostException e) { Log.err("Cannot get multicast group address for " + H2O.SELF_ADDRESS); Log.throwErr(e); } H2O.CLOUD_MULTICAST_PORT = NetworkUtils.getMulticastPort(hash); } public static TCPReceiverThread makeReceiverThread() { return new TCPReceiverThread(NetworkInit._tcpSocket); } public static void close() throws IOException { ServerSocketChannel tcpSocket = _tcpSocket; if (tcpSocket != null) { _tcpSocket = null; tcpSocket.close(); } } public static H2OHttpConfig webServerConfig(H2O.OptArgs args) { final H2OHttpConfig config = new H2OHttpConfig(); config.jks = args.jks; config.jks_pass = args.jks_pass; config.jks_alias = getJksAlias(args); config.loginType = parseLoginType(args); configureLoginType(config.loginType, args.login_conf); config.login_conf = args.login_conf; config.spnego_properties = args.spnego_properties; config.form_auth = args.form_auth; config.session_timeout = args.session_timeout; config.user_name = args.user_name; config.context_path = args.context_path; config.ensure_daemon_threads = args.embedded; return config; } static String getJksAlias(H2O.OptArgs args) { return getJksAlias(args, H2O.SELF_ADDRESS); } static String getJksAlias(H2O.OptArgs args, InetAddress self) { final String alias; if (args.hostname_as_jks_alias) { alias = args.ip != null ? args.ip : HostnameGuesser.localAddressToHostname(self); } else alias = args.jks_alias; if (alias != null) Log.info("HTTPS will be secured using a certificate with alias `" + alias + "`"); return alias; } /** * @param args commandline arguments to parse * @return one of login types - never returns null */ private static LoginType parseLoginType(H2O.BaseArgs args) { final LoginType loginType; if (args.hash_login) { loginType = LoginType.HASH; } else if (args.ldap_login) { loginType = LoginType.LDAP; } else if (args.kerberos_login) { loginType = LoginType.KERBEROS; } else if (args.spnego_login) { loginType = LoginType.SPNEGO; } else if (args.pam_login) { loginType = LoginType.PAM; } else { return LoginType.NONE; } return loginType; } private static void configureLoginType(LoginType loginType, String loginConf) { if (loginType == LoginType.NONE) { return; } if (loginConf == null) { throw new IllegalArgumentException("Must specify -login_conf argument"); } if (loginType.needToCheckUserName()) { // LDAP, KERBEROS, PAM Log.info(String.format("Configuring LoginService (with %s)", loginType)); System.setProperty("java.security.auth.login.config", loginConf); } else { // HASH only Log.info("Configuring HashLoginService"); } } /** * Get address for given IP. * @param ip textual representation of IP (host) * @return IPv4 or IPv6 address which matches given IP and is in specified range */ private static InetAddress getInetAddress(String ip) { if (ip == null) return null; InetAddress addr = null; try { addr = InetAddress.getByName(ip); } catch (UnknownHostException e) { Log.err(e); H2O.exit(-1); } return addr; } // Multicast send-and-close. Very similar to udp_send, except to the // multicast port (or all the individuals we can find, if multicast is // disabled). 
public static void multicast( ByteBuffer bb , byte priority) { if (H2O.ARGS.disable_net) return; try { multicast2(bb, priority); } catch (Exception ie) {} } static private void multicast2( ByteBuffer bb, byte priority ) { if( !H2O.isFlatfileEnabled() ) { byte[] buf = new byte[bb.remaining()]; bb.get(buf); synchronized( H2O.class ) { // Sync'd so single-thread socket create/destroy assert H2O.CLOUD_MULTICAST_IF != null; try { if( H2O.CLOUD_MULTICAST_SOCKET == null ) { H2O.CLOUD_MULTICAST_SOCKET = new MulticastSocket(); // Allow multicast traffic to go across subnets H2O.CLOUD_MULTICAST_SOCKET.setTimeToLive(2); H2O.CLOUD_MULTICAST_SOCKET.setNetworkInterface(H2O.CLOUD_MULTICAST_IF); } // Make and send a packet from the buffer H2O.CLOUD_MULTICAST_SOCKET.send(new DatagramPacket(buf, buf.length, H2O.CLOUD_MULTICAST_GROUP, H2O.CLOUD_MULTICAST_PORT)); } catch( Exception e ) { // On any error from anybody, close all sockets & re-open // No error on multicast fail: common occurrance for laptops coming // awake from sleep. if( H2O.CLOUD_MULTICAST_SOCKET != null ) try { H2O.CLOUD_MULTICAST_SOCKET.close(); } catch( Exception e2 ) { Log.err("Got",e2); } finally { H2O.CLOUD_MULTICAST_SOCKET = null; } } } } else { // Multicast Simulation // The multicast simulation is little bit tricky. To achieve union of all // specified nodes' flatfiles (via option -flatfile), the simulated // multicast has to send packets not only to nodes listed in the node's // flatfile (H2O.STATIC_H2OS), but also to all cloud members (they do not // need to be specified in THIS node's flatfile but can be part of cloud // due to another node's flatfile). // // Furthermore, the packet have to be send also to Paxos proposed members // to achieve correct functionality of Paxos. Typical situation is when // this node receives a Paxos heartbeat packet from a node which is not // listed in the node's flatfile -- it means that this node is listed in // another node's flatfile (and wants to create a cloud). Hence, to // allow cloud creation, this node has to reply. // // Typical example is: // node A: flatfile (B) // node B: flatfile (C), i.e., A -> (B), B-> (C), C -> (A) // node C: flatfile (A) // Cloud configuration: (A, B, C) // // Hideous O(n) algorithm for broadcast - avoid the memory allocation in // this method (since it is heavily used) Set<H2ONode> nodes = H2O.getFlatfile(); nodes.addAll(water.Paxos.PROPOSED.values()); bb.mark(); for( H2ONode h2o : nodes ) { if(h2o.isRemovedFromCloud()) { continue; } bb.reset(); h2o.sendMessage(bb, priority); } } } /** * Read a set of Nodes from a file. 
Format is: * * name/ip_address:port * - name is unused and optional * - port is optional * - leading '#' indicates a comment * * For example: * * 10.10.65.105:54322 * # disabled for testing * # 10.10.65.106 * /10.10.65.107 * # run two nodes on 108 * 10.10.65.108:54322 * 10.10.65.108:54325 */ private static HashSet<H2ONode> parseFlatFile( String fname ) { if( fname == null ) return null; File f = new File(fname); if( !f.exists() ) { Log.warn("-flatfile specified but not found: " + fname); return null; // No flat file } HashSet<H2ONode> h2os = new HashSet<>(); List<FlatFileEntry> list = parseFlatFile(f); for(FlatFileEntry entry : list) h2os.add(H2ONode.intern(entry.inet, entry.port+H2O.ARGS.port_offset));// use the UDP port here return h2os; } static HashSet<H2ONode> parseFlatFileFromString( String s ) { HashSet<H2ONode> h2os = new HashSet<>(); InputStream is = new ByteArrayInputStream(StringUtils.bytesOf(s)); List<FlatFileEntry> list = parseFlatFile(is); for(FlatFileEntry entry : list) h2os.add(H2ONode.intern(entry.inet, entry.port+H2O.ARGS.port_offset));// use the UDP port here return h2os; } static class FlatFileEntry { InetAddress inet; int port; } static List<FlatFileEntry> parseFlatFile( File f ) { InputStream is = null; try { is = new FileInputStream(f); } catch (Exception e) { H2O.die(e.toString()); } return parseFlatFile(is); } static List<FlatFileEntry> parseFlatFile( InputStream is ) { List<FlatFileEntry> list = new ArrayList<>(); BufferedReader br = null; int port = H2O.ARGS.port; try { br = new BufferedReader(new InputStreamReader(is)); String strLine = null; while( (strLine = br.readLine()) != null) { strLine = strLine.trim(); // be user friendly and skip comments and empty lines if (strLine.startsWith("#") || strLine.isEmpty()) continue; String ip = null, portStr = null; int slashIdx = strLine.indexOf('/'); int colonIdx = strLine.lastIndexOf(':'); // Get the last index in case it is IPv6 address if( slashIdx == -1 && colonIdx == -1 ) { ip = strLine; } else if( slashIdx == -1 ) { ip = strLine.substring(0, colonIdx); portStr = strLine.substring(colonIdx+1); } else if( colonIdx == -1 ) { ip = strLine.substring(slashIdx+1); } else if( slashIdx > colonIdx ) { H2O.die("Invalid format, must be [name/]ip[:port], not '"+strLine+"'"); } else { ip = strLine.substring(slashIdx+1, colonIdx); portStr = strLine.substring(colonIdx+1); } InetAddress inet = InetAddress.getByName(ip); if( portStr!=null && !portStr.equals("") ) { try { port = Integer.decode(portStr); } catch( NumberFormatException nfe ) { H2O.die("Invalid port #: "+portStr); } } FlatFileEntry entry = new FlatFileEntry(); entry.inet = inet; entry.port = port; list.add(entry); } } catch( Exception e ) { H2O.die(e.toString()); } finally { if( br != null ) try { br.close(); } catch( IOException ie ) { } } return list; } }
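A self-contained sketch that walks through the flatfile format documented above (optional name/ prefix, optional :port, '#' comments). It mirrors the splitting rules of parseFlatFile locally instead of calling the package-private helpers, so the parsing shown here is illustrative rather than the H2O code itself.

public class FlatfileFormatSketch {
  public static void main(String[] args) {
    String[] flatfile = {
        "# disabled for testing",   // comment lines and blanks are skipped
        "10.10.65.105:54322",
        "/10.10.65.107",            // the leading name/ is unused and optional
        "10.10.65.108:54325"
    };
    for (String line : flatfile) {
      line = line.trim();
      if (line.isEmpty() || line.startsWith("#")) continue;
      int slash = line.indexOf('/');
      int colon = line.lastIndexOf(':');    // last ':' so an IPv6 literal keeps its colons
      String ip = (slash == -1)
          ? (colon == -1 ? line : line.substring(0, colon))
          : line.substring(slash + 1, colon == -1 ? line.length() : colon);
      String port = (colon == -1) ? "<default>" : line.substring(colon + 1);
      System.out.println("node ip=" + ip + " port=" + port);
    }
  }
}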
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/init/NetworkTest.java
package water.init; import water.fvec.Vec; import water.*; import water.util.*; import java.util.Random; public class NetworkTest extends Iced { public int[] msg_sizes = new int[]{1, 1 << 10, 1 << 20}; //INPUT // Message sizes public int repeats = 10; //INPUT // Repeats public boolean collective = true; // Do collective test public boolean serial = true; // Do serial test public double[] microseconds_collective; //OUTPUT // Collective broadcast/reduce times in microseconds (for each message size) public double[] bandwidths_collective; //OUTPUT // Collective bandwidths in Bytes/sec (for each message size, for each node) public double[][] microseconds; //OUTPUT // Round-trip times in microseconds (for each message size, for each node) public double[][] bandwidths; //OUTPUT // Bi-directional bandwidths in Bytes/sec (for each message size, for each node) public String[] nodes; //OUTPUT // Nodes public TwoDimTable table; //OUTPUT public NetworkTest execImpl() { microseconds = new double[msg_sizes.length][]; microseconds_collective = new double[msg_sizes.length]; NetworkTester nt = new NetworkTester(msg_sizes, microseconds, microseconds_collective, repeats, serial, collective); H2O.submitTask(nt); nt.join(); // compute bandwidths from timing results bandwidths = new double[msg_sizes.length][]; for (int i = 0; i < bandwidths.length; ++i) { bandwidths[i] = new double[microseconds[i].length]; for (int j = 0; j < microseconds[i].length; ++j) { //send and receive the same message -> 2x bandwidths[i][j] = (2 * msg_sizes[i] /*Bytes*/) / (microseconds[i][j] / 1e6 /*Seconds*/); } } bandwidths_collective = new double[msg_sizes.length]; for (int i = 0; i < bandwidths_collective.length; ++i) { //broadcast and reduce the message to all nodes -> 2 x nodes bandwidths_collective[i] = (2 * H2O.CLOUD.size() * msg_sizes[i] /*Bytes*/) / (microseconds_collective[i] / 1e6 /*Seconds*/); } // populate node names nodes = new String[H2O.CLOUD.size()]; for (int i = 0; i < nodes.length; ++i) nodes[i] = H2O.CLOUD._memary[i].toString(); fillTable(); Log.info(table.toString()); return this; } // Helper class to run the actual test public static class NetworkTester extends H2O.H2OCountedCompleter { double[][] microseconds; double[] microseconds_collective; int[] msg_sizes; public int repeats = 10; boolean serial; boolean collective; public NetworkTester(int[] msg, double[][] res, double[] res_collective, int rep, boolean serial, boolean collective) { super((byte)(H2O.MIN_HI_PRIORITY-1)); microseconds = res; microseconds_collective = res_collective; msg_sizes = msg; repeats = rep; this.serial = serial; this.collective = collective; } @Override public void compute2() { // serial comm if (serial) { for (int i = 0; i < microseconds.length; ++i) { microseconds[i] = send_recv_all(msg_sizes[i], repeats); ArrayUtils.div(microseconds[i], 1e3f); //microseconds } } // collective comm if (collective) { for (int i = 0; i < microseconds_collective.length; ++i) { microseconds_collective[i] = send_recv_collective(msg_sizes[i], repeats); } ArrayUtils.div(microseconds_collective, 1e3f); //microseconds } tryComplete(); } } /** * Helper class that contains a payload and has an empty compute2(). * If it is remotely executed, it will just send the payload over the wire. 
*/ private static class PingPongTask extends DTask<PingPongTask> { private final byte[] _payload; public PingPongTask(byte[] payload) { _payload = payload; } @Override public void compute2() { tryComplete(); } } /** * Send a message from this node to all nodes in serial (including self), and receive it back * * @param msg_size message size in bytes * @return Time in nanoseconds that it took to send and receive the message (one per node) */ private static double[] send_recv_all(int msg_size, int repeats) { byte[] payload = new byte[msg_size]; new Random().nextBytes(payload); final int siz = H2O.CLOUD.size(); double[] times = new double[siz]; for (int i = 0; i < siz; ++i) { //loop over compute nodes H2ONode node = H2O.CLOUD._memary[i]; Timer t = new Timer(); for (int l = 0; l < repeats; ++l) { PingPongTask ppt = new PingPongTask(payload); //same payload for all nodes new RPC<>(node, ppt).call().get(); //blocking send } times[i] = (double) t.nanos() / repeats; } return times; } /** * Helper class that contains a payload and has an empty map/reduce. * If it is remotely executed, it will just send the payload over the wire. */ private static class CollectiveTask extends MRTask<CollectiveTask> { private final byte[] _payload; //will be sent over the wire (broadcast/reduce) public CollectiveTask(byte[] payload) { _payload = payload; } } /** * Broadcast a message from this node to all nodes and reduce it back * * @param msg_size message size in bytes * @return Time in nanoseconds that it took */ private static double send_recv_collective(int msg_size, int repeats) { byte[] payload = new byte[msg_size]; new Random().nextBytes(payload); Vec v = Vec.makeZero(1); //trivial Vec: 1 element with value 0. Timer t = new Timer(); for (int l = 0; l < repeats; ++l) { new CollectiveTask(payload).doAll(v); //same payload for all nodes } v.remove(new Futures()).blockForPending(); return (double) t.nanos() / repeats; } public void fillTable() { String tableHeader = "Network Test"; String tableDescription = "Launched from " + H2O.SELF._key; String[] rowHeaders = new String[H2O.CLOUD.size()+1]; rowHeaders[0] = "all - collective bcast/reduce"; for (int i = 0; i < H2O.CLOUD.size(); ++i) { rowHeaders[1+i] = ((H2O.SELF.equals(H2O.CLOUD._memary[i]) ? "self" : "remote") + " " + H2O.CLOUD._memary[i].toString()); } String[] colHeaders = new String[msg_sizes.length]; for (int i = 0; i < colHeaders.length; ++i) { colHeaders[i] = msg_sizes[i] + " bytes"; } String[] colTypes = new String[msg_sizes.length]; for (int i = 0; i < colTypes.length; ++i) { colTypes[i] = "string"; } String[] colFormats = new String[msg_sizes.length]; for (int i = 0; i < colTypes.length; ++i) { colFormats[i] = "%s"; } String colHeaderForRowHeaders = "Destination"; table = new TwoDimTable(tableHeader, tableDescription, rowHeaders, colHeaders, colTypes, colFormats, colHeaderForRowHeaders); for (int m = 0; m < msg_sizes.length; ++m) { table.set(0, m, PrettyPrint.usecs((long) microseconds_collective[m]) + ", " + PrettyPrint.bytesPerSecond((long) bandwidths_collective[m])); } for (int n = 0; n < H2O.CLOUD._memary.length; ++n) { for (int m = 0; m < msg_sizes.length; ++m) { table.set(1 + n, m, PrettyPrint.usecs((long) microseconds[m][n]) + ", " + PrettyPrint.bytesPerSecond((long) bandwidths[m][n])); } } } }
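A usage sketch for NetworkTest above, again assuming an already-running H2O cloud; the overridden message sizes and repeat count are arbitrary illustrative values.

import water.init.NetworkTest;

public class NetworkTestDriver {
  public static void main(String[] args) {
    // Precondition (assumed): an H2O cloud has already been formed in this JVM.
    NetworkTest nt = new NetworkTest();
    nt.msg_sizes = new int[]{1 << 10, 1 << 20};   // test 1 KB and 1 MB messages only
    nt.repeats = 5;
    nt.execImpl();                                // fills microseconds, bandwidths and table
    System.out.println(nt.table);                 // same TwoDimTable that execImpl() logs
  }
}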
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/init/NodePersistentStorage.java
package water.init;

import water.H2O;
import water.Iced;
import water.persist.Persist.PersistEntry;
import water.persist.PersistManager;
import water.util.FileUtils;
import water.util.Log;
import water.util.StringUtils;

import java.io.*;
import java.util.concurrent.atomic.AtomicLong;
import java.util.regex.Pattern;

public class NodePersistentStorage {
  final String NPS_DIR;
  final String NPS_SEPARATOR;

  public static class NodePersistentStorageEntry extends Iced {
    public String _category;
    public String _name;
    public long _size;
    public long _timestamp_millis;
  }

  public NodePersistentStorage(String npsDir) {
    if (npsDir == null) {
      NPS_DIR = null;
      NPS_SEPARATOR = null;
      return;
    }

    if (H2O.getPM().isHdfsPath(npsDir)) {
      NPS_SEPARATOR = "/";
    } else {
      NPS_SEPARATOR = File.separator;
    }

    String s = npsDir.toString();
    if (s.startsWith("file://")) {
      s = s.substring(7);
    } else if (s.startsWith("file:")) {
      s = s.substring(5);
    }
    NPS_DIR = s;
  }

  private void validateGeneral() {
    if (NPS_DIR == null) {
      throw new IllegalArgumentException("NodePersistentStorage directory not specified (try setting -flow_dir)");
    }
  }

  private void validateCategoryName(String categoryName) {
    if (categoryName == null) {
      throw new IllegalArgumentException("NodePersistentStorage category not specified");
    }
    if (! Pattern.matches("[\\-a-zA-Z0-9]+", categoryName)) {
      throw new IllegalArgumentException("NodePersistentStorage illegal category (" + categoryName + ")");
    }
  }

  private void validateKeyName(String keyName) {
    if (keyName == null) {
      throw new IllegalArgumentException("NodePersistentStorage name not specified");
    }
    if (! Pattern.matches("[\\-a-zA-Z0-9_ \\(\\)]+", keyName)) {
      throw new IllegalArgumentException("NodePersistentStorage illegal name (" + keyName + ")");
    }
  }

  public boolean configured() {
    return (NPS_DIR != null);
  }

  public boolean exists(String categoryName) {
    validateGeneral();
    validateCategoryName(categoryName);

    String dirName = NPS_DIR + NPS_SEPARATOR + categoryName;
    return H2O.getPM().exists(dirName);
  }

  public boolean exists(String categoryName, String keyName) {
    validateGeneral();
    validateCategoryName(categoryName);
    validateKeyName(keyName);

    String fileName = NPS_DIR + NPS_SEPARATOR + categoryName + NPS_SEPARATOR + keyName;
    return H2O.getPM().exists(fileName);
  }

  public void put(String categoryName, String keyName, InputStream is) {
    Log.info("NPS put content category(" + categoryName + ") keyName(" + keyName + ")");

    // Error checking
    validateGeneral();
    validateCategoryName(categoryName);
    validateKeyName(keyName);

    // Create common directories
    PersistManager pm = H2O.getPM();
    if (! pm.exists(NPS_DIR)) {
      boolean success = pm.mkdirs(NPS_DIR);
      if (! success) {
        throw new RuntimeException("Could not make NodePersistentStorage directory (" + NPS_DIR + ")");
      }
    }
    if (! pm.exists(NPS_DIR)) {
      throw new RuntimeException("NodePersistentStorage directory does not exist (" + NPS_DIR + ")");
    }

    String tmpd = NPS_DIR + NPS_SEPARATOR + "_tmp";
    if (! pm.exists(tmpd)) {
      boolean success = pm.mkdirs(tmpd);
      if (! success) {
        throw new RuntimeException("Could not make NodePersistentStorage category directory (" + tmpd + ")");
      }
    }
    if (! H2O.getPM().isGcsPath(tmpd) && ! pm.exists(tmpd)) {
      throw new RuntimeException("NodePersistentStorage category directory does not exist (" + tmpd + ")");
    }

    // Create category directory
    String d2 = NPS_DIR + NPS_SEPARATOR + categoryName;
    if (! pm.exists(d2)) {
      boolean success = pm.mkdirs(d2);
      if (! success) {
        throw new RuntimeException("Could not make NodePersistentStorage category directory (" + d2 + ")");
      }
    }
    if (! H2O.getPM().isGcsPath(tmpd) && ! pm.exists(d2)) {
      throw new RuntimeException("NodePersistentStorage category directory does not exist (" + d2 + ")");
    }

    // Create tmp file
    String tmpf = tmpd + NPS_SEPARATOR + keyName;
    OutputStream os = null;
    try {
      os = pm.create(tmpf, true);
      FileUtils.copyStream(is, os, 1024);
    } finally {
      if (os != null) {
        try {
          os.close();
        } catch (Exception e) {
          Log.err(e);
        }
      }
    }

    // Make final spot available if needed, and move tmp file to final spot.
    boolean success;
    String realf = d2 + NPS_SEPARATOR + keyName;
    if (pm.exists(realf)) {
      success = pm.delete(realf);
      if (! success) {
        throw new RuntimeException("NodePersistentStorage delete failed (" + realf + ")");
      }
    }

    success = pm.rename(tmpf, realf);
    if (! success) {
      throw new RuntimeException("NodePersistentStorage rename failed (" + tmpf + " -> " + realf + ")");
    }

    if (! pm.exists(realf)) {
      throw new RuntimeException("NodePersistentStorage file does not exist (" + realf + ")");
    }

    Log.info("Put succeeded");
  }

  public void put(String categoryName, String keyName, String value) {
    validateGeneral();
    validateCategoryName(categoryName);
    validateKeyName(keyName);

    InputStream is = new ByteArrayInputStream(StringUtils.bytesOf(value));
    put(categoryName, keyName, is);
  }

  public NodePersistentStorageEntry[] list(String categoryName) {
    validateGeneral();
    validateCategoryName(categoryName);

    String dirName = NPS_DIR + NPS_SEPARATOR + categoryName;
    PersistEntry[] arr1 = H2O.getPM().list(dirName);
    NodePersistentStorageEntry[] arr2 = new NodePersistentStorageEntry[arr1.length];
    for (int i = 0; i < arr1.length; i++) {
      arr2[i] = new NodePersistentStorageEntry();
      arr2[i]._category = categoryName;
      arr2[i]._name = arr1[i]._name;
      arr2[i]._size = arr1[i]._size;
      arr2[i]._timestamp_millis = arr1[i]._timestamp_millis;
    }
    return arr2;
  }

  public String get_as_string(String categoryName, String keyName) {
    validateGeneral();
    validateCategoryName(categoryName);
    validateKeyName(keyName);

    String fileName = NPS_DIR + NPS_SEPARATOR + categoryName + NPS_SEPARATOR + keyName;
    InputStream is = H2O.getPM().open(fileName);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    byte[] buf = new byte[4096];
    try {
      int n = is.read(buf, 0, buf.length);
      while (true) {
        if (baos.size() > (1024L * 1024L * 1024L)) {
          throw new RuntimeException("File too big (" + fileName + ")");
        }
        if (n < 0) {
          return baos.toString();
        }
        baos.write(buf, 0, n);
        n = is.read(buf, 0, buf.length);
      }
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }

  public long get_length(String categoryName, String keyName) {
    validateGeneral();
    validateCategoryName(categoryName);
    validateKeyName(keyName);

    String fileName = NPS_DIR + NPS_SEPARATOR + categoryName + NPS_SEPARATOR + keyName;
    if (! H2O.getPM().exists(fileName)) {
      throw new IllegalArgumentException("File not found (" + fileName + ")");
    }
    return H2O.getPM().length(fileName);
  }

  public InputStream get(String categoryName, String keyName, AtomicLong length) {
    validateGeneral();
    validateCategoryName(categoryName);
    validateKeyName(keyName);

    String fileName = NPS_DIR + NPS_SEPARATOR + categoryName + NPS_SEPARATOR + keyName;
    if (length != null) {
      length.set(H2O.getPM().length(fileName));
    }
    return H2O.getPM().open(fileName);
  }

  public void delete(String categoryName, String keyName) {
    validateGeneral();
    validateCategoryName(categoryName);
    validateKeyName(keyName);

    String fileName = NPS_DIR + NPS_SEPARATOR + categoryName + NPS_SEPARATOR + keyName;
    if (! H2O.getPM().exists(fileName)) {
      return;
    }
    boolean success = H2O.getPM().delete(fileName);
    if (! success) {
      throw new RuntimeException("NodePersistentStorage delete failed (" + fileName + ")");
    }
  }
}
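The class above is a thin wrapper over H2O's PersistManager. A minimal usage sketch follows, assuming an H2O runtime is already up (so H2O.getPM() resolves) and that the backing directory is writable; the directory path, category, and key names below are hypothetical, not part of the sources.

// Not part of the H2O sources; illustrative only.
class NodePersistentStorageUsageSketch {
  static void demo() {
    water.init.NodePersistentStorage nps =
        new water.init.NodePersistentStorage("/tmp/h2o-flow-dir");   // hypothetical backing dir
    if (!nps.configured()) return;                                   // no directory, nothing to do
    nps.put("notebooks", "hello-flow", "{\"cells\":[]}");            // store a small JSON string
    String json = nps.get_as_string("notebooks", "hello-flow");      // read it back
    long len = nps.get_length("notebooks", "hello-flow");            // size in bytes
    System.out.println("stored " + len + " bytes: " + json);
    nps.delete("notebooks", "hello-flow");                           // clean up
  }
}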
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/init/StandaloneKerberosComponent.java
package water.init;

import water.H2O;

import java.util.Comparator;
import java.util.List;
import java.util.ServiceLoader;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

/**
 * Interface of a component that needs to be initialized during boot of H2O running
 * in Standalone mode in a Kerberos environment
 */
public interface StandaloneKerberosComponent {

  /**
   * Name of the component
   * @return short identifier of the component
   */
  String name();

  /**
   * Initialization priority - components with higher priority will be initialized before
   * components with lower priority. Third parties can use 0-999; 1000+ is reserved for internal H2O components.
   * @return initialization priority of the component
   */
  int priority();

  /**
   * Initializes the component, called after Kerberos is initialized in Standalone mode
   *
   * @param conf instance of Hadoop Configuration object
   * @param args parsed H2O arguments
   * @return flag indicating whether the component was successfully initialized
   */
  boolean initComponent(Object conf, H2O.OptArgs args);

  static List<StandaloneKerberosComponent> loadAll() {
    ServiceLoader<StandaloneKerberosComponent> componentLoader = ServiceLoader.load(StandaloneKerberosComponent.class);
    return StreamSupport
        .stream(componentLoader.spliterator(), false)
        .sorted(Comparator.comparingInt(StandaloneKerberosComponent::priority).reversed())
        .collect(Collectors.toList());
  }
}
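Because loadAll() uses the standard ServiceLoader mechanism, a third-party component is discovered by shipping an implementation class plus a provider-configuration file named META-INF/services/water.init.StandaloneKerberosComponent that lists the implementation's fully qualified class name. A minimal sketch of such an implementation follows; the class name and priority value are hypothetical, chosen from the third-party range described in the javadoc above.

// Hypothetical third-party component; not part of the H2O sources.
class ExampleKerberosComponent implements water.init.StandaloneKerberosComponent {
  @Override public String name() { return "example-component"; }
  @Override public int priority() { return 500; }      // third-party range is 0-999 per the javadoc
  @Override public boolean initComponent(Object conf, water.H2O.OptArgs args) {
    // 'conf' arrives untyped; a real component would cast it to a Hadoop Configuration here.
    return true;                                        // report successful initialization
  }
}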
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/init/TimelineSnapshot.java
package water.init; import java.net.InetAddress; import java.util.*; import water.*; import water.util.Log; /** * Wrapper around timeline snapshot. Implements iterator interface (events are * ordered according to send/receive dependencies across the nodes and trivial time * dependencies inside node) * * @author tomas */ public final class TimelineSnapshot implements Iterable<TimelineSnapshot.Event>, Iterator<TimelineSnapshot.Event> { final long[][] _snapshot; final Event[] _events; final HashMap<Event, Event> _edges; final public HashMap<Event, ArrayList<Event>> _sends; final H2O _cloud; boolean _processed; public TimelineSnapshot(H2O cloud, long[][] snapshot) { _cloud = cloud; _snapshot = snapshot; _edges = new HashMap<Event, Event>(); _sends = new HashMap<Event, ArrayList<Event>>(); _events = new Event[snapshot.length]; // DEBUG: print out the event stack as we got it // System.out.println("# of nodes: " + _events.length); // for (int j = 0; j < TimeLine.length(); ++j) { // System.out.print("row# " + j + ":"); // for (int i = 0; i < _events.length; ++i) { // System.out.print(" || " + new Event(i, j)); // } // System.out.println(" ||"); // } for (int i = 0; i < _events.length; ++i) { // For a new Snapshot, most of initial entries are all zeros. Skip them // until we start finding entries... which will be the oldest entries. // The timeline is age-ordered (per-thread, we hope the threads are // fairly consistent) _events[i] = new Event(i, 0); if (_events[i].isEmpty()) { if (!_events[i].next()) _events[i] = null; } if (_events[i] != null) processEvent(_events[i]); assert (_events[i] == null) || (_events[i]._eventIdx < TimeLine.MAX_EVENTS); } // now build the graph (i.e. go through all the events once) for (@SuppressWarnings("unused") Event e : this) ; _processed = true; for (int i = 0; i < _events.length; ++i) { // For a new Snapshot, most of initial entries are all zeros. Skip them // until we start finding entries... which will be the oldest entries. // The timeline is age-ordered (per-thread, we hope the threads are // fairly consistent) _events[i] = new Event(i, 0); if (_events[i].isEmpty()) { if (!_events[i].next()) _events[i] = null; } assert (_events[i] == null) || (_events[i]._eventIdx < TimeLine.MAX_EVENTS); } } // convenience wrapper around event stored in snapshot // contains methods to access event data, move to the next previous event // and to test whether two events form valid sender/receiver pair // // it is also needed to keep track of send/recv dependencies when iterating // over events in timeline public class Event { public final int _nodeId; // Which node/column# in the snapshot final long[] _val; // The column from the snapshot int _eventIdx; // Which row in the snapshot // For send-packets, the column# is the cloud-wide idx of the sender, and // the packet contains the reciever. Vice-versa for received packets, // where the column# is the cloud-wide idx of the receiver, and the packet // contains the sender. H2ONode _packh2o; // The H2O in the packet boolean _blocked; public UDP.udp udpType(){ return UDP.getUdp((int)(dataLo() & 0xff)); } // First byte is UDP packet type public Event(int nodeId, int eventIdx) { _nodeId = nodeId; _eventIdx = eventIdx; _val = _snapshot[nodeId]; computeH2O(false); } @Override public final int hashCode() { return (_nodeId <<10)^_eventIdx; } @Override public final boolean equals(Object o) { Event e = (Event)o; return _nodeId==e._nodeId && _eventIdx==e._eventIdx; } // (re)compute the correct H2ONode, if the _eventIdx changes. 
private boolean computeH2O(boolean b) { H2ONode h2o = null; if( dataLo() != 0 ) { // Dead/initial packet InetAddress inet = addrPack(); if( !inet.isMulticastAddress() ) { // Is multicast? h2o = H2ONode.intern(inet,portPack()); if( isSend() && h2o == recoH2O() ) // Another multicast indicator: sending to self h2o = null; // Flag as multicast } } _packh2o = h2o; return b; // For flow-coding } public final int send_recv() { return TimeLine.send_recv(_val, _eventIdx); } public final int dropped () { return TimeLine.dropped (_val, _eventIdx); } public final boolean isSend() { return send_recv() == 0; } public final boolean isRecv() { return send_recv() == 1; } public final boolean isDropped() { return dropped() != 0; } public final InetAddress addrPack() { return TimeLine.inet(_val, _eventIdx); } public final long dataLo() { return TimeLine.l0(_val, _eventIdx); } public final long dataHi() { return TimeLine.l8(_val, _eventIdx); } public final long ns() { return TimeLine.ns(_val, _eventIdx); } public final boolean isTCP(){return (ns() & 4) != 0;} public final long ms() { return TimeLine.ms(_val, _eventIdx) + recoH2O()._heartbeat.jvmBootTimeMsec(); } public H2ONode packH2O() { return _packh2o; } // H2O in packet public H2ONode recoH2O() { return _cloud.members()[_nodeId]; } // H2O recording packet public final int portPack() { long i = dataLo(); // 1st byte is UDP type, so shift right by 8. // Next 2 bytes are timestamp, shift by another 16 // Next 2 bytes are UDP port #, so mask by 0xFFFF. return (int)((0xFFFF) & (i >> 24)); } public final String addrString() { return _packh2o==null ? "multicast" : _packh2o.toString(); } public final String ioflavor() { int flavor = is_io(); return flavor == -1 ? (isTCP()?"TCP":"UDP") : Value.nameOfPersist(flavor); } public final int is_io() { int udp_type = (int) (dataLo() & 0xff); // First byte is UDP packet type return UDP.udp.i_o.ordinal() == udp_type ? (int)((dataLo()>>24)&0xFF) : -1; } // ms doing I/O public final int ms_io() { return (int)(dataLo()>>32); } public final int size_io() { return (int)dataHi(); } public String toString() { int udp_type = (int) (dataLo() & 0xff); // First byte is UDP packet type UDP.udp udpType = UDP.getUdp(udp_type); String operation = isSend() ? " SEND " : " RECV "; String host1 = addrString(); String host2 = recoH2O().toString(); String networkPart = isSend() ? (host2 + " -> " + host1) : (host1 + " -> " + host2); return "Node(" + _nodeId + ": " + ns() + ") " + udpType.toString() + operation + networkPart + (isDropped()?" DROPPED ":"") + ", data = '" + Long.toHexString(this.dataLo()) + ',' + Long.toHexString(this.dataHi()) + "'"; } /** * Check if two events form valid sender/receiver pair. * * Two events are valid sender/receiver pair iff the ports, adresses and * payload match. 
* * @param ev * @return true iff the two events form valid sender/receiver pair */ final boolean match(Event ev) { // check we're matching send and receive if (send_recv() == ev.send_recv()) return false; // compare the packet payload matches long myl0 = dataLo(); long evl0 = ev.dataLo(); int my_udp_type = (int) (myl0 & 0xff); // first byte is udp type int ev_udp_type = (int) (evl0 & 0xff); // first byte is udp type if (my_udp_type != ev_udp_type) return false; UDP.udp e = UDP.getUdp(my_udp_type); switch (e) { case rebooted: case timeline: // compare only first 3 bytes here (udp type and port), // but port# is checked below as part of address break; case ack: case nack: case fetchack: case ackack: case exec: case heartbeat: // compare 3 ctrl bytes + 4 bytes task # // if ((myl0 & 0xFFFFFFFFFFFFFFl) != (evl0 & 0xFFFFFFFFFFFFFFl)) if( (int)(myl0>>24) != (int)(evl0>>24)) return false; break; case i_o: // Shows up as I/O-completing recorded packets return false; default: throw new RuntimeException("unexpected packet type " + e.toString()); } // Check that port numbers are compatible. Really check that the // H2ONode's are compatible. The port#'s got flipped during recording to // allow this check (and a null _packh2o is a multicast). if( _packh2o!=null && _packh2o.index()!=ev._nodeId ) return false; if( ev._packh2o!=null && ev._packh2o.index()!= _nodeId ) return false; return true; } public final boolean isEmpty() { return (_eventIdx < TimeLine.length()) ? TimeLine.isEmpty(_val, _eventIdx) : false; } public final Event clone() { return new Event(_nodeId, _eventIdx); } boolean prev(int minIdx) { int min = Math.max(minIdx, -1); if (_eventIdx <= minIdx) return false; while (--_eventIdx > min) if (!isEmpty()) return computeH2O(true); return computeH2O(false); } boolean prev() { return prev(-1); } Event previousEvent(int minIdx) { Event res = new Event(_nodeId, _eventIdx); return (res.prev(minIdx)) ? res : null; } Event previousEvent() { return previousEvent(-1); } boolean next(int maxIdx) { int max = Math.min(maxIdx, TimeLine.length()); if (_eventIdx >= max) return false; while (++_eventIdx < max) if (!isEmpty()) return computeH2O(true); return computeH2O(false); } boolean next() { return next(TimeLine.length()); } Event nextEvent(int maxIdx) { Event res = new Event(_nodeId, _eventIdx); return (res.next(maxIdx)) ? res : null; } Event nextEvent() { return nextEvent(TimeLine.length()); } /** * Used to determine ordering of events not bound by any dependency. * * Events compared according to following rules: * Receives go before sends. Since we are only here with unbound events, * unbound receives means their sender has already appeared and they * should go adjacent to their sender. 
* For two sends, pick the one with receives with smallest timestamp (ms) * otherwise pick the sender with smallest timestamp (ms) * * @param ev other Event to compare * @return */ public final int compareTo(Event ev) { if( ev == null ) return -1; if( ev == this ) return 0; if( ev.equals(this) ) return 0; int res = ev.send_recv() - send_recv(); // recvs should go before sends if( res != 0 ) return res; if (isSend()) { // compare by the time of receivers long myMinMs = Long.MAX_VALUE; long evMinMs = Long.MAX_VALUE; ArrayList<Event> myRecvs = _sends.get(this); ArrayList<Event> evRecvs = _sends.get(ev ); for (Event e : myRecvs) if (e.ms() < myMinMs) myMinMs = e.ms(); for (Event e : evRecvs) if (e.ms() < evMinMs) evMinMs = e.ms(); res = (int) (myMinMs - evMinMs); if( myMinMs == Long.MAX_VALUE && evMinMs != Long.MAX_VALUE ) res = -1; if( myMinMs != Long.MAX_VALUE && evMinMs == Long.MAX_VALUE ) res = 1; } if (res == 0) res = (int) (ms() - ev.ms()); if( res == 0 ) res = (int) (ns() - ev.ns()); return res; } } /** * Check whether two events can be put together in sender/recv relationship. * * Events must match, also each sender can have only one receiver per node. * * @param senderCnd * @param recvCnd * @return */ private boolean isSenderRecvPair(Event senderCnd, Event recvCnd) { if (senderCnd.isSend() && recvCnd.isRecv() && senderCnd.match(recvCnd)) { ArrayList<Event> recvs = _sends.get(senderCnd); if (recvs.isEmpty() || senderCnd.packH2O()==null ) { for (Event e : recvs) if (e._nodeId == recvCnd._nodeId) return false; return true; } } return false; } /** * Process new event. For sender, check if there are any blocked receives * waiting for this send. For receiver, try to find matching sender, otherwise * block. * * @param e */ void processEvent(Event e) { assert !_processed; // Event e = _events[idx]; if (e.isSend()) { _sends.put(e, new ArrayList<TimelineSnapshot.Event>()); for (Event otherE : _events) { if ((otherE != null) && (otherE != e) && (!otherE.equals(e)) && otherE._blocked && otherE.match(e)) { _edges.put(otherE, e); _sends.get(e).add(otherE); otherE._blocked = false; } } } else { // look for matching send, otherwise set _blocked assert !_edges.containsKey(e); int senderIdx = e.packH2O().index(); if (senderIdx < 0) { // binary search did not find member, should not happen? // no possible sender - return and do not block Log.warn("no sender found! port = " + e.portPack() + ", ip = " + e.addrPack().toString()); return; } Event senderCnd = _events[senderIdx]; if (senderCnd != null) { if (isSenderRecvPair(senderCnd, e)) { _edges.put(e, senderCnd.clone()); _sends.get(senderCnd).add(e); return; } senderCnd = senderCnd.clone(); while (senderCnd.prev()) { if (isSenderRecvPair(senderCnd, e)) { _edges.put(e, senderCnd); _sends.get(senderCnd).add(e); return; } } } e._blocked = true; } assert (e == null) || (e._eventIdx < TimeLine.MAX_EVENTS); } @Override public Iterator<TimelineSnapshot.Event> iterator() { return this; } /** * Just check if there is any non null non-issued event. 
*/ @Override public boolean hasNext() { for (int i = 0; i < _events.length; ++i) if (_events[i] != null && (!_events[i].isEmpty() || _events[i].next())) { assert (_events[i] == null) || ((_events[i]._eventIdx < TimeLine.MAX_EVENTS) && !_events[i].isEmpty()); return true; } else { assert (_events[i] == null) || ((_events[i]._eventIdx < TimeLine.MAX_EVENTS) && !_events[i].isEmpty()); _events[i] = null; } return false; } public Event getDependency(Event e) { return _edges.get(e); } /** * Get the next event of the timeline according to the ordering. Ordering is * performed in this method. Basically there are n ordered stream of events * with possible dependenencies caused by send/rcv relation. * * Sends are always eligible to be scheduled. Receives are eligible only if * their matching send was already issued. In situation when current events of * all streams are blocked (should not happen!) the oldest one is unblocked * and issued. * * Out of all eligible events, the smallest one (according to Event.compareTo) * is picked. */ @Override public TimelineSnapshot.Event next() { if (!hasNext()) throw new NoSuchElementException(); int selectedIdx = -1; for (int i = 0; i < _events.length; ++i) { if (_events[i] == null || _events[i]._blocked) continue; if (_events[i].isRecv()) { // check edge dependency Event send = _edges.get(_events[i]); if ((send != null) && (_events[send._nodeId] != null) && send._eventIdx >= _events[send._nodeId]._eventIdx) continue; } selectedIdx = ((selectedIdx == -1) || _events[i] .compareTo(_events[selectedIdx]) < 0) ? i : selectedIdx; } if (selectedIdx == -1) { // we did not select anything -> all event streams // must be blocked return the oldest one (assuming // corresponding send was in previous snapshot) // System.out.println("*** all blocked ***"); selectedIdx = 0; long selectedNs = (_events[selectedIdx] != null) ? _events[selectedIdx] .ns() : Long.MAX_VALUE; long selectedMs = (_events[selectedIdx] != null) ? _events[selectedIdx] .ms() : Long.MAX_VALUE; for (int i = 1; i < _events.length; ++i) { if (_events[i] == null) continue; if ((_events[i].ms() < selectedMs) && (_events[i].ns() < selectedNs)) { selectedIdx = i; selectedNs = _events[i].ns(); selectedMs = _events[i].ms(); } } } assert (selectedIdx != -1); assert (_events[selectedIdx] != null) && ((_events[selectedIdx]._eventIdx < TimeLine.MAX_EVENTS) && !_events[selectedIdx] .isEmpty()); Event res = _events[selectedIdx]; _events[selectedIdx] = _events[selectedIdx].nextEvent(); if (_events[selectedIdx] != null && !_processed) processEvent(_events[selectedIdx]); // DEBUG // if (_processed) // if (res.isRecv()) // System.out.println("# " + res + " PAIRED WITH " // + (_edges.containsKey(res) ? _edges.get(res) : "*** NONE ****")); // else // System.out.println("# " + res + " receivers: " // + _sends.get(res).toString()); return res; } @Override public void remove() { throw new UnsupportedOperationException(); } }
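A minimal sketch of consuming the ordered event stream above, assuming the per-node long[] columns were captured by the TimeLine recording machinery (not shown in this file) and that the cloud object is typically H2O.CLOUD; the class and method names below are hypothetical.

// Illustrative only; the snapshot capture itself happens elsewhere (water.TimeLine).
class TimelineDumpSketch {
  static void dump(water.H2O cloud, long[][] snapshotPerNode) {
    water.init.TimelineSnapshot ts = new water.init.TimelineSnapshot(cloud, snapshotPerNode);
    for (water.init.TimelineSnapshot.Event e : ts) {
      // For a receive, getDependency returns the matched send; for a send it is null.
      water.init.TimelineSnapshot.Event sender = ts.getDependency(e);
      System.out.println(e + (sender != null ? "   <- paired with " + sender : ""));
    }
  }
}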
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/jdbc/SQLManager.java
package water.jdbc; import water.*; import water.fvec.*; import water.parser.ParseDataset; import water.util.Log; import java.io.UnsupportedEncodingException; import java.net.URI; import java.net.URISyntaxException; import java.net.URLDecoder; import java.sql.*; import java.util.Arrays; import java.util.List; import java.util.Objects; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.atomic.AtomicLong; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.Stream; public class SQLManager { private static final String TEMP_TABLE_NAME = "table_for_h2o_import"; private static final String MAX_USR_CONNECTIONS_KEY = H2O.OptArgs.SYSTEM_PROP_PREFIX + "sql.connections.max"; private static final String JDBC_DRIVER_CLASS_KEY_PREFIX = H2O.OptArgs.SYSTEM_PROP_PREFIX + "sql.jdbc.driver."; //A target upper bound on number of connections to database private static final int MAX_CONNECTIONS = 100; //A lower bound on number of connections to database per node private static final int MIN_CONNECTIONS_PER_NODE = 1; private static final String NETEZZA_DB_TYPE = "netezza"; private static final String HIVE_DB_TYPE = "hive2"; private static final String ORACLE_DB_TYPE = "oracle"; private static final String SQL_SERVER_DB_TYPE = "sqlserver"; private static final String TERADATA_DB_TYPE = "teradata"; private static final String NETEZZA_JDBC_DRIVER_CLASS = "org.netezza.Driver"; private static final String HIVE_JDBC_DRIVER_CLASS = "org.apache.hive.jdbc.HiveDriver"; private static final String TMP_TABLE_ENABLED = H2O.OptArgs.SYSTEM_PROP_PREFIX + "sql.tmp_table.enabled"; private static final String DISALLOWED_JDBC_PARAMETERS_PARAM = H2O.OptArgs.SYSTEM_PROP_PREFIX + "sql.jdbc.disallowed.parameters"; private static final Pattern JDBC_PARAMETERS_REGEX_PATTERN = Pattern.compile("(?i)[?;&]([a-z]+)="); private static final List<String> DEFAULT_JDBC_DISALLOWED_PARAMETERS = Stream.of( "autoDeserialize", "queryInterceptors", "allowLoadLocalInfile", "allowMultiQueries", //mysql "allowLoadLocalInfileInPath", "allowUrlInLocalInfile", "allowPublicKeyRetrieval", //mysql "init", "script", "shutdown" //h2 ).map(String::toLowerCase).collect(Collectors.toList()); private static AtomicLong NEXT_TABLE_NUM = new AtomicLong(0); static Key<Frame> nextTableKey(String prefix, String postfix) { Objects.requireNonNull(prefix); Objects.requireNonNull(postfix); final long num = NEXT_TABLE_NUM.incrementAndGet(); final String s = prefix + "_" + num + "_" + postfix; final String withoutWhiteChars = s.replaceAll("\\W", "_"); return Key.make(withoutWhiteChars); } /** * @param connection_url (Input) * @param table (Input) * @param select_query (Input) * @param username (Input) * @param password (Input) * @param columns (Input) * @param fetchMode (Input) * @param numChunksHint (optional) Specifies the desired number of chunks for the target Frame */ public static Job<Frame> importSqlTable( final String connection_url, final String table, final String select_query, final String username, final String password, final String columns, final Boolean useTempTable, final String tempTableName, final SqlFetchMode fetchMode, final Integer numChunksHint) { validateJdbcUrl(connection_url); final Key<Frame> destination_key = nextTableKey(table, "sql_to_hex"); final Job<Frame> j = new Job<>(destination_key, Frame.class.getName(), "Import SQL Table"); final String databaseType = getDatabaseType(connection_url); initializeDatabaseDriver(databaseType); // fail early if driver 
is not present SQLImportDriver importDriver = new SQLImportDriver( j, destination_key, databaseType, connection_url, table, select_query, username, password, columns, useTempTable, tempTableName, fetchMode, numChunksHint ); j.start(importDriver, Job.WORK_UNKNOWN); return j; } private static class SQLImportDriver extends H2O.H2OCountedCompleter<SQLImportDriver> { final Job<Frame> _j; final Key<Frame> _destination_key; final String _database_type; final String _connection_url; final String _table; final String _select_query; final String _username; final String _password; final String _columns; final boolean _useTempTable; final String _tempTableName; final SqlFetchMode _fetch_mode; final Integer _num_chunks_hint; SQLImportDriver( Job<Frame> job, Key<Frame> destination_key, String database_type, String connection_url, String table, String select_query, String username, String password, String columns, Boolean useTempTable, String tempTableName, SqlFetchMode fetch_mode, Integer numChunksHint ) { _j = job; _destination_key = destination_key; _database_type = database_type; _connection_url = connection_url; _table = table; _select_query = select_query; _username = username; _password = password; _columns = columns; _useTempTable = shouldUseTempTable(useTempTable); _tempTableName = getTempTableName(tempTableName); _fetch_mode = fetch_mode; _num_chunks_hint = numChunksHint; } /* * if tmp table disabled, we use sub-select instead, which outperforms tmp source_table for very * large queries/tables the main drawback of sub-selects is that we lose isolation, but as we're only reading data * and counting the rows from the beginning, it should not an issue (at least when using hive...) */ private boolean shouldUseTempTable(Boolean fromParams) { if (fromParams != null) { return fromParams; } else { return Boolean.parseBoolean(System.getProperty(TMP_TABLE_ENABLED, "true")); } } private String getTempTableName(String fromParams) { if (fromParams == null || fromParams.isEmpty()) { return SQLManager.TEMP_TABLE_NAME; } else { return fromParams; } } @Override public void compute2() { _j.update(0, "Initializing import"); Connection conn = null; Statement stmt = null; ResultSet rs = null; int catcols = 0, intcols = 0, bincols = 0, realcols = 0, timecols = 0, stringcols = 0; final int numCol; long numRow = 0; String source_table = _table; final String[] columnNames; final byte[] columnH2OTypes; try { conn = getConnectionSafe(_connection_url, _username, _password); stmt = conn.createStatement(); //set fetch size for improved performance stmt.setFetchSize(1); //if _select_query has been specified instead of source_table if (source_table.equals("")) { if (!_select_query.toLowerCase().startsWith("select")) { throw new IllegalArgumentException("The select query must start with `SELECT`, but instead is: " + _select_query); } if (_useTempTable) { source_table = _tempTableName; //returns number of rows, but as an int, not long. if int max value is exceeded, result is negative _j.update(0L, "Creating a temporary table"); numRow = stmt.executeUpdate(createTempTableSql(_database_type, source_table, _select_query)); } else { source_table = "(" + _select_query + ") sub_h2o_import"; } } else if (source_table.equals(SQLManager.TEMP_TABLE_NAME)) { //tables with this name are assumed to be created here temporarily and are dropped throw new IllegalArgumentException("The specified source_table cannot be named: " + SQLManager.TEMP_TABLE_NAME); } //get number of rows. 
check for negative row count if (numRow <= 0) { _j.update(0L, "Getting number of rows"); rs = stmt.executeQuery("SELECT COUNT(1) FROM " + source_table); rs.next(); numRow = rs.getLong(1); rs.close(); } //get H2O column names and types _j.update(0L, "Getting table schema"); if (SqlFetchMode.DISTRIBUTED.equals(_fetch_mode)) { rs = stmt.executeQuery(buildSelectSingleRowSql(_database_type, source_table, _columns)); } else { // we use a simpler SQL-dialect independent query in the `streaming` mode because the goal is to be dialect independent stmt.setMaxRows(1); rs = stmt.executeQuery("SELECT " + _columns + " FROM " + source_table); } ResultSetMetaData rsmd = rs.getMetaData(); numCol = rsmd.getColumnCount(); columnNames = new String[numCol]; columnH2OTypes = new byte[numCol]; rs.next(); for (int i = 0; i < numCol; i++) { columnNames[i] = rsmd.getColumnName(i + 1); //must iterate through sql types instead of getObject bc object could be null switch (rsmd.getColumnType(i + 1)) { case Types.NUMERIC: case Types.REAL: case Types.DOUBLE: case Types.FLOAT: case Types.DECIMAL: columnH2OTypes[i] = Vec.T_NUM; realcols += 1; break; case Types.INTEGER: case Types.TINYINT: case Types.SMALLINT: case Types.BIGINT: columnH2OTypes[i] = Vec.T_NUM; intcols += 1; break; case Types.BIT: case Types.BOOLEAN: columnH2OTypes[i] = Vec.T_NUM; bincols += 1; break; case Types.VARCHAR: case Types.NVARCHAR: case Types.CHAR: case Types.NCHAR: case Types.LONGVARCHAR: case Types.LONGNVARCHAR: columnH2OTypes[i] = Vec.T_STR; stringcols += 1; break; case Types.DATE: case Types.TIME: case Types.TIMESTAMP: columnH2OTypes[i] = Vec.T_TIME; timecols += 1; break; default: Log.warn("Unsupported column type: " + rsmd.getColumnTypeName(i + 1)); columnH2OTypes[i] = Vec.T_BAD; } } } catch (SQLException ex) { Log.err(ex); throw new RuntimeException("SQLException: " + ex.getMessage() + "\nFailed to connect and read from SQL database with connection_url: " + _connection_url, ex); } finally { // release resources in a finally{} block in reverse-order of their creation if (rs != null) { try { rs.close(); } catch (SQLException sqlEx) {} // ignore rs = null; } if (stmt != null) { try { stmt.close(); } catch (SQLException sqlEx) {} // ignore stmt = null; } if (conn != null) { try { conn.close(); } catch (SQLException sqlEx) {} // ignore conn = null; } } double binary_ones_fraction = 0.5; //estimate //create template vectors in advance and run MR final long totSize = (long)((float)(catcols+intcols)*numRow*4 //4 bytes for categoricals and integers +(float)bincols *numRow*1*binary_ones_fraction //sparse uses a fraction of one byte (or even less) +(float)(realcols+timecols+stringcols) *numRow*8); //8 bytes for real and time (long) values final Vec vec; final int num_chunks; if (_num_chunks_hint == null) { final int chunk_size = FileVec.calcOptimalChunkSize(totSize, numCol, numCol * 4, H2O.ARGS.nthreads, H2O.getCloudSize(), false, true); final double rows_per_chunk = chunk_size; //why not numRow * chunk_size / totSize; it's supposed to be rows per chunk, not the byte size num_chunks = Vec.nChunksFor(numRow, (int) Math.ceil(Math.log1p(rows_per_chunk)), false); Log.info("Optimal calculated target number of chunks: " + num_chunks); } else { num_chunks = _num_chunks_hint; Log.info("Using user-specified target number of chunks: " + num_chunks); } if (SqlFetchMode.DISTRIBUTED.equals(_fetch_mode)) { final int num_retrieval_chunks = ConnectionPoolProvider.estimateConcurrentConnections(H2O.getCloudSize(), H2O.ARGS.nthreads); vec = num_retrieval_chunks >= 
num_chunks ? Vec.makeConN(numRow, num_chunks) : Vec.makeConN(numRow, num_retrieval_chunks); } else { vec = Vec.makeConN(numRow, num_chunks); } Log.info("Number of chunks for data retrieval: " + vec.nChunks() + ", number of rows: " + numRow); _j.setWork(vec.nChunks()); // Finally read the data into an H2O Frame _j.update(0L, "Importing data"); final ConnectionPoolProvider provider = new ConnectionPoolProvider(_connection_url, _username, _password, vec.nChunks()); final Frame fr; if (SqlFetchMode.DISTRIBUTED.equals(_fetch_mode)) { fr = new SqlTableToH2OFrame(source_table, _database_type, _columns, columnNames, numCol, _j, provider) .doAll(columnH2OTypes, vec) .outputFrame(_destination_key, columnNames, null); } else { fr = new SqlTableToH2OFrameStreaming(source_table, _database_type, _columns, columnNames, numCol, _j, provider) .readTable(vec, columnH2OTypes, _destination_key); } vec.remove(); DKV.put(fr); ParseDataset.logParseResults(fr); if (source_table.equals(_tempTableName)) dropTempTable(_connection_url, _username, _password, source_table); tryComplete(); } } static String createTempTableSql(String databaseType, String tableName, String selectQuery) { switch (databaseType) { case TERADATA_DB_TYPE: return "CREATE TABLE " + tableName + " AS (" + selectQuery + ") WITH DATA"; default: return "CREATE TABLE " + tableName + " AS " + selectQuery; } } /** * Builds SQL SELECT to retrieve single row from a table based on type of database * * @param databaseType * @param table * @param columns * @return String SQL SELECT statement */ static String buildSelectSingleRowSql(String databaseType, String table, String columns) { switch(databaseType) { case SQL_SERVER_DB_TYPE: //syntax supported since SQLServer 2008 return "SELECT TOP(1) " + columns + " FROM " + table; case ORACLE_DB_TYPE: return "SELECT " + columns + " FROM " + table + " FETCH NEXT 1 ROWS ONLY"; case TERADATA_DB_TYPE: return "SELECT TOP 1 " + columns + " FROM " + table; default: return "SELECT " + columns + " FROM " + table + " LIMIT 1"; } } /** * Builds SQL SELECT to retrieve chunk of rows from a table based on row offset and number of rows in a chunk. 
* * Pagination in following Databases: * SQL Server, Oracle 12c: OFFSET x ROWS FETCH NEXT y ROWS ONLY * SQL Server, Vertica may need ORDER BY * * MySQL, PostgreSQL, MariaDB: LIMIT y OFFSET x * * Teradata (and possibly older Oracle): * SELECT * FROM mytable * QUALIFY ROW_NUMBER() OVER (ORDER BY column_name) BETWEEN x and x+y; * * @param databaseType * @param table * @param start * @param length * @param columns * @param columnNames array of column names retrieved and parsed from single row SELECT prior to this call * @return String SQL SELECT statement */ static String buildSelectChunkSql(String databaseType, String table, long start, int length, String columns, String[] columnNames) { String sqlText = "SELECT " + columns + " FROM " + table; switch(databaseType) { case SQL_SERVER_DB_TYPE: // requires ORDER BY clause with OFFSET/FETCH NEXT clauses, syntax supported since SQLServer 2012 sqlText += " ORDER BY ROW_NUMBER() OVER (ORDER BY (SELECT 0))"; sqlText += " OFFSET " + start + " ROWS FETCH NEXT " + length + " ROWS ONLY"; break; case ORACLE_DB_TYPE: sqlText += " OFFSET " + start + " ROWS FETCH NEXT " + length + " ROWS ONLY"; break; case TERADATA_DB_TYPE: sqlText += " QUALIFY ROW_NUMBER() OVER (ORDER BY " + columnNames[0] + ") BETWEEN " + (start+1) + " AND " + (start+length); break; default: sqlText += " LIMIT " + length + " OFFSET " + start; } return sqlText; } static class ConnectionPoolProvider extends Iced<ConnectionPoolProvider> { private String _url; private String _user; private String _password; private int _nChunks; /** * Instantiates ConnectionPoolProvider * @param url Database URL (JDBC format) * @param user Database username * @param password Username's password * @param nChunks Number of chunks */ ConnectionPoolProvider(String url, String user, String password, int nChunks) { _url = url; _user = user; _password = password; _nChunks = nChunks; } public ConnectionPoolProvider() {} // Externalizable classes need no-args constructor /** * Creates a connection pool for given target database, based on current H2O environment * * @return A connection pool, guaranteed to contain at least 1 connection per node if the database is reachable * @throws RuntimeException Thrown when database is unreachable */ ArrayBlockingQueue<Connection> createConnectionPool() { return createConnectionPool(H2O.getCloudSize(), H2O.ARGS.nthreads); } Connection createConnection() throws SQLException { return getConnectionSafe(_url, _user, _password); } /** * Creates a connection pool for given target database, based on current H2O environment * * @param cloudSize Size of H2O cloud * @param nThreads Number of maximum threads available * @return A connection pool, guaranteed to contain at least 1 connection per node if the database is reachable * @throws RuntimeException Thrown when database is unreachable */ ArrayBlockingQueue<Connection> createConnectionPool(final int cloudSize, final short nThreads) throws RuntimeException { final int maxConnectionsPerNode = getMaxConnectionsPerNode(cloudSize, nThreads, _nChunks); Log.info("Database connections per node: " + maxConnectionsPerNode); final ArrayBlockingQueue<Connection> connectionPool = new ArrayBlockingQueue<Connection>(maxConnectionsPerNode); try { for (int i = 0; i < maxConnectionsPerNode; i++) { Connection conn = createConnection(); connectionPool.add(conn); } } catch (SQLException ex) { throw new RuntimeException("SQLException: " + ex.getMessage() + "\nFailed to connect to SQL database with url: " + _url, ex); } return connectionPool; } private static int 
getMaxConnectionsTotal() { return getMaxConnectionsTotal(MAX_CONNECTIONS); } static int getMaxConnectionsTotal(final int allowedMaxConnections) { int maxConnections = allowedMaxConnections; final String userDefinedMaxConnections = System.getProperty(MAX_USR_CONNECTIONS_KEY); if (userDefinedMaxConnections != null) { try { final int userMaxConnections = Integer.parseInt(userDefinedMaxConnections); if (userMaxConnections > 0 && userMaxConnections < allowedMaxConnections) { maxConnections = userMaxConnections; } } catch (NumberFormatException e) { Log.warn("Unable to parse maximal number of connections: " + userDefinedMaxConnections + ". Falling back to default settings (" + allowedMaxConnections + ").", e); } } Log.info("SQL import will be limited be maximum of " + maxConnections + " connections."); return maxConnections; } /** * @return Number of connections to an SQL database to be opened on a single node. */ static int getMaxConnectionsPerNode(final int cloudSize, final short nThreads, final int nChunks) { return calculateLocalConnectionCount(getMaxConnectionsTotal(), cloudSize, nThreads, nChunks); } /** * Counts number of connections per node from give maximal number of connections for the whole cluster * * @param maxTotalConnections Maximal number of total connections to be opened by the whole cluster * @return Number of connections to open per node, within given minmal and maximal range */ private static int calculateLocalConnectionCount(final int maxTotalConnections, final int cloudSize, final short nThreads, final int nChunks) { int conPerNode = (int) Math.min(Math.ceil((double) nChunks / cloudSize), nThreads); conPerNode = Math.min(conPerNode, maxTotalConnections / cloudSize); //Make sure at least some connections are available to a node return Math.max(conPerNode, MIN_CONNECTIONS_PER_NODE); } /** * for data retrieval and rebalancing, use * minimum 1 connection per node, * maximum = min(max(total threads), max(total allowed connections)) * t * @return an estimation of the optimal amount of total concurrent connections available to retrieve data */ private static int estimateConcurrentConnections(final int cloudSize, final short nThreads) { return cloudSize * Math.min(nThreads, Math.max(getMaxConnectionsTotal() / cloudSize, MIN_CONNECTIONS_PER_NODE)); } } /** * Makes sure the appropriate database driver is initialized before calling DriverManager#getConnection. 
* * @param url JDBC connection string * @param username username * @param password password * @return a connection to the URL * @throws SQLException if a database access error occurs or the url is */ public static Connection getConnectionSafe(String url, String username, String password) throws SQLException { validateJdbcUrl(url); initializeDatabaseDriver(getDatabaseType(url)); try { return DriverManager.getConnection(url, username, password); } catch (NoClassDefFoundError e) { throw new RuntimeException("Failed to get database connection, probably due to using thin jdbc driver jar.", e); } } static String getDatabaseType(String url) { if (url == null) return null; String[] parts = url.split(":", 3); if (parts.length < 2) return null; return parts[1]; } /** * Initializes database driver for databases with JDBC driver version lower than 4.0 * * @param databaseType Name of target database from JDBC connection string */ static void initializeDatabaseDriver(String databaseType) { String driverClass = System.getProperty(JDBC_DRIVER_CLASS_KEY_PREFIX + databaseType); if (driverClass != null) { Log.debug("Loading " + driverClass + " to initialize database of type " + databaseType); try { Class.forName(driverClass); } catch (ClassNotFoundException e) { throw new RuntimeException("Connection to '" + databaseType + "' database is not possible due to missing JDBC driver. " + "User specified driver class: " + driverClass, e); } return; } // use built-in defaults switch (databaseType) { case HIVE_DB_TYPE: try { Class.forName(HIVE_JDBC_DRIVER_CLASS); } catch (ClassNotFoundException e) { throw new RuntimeException("Connection to HIVE database is not possible due to missing JDBC driver.", e); } break; case NETEZZA_DB_TYPE: try { Class.forName(NETEZZA_JDBC_DRIVER_CLASS); } catch (ClassNotFoundException e) { throw new RuntimeException("Connection to Netezza database is not possible due to missing JDBC driver.", e); } break; default: //nothing to do } } public static void validateJdbcUrl(String jdbcUrl) throws IllegalArgumentException { if (jdbcUrl == null || jdbcUrl.trim().isEmpty()) { throw new IllegalArgumentException("JDBC URL is null or empty"); } if (!jdbcUrl.toLowerCase().startsWith("jdbc:")) { throw new IllegalArgumentException("JDBC URL must start with 'jdbc:'"); } Matcher matcher = JDBC_PARAMETERS_REGEX_PATTERN.matcher(jdbcUrl); String property = System.getProperty(DISALLOWED_JDBC_PARAMETERS_PARAM); List<String> disallowedParameters = property == null ? DEFAULT_JDBC_DISALLOWED_PARAMETERS : Arrays.stream(property.split(",")).map(String::toLowerCase).collect(Collectors.toList()); while (matcher.find()) { String key = matcher.group(1); if (disallowedParameters.contains(key.toLowerCase())) { throw new IllegalArgumentException("Potentially dangerous JDBC parameter found: " + key + ". 
That behavior can be altered by setting " + DISALLOWED_JDBC_PARAMETERS_PARAM + " env variable to another comma separated list."); } } } static class SqlTableToH2OFrameStreaming { final String _table, _columns, _databaseType; final int _numCol; final Job _job; final ConnectionPoolProvider _poolProvider; final String[] _columnNames; SqlTableToH2OFrameStreaming(final String table, final String databaseType, final String columns, final String[] columnNames, final int numCol, final Job job, final ConnectionPoolProvider poolProvider) { _table = table; _databaseType = databaseType; _columns = columns; _columnNames = columnNames; _numCol = numCol; _job = job; _poolProvider = poolProvider; } Frame readTable(Vec blueprint, byte[] columnTypes, Key<Frame> destinationKey) { Vec.VectorGroup vg = blueprint.group(); int vecIdStart = vg.reserveKeys(columnTypes.length); AppendableVec[] res = new AppendableVec[columnTypes.length]; long[] espc = MemoryManager.malloc8(blueprint.nChunks()); for (int i = 0; i < res.length; ++i) { res[i] = new AppendableVec(vg.vecKey(vecIdStart + i), espc, columnTypes[i], 0); } String query = "SELECT " + _columns + " FROM " + _table; ResultSet rs = null; Futures fs = new Futures(); try (Connection conn = _poolProvider.createConnection(); Statement stmt = conn.createStatement()) { final int fetchSize = (int) Math.min(blueprint.chunkLen(0), 1e5); stmt.setFetchSize(fetchSize); rs = stmt.executeQuery(query); for (int cidx = 0; cidx < blueprint.nChunks(); cidx++) { if (_job.stop_requested()) break; NewChunk[] ncs = new NewChunk[columnTypes.length]; for (int i = 0; i < columnTypes.length; i++) { ncs[i] = res[i].chunkForChunkIdx(cidx); } final int len = blueprint.chunkLen(cidx); for (int r = 0; r < len && rs.next(); r++) { SqlTableToH2OFrame.writeRow(rs, ncs); } fs.add(H2O.submitTask(new FinalizeNewChunkTask(cidx, ncs))); _job.update(1); } } catch (SQLException e) { throw new RuntimeException("SQLException: " + e.getMessage() + "\nFailed to read SQL data", e); } finally { //close result set if (rs != null) { try { rs.close(); } catch (SQLException sqlEx) { Log.trace(sqlEx); } // ignore } } fs.blockForPending(); Vec[] vecs = AppendableVec.closeAll(res); if (vecs.length > 0 && vecs[0].length() != blueprint.length()) { Log.warn("Query `" + query + "` returned less rows than expected. 
" + "Actual: " + vecs[0].length() + ", expected: " + blueprint.length()); } return new Frame(destinationKey, _columnNames, vecs); } } private static class FinalizeNewChunkTask extends H2O.H2OCountedCompleter<FinalizeNewChunkTask> { private final int _cidx; private transient NewChunk[] _ncs; FinalizeNewChunkTask(int cidx, NewChunk[] ncs) { _cidx = cidx; _ncs = ncs; } @Override public void compute2() { if (_ncs == null) throw new IllegalStateException("There are no chunks to work with!"); Futures fs = new Futures(); for (NewChunk nc : _ncs) { nc.close(_cidx, fs); } fs.blockForPending(); tryComplete(); } } static class SqlTableToH2OFrame extends MRTask<SqlTableToH2OFrame> { final String _table, _columns, _databaseType; final int _numCol; final Job _job; final ConnectionPoolProvider _poolProvider; final String[] _columnNames; transient ArrayBlockingQueue<Connection> sqlConn; public SqlTableToH2OFrame(final String table, final String databaseType, final String columns, final String[] columnNames, final int numCol, final Job job, final ConnectionPoolProvider poolProvider) { _table = table; _databaseType = databaseType; _columns = columns; _columnNames = columnNames; _numCol = numCol; _job = job; _poolProvider = poolProvider; } @Override protected void setupLocal() { sqlConn = _poolProvider.createConnectionPool(); } @Override public void map(Chunk[] cs, NewChunk[] ncs) { if (isCancelled() || _job != null && _job.stop_requested()) return; //fetch data from sql table with limit and offset Connection conn = null; Statement stmt = null; ResultSet rs = null; Chunk c0 = cs[0]; String sqlText = buildSelectChunkSql(_databaseType, _table, c0.start(), c0._len , _columns, _columnNames); try { conn = sqlConn.take(); stmt = conn.createStatement(); //set fetch size for best performance stmt.setFetchSize(c0._len); rs = stmt.executeQuery(sqlText); while (rs.next()) { writeRow(rs, ncs); } } catch (SQLException ex) { throw new RuntimeException("SQLException: " + ex.getMessage() + "\nFailed to read SQL data", ex); } catch (InterruptedException e) { throw new RuntimeException("Interrupted exception when trying to take connection from pool", e); } finally { //close result set if (rs != null) { try { rs.close(); } catch (SQLException sqlEx) { } // ignore rs = null; } //close statement if (stmt != null) { try { stmt.close(); } catch (SQLException sqlEx) { } // ignore stmt = null; } //return connection to pool sqlConn.add(conn); } if (_job != null) _job.update(1); } static void writeRow(ResultSet rs, NewChunk[] ncs) throws SQLException { for (int i = 0; i < ncs.length; i++) { Object res = rs.getObject(i + 1); writeItem(res, ncs[i]); } } static void writeItem(Object res, NewChunk nc) { if (res == null) nc.addNA(); else { if (res instanceof Long || res instanceof Integer || res instanceof Short || res instanceof Byte) nc.addNum(((Number) res).longValue(), 0); else if (res instanceof Number) nc.addNum(((Number) res).doubleValue()); else if (res instanceof Boolean) nc.addNum(((boolean) res ? 
1 : 0), 0); else if (res instanceof String) nc.addStr(res); else if (res instanceof java.util.Date) nc.addNum(((java.util.Date) res).getTime(), 0); else nc.addNA(); } } @Override protected void closeLocal() { try { for (Connection conn : sqlConn) { conn.close(); } } catch (Exception ex) { } // ignore } } private static void dropTempTable(String connection_url, String username, String password, String tableName) { Connection conn = null; Statement stmt = null; String drop_table_query = "DROP TABLE " + tableName; try { conn = DriverManager.getConnection(connection_url, username, password); stmt = conn.createStatement(); stmt.executeUpdate(drop_table_query); } catch (SQLException ex) { throw new RuntimeException("SQLException: " + ex.getMessage() + "\nFailed to execute SQL query: " + drop_table_query, ex); } finally { // release resources in a finally{} block in reverse-order of their creation if (stmt != null) { try { stmt.close(); } catch (SQLException sqlEx) { } // ignore stmt = null; } if (conn != null) { try { conn.close(); } catch (SQLException sqlEx) { } // ignore conn = null; } } } }
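A minimal sketch of calling the importSqlTable entry point defined above. The JDBC URL, credentials, and table name are placeholders; the call returns a Job whose result Frame is published under the generated destination key, and blocking on the job yields that Frame.

// Illustrative only; connection details are hypothetical.
import water.Job;
import water.fvec.Frame;

class SqlImportSketch {
  static Frame importTable() {
    Job<Frame> job = water.jdbc.SQLManager.importSqlTable(
        "jdbc:postgresql://db-host:5432/bikes",  // hypothetical connection_url
        "citibike20k",                           // table; leave empty to use select_query instead
        "",                                      // select_query (unused when table is given)
        "h2o_user", "h2o_pass",                  // username, password
        "*",                                     // columns
        null,                                    // useTempTable: fall back to the system property
        null,                                    // tempTableName: use the default
        water.jdbc.SqlFetchMode.DISTRIBUTED,     // fetch chunks from every node in parallel
        null);                                   // numChunksHint: let H2O pick
    return job.get();                            // block until the import job finishes
  }
}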
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/jdbc/SqlFetchMode.java
package water.jdbc;

public enum SqlFetchMode {
  SINGLE,
  DISTRIBUTED
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/nbhm/ConcurrentAutoTable.java
package water.nbhm; import java.io.Serializable; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import sun.misc.Unsafe; /* * Written by Cliff Click and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain */ /** * An auto-resizing table of {@code longs}, supporting low-contention CAS * operations. Updates are done with CAS's to no particular table element. * The intent is to support highly scalable counters, r/w locks, and other * structures where the updates are associative, loss-free (no-brainer), and * otherwise happen at such a high volume that the cache contention for * CAS'ing a single word is unacceptable. * * <p>This API is overkill for simple counters (e.g. no need for the 'mask') * and is untested as an API for making a scalable r/w lock and so is likely * to change! * * @since 1.5 * @author Cliff Click */ public class ConcurrentAutoTable implements Serializable { // --- public interface --- /** * Add the given value to current counter value. Concurrent updates will * not be lost, but addAndGet or getAndAdd are not implemented because the * total counter value (i.e., {@link #get}) is not atomically updated. * Updates are striped across an array of counters to avoid cache contention * and has been tested with performance scaling linearly up to 768 CPUs. */ public void add( long x ) { add_if_mask( x,0); } /** {@link #add} with -1 */ public void decrement() { add_if_mask(-1L,0); } /** {@link #add} with +1 */ public void increment() { add_if_mask( 1L,0); } /** Atomically set the sum of the striped counters to specified value. * Rather more expensive than a simple store, in order to remain atomic. */ public void set( long x ) { CAT newcat = new CAT(null,4,x); // Spin until CAS works while( !CAS_cat(_cat,newcat) ); } /** * Current value of the counter. Since other threads are updating furiously * the value is only approximate, but it includes all counts made by the * current thread. Requires a pass over the internally striped counters. */ public long get() { return _cat.sum(0); } /** Same as {@link #get}, included for completeness. */ public int intValue() { return (int)_cat.sum(0); } /** Same as {@link #get}, included for completeness. */ public long longValue() { return _cat.sum(0); } /** * A cheaper {@link #get}. Updated only once/millisecond, but as fast as a * simple load instruction when not updating. */ public long estimate_get( ) { return _cat.estimate_sum(0); } /** * Return the counter's {@code long} value converted to a string. */ public String toString() { return _cat.toString(0); } /** * A more verbose print than {@link #toString}, showing internal structure. * Useful for debugging. */ public void print() { _cat.print(); } /** * Return the internal counter striping factor. Useful for diagnosing * performance problems. */ public int internal_size() { return _cat._t.length; } // Only add 'x' to some slot in table, hinted at by 'hash', if bits under // the mask are all zero. The sum can overflow or 'x' can contain bits in // the mask. Value is CAS'd so no counts are lost. The CAS is retried until // it succeeds or bits are found under the mask. Returned value is the old // value - which WILL have zero under the mask on success and WILL NOT have // zero under the mask for failure. 
private long add_if_mask( long x, long mask ) { return _cat.add_if_mask(x,mask,hash(),this); } // The underlying array of concurrently updated long counters private volatile CAT _cat = new CAT(null,4/*Start Small, Think Big!*/,0L); private static final AtomicReferenceFieldUpdater<ConcurrentAutoTable,CAT> _catUpdater = AtomicReferenceFieldUpdater.newUpdater(ConcurrentAutoTable.class,CAT.class, "_cat"); private boolean CAS_cat( CAT oldcat, CAT newcat ) { return _catUpdater.compareAndSet(this,oldcat,newcat); } // Hash spreader private static final int hash() { int h = (int)Thread.currentThread().getId(); //int hash = (((int) (id ^ (id >>> 32))) ^ 0x811c9dc5) * 0x01000193; // //final int nbits = (((0xfffffc00 >> max) & 4) | // Compute ceil(log2(m+1)) // ((0x000001f8 >>> max) & 2) | // The constants hold // ((0xffff00f2 >>> max) & 1)); // a lookup table //int index; //while((index = hash & ((1 << nbits) - 1)) > max) {// May retry on // hash = (hash >>> nbits) | (hash << (33 - nbits)); // non-power-2 m //} // You would think that System.identityHashCode on the current thread // would be a good hash fcn, but actually on SunOS 5.8 it is pretty lousy // in the low bits. //int h = System.identityHashCode(Thread.currentThread()); //h ^= (h>>>20) ^ (h>>>12); // Bit spreader, borrowed from Doug Lea //h ^= (h>>> 7) ^ (h>>> 4); return h<<2; // Pad out cache lines. The goal is to avoid cache-line contention } // --- CAT ----------------------------------------------------------------- private static class CAT implements Serializable { // Unsafe crud: get a function which will CAS arrays private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); private static final int _Lbase = _unsafe.arrayBaseOffset(long[].class); private static final int _Lscale = _unsafe.arrayIndexScale(long[].class); private static long rawIndex(long[] ary, int i) { assert i >= 0 && i < ary.length; return _Lbase + i * _Lscale; } private final static boolean CAS( long[] A, int idx, long old, long nnn ) { return _unsafe.compareAndSwapLong( A, rawIndex(A,idx), old, nnn ); } volatile long _resizers; // count of threads attempting a resize static private final AtomicLongFieldUpdater<CAT> _resizerUpdater = AtomicLongFieldUpdater.newUpdater(CAT.class, "_resizers"); private final CAT _next; private volatile long _fuzzy_sum_cache; private volatile long _fuzzy_time; private static final int MAX_SPIN=2; private final long[] _t; // Power-of-2 array of longs CAT( CAT next, int sz, long init ) { _next = next; _t = new long[sz]; _t[0] = init; } // Only add 'x' to some slot in table, hinted at by 'hash', if bits under // the mask are all zero. The sum can overflow or 'x' can contain bits in // the mask. Value is CAS'd so no counts are lost. The CAS is attempted // ONCE. public long add_if_mask( long x, long mask, int hash, ConcurrentAutoTable master ) { long[] t = _t; int idx = hash & (t.length-1); // Peel loop; try once fast long old = t[idx]; if( (old&mask) != 0 ) return old; // Failed for bit-set under mask boolean ok = CAS( t, idx, old&~mask, old+x ); if( ok ) return old; // Got it // Try harder int cnt=0; while( true ) { old = t[idx]; if( (old&mask) != 0 ) return old; // Failed for bit-set under mask if( CAS( t, idx, old, old+x ) ) break; // Got it! 
cnt++; } if( cnt < MAX_SPIN ) return old; // Allowable spin loop count if( t.length >= 1024*1024 ) return old; // too big already // Too much contention; double array size in an effort to reduce contention long r = _resizers; int newbytes = (t.length<<1)<<3/*word to bytes*/; while( !_resizerUpdater.compareAndSet(this,r,r+newbytes) ) r = _resizers; r += newbytes; if( master._cat != this ) return old; // Already doubled, don't bother if( (r>>17) != 0 ) { // Already too much allocation attempts? // TODO - use a wait with timeout, so we'll wakeup as soon as the new // table is ready, or after the timeout in any case. Annoyingly, this // breaks the non-blocking property - so for now we just briefly sleep. //synchronized( this ) { wait(8*megs); } // Timeout - we always wakeup try { Thread.sleep(r>>17); } catch( InterruptedException e ) { } if( master._cat != this ) return old; } CAT newcat = new CAT(this,t.length*2,0); // Take 1 stab at updating the CAT with the new larger size. If this // fails, we assume some other thread already expanded the CAT - so we // do not need to retry until it succeeds. master.CAS_cat(this,newcat); return old; } // Return the current sum of all things in the table, stripping off mask // before the add. Writers can be updating the table furiously, so the // sum is only locally accurate. public long sum( long mask ) { long sum = _next == null ? 0 : _next.sum(mask); // Recursively get cached sum final long[] t = _t; for( int i=0; i<t.length; i++ ) sum += t[i]&(~mask); return sum; } // Fast fuzzy version. Used a cached value until it gets old, then re-up // the cache. public long estimate_sum( long mask ) { // For short tables, just do the work if( _t.length <= 64 ) return sum(mask); // For bigger tables, periodically freshen a cached value long millis = System.currentTimeMillis(); if( _fuzzy_time != millis ) { // Time marches on? _fuzzy_sum_cache = sum(mask); // Get sum the hard way _fuzzy_time = millis; // Indicate freshness of cached value } return _fuzzy_sum_cache; // Return cached sum } // Update all table slots with CAS. public void all_or ( long mask ) { long[] t = _t; for( int i=0; i<t.length; i++ ) { boolean done = false; while( !done ) { long old = t[i]; done = CAS(t,i, old, old|mask ); } } if( _next != null ) _next.all_or(mask); } public void all_and( long mask ) { long[] t = _t; for( int i=0; i<t.length; i++ ) { boolean done = false; while( !done ) { long old = t[i]; done = CAS(t,i, old, old&mask ); } } if( _next != null ) _next.all_and(mask); } // Set/stomp all table slots. No CAS. public void all_set( long val ) { long[] t = _t; for( int i=0; i<t.length; i++ ) t[i] = val; if( _next != null ) _next.all_set(val); } String toString( long mask ) { return Long.toString(sum(mask)); } public void print() { long[] t = _t; System.out.print("["+t[0]); for( int i=1; i<t.length; i++ ) System.out.print(","+t[i]); System.out.print("]"); if( _next != null ) _next.print(); } } }
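A minimal sketch of the striped counter in use: several threads hammer increment(), get() folds every stripe into a sum that includes the caller's own updates, and estimate_get() returns the cheaper cached value refreshed at most once per millisecond. The class name and thread/iteration counts are arbitrary.

// Illustrative only.
class CounterSketch {
  public static void main(String[] args) throws InterruptedException {
    final water.nbhm.ConcurrentAutoTable counter = new water.nbhm.ConcurrentAutoTable();
    Thread[] workers = new Thread[8];
    for (int i = 0; i < workers.length; i++) {
      workers[i] = new Thread(() -> {
        for (int j = 0; j < 1_000_000; j++) counter.increment();   // contended, CAS-striped updates
      });
      workers[i].start();
    }
    for (Thread w : workers) w.join();
    System.out.println("exact sum: " + counter.get());             // 8,000,000
    System.out.println("fuzzy sum: " + counter.estimate_get());    // cached, may lag slightly
    System.out.println("stripes:   " + counter.internal_size());   // grows under contention
  }
}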
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/nbhm/NonBlockingHashMap.java
package water.nbhm; import sun.misc.Unsafe; import java.io.IOException; import java.io.Serializable; import java.lang.reflect.Field; import java.util.*; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; /* * Written by Cliff Click and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain */ /** * A lock-free alternate implementation of {@link java.util.concurrent.ConcurrentHashMap} * with better scaling properties and generally lower costs to mutate the Map. * It provides identical correctness properties as ConcurrentHashMap. All * operations are non-blocking and multi-thread safe, including all update * operations. {@link NonBlockingHashMap} scales substantially better than * {@link java.util.concurrent.ConcurrentHashMap} for high update rates, even with a * large concurrency factor. Scaling is linear up to 768 CPUs on a 768-CPU * Azul box, even with 100% updates or 100% reads or any fraction in-between. * Linear scaling up to all cpus has been observed on a 32-way Sun US2 box, * 32-way Sun Niagara box, 8-way Intel box and a 4-way Power box. * * This class obeys the same functional specification as {@link * java.util.Hashtable}, and includes versions of methods corresponding to * each method of <tt>Hashtable</tt>. However, even though all operations are * thread-safe, operations do <em>not</em> entail locking and there is * <em>not</em> any support for locking the entire table in a way that * prevents all access. This class is fully interoperable with * <tt>Hashtable</tt> in programs that rely on its thread safety but not on * its synchronization details. * * <p> Operations (including <tt>put</tt>) generally do not block, so may * overlap with other update operations (including other <tt>puts</tt> and * <tt>removes</tt>). Retrievals reflect the results of the most recently * <em>completed</em> update operations holding upon their onset. For * aggregate operations such as <tt>putAll</tt>, concurrent retrievals may * reflect insertion or removal of only some entries. Similarly, Iterators * and Enumerations return elements reflecting the state of the hash table at * some point at or since the creation of the iterator/enumeration. They do * <em>not</em> throw {@link ConcurrentModificationException}. However, * iterators are designed to be used by only one thread at a time. * * <p> Very full tables, or tables with high re-probe rates may trigger an * internal resize operation to move into a larger table. Resizing is not * terribly expensive, but it is not free either; during resize operations * table throughput may drop somewhat. All threads that visit the table * during a resize will 'help' the resizing but will still be allowed to * complete their operation before the resize is finished (i.e., a simple * 'get' operation on a million-entry table undergoing resizing will not need * to block until the entire million entries are copied). * * <p>This class and its views and iterators implement all of the * <em>optional</em> methods of the {@link Map} and {@link Iterator} * interfaces. * * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class * does <em>not</em> allow <tt>null</tt> to be used as a key or value. 
* * * @since 1.5 * @author Cliff Click * @param <TypeK> the type of keys maintained by this map * @param <TypeV> the type of mapped values */ public class NonBlockingHashMap<TypeK, TypeV> extends AbstractMap<TypeK, TypeV> implements ConcurrentMap<TypeK, TypeV>, Cloneable, Serializable { private static final long serialVersionUID = 1234123412341234123L; private static final int REPROBE_LIMIT=10; // Too many reprobes then force a table-resize // --- Bits to allow Unsafe access to arrays private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); private static final int _Obase = _unsafe.arrayBaseOffset(Object[].class); private static final int _Oscale = _unsafe.arrayIndexScale(Object[].class); private static final int _Olog = _Oscale==4?2:(_Oscale==8?3:9999); private static long rawIndex(final Object[] ary, final int idx) { assert idx >= 0 && idx < ary.length; // Note the long-math requirement, to handle arrays of more than 2^31 bytes // - or 2^28 - or about 268M - 8-byte pointer elements. return _Obase + ((long)idx << _Olog); } // --- Setup to use Unsafe private static final long _kvs_offset; static { // <clinit> Field f = null; try { f = NonBlockingHashMap.class.getDeclaredField("_kvs"); } catch( java.lang.NoSuchFieldException e ) { throw new RuntimeException(e); } _kvs_offset = _unsafe.objectFieldOffset(f); } private final boolean CAS_kvs( final Object[] oldkvs, final Object[] newkvs ) { return _unsafe.compareAndSwapObject(this, _kvs_offset, oldkvs, newkvs ); } // --- Adding a 'prime' bit onto Values via wrapping with a junk wrapper class private static final class Prime { final Object _V; Prime( Object V ) { _V = V; } static Object unbox( Object V ) { return V instanceof Prime ? ((Prime)V)._V : V; } } // --- hash ---------------------------------------------------------------- // Helper function to spread lousy hashCodes Throws NPE for null Key, on // purpose - as the first place to conveniently toss the required NPE for a // null Key. private static final int hash(final Object key) { int h = key.hashCode(); // The real hashCode call h ^= (h>>>20) ^ (h>>>12); h ^= (h>>> 7) ^ (h>>> 4); h += h<<7; // smear low bits up high, for hashcodes that only differ by 1 return h; } // --- The Hash Table -------------------- // Slot 0 is always used for a 'CHM' entry below to hold the interesting // bits of the hash table. Slot 1 holds full hashes as an array of ints. // Slots {2,3}, {4,5}, etc hold {Key,Value} pairs. The entire hash table // can be atomically replaced by CASing the _kvs field. // // Why is CHM buried inside the _kvs Object array, instead of the other way // around? The CHM info is used during resize events and updates, but not // during standard 'get' operations. I assume 'get' is much more frequent // than 'put'. 'get' can skip the extra indirection of skipping through the // CHM to reach the _kvs array. private transient Object[] _kvs; public Object[] kvs() {return _kvs;} private static final CHM chm (Object[] kvs) { return (CHM )kvs[0]; } private static final int[] hashes(Object[] kvs) { return (int[])kvs[1]; } // Number of K,V pairs in the table private static final int len(Object[] kvs) { return (kvs.length-2)>>1; } // Time since last resize private transient long _last_resize_milli; // --- Minimum table size ---------------- // Pick size 8 K/V pairs, which turns into (8*2+2)*4+12 = 84 bytes on a // standard 32-bit HotSpot, and (8*2+2)*8+12 = 156 bytes on 64-bit Azul. 
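  // Layout sketch (informal, derived from the comments above): with the default
  // MIN_SIZE_LOG==3 the table holds 8 K/V pairs, i.e.
  //   _kvs = new Object[2 + 2*8];   // { CHM, int[] hashes, K0,V0, K1,V1, ..., K7,V7 }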
private static final int MIN_SIZE_LOG=3; // private static final int MIN_SIZE=(1<<MIN_SIZE_LOG); // Must be power of 2 // --- Sentinels ------------------------- // No-Match-Old - putIfMatch does updates only if it matches the old value, // and NO_MATCH_OLD basically counts as a wildcard match. private static final Object NO_MATCH_OLD = new Object(); // Sentinel // Match-Any-not-null - putIfMatch does updates only if it find a real old // value. private static final Object MATCH_ANY = new Object(); // Sentinel // This K/V pair has been deleted (but the Key slot is forever claimed). // The same Key can be reinserted with a new value later. public static final Object TOMBSTONE = new Object(); // Prime'd or box'd version of TOMBSTONE. This K/V pair was deleted, then a // table resize started. The K/V pair has been marked so that no new // updates can happen to the old table (and since the K/V pair was deleted // nothing was copied to the new table). private static final Prime TOMBPRIME = new Prime(TOMBSTONE); // A sentinel to indicate that this table is locked as readOnly. It is // slipped over the _newkvs field, and prevents any more keys from being // inserted, and eventually prevents any changes of any kind. public static final Object[] READONLY = new Object[0]; // --- key,val ------------------------------------------------------------- // Access K,V for a given idx // // Note that these are static, so that the caller is forced to read the _kvs // field only once, and share that read across all key/val calls - lest the // _kvs field move out from under us and back-to-back key & val calls refer // to different _kvs arrays. private static final Object key(Object[] kvs,int idx) { return kvs[(idx<<1)+2]; } private static final Object val(Object[] kvs,int idx) { return kvs[(idx<<1)+3]; } private static final boolean CAS_key( Object[] kvs, int idx, Object old, Object key ) { return _unsafe.compareAndSwapObject( kvs, rawIndex(kvs,(idx<<1)+2), old, key ); } private static final boolean CAS_val( Object[] kvs, int idx, Object old, Object val ) { return _unsafe.compareAndSwapObject( kvs, rawIndex(kvs,(idx<<1)+3), old, val ); } // --- dump ---------------------------------------------------------------- /** Verbose printout of table internals, useful for debugging. */ public final void print() { System.out.println("========="); print2(_kvs); System.out.println("========="); } // print the entire state of the table private final void print( Object[] kvs ) { for( int i=0; i<len(kvs); i++ ) { Object K = key(kvs,i); if( K != null ) { String KS = (K == TOMBSTONE) ? "XXX" : K.toString(); Object V = val(kvs,i); Object U = Prime.unbox(V); String p = (V==U) ? "" : "prime_"; String US = (U == TOMBSTONE) ? "tombstone" : U.toString(); System.out.println(""+i+" ("+KS+","+p+US+")"); } } Object[] newkvs = chm(kvs)._newkvs; // New table, if any if( newkvs != null ) { System.out.println("----"); print(newkvs); } } // print only the live values, broken down by the table they are in private final void print2( Object[] kvs) { for( int i=0; i<len(kvs); i++ ) { Object key = key(kvs,i); Object val = val(kvs,i); Object U = Prime.unbox(val); if( key != null && key != TOMBSTONE && // key is sane val != null && U != TOMBSTONE ) { // val is sane String p = (val==U) ? 
"" : "prime_"; System.out.println(""+i+" ("+key+","+p+val+")"); } } Object[] newkvs = chm(kvs)._newkvs; // New table, if any if( newkvs != null ) { System.out.println("----"); print2(newkvs); } } // Count of reprobes private transient ConcurrentAutoTable _reprobes = new ConcurrentAutoTable(); /** Get and clear the current count of reprobes. Reprobes happen on key * collisions, and a high reprobe rate may indicate a poor hash function or * weaknesses in the table resizing function. * @return the count of reprobes since the last call to {@link #reprobes} * or since the table was created. */ public long reprobes() { long r = _reprobes.get(); _reprobes = new ConcurrentAutoTable(); return r; } // --- reprobe_limit ----------------------------------------------------- // Heuristic to decide if we have reprobed toooo many times. Running over // the reprobe limit on a 'get' call acts as a 'miss'; on a 'put' call it // can trigger a table resize. Several places must have exact agreement on // what the reprobe_limit is, so we share it here. private static final int reprobe_limit( int len ) { return REPROBE_LIMIT + (len>>4); } // --- NonBlockingHashMap -------------------------------------------------- // Constructors /** Create a new NonBlockingHashMap with default minimum size (currently set * to 8 K/V pairs or roughly 84 bytes on a standard 32-bit JVM). */ public NonBlockingHashMap( ) { this(MIN_SIZE); } /** Create a new NonBlockingHashMap with initial room for the given number of * elements, thus avoiding internal resizing operations to reach an * appropriate size. Large numbers here when used with a small count of * elements will sacrifice space for a small amount of time gained. The * initial size will be rounded up internally to the next larger power of 2. */ public NonBlockingHashMap( final int initial_sz ) { initialize(initial_sz); } private final void initialize( int initial_sz ) { if( initial_sz < 0 ) throw new IllegalArgumentException(); int i; // Convert to next largest power-of-2 if( initial_sz > 1024*1024 ) initial_sz = 1024*1024; for( i=MIN_SIZE_LOG; (1<<i) < (initial_sz<<2); i++ ) ; // Double size for K,V pairs, add 1 for CHM and 1 for hashes _kvs = new Object[((1<<i)<<1)+2]; _kvs[0] = new CHM(new ConcurrentAutoTable()); // CHM in slot 0 _kvs[1] = new int[1<<i]; // Matching hash entries _last_resize_milli = System.currentTimeMillis(); } // Version for subclassed readObject calls, to be called after the defaultReadObject protected final void initialize() { initialize(MIN_SIZE); } // --- wrappers ------------------------------------------------------------ /** Returns the number of key-value mappings in this map. * @return the number of key-value mappings in this map */ @Override public int size ( ) { return chm(_kvs).size(); } /** Returns <tt>size() == 0</tt>. * @return <tt>size() == 0</tt> */ @Override public boolean isEmpty ( ) { return size() == 0; } /** Tests if the key in the table using the <tt>equals</tt> method. * @return <tt>true</tt> if the key is in the table using the <tt>equals</tt> method * @throws NullPointerException if the specified key is null */ @Override public boolean containsKey( Object key ) { return get(key) != null; } /** Legacy method testing if some key maps into the specified value in this * table. This method is identical in functionality to {@link * #containsValue}, and exists solely to ensure full compatibility with * class {@link java.util.Hashtable}, which supported this method prior to * introduction of the Java Collections framework. 
* @param val a value to search for * @return <tt>true</tt> if this map maps one or more keys to the specified value * @throws NullPointerException if the specified value is null */ public boolean contains ( Object val ) { return containsValue(val); } /** Maps the specified key to the specified value in the table. Neither key * nor value can be null. * <p> The value can be retrieved by calling {@link #get} with a key that is * equal to the original key. * @param key key with which the specified value is to be associated * @param val value to be associated with the specified key * @return the previous value associated with <tt>key</tt>, or * <tt>null</tt> if there was no mapping for <tt>key</tt> * @throws NullPointerException if the specified key or value is null */ @Override public TypeV put ( TypeK key, TypeV val ) { return putIfMatch( key, val, NO_MATCH_OLD); } /** Atomically, do a {@link #put} if-and-only-if the key is not mapped. * Useful to ensure that only a single mapping for the key exists, even if * many threads are trying to create the mapping in parallel. * @return the previous value associated with the specified key, * or <tt>null</tt> if there was no mapping for the key * @throws NullPointerException if the specified key or value is null */ public TypeV putIfAbsent( TypeK key, TypeV val ) { return putIfMatch( key, val, TOMBSTONE ); } /** Removes the key (and its corresponding value) from this map. * This method does nothing if the key is not in the map. * @return the previous value associated with <tt>key</tt>, or * <tt>null</tt> if there was no mapping for <tt>key</tt> * @throws NullPointerException if the specified key is null */ @Override public TypeV remove ( Object key ) { return putIfMatch( key,TOMBSTONE, NO_MATCH_OLD); } /** Atomically do a {@link #remove(Object)} if-and-only-if the key is mapped * to a value which is <code>equals</code> to the given value. * @throws NullPointerException if the specified key or value is null */ public boolean remove ( Object key,Object val ) { return putIfMatch( key,TOMBSTONE, val ) == val; } /** Atomically do a <code>put(key,val)</code> if-and-only-if the key is * mapped to some value already. * @throws NullPointerException if the specified key or value is null */ public TypeV replace ( TypeK key, TypeV val ) { return putIfMatch( key, val,MATCH_ANY ); } /** Atomically do a <code>put(key,newValue)</code> if-and-only-if the key is * mapped a value which is <code>equals</code> to <code>oldValue</code>. * @throws NullPointerException if the specified key or value is null */ public boolean replace ( TypeK key, TypeV oldValue, TypeV newValue ) { return putIfMatch( key, newValue, oldValue ) == oldValue; } // Atomically replace newVal for oldVal, returning the value that existed // there before, or READONLY. If the oldVal matches the returned value, // then the put inserted newVal, otherwise it failed. public final TypeV putIfMatchUnlocked( Object key, Object newVal, Object oldVal ) { if( oldVal == null ) oldVal = TOMBSTONE; if( newVal == null ) newVal = TOMBSTONE; final TypeV res = (TypeV)putIfMatch( this, _kvs, key, newVal, oldVal ); assert !(res instanceof Prime); //assert res != null; return res == TOMBSTONE ? null : res; } public final TypeV putIfMatch( Object key, Object newVal, Object oldVal ) { if (oldVal == null || newVal == null) throw new NullPointerException(); final Object res = putIfMatch( this, _kvs, key, newVal, oldVal ); assert !(res instanceof Prime); assert res != null; return res == TOMBSTONE ? 
null : (TypeV)res; } /** Copies all of the mappings from the specified map to this one, replacing * any existing mappings. * @param m mappings to be stored in this map */ @Override public void putAll(Map<? extends TypeK, ? extends TypeV> m) { for (Map.Entry<? extends TypeK, ? extends TypeV> e : m.entrySet()) put(e.getKey(), e.getValue()); } /** Removes all of the mappings from this map. */ @Override public void clear() { // Smack a new empty table down Object[] newkvs = new NonBlockingHashMap(MIN_SIZE)._kvs; while( !CAS_kvs(_kvs,newkvs) ) // Spin until the clear works ; } /** Returns <tt>true</tt> if this Map maps one or more keys to the specified * value. <em>Note</em>: This method requires a full internal traversal of the * hash table and is much slower than {@link #containsKey}. * @param val value whose presence in this map is to be tested * @return <tt>true</tt> if this map maps one or more keys to the specified value * @throws NullPointerException if the specified value is null */ @Override public boolean containsValue( final Object val ) { if( val == null ) throw new NullPointerException(); for( TypeV V : values() ) if( V == val || V.equals(val) ) return true; return false; } // This function is supposed to do something for Hashtable, and the JCK // tests hang until it gets called... by somebody ... for some reason, // any reason.... protected void rehash() { } /** * Creates a shallow copy of this hashtable. All the structure of the * hashtable itself is copied, but the keys and values are not cloned. * This is a relatively expensive operation. * * @return a clone of the hashtable. */ @Override public Object clone() { try { // Must clone, to get the class right; NBHM might have been // extended so it would be wrong to just make a new NBHM. NonBlockingHashMap<TypeK,TypeV> t = (NonBlockingHashMap<TypeK,TypeV>) super.clone(); // But I don't have an atomic clone operation - the underlying _kvs // structure is undergoing rapid change. If I just clone the _kvs // field, the CHM in _kvs[0] won't be in sync. // // Wipe out the cloned array (it was shallow anyways). t.clear(); // Now copy sanely for( TypeK K : keySet() ) { final TypeV V = get(K); // Do an official 'get' t.put(K,V); } return t; } catch (CloneNotSupportedException e) { // this shouldn't happen, since we are Cloneable throw new InternalError(); } } /** * Returns a string representation of this map. The string representation * consists of a list of key-value mappings in the order returned by the * map's <tt>entrySet</tt> view's iterator, enclosed in braces * (<tt>"{}"</tt>). Adjacent mappings are separated by the characters * <tt>", "</tt> (comma and space). Each key-value mapping is rendered as * the key followed by an equals sign (<tt>"="</tt>) followed by the * associated value. Keys and values are converted to strings as by * {@link String#valueOf(Object)}. * * @return a string representation of this map */ @Override public String toString() { Iterator<Entry<TypeK,TypeV>> i = entrySet().iterator(); if( !i.hasNext()) return "{}"; StringBuilder sb = new StringBuilder(); sb.append('{'); for (;;) { Entry<TypeK,TypeV> e = i.next(); TypeK key = e.getKey(); TypeV value = e.getValue(); sb.append(key == this ? "(this Map)" : key); sb.append('='); sb.append(value == this ? "(this Map)" : value); if( !i.hasNext()) return sb.append('}').toString(); sb.append(", "); } } // --- keyeq --------------------------------------------------------------- // Check for key equality. 
Try direct pointer compare first, then see if // the hashes are unequal (fast negative test) and finally do the full-on // 'equals' v-call. private static boolean keyeq( Object K, Object key, int[] hashes, int hash, int fullhash ) { return K==key || // Either keys match exactly OR // hash exists and matches? hash can be zero during the install of a // new key/value pair. ((hashes[hash] == 0 || hashes[hash] == fullhash) && // Do not call the users' "equals()" call with a Tombstone, as this can // surprise poorly written "equals()" calls that throw exceptions // instead of simply returning false. K != TOMBSTONE && // Do not call users' equals call with a Tombstone // Do the match the hard way - with the users' key being the loop- // invariant "this" pointer. I could have flipped the order of // operands (since equals is commutative), but I'm making mega-morphic // v-calls in a re-probing loop and nailing down the 'this' argument // gives both the JIT and the hardware a chance to prefetch the call target. key.equals(K)); // Finally do the hard match } // --- get ----------------------------------------------------------------- /** Returns the value to which the specified key is mapped, or {@code null} * if this map contains no mapping for the key. * <p>More formally, if this map contains a mapping from a key {@code k} to * a value {@code v} such that {@code key.equals(k)}, then this method * returns {@code v}; otherwise it returns {@code null}. (There can be at * most one such mapping.) * @throws NullPointerException if the specified key is null */ // Never returns a Prime nor a Tombstone. @Override public TypeV get( Object key ) { final Object V = get_impl(this,_kvs,key); assert !(V instanceof Prime); // Never return a Prime assert V != TOMBSTONE; assert V != READONLY; return (TypeV)V; } private static final Object get_impl( final NonBlockingHashMap topmap, final Object[] kvs, final Object key ) { final int fullhash= hash (key); // throws NullPointerException if key is null final int len = len (kvs); // Count of key/value pairs, reads kvs.length final CHM chm = chm (kvs); // The CHM, for a volatile read below; reads slot 0 of kvs final int[] hashes=hashes(kvs); // The memoized hashes; reads slot 1 of kvs int idx = fullhash & (len-1); // First key hash // Main spin/reprobe loop, looking for a Key hit int reprobe_cnt=0; while( true ) { // Probe table. Each read of 'val' probably misses in cache in a big // table; hopefully the read of 'key' then hits in cache. final Object K = key(kvs,idx); // Get key before volatile read, could be null final Object V = val(kvs,idx); // Get value before volatile read, could be null or Tombstone or Prime if( K == null ) return null; // A clear miss // We need a volatile-read here to preserve happens-before semantics on // newly inserted Keys. If the Key body was written just before inserting // into the table a Key-compare here might read the uninitialized Key body. // Annoyingly this means we have to volatile-read before EACH key compare. // . // We also need a volatile-read between reading a newly inserted Value // and returning the Value (so the user might end up reading the stale // Value contents). Same problem as with keys - and the one volatile // read covers both. final Object[] newkvs = chm._newkvs; // VOLATILE READ before key compare // Key-compare if( keyeq(K,key,hashes,idx,fullhash) ) { // Key hit! Check for no table-copy-in-progress if( !(V instanceof Prime) ) // No copy? return (V == TOMBSTONE) ? 
null : V; // Return the value // Key hit in locked table? Just unbox. if( newkvs == READONLY ) return Prime.unbox(V); // Key hit - but slot is (possibly partially) copied to the new table. // Finish the copy & retry in the new table. return get_impl(topmap,chm.copy_slot_and_check(topmap,kvs,idx,key),key); // Retry in the new table } // get and put must have the same key lookup logic! But only 'put' // needs to force a table-resize for a too-long key-reprobe sequence. // Check for too-many-reprobes on get - and flip to the new table. if( ++reprobe_cnt >= reprobe_limit(len) || // too many probes K == TOMBSTONE ) { // found a TOMBSTONE key, means no more keys in this table if( newkvs == READONLY ) return null; // Missed in a locked table return newkvs == null ? null : get_impl(topmap,topmap.help_copy(newkvs),key); // Retry in the new table } idx = (idx+1)&(len-1); // Reprobe by 1! (could now prefetch) } } // --- getk ----------------------------------------------------------------- /** Returns the Key to which the specified key is mapped, or {@code null} * if this map contains no mapping for the key. * @throws NullPointerException if the specified key is null */ // Never returns a Prime nor a Tombstone. public TypeK getk( TypeK key ) { return (TypeK)getk_impl(this,_kvs,key); } private static final Object getk_impl( final NonBlockingHashMap topmap, final Object[] kvs, final Object key ) { final int fullhash= hash (key); // throws NullPointerException if key is null final int len = len (kvs); // Count of key/value pairs, reads kvs.length final CHM chm = chm (kvs); // The CHM, for a volatile read below; reads slot 0 of kvs final int[] hashes=hashes(kvs); // The memoized hashes; reads slot 1 of kvs int idx = fullhash & (len-1); // First key hash // Main spin/reprobe loop, looking for a Key hit int reprobe_cnt=0; while( true ) { // Probe table. final Object K = key(kvs,idx); // Get key before volatile read, could be null if( K == null ) return null; // A clear miss // We need a volatile-read here to preserve happens-before semantics on // newly inserted Keys. If the Key body was written just before inserting // into the table a Key-compare here might read the uninitialized Key body. // Annoyingly this means we have to volatile-read before EACH key compare. // . // We also need a volatile-read between reading a newly inserted Value // and returning the Value (so the user might end up reading the stale // Value contents). Same problem as with keys - and the one volatile // read covers both. final Object[] newkvs = chm._newkvs; // VOLATILE READ before key compare // Key-compare if( keyeq(K,key,hashes,idx,fullhash) ) return K; // Return existing Key! // get and put must have the same key lookup logic! But only 'put' // needs to force a table-resize for a too-long key-reprobe sequence. // Check for too-many-reprobes on get - and flip to the new table. if( ++reprobe_cnt >= reprobe_limit(len) || // too many probes K == TOMBSTONE ) { // found a TOMBSTONE key, means no more keys in this table if( newkvs == READONLY ) return null; // Missed in a locked table return newkvs == null ? null : getk_impl(topmap,topmap.help_copy(newkvs),key); // Retry in the new table } idx = (idx+1)&(len-1); // Reprobe by 1! (could now prefetch) } } // --- putIfMatch --------------------------------------------------------- // Put, Remove, PutIfAbsent, etc. Return the old value. 
If the returned // value is equal to expVal (or expVal is NO_MATCH_OLD) then the put can be // assumed to work (although might have been immediately overwritten). Only // the path through copy_slot passes in an expected value of null, and // putIfMatch only returns a null if passed in an expected null. static volatile int DUMMY_VOLATILE; private static final Object putIfMatch( final NonBlockingHashMap topmap, final Object[] kvs, final Object key, final Object putval, final Object expVal ) { assert putval != null; assert !(putval instanceof Prime); assert !(expVal instanceof Prime); if( kvs == READONLY ) { // Update attempt in a locked table? if( expVal == NO_MATCH_OLD || expVal == MATCH_ANY ) throw new IllegalStateException("attempting to modify a locked table"); System.out.println("put denied for readonly"); return READONLY; // putIfMatch forced-miss for locked table } final int fullhash = hash (key); // throws NullPointerException if key null final int len = len (kvs); // Count of key/value pairs, reads kvs.length final CHM chm = chm (kvs); // Reads kvs[0] final int[] hashes = hashes(kvs); // Reads kvs[1], read before kvs[0] int idx = fullhash & (len-1); // --- // Key-Claim stanza: spin till we can claim a Key (or force a resizing). int reprobe_cnt=0; Object K=null, V=null; Object[] newkvs=null; while( true ) { // Spin till we get a Key slot V = val(kvs,idx); // Get old value (before volatile read below!) K = key(kvs,idx); // Get current key if( K == null ) { // Slot is free? // Found an empty Key slot - which means this Key has never been in // this table. No need to put a Tombstone - the Key is not here! if( putval == TOMBSTONE ) return putval; // Not-now & never-been in this table if( expVal == MATCH_ANY ) return null; // Will not match, even after K inserts // Claim the null key-slot if( CAS_key(kvs,idx, null, key ) ) { // Claim slot for Key chm._slots.add(1); // Raise key-slots-used count hashes[idx] = fullhash; // Memoize fullhash break; // Got it! } // CAS to claim the key-slot failed. // // This re-read of the Key points out an annoying short-coming of Java // CAS. Most hardware CAS's report back the existing value - so that // if you fail you have a *witness* - the value which caused the CAS to // fail. The Java API turns this into a boolean destroying the // witness. Re-reading does not recover the witness because another // thread can write over the memory after the CAS. Hence we can be in // the unfortunate situation of having a CAS fail *for cause* but // having that cause removed by a later store. This turns a // non-spurious-failure CAS (such as Azul has) into one that can // apparently spuriously fail - and we avoid apparent spurious failure // by not allowing Keys to ever change. // Volatile read, to force loads of K to retry despite JIT, otherwise // it is legal to e.g. haul the load of "K = key(kvs,idx);" outside of // this loop (since failed CAS ops have no memory ordering semantics). int dummy = DUMMY_VOLATILE; continue; } // Key slot was not null, there exists a Key here // We need a volatile-read here to preserve happens-before semantics on // newly inserted Keys. If the Key body was written just before inserting // into the table a Key-compare here might read the uninitialized Key body. // Annoyingly this means we have to volatile-read before EACH key compare. newkvs = chm._newkvs; // VOLATILE READ before key compare if( keyeq(K,key,hashes,idx,fullhash) ) break; // Got it! // get and put must have the same key lookup logic! Lest 'get' give // up looking too soon. 
//topmap._reprobes.add(1); if( ++reprobe_cnt >= reprobe_limit(len) || // too many probes or K == TOMBSTONE ) { // found a TOMBSTONE key, means no more keys // We simply must have a new table to do a 'put'. At this point a // 'get' will also go to the new table (if any). We do not need // to claim a key slot (indeed, we cannot find a free one to claim!). newkvs = chm.resize(topmap,kvs); if( expVal != null ) topmap.help_copy(newkvs); // help along an existing copy return putIfMatch(topmap,newkvs,key,putval,expVal); } idx = (idx+1)&(len-1); // Reprobe! } // End of spinning till we get a Key slot // --- // Found the proper Key slot, now update the matching Value slot. We // never put a null, so Value slots monotonically move from null to // not-null (deleted Values use Tombstone). Thus if 'V' is null we // fail this fast cutout and fall into the check for table-full. if( putval == V ) return V; // Fast cutout for no-change // See if we want to move to a new table (to avoid high average re-probe // counts). We only check on the initial set of a Value from null to // not-null (i.e., once per key-insert). Of course we got a 'free' check // of newkvs once per key-compare (not really free, but paid-for by the // time we get here). if( newkvs == null && // New table-copy already spotted? // Once per fresh key-insert check the hard way ((V == null && chm.tableFull(reprobe_cnt,len)) || // Or we found a Prime, but the JMM allowed reordering such that we // did not spot the new table (very rare race here: the writing // thread did a CAS of _newkvs then a store of a Prime. This thread // reads the Prime, then reads _newkvs - but the read of Prime was so // delayed (or the read of _newkvs was so accelerated) that they // swapped and we still read a null _newkvs. The resize call below // will do a CAS on _newkvs forcing the read. V instanceof Prime) ) newkvs = chm.resize(topmap,kvs); // Force the new table copy to start // See if we are moving to a new table. // If so, copy our slot and retry in the new table. if( newkvs != null ) return putIfMatch(topmap,chm.copy_slot_and_check(topmap,kvs,idx,expVal),key,putval,expVal); // --- // We are finally prepared to update the existing table assert !(V instanceof Prime); // Must match old, and we do not? Then bail out now. Note that either V // or expVal might be TOMBSTONE. Also V can be null, if we've never // inserted a value before. expVal can be null if we are called from // copy_slot. if( expVal != NO_MATCH_OLD && // Do we care about expected-Value at all? V != expVal && // No instant match already? (expVal != MATCH_ANY || V == TOMBSTONE || V == null) && !(V==null && expVal == TOMBSTONE) && // Match on null/TOMBSTONE combo (expVal == null || !expVal.equals(V)) ) // Expensive equals check at the last return V; // Do not update! // Actually change the Value in the Key,Value pair if( CAS_val(kvs, idx, V, putval ) ) { // CAS succeeded - we did the update! // Both normal put's and table-copy calls putIfMatch, but table-copy // does not (effectively) increase the number of live k/v pairs. if( expVal != null ) { // Adjust sizes - a striped counter if( (V == null || V == TOMBSTONE) && putval != TOMBSTONE ) chm._size.add( 1); if( !(V == null || V == TOMBSTONE) && putval == TOMBSTONE ) chm._size.add(-1); } } else { // Else CAS failed V = val(kvs,idx); // Get new value // If a Prime'd value got installed, we need to re-run the put on the // new table. Otherwise we lost the CAS to another racing put. // Simply retry from the start. 
if( V instanceof Prime ) return putIfMatch(topmap,chm.copy_slot_and_check(topmap,kvs,idx,expVal),key,putval,expVal); } // Win or lose the CAS, we are done. If we won then we know the update // happened as expected. If we lost, it means "we won but another thread // immediately stomped our update with no chance of a reader reading". return (V==null && expVal!=null) ? TOMBSTONE : V; } // --- help_copy --------------------------------------------------------- // Help along an existing resize operation. This is just a fast cut-out // wrapper, to encourage inlining for the fast no-copy-in-progress case. We // always help the top-most table copy, even if there are nested table // copies in progress. private final Object[] help_copy( Object[] helper ) { // Read the top-level KVS only once. We'll try to help this copy along, // even if it gets promoted out from under us (i.e., the copy completes // and another KVS becomes the top-level copy). Object[] topkvs = _kvs; CHM topchm = chm(topkvs); if( topchm._newkvs == null ) return helper; // No copy in-progress topchm.help_copy_impl(this,topkvs,false); return helper; } // --- CHM ----------------------------------------------------------------- // The control structure for the NonBlockingHashMap private static final class CHM<TypeK,TypeV> { // Size in active K,V pairs private final ConcurrentAutoTable _size; public int size () { return (int)_size.get(); } // --- // These next 2 fields are used in the resizing heuristics, to judge when // it is time to resize or copy the table. Slots is a count of used-up // key slots, and when it nears a large fraction of the table we probably // end up reprobing too much. Last-resize-milli is the time since the // last resize; if we are running back-to-back resizes without growing // (because there are only a few live keys but many slots full of dead // keys) then we need a larger table to cut down on the churn. // Count of used slots, to tell when table is full of dead unusable slots private final ConcurrentAutoTable _slots; public int slots() { return (int)_slots.get(); } // --- // New mappings, used during resizing. // The 'new KVs' array - created during a resize operation. This // represents the new table being copied from the old one. It's the // volatile variable that is read as we cross from one table to the next, // to get the required memory orderings. It monotonically transits from // null to set (once). volatile Object[] _newkvs; private final AtomicReferenceFieldUpdater<CHM,Object[]> _newkvsUpdater = AtomicReferenceFieldUpdater.newUpdater(CHM.class,Object[].class, "_newkvs"); // Set the _next field if we can. boolean CAS_newkvs( Object[] newkvs ) { while( _newkvs == null ) if( _newkvsUpdater.compareAndSet(this,null,newkvs) ) return true; return false; } // Sometimes many threads race to create a new very large table. Only 1 // wins the race, but the losers all allocate a junk large table with // hefty allocation costs. Attempt to control the overkill here by // throttling attempts to create a new table. I cannot really block here // (lest I lose the non-blocking property) but late-arriving threads can // give the initial resizing thread a little time to allocate the initial // new table. The Right Long Term Fix here is to use array-lets and // incrementally create the new very large array. In C I'd make the array // with malloc (which would mmap under the hood) which would only eat // virtual-address and not real memory - and after Somebody wins then we // could in parallel initialize the array. 
Java does not allow // un-initialized array creation (especially of ref arrays!). volatile long _resizers; // count of threads attempting an initial resize private static final AtomicLongFieldUpdater<CHM> _resizerUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_resizers"); // --- // Simple constructor CHM( ConcurrentAutoTable size ) { _size = size; _slots= new ConcurrentAutoTable(); } // --- tableFull --------------------------------------------------------- // Heuristic to decide if this table is too full, and we should start a // new table. Note that if a 'get' call has reprobed too many times and // decided the table must be full, then always the estimate_sum must be // high and we must report the table is full. If we do not, then we might // end up deciding that the table is not full and inserting into the // current table, while a 'get' has decided the same key cannot be in this // table because of too many reprobes. The invariant is: // slots.estimate_sum >= max_reprobe_cnt >= reprobe_limit(len) private final boolean tableFull( int reprobe_cnt, int len ) { return // Do the cheap check first: we allow some number of reprobes always reprobe_cnt >= REPROBE_LIMIT && (reprobe_cnt >= reprobe_limit(len) || // More expensive check: see if the table is > 1/2 full. _slots.estimate_get() >= (len>>1)); } // --- resize ------------------------------------------------------------ // Resizing after too many probes. "How Big???" heuristics are here. // Callers will (not this routine) will 'help_copy' any in-progress copy. // Since this routine has a fast cutout for copy-already-started, callers // MUST 'help_copy' lest we have a path which forever runs through // 'resize' only to discover a copy-in-progress which never progresses. private final Object[] resize( NonBlockingHashMap topmap, Object[] kvs) { assert chm(kvs) == this; // Check for resize already in progress, probably triggered by another thread Object[] newkvs = _newkvs; // VOLATILE READ if( newkvs != null ) // See if resize is already in progress return newkvs; // Use the new table already // No copy in-progress, so start one. First up: compute new table size. int oldlen = len(kvs); // Old count of K,V pairs allowed int sz = size(); // Get current table count of active K,V pairs int newsz = sz; // First size estimate // Heuristic to determine new size. We expect plenty of dead-slots-with-keys // and we need some decent padding to avoid endless reprobing. if( sz >= (oldlen>>2) ) { // If we are >25% full of keys then... newsz = oldlen<<1; // Double size, so new table will be between 12.5% and 25% full // For tables less than 1M entries, if >50% full of keys then... // For tables more than 1M entries, if >75% full of keys then... if( 4L*sz >= ((oldlen>>20)!=0?3L:2L)*oldlen ) newsz = oldlen<<2; // Double double size, so new table will be between %12.5 (18.75%) and 25% (25%) } // This heuristic in the next 2 lines leads to a much denser table // with a higher reprobe rate //if( sz >= (oldlen>>1) ) // If we are >50% full of keys then... // newsz = oldlen<<1; // Double size // Last (re)size operation was very recent? Then double again // despite having few live keys; slows down resize operations // for tables subject to a high key churn rate - but do not // forever grow the table. If there is a high key churn rate // the table needs a steady state of rare same-size resize // operations to clean out the dead keys. long tm = System.currentTimeMillis(); if( newsz <= oldlen && // New table would shrink or hold steady? 
tm <= topmap._last_resize_milli+10000) // Recent resize (less than 10 sec ago) newsz = oldlen<<1; // Double the existing size // Do not shrink, ever. If we hit this size once, assume we // will again. if( newsz < oldlen ) newsz = oldlen; // Convert to power-of-2 int log2; for( log2=MIN_SIZE_LOG; (1<<log2) < newsz; log2++ ) ; // Compute log2 of size long len = ((1L << log2) << 1) + 2; // prevent integer overflow - limit of 2^31 elements in a Java array // so here, 2^30 + 2 is the largest number of elements in the hash table if ((int)len!=len) { log2 = 30; len = (1L << log2) + 2; if (sz > ((len >> 2) + (len >> 1))) throw new RuntimeException("Table is full."); } // Now limit the number of threads actually allocating memory to a // handful - lest we have 750 threads all trying to allocate a giant // resized array. long r = _resizers; while( !_resizerUpdater.compareAndSet(this,r,r+1) ) r = _resizers; // Size calculation: 2 words (K+V) per table entry, plus a handful. We // guess at 64-bit pointers; 32-bit pointers screws up the size calc by // 2x but does not screw up the heuristic very much. long megs = ((((1L<<log2)<<1)+8)<<3/*word to bytes*/)>>20/*megs*/; if( r >= 2 && megs > 0 ) { // Already 2 guys trying; wait and see newkvs = _newkvs; // Between dorking around, another thread did it if( newkvs != null ) // See if resize is already in progress return newkvs; // Use the new table already // TODO - use a wait with timeout, so we'll wakeup as soon as the new table // is ready, or after the timeout in any case. //synchronized( this ) { wait(8*megs); } // Timeout - we always wakeup // For now, sleep a tad and see if the 2 guys already trying to make // the table actually get around to making it happen. try { Thread.sleep(megs); } catch( Exception e ) { } } // Last check, since the 'new' below is expensive and there is a chance // that another thread slipped in a new thread while we ran the heuristic. newkvs = _newkvs; if( newkvs != null ) // See if resize is already in progress return newkvs; // Use the new table already // Double size for K,V pairs, add 1 for CHM newkvs = water.MemoryManager.mallocObj((int)len); // This can get expensive for big arrays newkvs[0] = new CHM(_size); // CHM in slot 0 newkvs[1] = water.MemoryManager.malloc4(1<<log2); // hashes in slot 1 // Another check after the slow allocation if( _newkvs != null ) // See if resize is already in progress return _newkvs; // Use the new table already // The new table must be CAS'd in so only 1 winner amongst duplicate // racing resizing threads. Extra CHM's will be GC'd. if( CAS_newkvs( newkvs ) ) { // NOW a resize-is-in-progress! //notifyAll(); // Wake up any sleepers // Log.info("Resizing NBHM: "+oldlen+" -> "+(1<<log2)); topmap.rehash(); // Call for Hashtable's benefit } else // CAS failed? newkvs = _newkvs; // Reread new table return newkvs; } // The next part of the table to copy. It monotonically transits from zero // to _kvs.length. Visitors to the table can claim 'work chunks' by // CAS'ing this field up, then copying the indicated indices from the old // table to the new table. Workers are not required to finish any chunk; // the counter simply wraps and work is copied duplicately until somebody // somewhere completes the count. volatile long _copyIdx = 0; static private final AtomicLongFieldUpdater<CHM> _copyIdxUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyIdx"); // Work-done reporting. Used to efficiently signal when we can move to // the new table. 
From 0 to len(oldkvs) refers to copying from the old // table to the new. volatile long _copyDone= 0; static private final AtomicLongFieldUpdater<CHM> _copyDoneUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyDone"); // --- help_copy_impl ---------------------------------------------------- // Help along an existing resize operation. We hope its the top-level // copy (it was when we started) but this CHM might have been promoted out // of the top position. private final void help_copy_impl( NonBlockingHashMap topmap, Object[] oldkvs, boolean copy_all ) { assert chm(oldkvs) == this; Object[] newkvs = _newkvs; assert newkvs != null; // Already checked by caller int oldlen = len(oldkvs); // Total amount to copy final int MIN_COPY_WORK = Math.min(oldlen,1024); // Limit per-thread work // --- int panic_start = -1; int copyidx=-9999; // Fool javac to think it's initialized while( _copyDone < oldlen ) { // Still needing to copy? // Carve out a chunk of work. The counter wraps around so every // thread eventually tries to copy every slot repeatedly. // We "panic" if we have tried TWICE to copy every slot - and it still // has not happened. i.e., twice some thread somewhere claimed they // would copy 'slot X' (by bumping _copyIdx) but they never claimed to // have finished (by bumping _copyDone). Our choices become limited: // we can wait for the work-claimers to finish (and become a blocking // algorithm) or do the copy work ourselves. Tiny tables with huge // thread counts trying to copy the table often 'panic'. if( panic_start == -1 ) { // No panic? copyidx = (int)_copyIdx; while( !_copyIdxUpdater.compareAndSet(this,copyidx,copyidx+MIN_COPY_WORK) ) copyidx = (int)_copyIdx; // Re-read if( !(copyidx < (oldlen<<1)) ) // Panic! panic_start = copyidx; // Record where we started to panic-copy } // We now know what to copy. Try to copy. int workdone = 0; for( int i=0; i<MIN_COPY_WORK; i++ ) if( copy_slot(topmap,(copyidx+i)&(oldlen-1),oldkvs,newkvs) ) // Made an oldtable slot go dead? workdone++; // Yes! if( workdone > 0 ) // Report work-done occasionally copy_check_and_promote( topmap, oldkvs, workdone );// See if we can promote //for( int i=0; i<MIN_COPY_WORK; i++ ) // if( copy_slot(topmap,(copyidx+i)&(oldlen-1),oldkvs,newkvs) ) // Made an oldtable slot go dead? // copy_check_and_promote( topmap, oldkvs, 1 );// See if we can promote copyidx += MIN_COPY_WORK; // Uncomment these next 2 lines to turn on incremental table-copy. // Otherwise this thread continues to copy until it is all done. if( !copy_all && panic_start == -1 ) // No panic? return; // Then done copying after doing MIN_COPY_WORK } // Extra promotion check, in case another thread finished all copying // then got stalled before promoting. copy_check_and_promote( topmap, oldkvs, 0 );// See if we can promote } // --- copy_slot_and_check ----------------------------------------------- // Copy slot 'idx' from the old table to the new table. If this thread // confirmed the copy, update the counters and check for promotion. // // Returns the result of reading the volatile _newkvs, mostly as a // convenience to callers. We come here with 1-shot copy requests // typically because the caller has found a Prime, and has not yet read // the _newkvs volatile - which must have changed from null-to-not-null // before any Prime appears. So the caller needs to read the _newkvs // field to retry his operation in the new table, but probably has not // read it yet. 
private final Object[] copy_slot_and_check( NonBlockingHashMap topmap, Object[] oldkvs, int idx, Object should_help ) { assert chm(oldkvs) == this; Object[] newkvs = _newkvs; // VOLATILE READ // We're only here because the caller saw a Prime, which implies a // table-copy is in progress. assert newkvs != null; if( copy_slot(topmap,idx,oldkvs,_newkvs) ) // Copy the desired slot copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied // Generically help along any copy (except if called recursively from a helper) return (should_help == null) ? newkvs : topmap.help_copy(newkvs); } // --- copy_check_and_promote -------------------------------------------- private final void copy_check_and_promote( NonBlockingHashMap topmap, Object[] oldkvs, int workdone ) { assert chm(oldkvs) == this; int oldlen = len(oldkvs); // We made a slot unusable and so did some of the needed copy work long copyDone = _copyDone; assert (copyDone+workdone) <= oldlen; if( workdone > 0 ) { while( !_copyDoneUpdater.compareAndSet(this,copyDone,copyDone+workdone) ) { copyDone = _copyDone; // Reload, retry assert (copyDone+workdone) <= oldlen; } //if( (10*copyDone/oldlen) != (10*(copyDone+workdone)/oldlen) ) //System.out.print(" "+(copyDone+workdone)*100/oldlen+"%"+"_"+(_copyIdx*100/oldlen)+"%"); } // Check for copy being ALL done, and promote. Note that we might have // nested in-progress copies and manage to finish a nested copy before // finishing the top-level copy. We only promote top-level copies. if( copyDone+workdone == oldlen && // Ready to promote this table? topmap._kvs == oldkvs && // Looking at the top-level table? _newkvs != READONLY && // Table is locked down? // Attempt to promote topmap.CAS_kvs(oldkvs,_newkvs) ) { topmap._last_resize_milli = System.currentTimeMillis(); // Record resize time for next check } } // --- copy_slot --------------------------------------------------------- // Copy one K/V pair from oldkvs[i] to newkvs. Returns true if we can // confirm that the new table guaranteed has a value for this old-table // slot. We need an accurate confirmed-copy count so that we know when we // can promote (if we promote the new table too soon, other threads may // 'miss' on values not-yet-copied from the old table). We don't allow // any direct updates on the new table, unless they first happened to the // old table - so that any transition in the new table from null to // not-null must have been from a copy_slot (or other old-table overwrite) // and not from a thread directly writing in the new table. Thus we can // count null-to-not-null transitions in the new table. private boolean copy_slot( NonBlockingHashMap topmap, int idx, Object[] oldkvs, Object[] newkvs ) { // Blindly set the key slot from null to TOMBSTONE, to eagerly stop // fresh put's from inserting new values in the old table when the old // table is mid-resize. We don't need to act on the results here, // because our correctness stems from box'ing the Value field. Slamming // the Key field is a minor speed optimization. Object key; while( (key=key(oldkvs,idx)) == null ) CAS_key(oldkvs,idx, null, TOMBSTONE); // --- // Prevent new values from appearing in the old table. // Box what we see in the old table, to prevent further updates. Object oldval = val(oldkvs,idx); // Read OLD table while( !(oldval instanceof Prime) ) { final Prime box = (oldval == null || oldval == TOMBSTONE) ? 
TOMBPRIME : new Prime(oldval); if( CAS_val(oldkvs,idx,oldval,box) ) { // CAS down a box'd version of oldval // If we made the Value slot hold a TOMBPRIME, then we both // prevented further updates here but also the (absent) // oldval is vacuously available in the new table. We // return with true here: any thread looking for a value for // this key can correctly go straight to the new table and // skip looking in the old table. if( box == TOMBPRIME ) return true; // Otherwise we boxed something, but it still needs to be // copied into the new table. oldval = box; // Record updated oldval break; // Break loop; oldval is now boxed by us } oldval = val(oldkvs,idx); // Else try, try again } if( oldval == TOMBPRIME ) return false; // Copy already complete here! // If the new table is really the table-locked flag, then we are done // here: the Value was wrapped in a Prime preventing it from changing // again. if( newkvs == READONLY ) return true; // --- // Copy the value into the new table, but only if we overwrite a null. // If another value is already in the new table, then somebody else // wrote something there and that write is happens-after any value that // appears in the old table. Object old_unboxed = ((Prime)oldval)._V; assert old_unboxed != TOMBSTONE; putIfMatch(topmap, newkvs, key, old_unboxed, null); // --- // Finally, now that any old value is exposed in the new table, we can // forever hide the old-table value by slapping a TOMBPRIME down. This // will stop other threads from uselessly attempting to copy this slot // (i.e., it's a speed optimization not a correctness issue). while( oldval != TOMBPRIME && !CAS_val(oldkvs,idx,oldval,TOMBPRIME) ) oldval = val(oldkvs,idx); return oldval != TOMBPRIME; // True if we slammed the TOMBPRIME down } // end copy_slot } // End of CHM // --- Snapshot ------------------------------------------------------------ // The main class for iterating over the NBHM. It "snapshots" a clean // view of the K/V array. private class SnapshotV implements Iterator<TypeV>, Enumeration<TypeV> { final Object[] _sskvs; public SnapshotV() { while( true ) { // Verify no table-copy-in-progress Object[] topkvs = _kvs; CHM topchm = chm(topkvs); if( topchm._newkvs == null || // No table-copy-in-progress topchm._newkvs == READONLY ) { // The "linearization point" for the iteration. Every key in this // table will be visited, but keys added later might be skipped or // even be added to a following table (also not iterated over). _sskvs = topkvs; break; } // Table copy in-progress - so we cannot get a clean iteration. We // must help finish the table copy before we can start iterating. topchm.help_copy_impl(NonBlockingHashMap.this,topkvs,true); } // Warm-up the iterator next(); } int length() { return len(_sskvs); } Object key(int idx) { return NonBlockingHashMap.key(_sskvs,idx); } private int _idx; // Varies from 0-keys.length private Object _nextK, _prevK; // Last 2 keys found private TypeV _nextV, _prevV; // Last 2 values found public boolean hasNext() { return _nextV != null; } public TypeV next() { // 'next' actually knows what the next value will be - it had to // figure that out last go-around lest 'hasNext' report true and // some other thread deleted the last value. Instead, 'next' // spends all its effort finding the key that comes after the // 'next' key. 
if( _idx != 0 && _nextV == null ) throw new NoSuchElementException(); _prevK = _nextK; // This will become the previous key _prevV = _nextV; // This will become the previous value _nextV = null; // We have no more next-key // Attempt to set <_nextK,_nextV> to the next K,V pair. // _nextV is the trigger: stop searching when it is != null while( _idx<length() ) { // Scan array _nextK = key(_idx++); // Get a key that definitely is in the set (for the moment!) if( _nextK != null && // Found something? _nextK != TOMBSTONE && (_nextV=get(_nextK)) != null ) break; // Got it! _nextK is a valid Key } // Else keep scanning return _prevV; // Return current value. } public void remove() { if( _prevV == null ) throw new IllegalStateException(); putIfMatch( NonBlockingHashMap.this, _sskvs, _prevK, TOMBSTONE, _prevV ); _prevV = null; } public TypeV nextElement() { return next(); } public boolean hasMoreElements() { return hasNext(); } } public Object[] raw_array() { return new SnapshotV()._sskvs; } /** Returns an enumeration of the values in this table. * @return an enumeration of the values in this table * @see #values() */ public Enumeration<TypeV> elements() { return new SnapshotV(); } // --- values -------------------------------------------------------------- /** Returns a {@link Collection} view of the values contained in this map. * The collection is backed by the map, so changes to the map are reflected * in the collection, and vice-versa. The collection supports element * removal, which removes the corresponding mapping from this map, via the * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>, * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations. * It does not support the <tt>add</tt> or <tt>addAll</tt> operations. * * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator that * will never throw {@link ConcurrentModificationException}, and guarantees * to traverse elements as they existed upon construction of the iterator, * and may (but is not guaranteed to) reflect any modifications subsequent * to construction. */ @Override public Collection<TypeV> values() { return new AbstractCollection<TypeV>() { @Override public void clear ( ) { NonBlockingHashMap.this.clear ( ); } @Override public int size ( ) { return NonBlockingHashMap.this.size ( ); } @Override public boolean contains( Object v ) { return NonBlockingHashMap.this.containsValue(v); } @Override public Iterator<TypeV> iterator() { return new SnapshotV(); } }; } // --- keySet -------------------------------------------------------------- private class SnapshotK implements Iterator<TypeK>, Enumeration<TypeK> { final SnapshotV _ss; public SnapshotK() { _ss = new SnapshotV(); } public void remove() { _ss.remove(); } public TypeK next() { _ss.next(); return (TypeK)_ss._prevK; } public boolean hasNext() { return _ss.hasNext(); } public TypeK nextElement() { return next(); } public boolean hasMoreElements() { return hasNext(); } } /** Returns an enumeration of the keys in this table. * @return an enumeration of the keys in this table * @see #keySet() */ public Enumeration<TypeK> keys() { return new SnapshotK(); } /** Returns a {@link Set} view of the keys contained in this map. The set * is backed by the map, so changes to the map are reflected in the set, * and vice-versa. The set supports element removal, which removes the * corresponding mapping from this map, via the <tt>Iterator.remove</tt>, * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and * <tt>clear</tt> operations. 
It does not support the <tt>add</tt> or * <tt>addAll</tt> operations. * * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator that * will never throw {@link ConcurrentModificationException}, and guarantees * to traverse elements as they existed upon construction of the iterator, * and may (but is not guaranteed to) reflect any modifications subsequent * to construction. */ @Override public Set<TypeK> keySet() { return new AbstractSet<TypeK> () { @Override public void clear ( ) { NonBlockingHashMap.this.clear ( ); } @Override public int size ( ) { return NonBlockingHashMap.this.size ( ); } @Override public boolean contains( Object k ) { return NonBlockingHashMap.this.containsKey(k); } @Override public boolean remove ( Object k ) { return NonBlockingHashMap.this.remove (k) != null; } @Override public Iterator<TypeK> iterator() { return new SnapshotK(); } // This is an efficient implementation of toArray instead of the standard // one. In particular it uses a smart iteration over the NBHM. @Override public <T> T[] toArray(T[] a) { Object[] kvs = raw_array(); // Estimate size of array; be prepared to see more or fewer elements int sz = size(); T[] r = a.length >= sz ? a : (T[])java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), sz); // Fast efficient element walk. int j=0; for( int i=0; i<len(kvs); i++ ) { Object K = key(kvs,i); Object V = Prime.unbox(val(kvs,i)); if( K != null && K != TOMBSTONE && V != null && V != TOMBSTONE ) { if( j >= r.length ) { int sz2 = (int)Math.min(Integer.MAX_VALUE-8,((long)j)<<1); if( sz2<=r.length ) throw new OutOfMemoryError("Required array size too large"); r = Arrays.copyOf(r,sz2); } r[j++] = (T)K; } } if( j <= a.length ) { // Fit in the original array? if( a!=r ) System.arraycopy(r,0,a,0,j); if( j<a.length ) r[j++]=null; // One final null not in the spec but in the default impl return a; // Return the original } return Arrays.copyOf(r,j); } }; } // --- entrySet ------------------------------------------------------------ // Warning: Each call to 'next' in this iterator constructs a new NBHMEntry. private class NBHMEntry extends AbstractEntry<TypeK,TypeV> { NBHMEntry( final TypeK k, final TypeV v ) { super(k,v); } public TypeV setValue(final TypeV val) { if( val == null ) throw new NullPointerException(); _val = val; return put(_key, val); } } private class SnapshotE implements Iterator<Map.Entry<TypeK,TypeV>> { final SnapshotV _ss; public SnapshotE() { _ss = new SnapshotV(); } public void remove() { _ss.remove(); } public Map.Entry<TypeK,TypeV> next() { _ss.next(); return new NBHMEntry((TypeK)_ss._prevK,_ss._prevV); } public boolean hasNext() { return _ss.hasNext(); } } /** Returns a {@link Set} view of the mappings contained in this map. The * set is backed by the map, so changes to the map are reflected in the * set, and vice-versa. The set supports element removal, which removes * the corresponding mapping from the map, via the * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, <tt>removeAll</tt>, * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not support * the <tt>add</tt> or <tt>addAll</tt> operations. * * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator * that will never throw {@link ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. 
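 *
 * <p>Iteration sketch (illustrative only; {@code map} is assumed to be a
 * {@code NonBlockingHashMap<String,Long>} built elsewhere):
 * <pre>{@code
 *   for( Map.Entry<String,Long> e : map.entrySet() )
 *     System.out.println(e.getKey() + " -> " + e.getValue());
 * }</pre>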
 * * <p><strong>Warning:</strong> the iterator associated with this Set * requires the creation of {@link java.util.Map.Entry} objects with each * iteration. The {@link NonBlockingHashMap} does not normally create or * use {@link java.util.Map.Entry} objects, so they will be created solely * to support this iteration. Iterating using {@link #keySet} or {@link * #values} will be more efficient. */ @Override public Set<Map.Entry<TypeK,TypeV>> entrySet() { return new AbstractSet<Map.Entry<TypeK,TypeV>>() { @Override public void clear ( ) { NonBlockingHashMap.this.clear( ); } @Override public int size ( ) { return NonBlockingHashMap.this.size ( ); } @Override public boolean remove( final Object o ) { if( !(o instanceof Map.Entry)) return false; final Map.Entry<?,?> e = (Map.Entry<?,?>)o; return NonBlockingHashMap.this.remove(e.getKey(), e.getValue()); } @Override public boolean contains(final Object o) { if( !(o instanceof Map.Entry)) return false; final Map.Entry<?,?> e = (Map.Entry<?,?>)o; TypeV v = get(e.getKey()); return v.equals(e.getValue()); } @Override public Iterator<Map.Entry<TypeK,TypeV>> iterator() { return new SnapshotE(); } }; } // --- writeObject ------------------------------------------------------- // Write a NBHM to a stream private void writeObject(java.io.ObjectOutputStream s) throws IOException { s.defaultWriteObject(); // Nothing to write for( Object K : keySet() ) { final Object V = get(K); // Do an official 'get' s.writeObject(K); // Write the <TypeK,TypeV> pair s.writeObject(V); } s.writeObject(null); // Sentinel to indicate end-of-data s.writeObject(null); } // --- readObject -------------------------------------------------------- // Read a CHM from a stream private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { s.defaultReadObject(); // Read nothing initialize(MIN_SIZE); for(;;) { final TypeK K = (TypeK) s.readObject(); final TypeV V = (TypeV) s.readObject(); if( K == null ) break; put(K,V); // Insert with an official put } } /** * Atomically make the set immutable. Future calls to mutate with wildcard * matching will throw an IllegalStateException. This basically outlaws put, * remove and replace, but allows putIfAbsent and putIfMatch. Existing * mutator calls in other threads racing with this thread will either * throw IllegalStateException or their update will be visible to this * thread. This implies that a simple flag cannot make the Set immutable, * because a late-arriving update in another thread might see the immutable flag * not set yet, then mutate the Set after the {@link #readOnly} call returns. * This call can be called concurrently (and indeed until the operation * completes, all calls on the Set from any thread either complete normally * or end up calling {@link #readOnly} internally). * * <p> This call is useful in debugging multi-threaded programs where the * Set is constructed in parallel, but construction completes after some * time; and after construction the Set is only read. Making the Set * read-only will cause updates arriving after construction is supposedly * complete to throw an {@link IllegalStateException}. */ public void readOnly() { // Set the innermost kvs to the READONLY sentinel. This will (gradually) // prevent future updates.
Object[] kvs = _kvs; while( true ) { // Spin, until we lock down the innermost table CHM chm = chm(kvs); Object[] newkvs = chm._newkvs; if( newkvs == READONLY ) break; if( chm.CAS_newkvs(READONLY) ) break; kvs = newkvs; assert kvs != null && kvs != READONLY; } CHM chm = chm(kvs); // Help along some of the table copy, but not all of it chm.help_copy_impl(NonBlockingHashMap.this,kvs,false); } } // End NonBlockingHashMap class
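// ---------------------------------------------------------------------------
// A minimal usage sketch (illustrative only, not part of the original
// Cliff Click / H2O source): it exercises the public API documented above --
// put, putIfAbsent, get and readOnly. The class name and sample values are
// hypothetical, and it assumes the map's default no-arg constructor.
class NonBlockingHashMapUsageSketch {
  public static void main(String[] args) {
    // Lock-free map, safe to share across threads without synchronization.
    NonBlockingHashMap<String,Integer> map = new NonBlockingHashMap<String,Integer>();
    map.put("a", 1);                    // plain put
    map.putIfAbsent("b", 2);            // atomic insert-if-missing
    System.out.println(map.get("a"));   // prints 1
    map.readOnly();                     // freeze the map; per the javadoc above,
                                        // later put/remove/replace calls throw
                                        // IllegalStateException
  }
}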
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/nbhm/NonBlockingHashMapLong.java
package water.nbhm; import java.io.IOException; import java.io.Serializable; import java.lang.reflect.Field; import java.util.*; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import sun.misc.Unsafe; /* * Written by Cliff Click and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain */ /** * A lock-free alternate implementation of {@link * java.util.concurrent.ConcurrentHashMap} with <strong>primitive long * keys</strong>, better scaling properties and generally lower costs. The use * of {@code long} keys allows for faster compares and lower memory costs. The * Map provides identical correctness properties as ConcurrentHashMap. All * operations are non-blocking and multi-thread safe, including all update * operations. {@link NonBlockingHashMapLong} scales substatially better than * {@link java.util.concurrent.ConcurrentHashMap} for high update rates, even * with a large concurrency factor. Scaling is linear up to 768 CPUs on a * 768-CPU Azul box, even with 100% updates or 100% reads or any fraction * in-between. Linear scaling up to all cpus has been observed on a 32-way Sun * US2 box, 32-way Sun Niagra box, 8-way Intel box and a 4-way Power box. * * <p><strong>The main benefit of this class</strong> over using plain * org.cliffc.high_scale_lib.NonBlockingHashMap with {@link Long} keys is * that it avoids the auto-boxing and unboxing costs. Since auto-boxing is * <em>automatic</em>, it is easy to accidentally cause auto-boxing and negate * the space and speed benefits. * * <p>This class obeys the same functional specification as {@link * java.util.Hashtable}, and includes versions of methods corresponding to * each method of <tt>Hashtable</tt>. However, even though all operations are * thread-safe, operations do <em>not</em> entail locking and there is * <em>not</em> any support for locking the entire table in a way that * prevents all access. This class is fully interoperable with * <tt>Hashtable</tt> in programs that rely on its thread safety but not on * its synchronization details. * * <p> Operations (including <tt>put</tt>) generally do not block, so may * overlap with other update operations (including other <tt>puts</tt> and * <tt>removes</tt>). Retrievals reflect the results of the most recently * <em>completed</em> update operations holding upon their onset. For * aggregate operations such as <tt>putAll</tt>, concurrent retrievals may * reflect insertion or removal of only some entries. Similarly, Iterators * and Enumerations return elements reflecting the state of the hash table at * some point at or since the creation of the iterator/enumeration. They do * <em>not</em> throw {@link ConcurrentModificationException}. However, * iterators are designed to be used by only one thread at a time. * * <p> Very full tables, or tables with high reprobe rates may trigger an * internal resize operation to move into a larger table. Resizing is not * terribly expensive, but it is not free either; during resize operations * table throughput may drop somewhat. All threads that visit the table * during a resize will 'help' the resizing but will still be allowed to * complete their operation before the resize is finished (i.e., a simple * 'get' operation on a million-entry table undergoing resizing will not need * to block until the entire million entries are copied). 
* * <p>This class and its views and iterators implement all of the * <em>optional</em> methods of the {@link Map} and {@link Iterator} * interfaces. * * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class * does <em>not</em> allow <tt>null</tt> to be used as a value. * * * @since 1.5 * @author Cliff Click * @param <TypeV> the type of mapped values */ public class NonBlockingHashMapLong<TypeV> extends AbstractMap<Long,TypeV> implements ConcurrentMap<Long,TypeV>, Serializable { private static final long serialVersionUID = 1234123412341234124L; private static final int REPROBE_LIMIT=10; // Too many reprobes then force a table-resize // --- Bits to allow Unsafe access to arrays private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); private static final int _Obase = _unsafe.arrayBaseOffset(Object[].class); private static final int _Oscale = _unsafe.arrayIndexScale(Object[].class); private static long rawIndex(final Object[] ary, final int idx) { assert idx >= 0 && idx < ary.length; return _Obase + idx * _Oscale; } private static final int _Lbase = _unsafe.arrayBaseOffset(long[].class); private static final int _Lscale = _unsafe.arrayIndexScale(long[].class); private static long rawIndex(final long[] ary, final int idx) { assert idx >= 0 && idx < ary.length; return _Lbase + idx * _Lscale; } // --- Bits to allow Unsafe CAS'ing of the CHM field private static final long _chm_offset; private static final long _val_1_offset; static { // <clinit> Field f = null; try { f = NonBlockingHashMapLong.class.getDeclaredField("_chm"); } catch( java.lang.NoSuchFieldException e ) { throw new RuntimeException(e); } _chm_offset = _unsafe.objectFieldOffset(f); try { f = NonBlockingHashMapLong.class.getDeclaredField("_val_1"); } catch( java.lang.NoSuchFieldException e ) { throw new RuntimeException(e); } _val_1_offset = _unsafe.objectFieldOffset(f); } private final boolean CAS( final long offset, final Object old, final Object nnn ) { return _unsafe.compareAndSwapObject(this, offset, old, nnn ); } // --- Adding a 'prime' bit onto Values via wrapping with a junk wrapper class private static final class Prime { final Object _V; Prime( Object V ) { _V = V; } static Object unbox( Object V ) { return V instanceof Prime ? ((Prime)V)._V : V; } } // --- The Hash Table -------------------- private transient CHM _chm; // This next field holds the value for Key 0 - the special key value which // is the initial array value, and also means: no-key-inserted-yet. private transient Object _val_1; // Value for Key: NO_KEY // Time since last resize private transient long _last_resize_milli; // Optimize for space: use a 1/2-sized table and allow more re-probes private final boolean _opt_for_space; // --- Minimum table size ---------------- // Pick size 16 K/V pairs, which turns into (16*2)*4+12 = 140 bytes on a // standard 32-bit HotSpot, and (16*2)*8+12 = 268 bytes on 64-bit Azul. private static final int MIN_SIZE_LOG=4; // private static final int MIN_SIZE=(1<<MIN_SIZE_LOG); // Must be power of 2 // --- Sentinels ------------------------- // No-Match-Old - putIfMatch does updates only if it matches the old value, // and NO_MATCH_OLD basically counts as a wildcard match. private static final Object NO_MATCH_OLD = new Object(); // Sentinel // Match-Any-not-null - putIfMatch does updates only if it find a real old // value. private static final Object MATCH_ANY = new Object(); // Sentinel // This K/V pair has been deleted (but the Key slot is forever claimed). 
// The same Key can be reinserted with a new value later. private static final Object TOMBSTONE = new Object(); // Prime'd or box'd version of TOMBSTONE. This K/V pair was deleted, then a // table resize started. The K/V pair has been marked so that no new // updates can happen to the old table (and since the K/V pair was deleted // nothing was copied to the new table). private static final Prime TOMBPRIME = new Prime(TOMBSTONE); // I exclude 1 long from the 2^64 possibilities, and test for it before // entering the main array. The NO_KEY value must be zero, the initial // value set by Java before it hands me the array. private static final long NO_KEY = 0L; // --- dump ---------------------------------------------------------------- /** Verbose printout of table internals, useful for debugging. */ public final void print() { System.out.println("========="); print_impl(-99,NO_KEY,_val_1); _chm.print(); System.out.println("========="); } private static final void print_impl(final int i, final long K, final Object V) { String p = (V instanceof Prime) ? "prime_" : ""; Object V2 = Prime.unbox(V); String VS = (V2 == TOMBSTONE) ? "tombstone" : V2.toString(); System.out.println("["+i+"]=("+K+","+p+VS+")"); } private final void print2() { System.out.println("========="); print2_impl(-99,NO_KEY,_val_1); _chm.print(); System.out.println("========="); } private static final void print2_impl(final int i, final long K, final Object V) { if( V != null && Prime.unbox(V) != TOMBSTONE ) print_impl(i,K,V); } // Count of reprobes private transient ConcurrentAutoTable _reprobes = new ConcurrentAutoTable(); /** Get and clear the current count of reprobes. Reprobes happen on key * collisions, and a high reprobe rate may indicate a poor hash function or * weaknesses in the table resizing function. * @return the count of reprobes since the last call to {@link #reprobes} * or since the table was created. */ public long reprobes() { long r = _reprobes.get(); _reprobes = new ConcurrentAutoTable(); return r; } // --- reprobe_limit ----------------------------------------------------- // Heuristic to decide if we have reprobed toooo many times. Running over // the reprobe limit on a 'get' call acts as a 'miss'; on a 'put' call it // can trigger a table resize. Several places must have exact agreement on // what the reprobe_limit is, so we share it here. private static final int reprobe_limit( int len ) { return REPROBE_LIMIT + (len>>8); } // --- NonBlockingHashMapLong ---------------------------------------------- // Constructors /** Create a new NonBlockingHashMapLong with default minimum size (currently set * to 8 K/V pairs or roughly 84 bytes on a standard 32-bit JVM). */ public NonBlockingHashMapLong( ) { this(MIN_SIZE,true); } /** Create a new NonBlockingHashMapLong with initial room for the given * number of elements, thus avoiding internal resizing operations to reach * an appropriate size. Large numbers here when used with a small count of * elements will sacrifice space for a small amount of time gained. The * initial size will be rounded up internally to the next larger power of 2. */ public NonBlockingHashMapLong( final int initial_sz ) { this(initial_sz,true); } /** Create a new NonBlockingHashMapLong, setting the space-for-speed * tradeoff. {@code true} optimizes for space and is the default. {@code * false} optimizes for speed and doubles space costs for roughly a 10% * speed improvement. 
*/ public NonBlockingHashMapLong( final boolean opt_for_space ) { this(1,opt_for_space); } /** Create a new NonBlockingHashMapLong, setting both the initial size and * the space-for-speed tradeoff. {@code true} optimizes for space and is * the default. {@code false} optimizes for speed and doubles space costs * for roughly a 10% speed improvement. */ public NonBlockingHashMapLong( final int initial_sz, final boolean opt_for_space ) { _opt_for_space = opt_for_space; initialize(initial_sz); } private final void initialize( final int initial_sz ) { if( initial_sz < 0 ) throw new IllegalArgumentException(); int i; // Convert to next largest power-of-2 for( i=MIN_SIZE_LOG; (1<<i) < initial_sz; i++ ) ; _chm = new CHM(this,new ConcurrentAutoTable(),i); _val_1 = TOMBSTONE; // Always as-if deleted _last_resize_milli = System.currentTimeMillis(); } // --- wrappers ------------------------------------------------------------ /** Returns the number of key-value mappings in this map. * @return the number of key-value mappings in this map */ public int size ( ) { return (_val_1==TOMBSTONE?0:1) + _chm.size(); } /** Tests if the key in the table. * @return <tt>true</tt> if the key is in the table */ public boolean containsKey( long key ) { return get(key) != null; } /** Legacy method testing if some key maps into the specified value in this * table. This method is identical in functionality to {@link * #containsValue}, and exists solely to ensure full compatibility with * class {@link java.util.Hashtable}, which supported this method prior to * introduction of the Java Collections framework. * @param val a value to search for * @return <tt>true</tt> if this map maps one or more keys to the specified value * @throws NullPointerException if the specified value is null */ public boolean contains ( Object val ) { return containsValue(val); } /** Maps the specified key to the specified value in the table. The value * cannot be null. <p> The value can be retrieved by calling {@link #get} * with a key that is equal to the original key. * @param key key with which the specified value is to be associated * @param val value to be associated with the specified key * @return the previous value associated with <tt>key</tt>, or * <tt>null</tt> if there was no mapping for <tt>key</tt> * @throws NullPointerException if the specified value is null */ public TypeV put ( long key, TypeV val ) { return putIfMatch( key, val,NO_MATCH_OLD);} /** Atomically, do a {@link #put} if-and-only-if the key is not mapped. * Useful to ensure that only a single mapping for the key exists, even if * many threads are trying to create the mapping in parallel. * @return the previous value associated with the specified key, * or <tt>null</tt> if there was no mapping for the key * @throws NullPointerException if the specified is value is null */ public TypeV putIfAbsent( long key, TypeV val ) { return putIfMatch( key, val,TOMBSTONE );} /** Removes the key (and its corresponding value) from this map. * This method does nothing if the key is not in the map. * @return the previous value associated with <tt>key</tt>, or * <tt>null</tt> if there was no mapping for <tt>key</tt>*/ public TypeV remove ( long key ) { return putIfMatch( key,TOMBSTONE,NO_MATCH_OLD);} /** Atomically do a {@link #remove(long)} if-and-only-if the key is mapped * to a value which is <code>equals</code> to the given value. 
* @throws NullPointerException if the specified value is null */ public boolean remove ( long key,Object val ) { return putIfMatch( key,TOMBSTONE,val ) == val ;} /** Atomically do a <code>put(key,val)</code> if-and-only-if the key is * mapped to some value already. * @throws NullPointerException if the specified value is null */ public TypeV replace ( long key, TypeV val ) { return putIfMatch( key, val,MATCH_ANY );} /** Atomically do a <code>put(key,newValue)</code> if-and-only-if the key is * mapped a value which is <code>equals</code> to <code>oldValue</code>. * @throws NullPointerException if the specified value is null */ public boolean replace ( long key, TypeV oldValue, TypeV newValue ) { return putIfMatch( key, newValue, oldValue ) == oldValue; } private final TypeV putIfMatch( long key, Object newVal, Object oldVal ) { if (oldVal == null || newVal == null) throw new NullPointerException(); if( key == NO_KEY ) { Object curVal = _val_1; if( oldVal == NO_MATCH_OLD || // Do we care about expected-Value at all? curVal == oldVal || // No instant match already? (oldVal == MATCH_ANY && curVal != TOMBSTONE) || oldVal.equals(curVal) ) { // Expensive equals check if( !CAS(_val_1_offset,curVal,newVal) ) // One shot CAS update attempt curVal = _val_1; // Failed; get failing witness } return curVal == TOMBSTONE ? null : (TypeV)curVal; // Return the last value present } final Object res = _chm.putIfMatch( key, newVal, oldVal ); assert !(res instanceof Prime); assert res != null; return res == TOMBSTONE ? null : (TypeV)res; } /** Removes all of the mappings from this map. */ public void clear() { // Smack a new empty table down CHM newchm = new CHM(this,new ConcurrentAutoTable(),MIN_SIZE_LOG); while( !CAS(_chm_offset,_chm,newchm) ) // Spin until the clear works ; CAS(_val_1_offset,_val_1,TOMBSTONE); } /** Returns <tt>true</tt> if this Map maps one or more keys to the specified * value. <em>Note</em>: This method requires a full internal traversal of the * hash table and is much slower than {@link #containsKey}. * @param val value whose presence in this map is to be tested * @return <tt>true</tt> if this Map maps one or more keys to the specified value * @throws NullPointerException if the specified value is null */ public boolean containsValue( Object val ) { if( val == null ) return false; if( val == _val_1 ) return true; // Key 0 for( TypeV V : values() ) if( V == val || V.equals(val) ) return true; return false; } // --- get ----------------------------------------------------------------- /** Returns the value to which the specified key is mapped, or {@code null} * if this map contains no mapping for the key. * <p>More formally, if this map contains a mapping from a key {@code k} to * a value {@code v} such that {@code key==k}, then this method * returns {@code v}; otherwise it returns {@code null}. (There can be at * most one such mapping.) * @throws NullPointerException if the specified key is null */ // Never returns a Prime nor a Tombstone. public final TypeV get( long key ) { if( key == NO_KEY ) { final Object V = _val_1; return V == TOMBSTONE ? null : (TypeV)V; } final Object V = _chm.get_impl(key); assert !(V instanceof Prime); // Never return a Prime assert V != TOMBSTONE; return (TypeV)V; } /** Auto-boxing version of {@link #get(long)}. */ public TypeV get ( Object key ) { return (key instanceof Long) ? get (((Long)key).longValue()) : null; } /** Auto-boxing version of {@link #remove(long)}. */ public TypeV remove ( Object key ) { return (key instanceof Long) ? 
remove (((Long)key).longValue()) : null; } /** Auto-boxing version of {@link #remove(long,Object)}. */ public boolean remove ( Object key, Object Val ) { return (key instanceof Long) && remove(((Long) key).longValue(), Val); } /** Auto-boxing version of {@link #containsKey(long)}. */ public boolean containsKey( Object key ) { return (key instanceof Long) && containsKey(((Long) key).longValue()); } /** Auto-boxing version of {@link #putIfAbsent}. */ public TypeV putIfAbsent( Long key, TypeV val ) { return putIfAbsent( key.longValue(), val ); } /** Auto-boxing version of {@link #replace}. */ public TypeV replace( Long key, TypeV Val ) { return replace(key.longValue(), Val); } /** Auto-boxing version of {@link #put}. */ public TypeV put ( Long key, TypeV val ) { return put(key.longValue(),val); } /** Auto-boxing version of {@link #replace}. */ public boolean replace( Long key, TypeV oldValue, TypeV newValue ) { return replace(key.longValue(), oldValue, newValue); } // --- help_copy ----------------------------------------------------------- // Help along an existing resize operation. This is just a fast cut-out // wrapper, to encourage inlining for the fast no-copy-in-progress case. We // always help the top-most table copy, even if there are nested table // copies in progress. private final void help_copy( ) { // Read the top-level CHM only once. We'll try to help this copy along, // even if it gets promoted out from under us (i.e., the copy completes // and another KVS becomes the top-level copy). CHM topchm = _chm; if( topchm._newchm == null ) return; // No copy in-progress topchm.help_copy_impl(false); } // --- CHM ----------------------------------------------------------------- // The control structure for the NonBlockingHashMapLong private static final class CHM<TypeV> implements Serializable { // Back-pointer to top-level structure final NonBlockingHashMapLong _nbhml; // Size in active K,V pairs private final ConcurrentAutoTable _size; public int size () { return (int)_size.get(); } // --- // These next 2 fields are used in the resizing heuristics, to judge when // it is time to resize or copy the table. Slots is a count of used-up // key slots, and when it nears a large fraction of the table we probably // end up reprobing too much. Last-resize-milli is the time since the // last resize; if we are running back-to-back resizes without growing // (because there are only a few live keys but many slots full of dead // keys) then we need a larger table to cut down on the churn. // Count of used slots, to tell when table is full of dead unusable slots private final ConcurrentAutoTable _slots; public int slots() { return (int)_slots.get(); } // --- // New mappings, used during resizing. // The 'next' CHM - created during a resize operation. This represents // the new table being copied from the old one. It's the volatile // variable that is read as we cross from one table to the next, to get // the required memory orderings. It monotonically transits from null to // set (once). volatile CHM _newchm; private static final AtomicReferenceFieldUpdater<CHM,CHM> _newchmUpdater = AtomicReferenceFieldUpdater.newUpdater(CHM.class,CHM.class, "_newchm"); // Set the _newchm field if we can. AtomicUpdaters do not fail spuriously. boolean CAS_newchm( CHM newchm ) { return _newchmUpdater.compareAndSet(this,null,newchm); } // Sometimes many threads race to create a new very large table. Only 1 // wins the race, but the losers all allocate a junk large table with // hefty allocation costs. 
Attempt to control the overkill here by // throttling attempts to create a new table. I cannot really block here // (lest I lose the non-blocking property) but late-arriving threads can // give the initial resizing thread a little time to allocate the initial // new table. The Right Long Term Fix here is to use array-lets and // incrementally create the new very large array. In C I'd make the array // with malloc (which would mmap under the hood) which would only eat // virtual-address and not real memory - and after Somebody wins then we // could in parallel initialize the array. Java does not allow // un-initialized array creation (especially of ref arrays!). volatile long _resizers; // count of threads attempting an initial resize private static final AtomicLongFieldUpdater<CHM> _resizerUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_resizers"); // --- key,val ------------------------------------------------------------- // Access K,V for a given idx private final boolean CAS_key( int idx, long old, long key ) { return _unsafe.compareAndSwapLong ( _keys, rawIndex(_keys, idx), old, key ); } private final boolean CAS_val( int idx, Object old, Object val ) { return _unsafe.compareAndSwapObject( _vals, rawIndex(_vals, idx), old, val ); } final long [] _keys; final Object [] _vals; // Simple constructor CHM( final NonBlockingHashMapLong nbhml, ConcurrentAutoTable size, final int logsize ) { _nbhml = nbhml; _size = size; _slots= new ConcurrentAutoTable(); _keys = water.MemoryManager.malloc8 (1<<logsize); _vals = water.MemoryManager.mallocObj(1<<logsize); } // --- print innards private final void print() { for( int i=0; i<_keys.length; i++ ) { long K = _keys[i]; if( K != NO_KEY ) print_impl(i,K,_vals[i]); } CHM newchm = _newchm; // New table, if any if( newchm != null ) { System.out.println("----"); newchm.print(); } } // --- print only the live objects private final void print2( ) { for( int i=0; i<_keys.length; i++ ) { long K = _keys[i]; if( K != NO_KEY ) // key is sane print2_impl(i,K,_vals[i]); } CHM newchm = _newchm; // New table, if any if( newchm != null ) { System.out.println("----"); newchm.print2(); } } // --- get_impl ---------------------------------------------------------- // Never returns a Prime nor a Tombstone. private final Object get_impl ( final long key ) { final int len = _keys.length; int idx = (int)(key & (len-1)); // First key hash // Main spin/reprobe loop, looking for a Key hit int reprobe_cnt=0; while( true ) { final long K = _keys[idx]; // Get key before volatile read, could be NO_KEY final Object V = _vals[idx]; // Get value before volatile read, could be null or Tombstone or Prime if( K == NO_KEY ) return null; // A clear miss // Key-compare if( key == K ) { // Key hit! Check for no table-copy-in-progress if( !(V instanceof Prime) ) { // No copy? if( V == TOMBSTONE) return null; // We need a volatile-read between reading a newly inserted Value // and returning the Value (so the user might end up reading the // stale Value contents). // VOLATILE READ before returning V @SuppressWarnings("unused") final CHM newchm = _newchm; return V; } // Key hit - but slot is (possibly partially) copied to the new table. // Finish the copy & retry in the new table. return copy_slot_and_check(idx,key).get_impl(key); // Retry in the new table } // get and put must have the same key lookup logic! But only 'put' // needs to force a table-resize for a too-long key-reprobe sequence. // Check for too-many-reprobes on get. 
if( ++reprobe_cnt >= reprobe_limit(len) ) // too many probes return _newchm == null // Table copy in progress? ? null // Nope! A clear miss : copy_slot_and_check(idx,key).get_impl(key); // Retry in the new table idx = (idx+1)&(len-1); // Reprobe by 1! (could now prefetch) } } // --- putIfMatch --------------------------------------------------------- // Put, Remove, PutIfAbsent, etc. Return the old value. If the returned // value is equal to expVal (or expVal is NO_MATCH_OLD) then the put can // be assumed to work (although might have been immediately overwritten). // Only the path through copy_slot passes in an expected value of null, // and putIfMatch only returns a null if passed in an expected null. private final Object putIfMatch( final long key, final Object putval, final Object expVal ) { assert putval != null; assert !(putval instanceof Prime); assert !(expVal instanceof Prime); final int len = _keys.length; int idx = (int)(key & (len-1)); // The first key // --- // Key-Claim stanza: spin till we can claim a Key (or force a resizing). int reprobe_cnt=0; long K = NO_KEY; Object V = null; while( true ) { // Spin till we get a Key slot V = _vals[idx]; // Get old value K = _keys[idx]; // Get current key if( K == NO_KEY ) { // Slot is free? // Found an empty Key slot - which means this Key has never been in // this table. No need to put a Tombstone - the Key is not here! if( putval == TOMBSTONE ) return putval; // Not-now & never-been in this table // Claim the zero key-slot if( CAS_key(idx, NO_KEY, key) ) { // Claim slot for Key _slots.add(1); // Raise key-slots-used count break; // Got it! } // CAS to claim the key-slot failed. // // This re-read of the Key points out an annoying short-coming of Java // CAS. Most hardware CAS's report back the existing value - so that // if you fail you have a *witness* - the value which caused the CAS // to fail. The Java API turns this into a boolean destroying the // witness. Re-reading does not recover the witness because another // thread can write over the memory after the CAS. Hence we can be in // the unfortunate situation of having a CAS fail *for cause* but // having that cause removed by a later store. This turns a // non-spurious-failure CAS (such as Azul has) into one that can // apparently spuriously fail - and we avoid apparent spurious failure // by not allowing Keys to ever change. K = _keys[idx]; // CAS failed, get updated value assert K != NO_KEY ; // If keys[idx] is NO_KEY, CAS shoulda worked } // Key slot was not null, there exists a Key here if( K == key ) break; // Got it! // get and put must have the same key lookup logic! Lest 'get' give // up looking too soon. //topmap._reprobes.add(1); if( ++reprobe_cnt >= reprobe_limit(len) ) { // We simply must have a new table to do a 'put'. At this point a // 'get' will also go to the new table (if any). We do not need // to claim a key slot (indeed, we cannot find a free one to claim!). final CHM newchm = resize(); if( expVal != null ) _nbhml.help_copy(); // help along an existing copy return newchm.putIfMatch(key,putval,expVal); } idx = (idx+1)&(len-1); // Reprobe! } // End of spinning till we get a Key slot // --- // Found the proper Key slot, now update the matching Value slot. We // never put a null, so Value slots monotonically move from null to // not-null (deleted Values use Tombstone). Thus if 'V' is null we // fail this fast cutout and fall into the check for table-full. 
if( putval == V ) return V; // Fast cutout for no-change // See if we want to move to a new table (to avoid high average re-probe // counts). We only check on the initial set of a Value from null to // not-null (i.e., once per key-insert). if( (V == null && tableFull(reprobe_cnt,len)) || // Or we found a Prime: resize is already in progress. The resize // call below will do a CAS on _newchm forcing the read. V instanceof Prime) { resize(); // Force the new table copy to start return copy_slot_and_check(idx,expVal).putIfMatch(key,putval,expVal); } // --- // We are finally prepared to update the existing table assert !(V instanceof Prime); // Must match old, and we do not? Then bail out now. Note that either V // or expVal might be TOMBSTONE. Also V can be null, if we've never // inserted a value before. expVal can be null if we are called from // copy_slot. if( expVal != NO_MATCH_OLD && // Do we care about expected-Value at all? V != expVal && // No instant match already? (expVal != MATCH_ANY || V == TOMBSTONE || V == null) && !(V==null && expVal == TOMBSTONE) && // Match on null/TOMBSTONE combo (expVal == null || !expVal.equals(V)) ) // Expensive equals check at the last return V; // Do not update! // Actually change the Value in the Key,Value pair if( CAS_val(idx, V, putval ) ) { // CAS succeeded - we did the update! // Both normal put's and table-copy calls putIfMatch, but table-copy // does not (effectively) increase the number of live k/v pairs. if( expVal != null ) { // Adjust sizes - a striped counter if( (V == null || V == TOMBSTONE) && putval != TOMBSTONE ) _size.add( 1); if( !(V == null || V == TOMBSTONE) && putval == TOMBSTONE ) _size.add(-1); } } else { // Else CAS failed V = _vals[idx]; // Get new value // If a Prime'd value got installed, we need to re-run the put on the // new table. Otherwise we lost the CAS to another racing put. // Simply retry from the start. if( V instanceof Prime ) return copy_slot_and_check(idx,expVal).putIfMatch(key,putval,expVal); } // Win or lose the CAS, we are done. If we won then we know the update // happened as expected. If we lost, it means "we won but another thread // immediately stomped our update with no chance of a reader reading". return (V==null && expVal!=null) ? TOMBSTONE : V; } // --- tableFull --------------------------------------------------------- // Heuristic to decide if this table is too full, and we should start a // new table. Note that if a 'get' call has reprobed too many times and // decided the table must be full, then always the estimate_sum must be // high and we must report the table is full. If we do not, then we might // end up deciding that the table is not full and inserting into the // current table, while a 'get' has decided the same key cannot be in this // table because of too many reprobes. The invariant is: // slots.estimate_sum >= max_reprobe_cnt >= reprobe_limit(len) private final boolean tableFull( int reprobe_cnt, int len ) { return // Do the cheap check first: we allow some number of reprobes always reprobe_cnt >= REPROBE_LIMIT && // More expensive check: see if the table is > 1/2 full. _slots.estimate_get() >= reprobe_limit(len)*2; } // --- resize ------------------------------------------------------------ // Resizing after too many probes. "How Big???" heuristics are here. // Callers will (not this routine) will 'help_copy' any in-progress copy. 
// Since this routine has a fast cutout for copy-already-started, callers // MUST 'help_copy' lest we have a path which forever runs through // 'resize' only to discover a copy-in-progress which never progresses. private final CHM resize() { // Check for resize already in progress, probably triggered by another thread CHM newchm = _newchm; // VOLATILE READ if( newchm != null ) // See if resize is already in progress return newchm; // Use the new table already // No copy in-progress, so start one. First up: compute new table size. int oldlen = _keys.length; // Old count of K,V pairs allowed int sz = size(); // Get current table count of active K,V pairs int newsz = sz; // First size estimate // Heuristic to determine new size. We expect plenty of dead-slots-with-keys // and we need some decent padding to avoid endless reprobing. if( _nbhml._opt_for_space ) { // This heuristic leads to a much denser table with a higher reprobe rate if( sz >= (oldlen>>1) ) // If we are >50% full of keys then... newsz = oldlen<<1; // Double size } else { if( sz >= (oldlen>>2) ) { // If we are >25% full of keys then... newsz = oldlen<<1; // Double size if( sz >= (oldlen>>1) ) // If we are >50% full of keys then... newsz = oldlen<<2; // Double double size } } // Last (re)size operation was very recent? Then double again; slows // down resize operations for tables subject to a high key churn rate. long tm = System.currentTimeMillis(); if( newsz <= oldlen && // New table would shrink or hold steady? tm <= _nbhml._last_resize_milli+10000 && // Recent resize (less than 1 sec ago) //(q=_slots.estimate_sum()) >= (sz<<1) ) // 1/2 of keys are dead? true ) newsz = oldlen<<1; // Double the existing size // Do not shrink, ever if( newsz < oldlen ) newsz = oldlen; //System.out.println("old="+oldlen+" new="+newsz+" size()="+sz+" est_slots()="+q+" millis="+(tm-_nbhml._last_resize_milli)); // Convert to power-of-2 int log2; for( log2=MIN_SIZE_LOG; (1<<log2) < newsz; log2++ ) ; // Compute log2 of size // Now limit the number of threads actually allocating memory to a // handful - lest we have 750 threads all trying to allocate a giant // resized array. long r = _resizers; while( !_resizerUpdater.compareAndSet(this,r,r+1) ) r = _resizers; // Size calculation: 2 words (K+V) per table entry, plus a handful. We // guess at 32-bit pointers; 64-bit pointers screws up the size calc by // 2x but does not screw up the heuristic very much. int megs = ((((1<<log2)<<1)+4)<<3/*word to bytes*/)>>20/*megs*/; if( r >= 2 && megs > 0 ) { // Already 2 guys trying; wait and see newchm = _newchm; // Between dorking around, another thread did it if( newchm != null ) // See if resize is already in progress return newchm; // Use the new table already // TODO - use a wait with timeout, so we'll wakeup as soon as the new table // is ready, or after the timeout in any case. //synchronized( this ) { wait(8*megs); } // Timeout - we always wakeup // For now, sleep a tad and see if the 2 guys already trying to make // the table actually get around to making it happen. try { Thread.sleep(8*megs); } catch( Exception e ) { } } // Last check, since the 'new' below is expensive and there is a chance // that another thread slipped in a new thread while we ran the heuristic. 
newchm = _newchm; if( newchm != null ) // See if resize is already in progress return newchm; // Use the new table already // New CHM - actually allocate the big arrays newchm = new CHM(_nbhml,_size,log2); // Another check after the slow allocation if( _newchm != null ) // See if resize is already in progress return _newchm; // Use the new table already // The new table must be CAS'd in so only 1 winner amongst duplicate // racing resizing threads. Extra CHM's will be GC'd. if( CAS_newchm( newchm ) ) { // NOW a resize-is-in-progress! //notifyAll(); // Wake up any sleepers //long nano = System.nanoTime(); //System.out.println(" "+nano+" Resize from "+oldlen+" to "+(1<<log2)+" and had "+(_resizers-1)+" extras" ); //System.out.print("["+log2); } else // CAS failed? newchm = _newchm; // Reread new table return newchm; } // The next part of the table to copy. It monotonically transits from zero // to _keys.length. Visitors to the table can claim 'work chunks' by // CAS'ing this field up, then copying the indicated indices from the old // table to the new table. Workers are not required to finish any chunk; // the counter simply wraps and work is copied duplicately until somebody // somewhere completes the count. volatile long _copyIdx = 0; static private final AtomicLongFieldUpdater<CHM> _copyIdxUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyIdx"); // Work-done reporting. Used to efficiently signal when we can move to // the new table. From 0 to len(oldkvs) refers to copying from the old // table to the new. volatile long _copyDone= 0; static private final AtomicLongFieldUpdater<CHM> _copyDoneUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyDone"); // --- help_copy_impl ---------------------------------------------------- // Help along an existing resize operation. We hope its the top-level // copy (it was when we started) but this CHM might have been promoted out // of the top position. private final void help_copy_impl( final boolean copy_all ) { final CHM newchm = _newchm; assert newchm != null; // Already checked by caller int oldlen = _keys.length; // Total amount to copy final int MIN_COPY_WORK = Math.min(oldlen,1024); // Limit per-thread work // --- int panic_start = -1; int copyidx=-9999; // Fool javac to think it's initialized while( _copyDone < oldlen ) { // Still needing to copy? // Carve out a chunk of work. The counter wraps around so every // thread eventually tries to copy every slot repeatedly. // We "panic" if we have tried TWICE to copy every slot - and it still // has not happened. i.e., twice some thread somewhere claimed they // would copy 'slot X' (by bumping _copyIdx) but they never claimed to // have finished (by bumping _copyDone). Our choices become limited: // we can wait for the work-claimers to finish (and become a blocking // algorithm) or do the copy work ourselves. Tiny tables with huge // thread counts trying to copy the table often 'panic'. if( panic_start == -1 ) { // No panic? copyidx = (int)_copyIdx; while( copyidx < (oldlen<<1) && // 'panic' check !_copyIdxUpdater.compareAndSet(this,copyidx,copyidx+MIN_COPY_WORK) ) copyidx = (int)_copyIdx; // Re-read if( !(copyidx < (oldlen<<1)) ) // Panic! panic_start = copyidx; // Record where we started to panic-copy } // We now know what to copy. Try to copy. int workdone = 0; for( int i=0; i<MIN_COPY_WORK; i++ ) if( copy_slot((copyidx+i)&(oldlen-1)) ) // Made an oldtable slot go dead? workdone++; // Yes! 
if( workdone > 0 ) // Report work-done occasionally copy_check_and_promote( workdone );// See if we can promote //for( int i=0; i<MIN_COPY_WORK; i++ ) // if( copy_slot((copyidx+i)&(oldlen-1)) ) // Made an oldtable slot go dead? // copy_check_and_promote( 1 );// See if we can promote copyidx += MIN_COPY_WORK; // Uncomment these next 2 lines to turn on incremental table-copy. // Otherwise this thread continues to copy until it is all done. if( !copy_all && panic_start == -1 ) // No panic? return; // Then done copying after doing MIN_COPY_WORK } // Extra promotion check, in case another thread finished all copying // then got stalled before promoting. copy_check_and_promote( 0 ); // See if we can promote } // --- copy_slot_and_check ----------------------------------------------- // Copy slot 'idx' from the old table to the new table. If this thread // confirmed the copy, update the counters and check for promotion. // // Returns the result of reading the volatile _newchm, mostly as a // convenience to callers. We come here with 1-shot copy requests // typically because the caller has found a Prime, and has not yet read // the _newchm volatile - which must have changed from null-to-not-null // before any Prime appears. So the caller needs to read the _newchm // field to retry his operation in the new table, but probably has not // read it yet. private final CHM copy_slot_and_check( int idx, Object should_help ) { // We're only here because the caller saw a Prime, which implies a // table-copy is in progress. assert _newchm != null; if( copy_slot(idx) ) // Copy the desired slot copy_check_and_promote(1); // Record the slot copied // Generically help along any copy (except if called recursively from a helper) if( should_help != null ) _nbhml.help_copy(); return _newchm; } // --- copy_check_and_promote -------------------------------------------- private final void copy_check_and_promote( int workdone ) { int oldlen = _keys.length; // We made a slot unusable and so did some of the needed copy work long copyDone = _copyDone; long nowDone = copyDone+workdone; assert nowDone <= oldlen; if( workdone > 0 ) { while( !_copyDoneUpdater.compareAndSet(this,copyDone,nowDone) ) { copyDone = _copyDone; // Reload, retry nowDone = copyDone+workdone; assert nowDone <= oldlen; } //if( (10*copyDone/oldlen) != (10*nowDone/oldlen) ) // System.out.print(" "+nowDone*100/oldlen+"%"+"_"+(_copyIdx*100/oldlen)+"%"); } // Check for copy being ALL done, and promote. Note that we might have // nested in-progress copies and manage to finish a nested copy before // finishing the top-level copy. We only promote top-level copies. if( nowDone == oldlen && // Ready to promote this table? _nbhml._chm == this && // Looking at the top-level table? // Attempt to promote _nbhml.CAS(_chm_offset,this,_newchm) ) { _nbhml._last_resize_milli = System.currentTimeMillis(); // Record resize time for next check //long nano = System.nanoTime(); //System.out.println(" "+nano+" Promote table "+oldlen+" to "+_newchm._keys.length); //System.out.print("_"+oldlen+"]"); } } // --- copy_slot --------------------------------------------------------- // Copy one K/V pair from oldkvs[i] to newkvs. Returns true if we can // confirm that the new table guaranteed has a value for this old-table // slot. We need an accurate confirmed-copy count so that we know when we // can promote (if we promote the new table too soon, other threads may // 'miss' on values not-yet-copied from the old table). 
We don't allow // any direct updates on the new table, unless they first happened to the // old table - so that any transition in the new table from null to // not-null must have been from a copy_slot (or other old-table overwrite) // and not from a thread directly writing in the new table. Thus we can // count null-to-not-null transitions in the new table. private boolean copy_slot( int idx ) { // Blindly set the key slot from NO_KEY to some key which hashes here, // to eagerly stop fresh put's from inserting new values in the old // table when the old table is mid-resize. We don't need to act on the // results here, because our correctness stems from box'ing the Value // field. Slamming the Key field is a minor speed optimization. long key; while( (key=_keys[idx]) == NO_KEY ) CAS_key(idx, NO_KEY, (idx+_keys.length)/*a non-zero key which hashes here*/); // --- // Prevent new values from appearing in the old table. // Box what we see in the old table, to prevent further updates. Object oldval = _vals[idx]; // Read OLD table while( !(oldval instanceof Prime) ) { final Prime box = (oldval == null || oldval == TOMBSTONE) ? TOMBPRIME : new Prime(oldval); if( CAS_val(idx,oldval,box) ) { // CAS down a box'd version of oldval // If we made the Value slot hold a TOMBPRIME, then we both // prevented further updates here but also the (absent) oldval is // vaccuously available in the new table. We return with true here: // any thread looking for a value for this key can correctly go // straight to the new table and skip looking in the old table. if( box == TOMBPRIME ) return true; // Otherwise we boxed something, but it still needs to be // copied into the new table. oldval = box; // Record updated oldval break; // Break loop; oldval is now boxed by us } oldval = _vals[idx]; // Else try, try again } if( oldval == TOMBPRIME ) return false; // Copy already complete here! // --- // Copy the value into the new table, but only if we overwrite a null. // If another value is already in the new table, then somebody else // wrote something there and that write is happens-after any value that // appears in the old table. If putIfMatch does not find a null in the // new table - somebody else should have recorded the null-not_null // transition in this copy. Object old_unboxed = ((Prime)oldval)._V; assert old_unboxed != TOMBSTONE; boolean copied_into_new = (_newchm.putIfMatch(key, old_unboxed, null) == null); // --- // Finally, now that any old value is exposed in the new table, we can // forever hide the old-table value by slapping a TOMBPRIME down. This // will stop other threads from uselessly attempting to copy this slot // (i.e., it's a speed optimization not a correctness issue). while( !CAS_val(idx,oldval,TOMBPRIME) ) oldval = _vals[idx]; return copied_into_new; } // end copy_slot } // End of CHM // --- Snapshot ------------------------------------------------------------ private class SnapshotV implements Iterator<TypeV>, Enumeration<TypeV> { final CHM _sschm; public SnapshotV() { CHM topchm; while( true ) { // Verify no table-copy-in-progress topchm = _chm; if( topchm._newchm == null ) // No table-copy-in-progress break; // Table copy in-progress - so we cannot get a clean iteration. We // must help finish the table copy before we can start iterating. topchm.help_copy_impl(true); } // The "linearization point" for the iteration. Every key in this table // will be visited, but keys added later might be skipped or even be // added to a following table (also not iterated over). 
_sschm = topchm; // Warm-up the iterator _idx = -1; next(); } int length() { return _sschm._keys.length; } long key(final int idx) { return _sschm._keys[idx]; } private int _idx; // -2 for NO_KEY, -1 for CHECK_NEW_TABLE_LONG, 0-keys.length private long _nextK, _prevK; // Last 2 keys found private TypeV _nextV, _prevV; // Last 2 values found public boolean hasNext() { return _nextV != null; } public TypeV next() { // 'next' actually knows what the next value will be - it had to // figure that out last go 'round lest 'hasNext' report true and // some other thread deleted the last value. Instead, 'next' // spends all its effort finding the key that comes after the // 'next' key. if( _idx != -1 && _nextV == null ) throw new NoSuchElementException(); _prevK = _nextK; // This will become the previous key _prevV = _nextV; // This will become the previous value _nextV = null; // We have no more next-key // Attempt to set <_nextK,_nextV> to the next K,V pair. // _nextV is the trigger: stop searching when it is != null if( _idx == -1 ) { // Check for NO_KEY _idx = 0; // Setup for next phase of search _nextK = NO_KEY; if( (_nextV=get(_nextK)) != null ) return _prevV; } while( _idx<length() ) { // Scan array _nextK = key(_idx++); // Get a key that definitely is in the set (for the moment!) if( _nextK != NO_KEY && // Found something? (_nextV=get(_nextK)) != null ) break; // Got it! _nextK is a valid Key } // Else keep scanning return _prevV; // Return current value. } public void remove() { if( _prevV == null ) throw new IllegalStateException(); _sschm.putIfMatch( _prevK, TOMBSTONE, _prevV ); _prevV = null; } public TypeV nextElement() { return next(); } public boolean hasMoreElements() { return hasNext(); } } /** Returns an enumeration of the values in this table. * @return an enumeration of the values in this table * @see #values() */ public Enumeration<TypeV> elements() { return new SnapshotV(); } // --- values -------------------------------------------------------------- /** Returns a {@link Collection} view of the values contained in this map. * The collection is backed by the map, so changes to the map are reflected * in the collection, and vice-versa. The collection supports element * removal, which removes the corresponding mapping from this map, via the * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>, * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations. * It does not support the <tt>add</tt> or <tt>addAll</tt> operations. * * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator that * will never throw {@link ConcurrentModificationException}, and guarantees * to traverse elements as they existed upon construction of the iterator, * and may (but is not guaranteed to) reflect any modifications subsequent * to construction. */ public Collection<TypeV> values() { return new AbstractCollection<TypeV>() { public void clear ( ) { NonBlockingHashMapLong.this.clear ( ); } public int size ( ) { return NonBlockingHashMapLong.this.size ( ); } public boolean contains( Object v ) { return NonBlockingHashMapLong.this.containsValue(v); } public Iterator<TypeV> iterator() { return new SnapshotV(); } }; } // --- keySet -------------------------------------------------------------- /** A class which implements the {@link Iterator} and {@link Enumeration} * interfaces, generified to the {@link Long} class and supporting a * <strong>non-auto-boxing</strong> {@link #nextLong} function. 
*/ public class IteratorLong implements Iterator<Long>, Enumeration<Long> { private final SnapshotV _ss; /** A new IteratorLong */ public IteratorLong() { _ss = new SnapshotV(); } /** Remove last key returned by {@link #next} or {@link #nextLong}. */ public void remove() { _ss.remove(); } /** <strong>Auto-box</strong> and return the next key. */ public Long next () { _ss.next(); return _ss._prevK; } /** Return the next key as a primitive {@code long}. */ public long nextLong() { _ss.next(); return _ss._prevK; } /** True if there are more keys to iterate over. */ public boolean hasNext() { return _ss.hasNext(); } /** <strong>Auto-box</strong> and return the next key. */ public Long nextElement() { return next(); } /** True if there are more keys to iterate over. */ public boolean hasMoreElements() { return hasNext(); } } /** Returns an enumeration of the <strong>auto-boxed</strong> keys in this table. * <strong>Warning:</strong> this version will auto-box all returned keys. * @return an enumeration of the auto-boxed keys in this table * @see #keySet() */ public Enumeration<Long> keys() { return new IteratorLong(); } /** Returns a {@link Set} view of the keys contained in this map; with care * the keys may be iterated over <strong>without auto-boxing</strong>. The * set is backed by the map, so changes to the map are reflected in the * set, and vice-versa. The set supports element removal, which removes * the corresponding mapping from this map, via the * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, <tt>removeAll</tt>, * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not support * the <tt>add</tt> or <tt>addAll</tt> operations. * * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator that * will never throw {@link ConcurrentModificationException}, and guarantees * to traverse elements as they existed upon construction of the iterator, * and may (but is not guaranteed to) reflect any modifications subsequent * to construction. */ public Set<Long> keySet() { return new AbstractSet<Long> () { public void clear ( ) { NonBlockingHashMapLong.this.clear ( ); } public int size ( ) { return NonBlockingHashMapLong.this.size ( ); } public boolean contains( Object k ) { return NonBlockingHashMapLong.this.containsKey(k); } public boolean remove ( Object k ) { return NonBlockingHashMapLong.this.remove (k) != null; } public IteratorLong iterator() { return new IteratorLong(); } }; } /** Keys as a long array. Array may be zero-padded if keys are concurrently deleted. */ public long[] keySetLong() { long[] dom = new long[size()]; IteratorLong i=(IteratorLong)keySet().iterator(); int j=0; while( j < dom.length && i.hasNext() ) dom[j++] = i.nextLong(); return dom; } // --- entrySet ------------------------------------------------------------ // Warning: Each call to 'next' in this iterator constructs a new Long and a // new NBHMLEntry. private class NBHMLEntry extends AbstractEntry<Long,TypeV> { NBHMLEntry( final Long k, final TypeV v ) { super(k,v); } public TypeV setValue(final TypeV val) { if (val == null) throw new NullPointerException(); _val = val; return put(_key, val); } } private class SnapshotE implements Iterator<Map.Entry<Long,TypeV>> { final SnapshotV _ss; public SnapshotE() { _ss = new SnapshotV(); } public void remove() { _ss.remove(); } public Map.Entry<Long,TypeV> next() { _ss.next(); return new NBHMLEntry(_ss._prevK,_ss._prevV); } public boolean hasNext() { return _ss.hasNext(); } } /** Returns a {@link Set} view of the mappings contained in this map. 
The * set is backed by the map, so changes to the map are reflected in the * set, and vice-versa. The set supports element removal, which removes * the corresponding mapping from the map, via the * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, <tt>removeAll</tt>, * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not support * the <tt>add</tt> or <tt>addAll</tt> operations. * * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator * that will never throw {@link ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. * * <p><strong>Warning:</strong> the iterator associated with this Set * requires the creation of {@link java.util.Map.Entry} objects with each * iteration. The org.cliffc.high_scale_lib.NonBlockingHashMap * does not normally create or use {@link java.util.Map.Entry} objects, so * they will be created solely to support this iteration. Iterating using * {@link #keySet} or {@link #values} will be more efficient. In addition, * this version requires <strong>auto-boxing</strong> the keys. */ public Set<Map.Entry<Long,TypeV>> entrySet() { return new AbstractSet<Map.Entry<Long,TypeV>>() { public void clear ( ) { NonBlockingHashMapLong.this.clear( ); } public int size ( ) { return NonBlockingHashMapLong.this.size ( ); } public boolean remove( final Object o ) { if (!(o instanceof Map.Entry)) return false; final Map.Entry<?,?> e = (Map.Entry<?,?>)o; return NonBlockingHashMapLong.this.remove(e.getKey(), e.getValue()); } public boolean contains(final Object o) { if (!(o instanceof Map.Entry)) return false; final Map.Entry<?,?> e = (Map.Entry<?,?>)o; TypeV v = get(e.getKey()); return v.equals(e.getValue()); } public Iterator<Map.Entry<Long,TypeV>> iterator() { return new SnapshotE(); } }; } // --- writeObject ------------------------------------------------------- // Write a NBHML to a stream private void writeObject(java.io.ObjectOutputStream s) throws IOException { s.defaultWriteObject(); // Write nothing for( long K : keySet() ) { final Object V = get(K); // Do an official 'get' s.writeLong (K); // Write the <long,TypeV> pair s.writeObject(V); } s.writeLong(NO_KEY); // Sentinel to indicate end-of-data s.writeObject(null); } // --- readObject -------------------------------------------------------- // Read a CHM from a stream private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { s.defaultReadObject(); // Read nothing initialize(MIN_SIZE); for (;;) { final long K = s.readLong(); final TypeV V = (TypeV) s.readObject(); if( K == NO_KEY && V == null ) break; put(K,V); // Insert with an official put } } } // End NonBlockingHashMapLong class
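// ---------------------------------------------------------------------------
// A minimal usage sketch (illustrative only, not part of the original source):
// it shows the primitive-long API documented above -- put/get with long keys,
// putIfAbsent, and keySetLong() for iteration without auto-boxing. The class
// name and sample keys/values are hypothetical, and it assumes the default
// no-arg constructor defined earlier in this file.
class NonBlockingHashMapLongUsageSketch {
  public static void main(String[] args) {
    NonBlockingHashMapLong<String> map = new NonBlockingHashMapLong<String>();
    map.put(1L, "one");                 // primitive long key, no boxing
    map.putIfAbsent(2L, "two");         // atomic insert-if-missing
    System.out.println(map.get(1L));    // prints "one"
    for (long k : map.keySetLong())     // keys as a primitive long[] snapshot
      System.out.println(k + " -> " + map.get(k));
  }
}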
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/nbhm/NonBlockingHashSet.java
package water.nbhm; import java.io.Serializable; import java.util.AbstractSet; import java.util.Iterator; import java.util.Set; /* * Written by Cliff Click and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain */ /** * A simple wrapper around {@link NonBlockingHashMap} making it implement the * {@link Set} interface. All operations are Non-Blocking and multi-thread safe. * * @since 1.5 * @author Cliff Click */ public class NonBlockingHashSet<E> extends AbstractSet<E> implements Serializable { private static final Object V = ""; private final NonBlockingHashMap<E,Object> _map; /** Make a new empty {@link NonBlockingHashSet}. */ public NonBlockingHashSet() { super(); _map = new NonBlockingHashMap<>(); } /** Add {@code o} to the set. * @return <tt>true</tt> if {@code o} was added to the set, <tt>false</tt> * if {@code o} was already in the set. */ public boolean add( final E o ) { return _map.putIfAbsent(o,V) == null; } /** @return <tt>true</tt> if {@code o} is in the set. */ public boolean contains ( final Object o ) { return _map.containsKey(o); } /** @return The match for {@code o} if {@code o} is in the set. */ public E get( final E o ) { return _map.getk(o); } /** Remove {@code o} from the set. * @return <tt>true</tt> if {@code o} was removed from the set, <tt>false</tt> * if {@code o} was not in the set. */ public boolean remove( final Object o ) { return _map.remove(o) == V; } /** Current count of elements in the set. Due to concurrent racing updates, * the size is only ever approximate. Updates due to the calling thread are * immediately visible to calling thread. * @return count of elements. */ public int size( ) { return _map.size(); } /** Empty the set. */ public void clear( ) { _map.clear(); } public Iterator<E> iterator( ) { return _map.keySet().iterator(); } // --- /** * Atomically make the set immutable. Future calls to mutate will throw an * IllegalStateException. Existing mutator calls in other threads racing * with this thread will either throw IllegalStateException or their * update will be visible to this thread. This implies that a simple flag * cannot make the Set immutable, because a late-arriving update in another * thread might see immutable flag not set yet, then mutate the Set after * the {@link #readOnly} call returns. This call can be called concurrently * (and indeed until the operation completes, all calls on the Set from any * thread either complete normally or end up calling {@link #readOnly} * internally). * * <p> This call is useful in debugging multi-threaded programs where the * Set is constructed in parallel, but construction completes after some * time; and after construction the Set is only read. Making the Set * read-only will cause updates arriving after construction is supposedly * complete to throw an {@link IllegalStateException}. */ // (1) call _map's immutable() call // (2) get snapshot // (3) CAS down a local map, power-of-2 larger than _map.size()+1/8th // (4) start @ random, visit all snapshot, insert live keys // (5) CAS _map to null, needs happens-after (4) // (6) if Set call sees _map is null, needs happens-after (4) for readers public void readOnly() { throw new RuntimeException("Unimplemented"); } }
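A small usage sketch of the set wrapper above (editor's addition; the class name and main method are hypothetical). It exercises only methods declared in this file.

import water.nbhm.NonBlockingHashSet;

public class NBHSExample {
  public static void main(String[] args) {
    NonBlockingHashSet<String> set = new NonBlockingHashSet<>();
    System.out.println(set.add("alpha"));      // true  - newly inserted
    System.out.println(set.add("alpha"));      // false - already present (putIfAbsent semantics)
    System.out.println(set.contains("alpha")); // true
    set.remove("alpha");
    System.out.println(set.size());            // 0 here; only approximate under concurrent updates
  }
}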
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/nbhm/NonBlockingSetInt.java
package water.nbhm; import java.io.IOException; import java.io.Serializable; import java.lang.reflect.Field; import java.util.AbstractSet; import java.util.Iterator; import java.util.NoSuchElementException; import java.util.concurrent.atomic.AtomicInteger; import sun.misc.Unsafe; /* * Written by Cliff Click and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain */ /** * A multi-threaded bit-vector set, implemented as an array of primitive * {@code longs}. All operations are non-blocking and multi-threaded safe. * {@link #contains(int)} calls are roughly the same speed as a {load, mask} * sequence. {@link #add(int)} and {@link #remove(int)} calls are a tad more * expensive than a {load, mask, store} sequence because they must use a CAS. * The bit-vector is auto-sizing. * * <p><em>General note of caution:</em> The Set API allows the use of {@link Integer} * with silent autoboxing - which can be very expensive if many calls are * being made. Since autoboxing is silent you may not be aware that this is * going on. The built-in API takes lower-case {@code ints} and is much more * efficient. * * <p>Space: space is used in proportion to the largest element, as opposed to * the number of elements (as is the case with hash-table based Set * implementations). Space is approximately (largest_element/8 + 64) bytes. * * The implementation is a simple bit-vector using CAS for update. * * @since 1.5 * @author Cliff Click */ public class NonBlockingSetInt extends AbstractSet<Integer> implements Serializable { private static final long serialVersionUID = 1234123412341234123L; private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); // --- Bits to allow atomic update of the NBSI private static final long _nbsi_offset; static { // <clinit> Field f = null; try { f = NonBlockingSetInt.class.getDeclaredField("_nbsi"); } catch( java.lang.NoSuchFieldException e ) { } _nbsi_offset = _unsafe.objectFieldOffset(f); } private final boolean CAS_nbsi( NBSI old, NBSI nnn ) { return _unsafe.compareAndSwapObject(this, _nbsi_offset, old, nnn ); } // The actual Set of Joy, which changes during a resize event. The // Only Field for this class, so I can atomically change the entire // set implementation with a single CAS. private transient NBSI _nbsi; /** Create a new empty bit-vector */ public NonBlockingSetInt( ) { _nbsi = new NBSI(63, new ConcurrentAutoTable(), this); // The initial 1-word set } /** * Add {@code i} to the set. Uppercase {@link Integer} version of add, * requires auto-unboxing. When possible use the {@code int} version of * {@link #add(int)} for efficiency. * @throws IllegalArgumentException if i is negative. * @return <tt>true</tt> if i was added to the set. */ public boolean add ( final Integer i ) { return add(i.intValue()); } /** * Test if {@code o} is in the set. This is the uppercase {@link Integer} * version of contains, requires a type-check and auto-unboxing. When * possible use the {@code int} version of {@link #contains(int)} for * efficiency. * @return <tt>true</tt> if i was in the set. */ public boolean contains( final Object o ) { return o instanceof Integer && contains(((Integer) o).intValue()); } /** * Remove {@code o} from the set. This is the uppercase {@link Integer} * version of remove, requires a type-check and auto-unboxing. When * possible use the {@code int} version of {@link #remove(int)} for * efficiency. * @return <tt>true</tt> if i was removed to the set. 
*/ public boolean remove( final Object o ) { return o instanceof Integer && remove(((Integer) o).intValue()); } /** * Add {@code i} to the set. This is the lower-case '{@code int}' version * of {@link #add} - no autoboxing. Negative values throw * IllegalArgumentException. * @throws IllegalArgumentException if i is negative. * @return <tt>true</tt> if i was added to the set. */ public boolean add( final int i ) { if( i < 0 ) throw new IllegalArgumentException(""+i); return _nbsi.add(i); } /** * Test if {@code i} is in the set. This is the lower-case '{@code int}' * version of {@link #contains} - no autoboxing. * @return <tt>true</tt> if i was int the set. */ public boolean contains( final int i ) { return i >= 0 && _nbsi.contains(i); } /** * Remove {@code i} from the set. This is the fast lower-case '{@code int}' * version of {@link #remove} - no autoboxing. * @return <tt>true</tt> if i was added to the set. */ public boolean remove ( final int i ) { return i >= 0 && _nbsi.remove(i); } /** * Current count of elements in the set. Due to concurrent racing updates, * the size is only ever approximate. Updates due to the calling thread are * immediately visible to calling thread. * @return count of elements. */ public int size ( ) { return _nbsi.size( ); } /** Approx largest element in set; at least as big (but max might be smaller). */ public int length() { return _nbsi._bits.length<<6; } /** Empty the bitvector. */ public void clear ( ) { NBSI cleared = new NBSI(63, new ConcurrentAutoTable(), this); // An empty initial NBSI while( !CAS_nbsi( _nbsi, cleared ) ) // Spin until clear works ; } /** Verbose printout of internal structure for debugging. */ public void print() { _nbsi.print(0); } /** * Standard Java {@link Iterator}. Not very efficient because it * auto-boxes the returned values. */ public Iterator<Integer> iterator( ) { return new iter(); } private class iter implements Iterator<Integer> { NBSI _nbsi2; int _idx = -1; int _prev = -1; iter() { _nbsi2 = _nbsi; advance(); } public boolean hasNext() { return _idx != -2; } private void advance() { while( true ) { _idx++; // Next index while( (_idx>>6) >= _nbsi2._bits.length ) { // Index out of range? if( _nbsi2._new == null ) { // New table? 
_idx = -2; // No, so must be all done return; // } _nbsi2 = _nbsi2._new; // Carry on, in the new table } if( _nbsi2.contains(_idx) ) return; } } public Integer next() { if( _idx == -1 ) throw new NoSuchElementException(); _prev = _idx; advance(); return _prev; } public void remove() { if( _prev == -1 ) throw new IllegalStateException(); _nbsi2.remove(_prev); _prev = -1; } } // --- writeObject ------------------------------------------------------- // Write a NBSI to a stream private void writeObject(java.io.ObjectOutputStream s) throws IOException { s.defaultWriteObject(); // Nothing to write final NBSI nbsi = _nbsi; // The One Field is transient final int len = _nbsi._bits.length<<6; s.writeInt(len); // Write max element for( int i=0; i<len; i++ ) s.writeBoolean( _nbsi.contains(i) ); } // --- readObject -------------------------------------------------------- // Read a CHM from a stream private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { s.defaultReadObject(); // Read nothing final int len = s.readInt(); // Read max element _nbsi = new NBSI(len, new ConcurrentAutoTable(), this); for( int i=0; i<len; i++ ) // Read all bits if( s.readBoolean() ) _nbsi.add(i); } // --- NBSI ---------------------------------------------------------------- private static final class NBSI { // Back pointer to the parent wrapper; sorta like make the class non-static private transient final NonBlockingSetInt _non_blocking_set_int; // Used to count elements: a high-performance counter. private transient final ConcurrentAutoTable _size; // The Bits private final long _bits[]; // --- Bits to allow Unsafe access to arrays private static final int _Lbase = _unsafe.arrayBaseOffset(long[].class); private static final int _Lscale = _unsafe.arrayIndexScale(long[].class); private static long rawIndex(final long[] ary, final int idx) { assert idx >= 0 && idx < ary.length; return _Lbase + idx * _Lscale; } private final boolean CAS( int idx, long old, long nnn ) { return _unsafe.compareAndSwapLong( _bits, rawIndex(_bits, idx), old, nnn ); } // --- Resize // The New Table, only set once to non-zero during a resize. // Must be atomically set. private NBSI _new; private static final long _new_offset; static { // <clinit> Field f = null; try { f = NBSI.class.getDeclaredField("_new"); } catch( java.lang.NoSuchFieldException e ) { } _new_offset = _unsafe.objectFieldOffset(f); } private final boolean CAS_new( NBSI nnn ) { return _unsafe.compareAndSwapObject(this, _new_offset, null, nnn ); } private transient final AtomicInteger _copyIdx; // Used to count bits started copying private transient final AtomicInteger _copyDone; // Used to count words copied in a resize operation private transient final int _sum_bits_length; // Sum of all nested _bits.lengths private static final long mask( int i ) { return 1L<<(i&63); } // I need 1 free bit out of 64 to allow for resize. I do this by stealing // the high order bit - but then I need to do something with adding element // number 63 (and friends). I could use a mod63 function but it's more // efficient to handle the mod-64 case as an exception. // // Every 64th bit is put in it's own recursive bitvector. If the low 6 bits // are all set, we shift them off and recursively operate on the _nbsi64 set. private final NBSI _nbsi64; private NBSI( int max_elem, ConcurrentAutoTable ctr, NonBlockingSetInt nonb ) { super(); _non_blocking_set_int = nonb; _size = ctr; _copyIdx = ctr == null ? null : new AtomicInteger(); _copyDone = ctr == null ? 
null : new AtomicInteger(); // The main array of bits _bits = new long[(int)(((long)max_elem+63)>>>6)]; // Every 64th bit is moved off to it's own subarray, so that the // sign-bit is free for other purposes _nbsi64 = ((max_elem+1)>>>6) == 0 ? null : new NBSI((max_elem+1)>>>6, null, null); _sum_bits_length = _bits.length + (_nbsi64==null ? 0 : _nbsi64._sum_bits_length); } // Lower-case 'int' versions - no autoboxing, very fast. // 'i' is known positive. public boolean add( final int i ) { // Check for out-of-range for the current size bit vector. // If so we need to grow the bit vector. if( (i>>6) >= _bits.length ) return install_larger_new_bits(i). // Install larger pile-o-bits (duh) help_copy().add(i); // Finally, add to the new table // Handle every 64th bit via using a nested array NBSI nbsi = this; // The bit array being added into int j = i; // The bit index being added while( (j&63) == 63 ) { // Bit 64? (low 6 bits are all set) nbsi = nbsi._nbsi64; // Recurse j = j>>6; // Strip off low 6 bits (all set) } final long mask = mask(j); long old; do { old = nbsi._bits[j>>6]; // Read old bits if( old < 0 ) // Not mutable? // Not mutable: finish copy of word, and retry on copied word return help_copy_impl(i).help_copy().add(i); if( (old & mask) != 0 ) return false; // Bit is already set? } while( !nbsi.CAS( j>>6, old, old | mask ) ); _size.add(1); return true; } public boolean remove( final int i ) { if( (i>>6) >= _bits.length ) // Out of bounds? Not in this array! return _new != null && help_copy().remove(i); // Handle every 64th bit via using a nested array NBSI nbsi = this; // The bit array being added into int j = i; // The bit index being added while( (j&63) == 63 ) { // Bit 64? (low 6 bits are all set) nbsi = nbsi._nbsi64; // Recurse j = j>>6; // Strip off low 6 bits (all set) } final long mask = mask(j); long old; do { old = nbsi._bits[j>>6]; // Read old bits if( old < 0 ) // Not mutable? // Not mutable: finish copy of word, and retry on copied word return help_copy_impl(i).help_copy().remove(i); if( (old & mask) == 0 ) return false; // Bit is already clear? } while( !nbsi.CAS( j>>6, old, old & ~mask ) ); _size.add(-1); return true; } public boolean contains( final int i ) { if( (i>>6) >= _bits.length ) // Out of bounds? Not in this array! return _new != null && help_copy().contains(i); // Handle every 64th bit via using a nested array NBSI nbsi = this; // The bit array being added into int j = i; // The bit index being added while( (j&63) == 63 ) { // Bit 64? (low 6 bits are all set) nbsi = nbsi._nbsi64; // Recurse j = j>>6; // Strip off low 6 bits (all set) } final long mask = mask(j); long old = nbsi._bits[j>>6]; // Read old bits if( old < 0 ) // Not mutable? // Not mutable: finish copy of word, and retry on copied word return help_copy_impl(i).help_copy().contains(i); // Yes mutable: test & return bit return (old & mask) != 0; } public int size() { return (int)_size.get(); } // Must grow the current array to hold an element of size i private NBSI install_larger_new_bits( final int i ) { if( _new == null ) { // Grow by powers of 2, to avoid minor grow-by-1's. // Note: must grow by exact powers-of-2 or the by-64-bit trick doesn't work right int sz = (_bits.length<<6)<<1; // CAS to install a new larger size. Did it work? Did it fail? We // don't know and don't care. Only One can be installed, so if // another thread installed a too-small size, we can't help it - we // must simply install our new larger size as a nested-resize table. 
CAS_new(new NBSI(sz, _size, _non_blocking_set_int)); } // Return self for 'fluid' programming style return this; } // Help any top-level NBSI to copy until completed. // Always return the _new version of *this* NBSI, in case we're nested. private NBSI help_copy() { // Pick some words to help with - but only help copy the top-level NBSI. // Nested NBSI waits until the top is done before we start helping. NBSI top_nbsi = _non_blocking_set_int._nbsi; final int HELP = 8; // Tuning number: how much copy pain are we willing to inflict? // We "help" by forcing individual bit indices to copy. However, bits // come in lumps of 64 per word, so we just advance the bit counter by 64's. int idx = top_nbsi._copyIdx.getAndAdd(64*HELP); for( int i=0; i<HELP; i++ ) { int j = idx+i*64; j %= (top_nbsi._bits.length<<6); // Limit, wrap to array size; means we retry indices top_nbsi.help_copy_impl(j ); top_nbsi.help_copy_impl(j+63); // Also force the nested-by-64 bit } // Top level guy ready to promote? // Note: WE may not be the top-level guy! if( top_nbsi._copyDone.get() == top_nbsi._sum_bits_length ) // One shot CAS to promote - it may fail since we are racing; others // may promote as well if( _non_blocking_set_int.CAS_nbsi( top_nbsi, top_nbsi._new ) ) { //System.out.println("Promote at top level to size "+(_non_blocking_set_int._nbsi._bits.length<<6)); } // Return the new bitvector for 'fluid' programming style return _new; } // Help copy this one word. State Machine. // (1) If not "made immutable" in the old array, set the sign bit to make // it immutable. // (2) If non-zero in old array & zero in new, CAS new from 0 to copy-of-old // (3) If non-zero in old array & non-zero in new, CAS old to zero // (4) Zero in old, new is valid // At this point, old should be immutable-zero & new has a copy of bits private NBSI help_copy_impl( int i ) { // Handle every 64th bit via using a nested array NBSI old = this; // The bit array being copied from NBSI nnn = _new; // The bit array being copied to if( nnn == null ) return this; // Promoted already int j = i; // The bit index being added while( (j&63) == 63 ) { // Bit 64? (low 6 bits are all set) old = old._nbsi64; // Recurse nnn = nnn._nbsi64; // Recurse j = j>>6; // Strip off low 6 bits (all set) } // Transit from state 1: word is not immutable yet // Immutable is in bit 63, the sign bit. long bits = old._bits[j>>6]; while( bits >= 0 ) { // Still in state (1)? long oldbits = bits; bits |= mask(63); // Target state of bits: sign-bit means immutable if( old.CAS( j>>6, oldbits, bits ) ) { if( oldbits == 0 ) _copyDone.addAndGet(1); break; // Success - old array word is now immutable } bits = old._bits[j>>6]; // Retry if CAS failed } // Transit from state 2: non-zero in old and zero in new if( bits != mask(63) ) { // Non-zero in old? long new_bits = nnn._bits[j>>6]; if( new_bits == 0 ) { // New array is still zero new_bits = bits & ~mask(63); // Desired new value: a mutable copy of bits // One-shot CAS attempt, no loop, from 0 to non-zero. 
// If it fails, somebody else did the copy for us if( !nnn.CAS( j>>6, 0, new_bits ) ) new_bits = nnn._bits[j>>6]; // Since it failed, get the new value assert new_bits != 0; } // Transit from state 3: non-zero in old and non-zero in new // One-shot CAS attempt, no loop, from non-zero to 0 (but immutable) if( old.CAS( j>>6, bits, mask(63) ) ) _copyDone.addAndGet(1); // One more word finished copying } // Now in state 4: zero (and immutable) in old // Return the self bitvector for 'fluid' programming style return this; } private void print( int d, String msg ) { for( int i=0; i<d; i++ ) System.out.print(" "); System.out.println(msg); } private void print(int d) { StringBuilder buf = new StringBuilder(); buf.append("NBSI - _bits.len="); NBSI x = this; while( x != null ) { buf.append(" "+x._bits.length); x = x._nbsi64; } print(d,buf.toString()); x = this; while( x != null ) { for( int i=0; i<x._bits.length; i++ ) System.out.print(Long.toHexString(x._bits[i])+" "); x = x._nbsi64; System.out.println(); } if( _copyIdx.get() != 0 || _copyDone.get() != 0 ) print(d,"_copyIdx="+_copyIdx.get()+" _copyDone="+_copyDone.get()+" _words_to_cpy="+_sum_bits_length); if( _new != null ) { print(d,"__has_new - "); _new.print(d+1); } } } }
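A hedged usage sketch of the bit-vector set above (editor's addition). The wrapper class is hypothetical, and the code assumes a JVM where sun.misc.Unsafe is reflectively accessible (e.g. Java 8).

import water.nbhm.NonBlockingSetInt;

public class NBSIExample {
  public static void main(String[] args) {
    NonBlockingSetInt bits = new NonBlockingSetInt();
    bits.add(3);                              // primitive-int API, no autoboxing
    bits.add(1000);                           // forces the internal bit-vector to grow
    System.out.println(bits.contains(3));     // true
    System.out.println(bits.contains(999));   // false
    System.out.println(bits.size());          // 2
    System.out.println(bits.length());        // capacity in bits after growth (a multiple of 64)
    bits.remove(3);
  }
}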
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/nbhm/UtilUnsafe.java
package water.nbhm; import java.lang.reflect.Field; import sun.misc.Unsafe; /** * Simple class to obtain access to the {@link Unsafe} object. {@link Unsafe} * is required to allow efficient CAS operations on arrays. Note that the * versions in java.util.concurrent.atomic, such as {@link * java.util.concurrent.atomic.AtomicLongArray}, require extra memory ordering * guarantees which are generally not needed in these algorithms and are also * expensive on most processors. */ public class UtilUnsafe { private UtilUnsafe() { } // dummy private constructor /** Fetch the Unsafe. Use With Caution. */ public static Unsafe getUnsafe() { // Not on bootclasspath if( UtilUnsafe.class.getClassLoader() == null ) return Unsafe.getUnsafe(); try { final Field fld = Unsafe.class.getDeclaredField("theUnsafe"); fld.setAccessible(true); return (Unsafe) fld.get(UtilUnsafe.class); } catch (Exception e) { throw new RuntimeException("Could not obtain access to sun.misc.Unsafe", e); } } }
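An illustrative sketch (editor's addition) of the kind of array CAS this helper enables, mirroring how NBSI above uses it; it assumes a Java 8 era JVM where sun.misc.Unsafe can be obtained reflectively.

import sun.misc.Unsafe;
import water.nbhm.UtilUnsafe;

public class UnsafeCasSketch {
  private static final Unsafe U = UtilUnsafe.getUnsafe();
  private static final int BASE  = U.arrayBaseOffset(long[].class);  // offset of element 0
  private static final int SCALE = U.arrayIndexScale(long[].class);  // bytes per element

  public static void main(String[] args) {
    long[] bits = new long[4];
    long rawIndex = BASE + 2L * SCALE;                               // raw memory offset of bits[2]
    boolean swapped = U.compareAndSwapLong(bits, rawIndex, 0L, 1L);  // CAS bits[2]: 0 -> 1
    System.out.println(swapped + ", bits[2] = " + bits[2]);          // true, 1
  }
}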
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/network/SSLContextException.java
package water.network; public class SSLContextException extends Throwable { SSLContextException(String msg, Throwable e) { super(msg, e); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/network/SSLProperties.java
package water.network; import java.io.File; import java.util.Properties; class SSLProperties extends Properties { private final File _pathRoot; SSLProperties(File pathRoot) { _pathRoot = pathRoot; } SSLProperties() { this(null); } String[] h2o_ssl_enabled_algorithms() { String algs = getProperty("h2o_ssl_enabled_algorithms"); if(null != algs) { return algs.split(","); } return null; } String h2o_ssl_protocol(String defaultTLS) { return getProperty("h2o_ssl_protocol", defaultTLS); } String h2o_ssl_jks_internal() { return expandPath(getProperty("h2o_ssl_jks_internal")); } String h2o_ssl_jks_password() { return getProperty("h2o_ssl_jks_password"); } String h2o_ssl_jts() { String jts = getProperty("h2o_ssl_jts"); if (jts == null) return h2o_ssl_jks_internal(); return expandPath(jts); } String h2o_ssl_jts_password() { return getProperty("h2o_ssl_jts_password") != null ? getProperty("h2o_ssl_jts_password") : getProperty("h2o_ssl_jks_password"); } String expandPath(String path) { if (path == null) return null; if (_pathRoot == null) return path; if (new File(path).isAbsolute()) return path; return new File(_pathRoot, path).getAbsolutePath(); } File getPathRoot() { return _pathRoot; } }
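For orientation, a hypothetical sketch (editor's addition) of the kind of key/value content these accessors read. SSLProperties itself is package-private, so plain java.util.Properties is used here just to show the keys; all file names, passwords and cipher names are placeholders, not H2O defaults.

import java.io.StringReader;
import java.util.Properties;

public class SslPropsSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical config text using the property keys the accessors above look up.
    String conf =
        "h2o_ssl_protocol=TLSv1.2\n" +
        "h2o_ssl_jks_internal=h2o-internal.jks\n" +
        "h2o_ssl_jks_password=changeit\n" +
        "h2o_ssl_enabled_algorithms=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n";
    Properties p = new Properties();
    p.load(new StringReader(conf));
    // With no h2o_ssl_jts / h2o_ssl_jts_password entries, SSLProperties falls back to the
    // internal keystore and its password; relative paths are resolved against the config
    // file's directory when a path root is supplied.
    System.out.println(p.getProperty("h2o_ssl_jks_internal"));
  }
}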
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/network/SSLSocketChannel.java
package water.network; import water.H2O; import water.util.Log; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLEngineResult; import javax.net.ssl.SSLException; import javax.net.ssl.SSLSession; import java.io.IOException; import java.net.SocketException; import java.nio.ByteBuffer; import java.nio.channels.ByteChannel; import java.nio.channels.SocketChannel; /** * This class is based on: * <a href="https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html">Oracle's JSSE guide.</a> * <a href="https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/samples/sslengine/SSLEngineSimpleDemo.java">Oracle's SSLEngine demo.</a> * * It's a simple wrapper around SocketChannels which enables SSL/TLS * communication using {@link javax.net.ssl.SSLEngine}. */ class SSLSocketChannel implements ByteChannel { // Empty buffer for handshakes private static final ByteBuffer EMPTY_BUFFER = ByteBuffer.allocate(0); // Buffer holding encrypted outgoing data private ByteBuffer netInBuffer; // Buffer holding encrypted incoming data private ByteBuffer netOutBuffer; // Buffer holding decrypted incoming data private ByteBuffer peerAppData; private SocketChannel channel = null; private SSLEngine sslEngine = null; private boolean closing = false; private boolean closed = false; private boolean handshakeComplete = false; SSLSocketChannel(SocketChannel channel, SSLEngine sslEngine) throws IOException { this.channel = channel; this.sslEngine = sslEngine; sslEngine.setEnableSessionCreation(true); SSLSession session = sslEngine.getSession(); prepareBuffers(session); handshake(); } @Override public boolean isOpen() { return channel.isOpen(); } @Override public void close() throws IOException { closing = true; sslEngine.closeOutbound(); sslEngine.getSession().invalidate(); netOutBuffer.clear(); channel.close(); closed = true; } private void prepareBuffers(SSLSession session) throws SocketException { int appBufferSize = session.getApplicationBufferSize(); // Less is not more. More is more. Bigger than the app buffer size so successful unwraps() don't cause BUFFER_OVERFLOW // Value 64 was based on other frameworks using it and some manual testing. Might require tuning in the future. peerAppData = ByteBuffer.allocate(appBufferSize + 64); int netBufferSize = session.getPacketBufferSize(); netInBuffer = ByteBuffer.allocate(netBufferSize); netOutBuffer = ByteBuffer.allocate(netBufferSize); } // ----------------------------------------------------------- // HANDSHAKE // ----------------------------------------------------------- private SSLEngineResult.HandshakeStatus hs; private void handshake() throws IOException { Log.debug("Starting SSL handshake..."); sslEngine.beginHandshake(); hs = sslEngine.getHandshakeStatus(); SSLEngineResult initHandshakeStatus; while (!handshakeComplete) { switch (hs) { case NOT_HANDSHAKING: { //should never happen throw new IOException("NOT_HANDSHAKING during handshake"); } case FINISHED: handshakeComplete = !netOutBuffer.hasRemaining(); break; case NEED_WRAP: { initHandshakeStatus = handshakeWrap(); if ( initHandshakeStatus.getStatus() == SSLEngineResult.Status.OK ){ if (hs == SSLEngineResult.HandshakeStatus.NEED_TASK) { tasks(); } } break; } case NEED_UNWRAP: { initHandshakeStatus = handshakeUnwrap(); if ( initHandshakeStatus.getStatus() == SSLEngineResult.Status.OK ){ if (hs == SSLEngineResult.HandshakeStatus.NEED_TASK) { tasks(); } } break; } // SSL needs to perform some delegating tasks before it can continue. 
// Those tasks will be run in the same thread and can be blocking. case NEED_TASK: tasks(); break; } } Log.debug("SSL handshake finished successfully!"); } private synchronized SSLEngineResult handshakeWrap() throws IOException { netOutBuffer.clear(); SSLEngineResult wrapResult = sslEngine.wrap(EMPTY_BUFFER, netOutBuffer); netOutBuffer.flip(); hs = wrapResult.getHandshakeStatus(); channel.write(netOutBuffer); return wrapResult; } private synchronized SSLEngineResult handshakeUnwrap() throws IOException { if (netInBuffer.position() == netInBuffer.limit()) { netInBuffer.clear(); } channel.read(netInBuffer); SSLEngineResult unwrapResult; peerAppData.clear(); do { netInBuffer.flip(); unwrapResult = sslEngine.unwrap(netInBuffer, peerAppData); netInBuffer.compact(); hs = unwrapResult.getHandshakeStatus(); switch (unwrapResult.getStatus()) { case OK: case BUFFER_UNDERFLOW: { if (unwrapResult.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.NEED_TASK) { tasks(); } break; } case BUFFER_OVERFLOW: { int applicationBufferSize = sslEngine.getSession().getApplicationBufferSize(); if (applicationBufferSize > peerAppData.capacity()) { ByteBuffer b = ByteBuffer.allocate(applicationBufferSize + peerAppData.position()); peerAppData.flip(); b.put(peerAppData); peerAppData = b; } else { peerAppData.compact(); } break; } default: throw new IOException("Failed to SSL unwrap with status " + unwrapResult.getStatus()); } } while(unwrapResult.getStatus() == SSLEngineResult.Status.OK && hs == SSLEngineResult.HandshakeStatus.NEED_UNWRAP); return unwrapResult; } // ----------------------------------------------------------- // READ AND WRITE // ----------------------------------------------------------- @Override public int read(ByteBuffer dst) throws IOException { if (closing || closed) return -1; return unwrap(dst); } private synchronized int unwrap(ByteBuffer dst) throws IOException { int read = 0; // We have outstanding data in our incoming decrypted buffer, use that data first to fill dst if(!dst.hasRemaining()) { return 0; } if(peerAppData.position() != 0) { read += copy(peerAppData, dst); return read; } if(netInBuffer.position() == 0) { channel.read(netInBuffer); } while(netInBuffer.position() != 0) { netInBuffer.flip(); // We still might have left data here if dst was smaller than the amount of data in peerAppData if(peerAppData.position() != 0) { peerAppData.compact(); } SSLEngineResult unwrapResult = sslEngine.unwrap(netInBuffer, peerAppData); switch (unwrapResult.getStatus()) { case OK: { unwrapResult.bytesProduced(); if (unwrapResult.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.NEED_TASK) tasks(); break; } case BUFFER_OVERFLOW: { int applicationBufferSize = sslEngine.getSession().getApplicationBufferSize(); if (applicationBufferSize > peerAppData.capacity()) { int appSize = applicationBufferSize; ByteBuffer b = ByteBuffer.allocate(appSize + peerAppData.position()); peerAppData.flip(); b.put(peerAppData); peerAppData = b; } else { // We tried to unwrap data into peerAppData which means there's leftover in netInBuffer // the upcoming read should read int potential new data after the leftover netInBuffer.position(netInBuffer.limit()); netInBuffer.limit(netInBuffer.capacity()); peerAppData.compact(); if(!dst.hasRemaining()) { return read; } } break; } case BUFFER_UNDERFLOW: { int packetBufferSize = sslEngine.getSession().getPacketBufferSize(); if (packetBufferSize > netInBuffer.capacity()) { int netSize = packetBufferSize; if (netSize > netInBuffer.capacity()) { ByteBuffer b = 
ByteBuffer.allocate(netSize); netInBuffer.flip(); b.put(netInBuffer); netInBuffer = b; } } else { // We have some leftover data from unwrap but no enough. // We need to read in more data from the socket AFTER the current data. netInBuffer.position(netInBuffer.limit()); netInBuffer.limit(netInBuffer.capacity()); channel.read(netInBuffer); continue; } break; } default: throw new IOException("Failed to SSL unwrap with status " + unwrapResult.getStatus()); } if (peerAppData != dst && dst.hasRemaining()) { peerAppData.flip(); read += copy(peerAppData, dst); if(!dst.hasRemaining()) { netInBuffer.compact(); return read; } } netInBuffer.compact(); } return read; } private int copy(ByteBuffer src, ByteBuffer dst) { int toCopy = Math.min(src.remaining(), dst.remaining()); dst.put(src.array(), src.position(), toCopy); src.position(src.position() + toCopy); if(!src.hasRemaining()) { src.clear(); } return toCopy; } @Override public int write(ByteBuffer src) throws IOException { if(closing || closed) { throw new IOException("Cannot perform socket write, the socket is closed (or being closed)."); } int wrote = 0; // src can be much bigger than what our SSL session allows to send in one go while (src.hasRemaining()) { netOutBuffer.clear(); SSLEngineResult wrapResult = sslEngine.wrap(src, netOutBuffer); netOutBuffer.flip(); if (wrapResult.getStatus() == SSLEngineResult.Status.OK) { if (wrapResult.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.NEED_TASK) tasks(); } while (netOutBuffer.hasRemaining()) { wrote += channel.write(netOutBuffer); } } return wrote; } // ----------------------------------------------------------- // MISC // ----------------------------------------------------------- private void tasks() { Runnable r; while ( (r = sslEngine.getDelegatedTask()) != null) { r.run(); } hs = sslEngine.getHandshakeStatus(); } public SocketChannel channel() { return channel; } SSLEngine getEngine() { return sslEngine; } boolean isHandshakeComplete() { return handshakeComplete; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/network/SSLSocketChannelFactory.java
package water.network; import water.H2O; import water.util.Log; import javax.net.ssl.*; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.nio.channels.ByteChannel; import java.nio.channels.SocketChannel; import java.security.*; import java.security.cert.CertificateException; public class SSLSocketChannelFactory { private static final String DEFAULT_TLS_VERSION = "TLSv1.2"; private SSLContext sslContext = null; private SSLProperties properties = null; public SSLSocketChannelFactory() throws SSLContextException { try { File confFile = new File(H2O.ARGS.internal_security_conf); SSLProperties props = H2O.ARGS.internal_security_conf_rel_paths ? new SSLProperties(confFile.getParentFile()) : new SSLProperties(); props.load(new FileInputStream(confFile)); init(props); } catch (IOException e) { Log.err("Failed to initialized SSL context.", e); throw new SSLContextException("Failed to initialized SSL context.", e); } } public SSLSocketChannelFactory(SSLProperties props) throws SSLContextException { init(props); } private void init(SSLProperties props) throws SSLContextException { properties = props; try { if (requiredParamsPresent()) { this.sslContext = SSLContext.getInstance(properties.h2o_ssl_protocol(DEFAULT_TLS_VERSION)); this.sslContext.init(keyManager(), trustManager(), null); } else { this.sslContext = SSLContext.getDefault(); } } catch (NoSuchAlgorithmException e) { Log.err("Failed to initialized SSL context.", e); throw new SSLContextException("Failed to initialized SSL context.", e); } catch (IOException e) { Log.err("Failed to initialized SSL context.", e); throw new SSLContextException("Failed to initialized SSL context.", e); } catch (UnrecoverableKeyException e) { Log.err("Failed to initialized SSL context.", e); throw new SSLContextException("Failed to initialized SSL context.", e); } catch (KeyStoreException e) { Log.err("Failed to initialized SSL context.", e); throw new SSLContextException("Failed to initialized SSL context.", e); } catch (KeyManagementException e) { Log.err("Failed to initialized SSL context.", e); throw new SSLContextException("Failed to initialized SSL context.", e); } catch (CertificateException e) { Log.err("Failed to initialized SSL context.", e); throw new SSLContextException("Failed to initialized SSL context.", e); } } private boolean requiredParamsPresent() { return null != properties.h2o_ssl_jks_internal() && null != properties.h2o_ssl_jks_password(); } private TrustManager[] trustManager() throws KeyStoreException, IOException, CertificateException, NoSuchAlgorithmException { KeyStore ksTrust = KeyStore.getInstance("JKS"); ksTrust.load( new FileInputStream(properties.h2o_ssl_jts()), properties.h2o_ssl_jts_password().toCharArray() ); TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); tmf.init(ksTrust); return tmf.getTrustManagers(); } private KeyManager[] keyManager() throws KeyStoreException, IOException, CertificateException, NoSuchAlgorithmException, UnrecoverableKeyException { KeyStore ksKeys = KeyStore.getInstance("JKS"); ksKeys.load(new FileInputStream(properties.h2o_ssl_jks_internal()), properties.h2o_ssl_jks_password().toCharArray() ); KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); kmf.init(ksKeys, properties.h2o_ssl_jks_password().toCharArray()); return kmf.getKeyManagers(); } public ByteChannel wrapClientChannel( SocketChannel channel, String host, int port) throws IOException { SSLEngine sslEngine = 
sslContext.createSSLEngine(host, port); sslEngine.setUseClientMode(false); if (null != properties.h2o_ssl_enabled_algorithms()) { sslEngine.setEnabledCipherSuites(properties.h2o_ssl_enabled_algorithms()); } return new SSLSocketChannel(channel, sslEngine); } public ByteChannel wrapServerChannel(SocketChannel channel) throws IOException { SSLEngine sslEngine = sslContext.createSSLEngine(); sslEngine.setUseClientMode(true); if (null != properties.h2o_ssl_enabled_algorithms()) { sslEngine.setEnabledCipherSuites(properties.h2o_ssl_enabled_algorithms()); } return new SSLSocketChannel(channel, sslEngine); } SSLProperties getProperties() { return properties; } }
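A hedged wiring sketch (editor's addition) for the factory above. It assumes H2O.ARGS.internal_security_conf already points at a valid security config; the host name and port are placeholders.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.ByteChannel;
import java.nio.channels.SocketChannel;
import water.network.SSLContextException;
import water.network.SSLSocketChannelFactory;

public class SecureChannelSketch {
  // SSLContextException extends Throwable, not Exception, so it is declared explicitly.
  public static void main(String[] args) throws IOException, SSLContextException {
    SSLSocketChannelFactory factory = new SSLSocketChannelFactory();   // reads H2O.ARGS.internal_security_conf
    SocketChannel raw = SocketChannel.open(new InetSocketAddress("node-2.example.org", 54321));
    ByteChannel secured = factory.wrapClientChannel(raw, "node-2.example.org", 54321);
    secured.write(ByteBuffer.wrap("ping".getBytes()));                 // bytes are wrapped by the SSLEngine before hitting the wire
    secured.close();
  }
}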
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/network/SocketChannelFactory.java
package water.network; import java.io.IOException; import java.nio.channels.ByteChannel; import java.nio.channels.SocketChannel; /** * Creates either a raw or an SSL/TLS wrapped socket depending on * the node's configuration. All sockets used in the application should be * created using this class. */ public class SocketChannelFactory { private volatile static SocketChannelFactory INSTANCE; private WrappingSecurityManager sm; private SocketChannelFactory(WrappingSecurityManager sm) { this.sm = sm; } public ByteChannel serverChannel(ByteChannel channel) throws IOException { if (sm.isSecurityEnabled() && !(channel instanceof SSLSocketChannel)) { return sm.wrapServerChannel((SocketChannel) channel); } else { return channel; } } public ByteChannel clientChannel(ByteChannel channel, String host, int port) throws IOException { if (sm.isSecurityEnabled() && !(channel instanceof SSLSocketChannel)) { return sm.wrapClientChannel((SocketChannel) channel, host, port); } else { return channel; } } public static SocketChannelFactory instance(WrappingSecurityManager sm) { if (null == INSTANCE) { synchronized (SocketChannelFactory.class) { if (null == INSTANCE) { INSTANCE = new SocketChannelFactory(sm); } } } return INSTANCE; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/network/SocketChannelUtils.java
package water.network; import java.nio.channels.Channel; import java.nio.channels.SocketChannel; public class SocketChannelUtils { public static boolean isSocketChannel(Channel channel) { return channel instanceof SocketChannel || channel instanceof SSLSocketChannel; } public static SocketChannel underlyingSocketChannel(Channel channel) { if(channel instanceof SSLSocketChannel) { return ((SSLSocketChannel) channel).channel(); } else if(channel instanceof SocketChannel) { return (SocketChannel) channel; } throw new UnsupportedOperationException( "Channel is not a socket channel. Cannot retrieve the underlying channel." ); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/network/WrappingSecurityManager.java
package water.network; import java.io.IOException; import java.nio.channels.ByteChannel; import java.nio.channels.SocketChannel; public interface WrappingSecurityManager { boolean isSecurityEnabled(); ByteChannel wrapServerChannel(SocketChannel channel) throws IOException; ByteChannel wrapClientChannel(SocketChannel channel, String host, int port) throws IOException; }
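A minimal, hypothetical pass-through implementation of the interface above (editor's addition), plus how it would be handed to SocketChannelFactory from the previous file; security is simply reported as disabled, so channels are returned unwrapped.

import java.io.IOException;
import java.nio.channels.ByteChannel;
import java.nio.channels.SocketChannel;
import water.network.SocketChannelFactory;
import water.network.WrappingSecurityManager;

public class PlainSecurityManager implements WrappingSecurityManager {
  @Override public boolean isSecurityEnabled() { return false; }
  @Override public ByteChannel wrapServerChannel(SocketChannel c) throws IOException { return c; }
  @Override public ByteChannel wrapClientChannel(SocketChannel c, String host, int port) throws IOException { return c; }

  // Example wiring: with security disabled, the factory hands the raw channel straight back.
  static ByteChannel connect(SocketChannel raw, String host, int port) throws IOException {
    return SocketChannelFactory.instance(new PlainSecurityManager()).clientChannel(raw, host, port);
  }
}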
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/ARFFParser.java
package water.parser; import java.util.ArrayList; import java.util.List; import water.Key; import water.exceptions.H2OUnsupportedDataFileException; import water.fvec.ByteVec; import water.fvec.Vec; import water.util.ArrayUtils; import static water.parser.DefaultParserProviders.ARFF_INFO; class ARFFParser extends CsvParser { private static final String INCOMPLETE_HEADER = "@H20_INCOMPLETE_HEADER@"; private static final String SKIP_NEXT_HEADER = "@H20_SKIP_NEXT_HEADER@"; private static final String TAG_ATTRIBUTE = "@ATTRIBUTE"; private static final String NA = "?"; //standard NA in Arff format private static final byte GUESS_SEP = ParseSetup.GUESS_SEP; private static final byte[] NON_DATA_LINE_MARKERS_DEFAULT = {'%', '@'}; ARFFParser(ParseSetup ps, Key jobKey) { super(ps, NON_DATA_LINE_MARKERS_DEFAULT, jobKey); } /** Try to parse the bytes as ARFF format */ static ParseSetup guessSetup(ByteVec bv, byte[] bits, byte sep, boolean singleQuotes, String[] columnNames, String[][] naStrings, byte[] nonDataLineMarkers, byte escapechar) { if (columnNames != null) throw new UnsupportedOperationException("ARFFParser doesn't accept columnNames."); if (nonDataLineMarkers == null) nonDataLineMarkers = NON_DATA_LINE_MARKERS_DEFAULT; // Parse all lines starting with @ until EOF or @DATA boolean haveData = false; String[][] data = new String[0][]; String[] labels; String[][] domains; String[] headerlines = new String[0]; byte[] ctypes; // header section ArrayList<String> header = new ArrayList<>(); int offset = 0; int chunk_idx = 0; //relies on the assumption that bits param have been extracted from first chunk: cf. ParseSetup#map boolean readHeader = true; while (readHeader) { offset = readArffHeader(0, header, bits, singleQuotes); if (isValidHeader(header)) { String lastHeader = header.get(header.size() - 1); if (INCOMPLETE_HEADER.equals(lastHeader) || SKIP_NEXT_HEADER.equals(lastHeader)) { bits = bv.chunkForChunkIdx(++chunk_idx).getBytes(); continue; } } else if (chunk_idx > 0) { //first chunk parsed correctly, but not the next => formatting issue throw new H2OUnsupportedDataFileException( "Arff parsing: Invalid header. If compressed file, please try without compression", "First chunk was parsed correctly, but a following one failed, common with archives as only first chunk in decompressed"); } readHeader = false; } if (offset < bits.length && !CsvParser.isEOL(bits[offset])) haveData = true; //more than just the header if (header.size() == 0) throw new ParseDataset.H2OParseException("No data!"); headerlines = header.toArray(headerlines); // process header int ncols = headerlines.length; labels = new String[ncols]; domains = new String[ncols][]; ctypes = new byte[ncols]; processArffHeader(ncols, headerlines, labels, domains, ctypes); // data section (for preview) if (haveData) { final int preview_max_length = 10; ArrayList<String> datablock = new ArrayList<>(); //Careful! the last data line could be incomplete too (cf. 
readArffHeader) while (offset < bits.length && datablock.size() < preview_max_length) { int lineStart = offset; while (offset < bits.length && !CsvParser.isEOL(bits[offset])) ++offset; int lineEnd = offset; ++offset; // For Windoze, skip a trailing LF after CR if ((offset < bits.length) && (bits[offset] == CsvParser.CHAR_LF)) ++offset; if (ArrayUtils.contains(nonDataLineMarkers, bits[lineStart])) continue; if (lineEnd > lineStart) { String str = new String(bits, lineStart, lineEnd - lineStart).trim(); if (!str.isEmpty()) datablock.add(str); } } if (datablock.size() == 0) throw new ParseDataset.H2OParseException("Unexpected line."); // process data section String[] datalines = datablock.toArray(new String[datablock.size()]); data = new String[datalines.length][]; // First guess the field separator by counting occurrences in first few lines if (datalines.length == 1) { if (sep == GUESS_SEP) { //could be a bit more robust than just counting commas? if (datalines[0].split(",").length > 2) sep = ','; else if (datalines[0].split(" ").length > 2) sep = ' '; else throw new ParseDataset.H2OParseException("Failed to detect separator."); } data[0] = determineTokens(datalines[0], sep, singleQuotes, escapechar); ncols = (ncols > 0) ? ncols : data[0].length; labels = labels[0] == null ? null : labels; } else { // 2 or more lines if (sep == GUESS_SEP) { // first guess the separator //FIXME if last line is incomplete, this logic fails sep = guessSeparator(datalines[0], datalines[1], singleQuotes, escapechar); if (sep == GUESS_SEP && datalines.length > 2) { sep = guessSeparator(datalines[1], datalines[2], singleQuotes, escapechar); if (sep == GUESS_SEP) sep = guessSeparator(datalines[0], datalines[2], singleQuotes, escapechar); } if (sep == GUESS_SEP) sep = (byte) ' '; // Bail out, go for space } for (int i = 0; i < datalines.length; ++i) { data[i] = determineTokens(datalines[i], sep, singleQuotes, escapechar); } } } naStrings = addDefaultNAs(naStrings, ncols); // Return the final setup return new ParseSetup(ARFF_INFO, sep, singleQuotes, ParseSetup.NO_HEADER, ncols, labels, ctypes, domains, naStrings, data, nonDataLineMarkers, escapechar, false); } private static String[][] addDefaultNAs(String[][] naStrings, int nCols) { final String[][] nas = naStrings == null ? new String[nCols][] : naStrings; for (int i = 0; i < nas.length; i++) { String [] colNas = nas[i]; if (!ArrayUtils.contains(colNas, NA)) { nas[i] = colNas = ArrayUtils.append(colNas, NA); } } return nas; } private static boolean isValidHeader(List<String> header) { for (String line : header) { if (!isValidHeaderLine(line)) return false; } return header.size() > 0; } private static boolean isValidHeaderLine(String str) { return str != null && str.startsWith("@"); } private static int readArffHeader(int offset, List<String> header, byte[] bits, boolean singleQuotes) { String lastHeader = header.size() > 0 ? header.get(header.size() - 1) : null; boolean lastHeaderIncomplete = INCOMPLETE_HEADER.equals(lastHeader); boolean skipFirstLine = SKIP_NEXT_HEADER.equals(lastHeader); if (lastHeaderIncomplete || skipFirstLine) header.remove(header.size() - 1); //remove fake header lastHeader = lastHeaderIncomplete ? 
header.remove(header.size() - 1) : null; //remove incomplete header for future concatenation while (offset < bits.length) { int lineStart = offset; while (offset < bits.length && !CsvParser.isEOL(bits[offset])) ++offset; int lineEnd = offset; ++offset; // For Windoze, skip a trailing LF after CR if ((offset < bits.length) && (bits[offset] == CsvParser.CHAR_LF)) ++offset; boolean lastLineIncomplete = lineEnd == bits.length && !CsvParser.isEOL(bits[lineEnd-1]); if (skipFirstLine) { skipFirstLine = false; if (lastLineIncomplete) header.add(SKIP_NEXT_HEADER); continue; } if (bits[lineStart] == '%') { //skip comment lines if (!lastHeaderIncomplete) { if (lastLineIncomplete) header.add(SKIP_NEXT_HEADER); continue; } } String str = new String(bits, lineStart, lineEnd - lineStart).trim(); if (lastHeaderIncomplete) { str = lastHeader + str; //add current line portion to last header portion from previous chunk lastHeaderIncomplete = false; } else if (str.matches("(?i)^@relation\\s?.*$")) { //ignore dataset name continue; } else if (str.matches("(?i)^@data\\s?.*$")) { //stop header parsing as soon as we encounter data break; } if (!str.isEmpty()) { header.add(str); if (lastLineIncomplete) header.add(INCOMPLETE_HEADER); } } return offset; } static void processArffHeader(int ncols, String[] headerlines, String[] labels, String[][] domains, byte[] ctypes) { for (int i=0; i<ncols; ++i) { String[] line = headerlines[i].split("\\s+", 2); if (!line[0].equalsIgnoreCase(TAG_ATTRIBUTE)) { throw new ParseDataset.H2OParseException("Expected line to start with @ATTRIBUTE."); } else { final String spec = (line.length == 2) ? line[1].replaceAll("\\s", " ") : ""; // normalize separators int sepIdx = spec.lastIndexOf(' '); if (sepIdx < 0) { throw new ParseDataset.H2OParseException("Expected @ATTRIBUTE to be followed by <attribute-name> <datatype>"); } final String type = spec.substring(sepIdx + 1).trim(); domains[i] = null; ctypes[i] = Vec.T_BAD; if (type.equalsIgnoreCase("NUMERIC") || type.equalsIgnoreCase("REAL") || type.equalsIgnoreCase("INTEGER") || type.equalsIgnoreCase("INT")) { ctypes[i] = Vec.T_NUM; } else if (type.equalsIgnoreCase("DATE") || type.equalsIgnoreCase("TIME")) { ctypes[i] = Vec.T_TIME; } else if (type.equalsIgnoreCase("ENUM")) { ctypes[i] = Vec.T_CAT; } else if (type.equalsIgnoreCase("STRING")) { ctypes[i] = Vec.T_STR; } else if (type.equalsIgnoreCase("UUID")) { //extension of ARFF ctypes[i] = Vec.T_UUID; } else if (type.equalsIgnoreCase("RELATIONAL")) { throw new UnsupportedOperationException("Relational ARFF format is not supported."); } else if (type.endsWith("}")) { int domainSpecStart = spec.lastIndexOf('{'); if (domainSpecStart < 0) throw new ParseDataset.H2OParseException("Invalid type specification."); sepIdx = domainSpecStart - 1; String domainSpec = spec.substring(domainSpecStart + 1, line[1].length() - 1); domains[i] = domainSpec.split(","); for (int j = 0; j < domains[i].length; j++) domains[i][j] = domains[i][j].trim(); if (domains[i][0].length() > 0) ctypes[i] = Vec.T_CAT; // case of {A,B,C} (valid list of factors) } if (ctypes[i] == Vec.T_BAD) throw new ParseDataset.H2OParseException("Unexpected line, type not recognized. 
Attribute specification: " + type); // remove the whitespaces separating the label and the type specification while ((sepIdx > 0) && (spec.charAt(sepIdx - 1) == ' ')) sepIdx--; String label = line[1].substring(0, sepIdx); // use the raw string before whitespace normalization // remove quotes if (label.length() >= 2 && label.startsWith("'") && label.endsWith("'")) label = label.substring(1, label.length() - 1); labels[i] = label; } } } }
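An illustrative, entirely hypothetical ARFF fragment (editor's addition) showing the header forms the parser above recognizes: a numeric and a string attribute, a quoted label with a {...} factor domain, '%' comment lines, and '?' as the default NA token in the data section (DATE/TIME, ENUM, INTEGER/INT/REAL and UUID types are handled analogously).

% hypothetical ARFF input illustrating what guessSetup/processArffHeader accept
@RELATION example
@ATTRIBUTE age           NUMERIC
@ATTRIBUTE comment       STRING
@ATTRIBUTE 'class label' {yes,no,maybe}
@DATA
34,hello,yes
?,world,no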
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/BinaryFormatExporter.java
package water.parser; import water.H2O; import water.fvec.Frame; import water.util.ExportFileFormat; public interface BinaryFormatExporter { H2O.H2OCountedCompleter export(Frame frame, String path, boolean force, String compression, boolean writeChecksum, boolean tzAdjustFromLocal); boolean supports(ExportFileFormat format); }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/BinaryParserProvider.java
package water.parser; import water.fvec.ByteVec; /** * Base class for Binary format parsers that implements 2-phase ParseSetup. */ public abstract class BinaryParserProvider extends ParserProvider { /** * {@inheritDoc} */ @Override public abstract ParseSetup guessInitSetup(ByteVec v, byte[] bits, ParseSetup userSetup); /** * {@inheritDoc} */ @Override public abstract ParseSetup guessFinalSetup(ByteVec v, byte[] bits, ParseSetup ps); @Override @Deprecated public final ParseSetup guessSetup(ByteVec v, byte[] bits, byte sep, int ncols, boolean singleQuotes, int checkHeader, String[] columnNames, byte[] columnTypes, String[][] domains, String[][] naStrings) { ParseSetup ps = new ParseSetup(null, sep, singleQuotes, checkHeader, ncols, columnNames, columnTypes, domains, naStrings, null, false); return guessSetup(v, bits, ps); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/BufferedString.java
package water.parser; import water.AutoBuffer; import water.Iced; import water.util.StringUtils; import java.util.Arrays; import java.util.Formatter; /** * A mutable wrapper to hold String as a byte array. * * It can be modified by set of methods, the hash code is computed * on the fly. There is no speed up benefit of cashing the hash in * a dedicated private field. See the speed test in {@code ParseTest2#testSpeedOfCategoricalUpdate}. * * Warning: This data structure is not designed for parallel access! */ public class BufferedString extends Iced implements Comparable<BufferedString> { protected byte [] _buf; protected int _off; protected int _len; public BufferedString(byte[] buf, int off, int len) { _buf = buf; _off = off; _len = len; assert len >= 0 : "Bad length in constructor " + len; } private BufferedString(byte[] buf) { this(buf,0,buf.length); } // Cloning constructing used during collecting unique categoricals BufferedString(BufferedString from) { this(Arrays.copyOfRange(from._buf,from._off,from._off+from._len)); } public BufferedString(String from) { this(StringUtils.bytesOf(from)); } // Used to make a temp recycling BufferedString in hot loops public BufferedString() { } public final AutoBuffer write_impl(AutoBuffer ab) { if( _buf == null ) return ab.putInt(-1); ab.putInt(_len); return ab.putA1(_buf,_off,_off+_len); } public final BufferedString read_impl(AutoBuffer ab){ _buf = ab.getA1(); if(_buf != null) _len = _buf.length; return this; } /** * Comparison, according to Comparable interface * @param o other string to compare * @return -1 or 0 or 1, as specified in Comparable */ @Override public int compareTo( BufferedString o ) { int len = Math.min(_len,o._len); for( int i=0; i<len; i++ ) { int x = (0xFF&_buf[_off+i]) - (0xFF&o._buf[o._off+i]); if( x != 0 ) return x; } return _len - o._len; } @Override public int hashCode(){ int hash = 0; int n = _off + _len; for (int i = _off; i < n; ++i) // equivalent to String.hashCode (not actually) hash = 31 * hash + (char)_buf[i]; return hash; } // TODO(vlad): make sure that this method is not as destructive as it now is (see tests) void addChar() { _len++; } void removeChar(){ _len--; } void addBuff(byte [] bits){ byte [] buf = new byte[_len]; int l1 = _buf.length- _off; System.arraycopy(_buf, _off, buf, 0, l1); System.arraycopy(bits, 0, buf, l1, _len-l1); _off = 0; _buf = buf; } // WARNING: LOSSY CONVERSION!!! // Converting to a String will truncate all bytes with high-order bits set, // even if they are otherwise a valid member of the field/BufferedString. // Converting back to a BufferedString will then make something with fewer // characters than what you started with, and will fail all equals() tests. // TODO(Vlad): figure out what to do about the buffer being not UTF-8 (who guarantees?) @Override public String toString() { return _buf == null ? null : StringUtils.toString(_buf, Math.max(0, _off), Math.min(_buf.length, _len)); } /** * Converts this BufferedString into an ASCII String where all original non-ASCII characters * are represented in a hexadecimal notation. * @return Sanitized String value (safe to use eg. in domains). 
*/ public String toSanitizedString() { StringBuilder sb = new StringBuilder(_len * 2); Formatter formatter = new Formatter(sb); boolean inHex = false; for (int i = 0; i < _len; i++) { if ((_buf[_off + i] & 0x80) == 128) { if (!inHex) sb.append("<0x"); formatter.format("%02X", _buf[_off + i]); inHex = true; } else { // ASCII if (inHex) { sb.append(">"); inHex = false; } formatter.format("%c", _buf[_off + i]); } } if (inHex) sb.append(">"); // close hex values as trailing char return sb.toString(); } public static String[] toString(BufferedString bStr[]) { if( bStr==null ) return null; String[] ss = new String[bStr.length]; for( int i=0; i<bStr.length; i++ ) ss[i] = bStr[i].toString(); return ss; } public static BufferedString[] toBufferedString(String[] strings) { if (strings == null) return null; BufferedString[] res = new BufferedString[strings.length]; for (int i = 0; i < strings.length; i++) { res[i] = new BufferedString(strings[i]); } return res; } public final BufferedString set(byte[] buf) { return set(buf, 0, buf.length); } public BufferedString set(byte[] buf, int off, int len) { _buf = buf; _off = off; _len = len; assert len >= 0 : "Bad length in setter " + len; return this; } public final BufferedString set(String s) { return set(StringUtils.bytesOf(s)); } public void setOff(int off) { _off=off; } public void setLen(int len) { _len=len; } @Override public boolean equals(Object o){ if(o instanceof BufferedString) { BufferedString str = (BufferedString) o; if (str._len != _len) return false; for (int i = 0; i < _len; ++i) if (_buf[_off + i] != str._buf[str._off + i]) return false; return true; } return false; } /** * Tests whether this BufferedString is equal to a given ASCII string * @param str string sequence made of ASCII characters (0..127) * @return true if this BufferedString represents a given ASCII String, false if sequences differ or if the test * string is not actually made of just ASCII characters */ public boolean equalsAsciiString(String str) { if (str == null || str.length() != _len) return false; for (int i = 0; i < _len; ++i) if (_buf[_off + i] != str.charAt(i)) return false; return true; } // Thou Shalt Not use accessors in performance critical code - because it // obfuscates the code's cost model. All file-local uses of the accessors // has been stripped, please do not re-insert them. In particular, the // hashcode and equals calls are made millions (billions?) of times a second // when parsing categoricals. public final byte [] getBuffer() {return _buf;} public final int getOffset() {return _off;} public final int length() {return _len;} public static final byte NA = 0; public static final byte INT = 1; public static final byte REAL= 2; public final byte getNumericType() { int i = 0; int decimalCnt = 0; if (_len == 0) return NA; if (_buf[_off] == '+' || _buf[_off] == '-') i++; while( i < _len) { if (_buf[_off+i] == '.') decimalCnt++; else if (_buf[_off+i] < '0' || _buf[_off+i] > '9') return NA; i++; } if (decimalCnt > 0) if (decimalCnt == 1) return REAL; else return NA; //more than one decimal, NaN else return INT; } }
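A short usage sketch of BufferedString (editor's addition; the wrapper class and sample bytes are hypothetical), exercising the public constructors and accessors defined above.

import water.parser.BufferedString;

public class BufferedStringExample {
  public static void main(String[] args) {
    BufferedString a = new BufferedString("42.5");
    System.out.println(a.getNumericType());         // 2 == BufferedString.REAL (exactly one decimal point)
    BufferedString b = new BufferedString("abc");
    System.out.println(b.equalsAsciiString("abc")); // true - pure-ASCII comparison, no allocation
    byte[] raw = {(byte) 0xC3, (byte) 0xA9};        // UTF-8 bytes for 'e' with acute accent
    BufferedString c = new BufferedString(raw, 0, raw.length);
    System.out.println(c.toSanitizedString());      // non-ASCII bytes rendered in hex: <0xC3A9>
  }
}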
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/Categorical.java
package water.parser; import java.util.concurrent.atomic.AtomicInteger; import water.Iced; import water.util.IcedHashMap; import water.util.Log; import water.util.PrettyPrint; /** Class for tracking categorical (factor) columns. * * Basically a wrapper around non blocking hash map. * In the first pass, we just collect set of unique strings per column * (if there are less than MAX_CATEGORICAL_COUNT unique elements). * * After pass1, the keys are sorted and indexed alphabetically. * In the second pass, map is used only for lookup and never updated. * * Categorical objects are shared among threads on the local nodes! * * @author tomasnykodym * */ public final class Categorical extends Iced { public static final int MAX_CATEGORICAL_COUNT = 10000000; transient AtomicInteger _id = new AtomicInteger(); int _maxId = -1; volatile IcedHashMap<BufferedString, Integer> _map; boolean maxDomainExceeded = false; Categorical() { _map = new IcedHashMap<>(); } /** Add key to this map (treated as hash set in this case). */ int addKey(BufferedString str) { // _map is shared and be cast to null (if categorical is killed) -> grab local copy IcedHashMap<BufferedString, Integer> m = _map; if( m == null ) return Integer.MAX_VALUE; // Nuked already Integer res = m.get(str); if( res != null ) return res; // Recorded already assert str.length() < 65535; // Length limit so 65535 can be used as a sentinel int newVal = _id.incrementAndGet(); res = m.putIfAbsent(new BufferedString(str), newVal); if( res != null ) return res; if( m.size() > MAX_CATEGORICAL_COUNT) maxDomainExceeded = true; return newVal; } final boolean containsKey(BufferedString key){ return _map.containsKey(key); } @Override public String toString() { return "{"+_map+" }"; } int getTokenId( BufferedString str ) { return _map.get(str); } int maxId() { return _maxId == -1 ? _id.get() : _maxId; } int size() { return _map.size(); } boolean isMapFull() { return maxDomainExceeded; } BufferedString[] getColumnDomain() { return _map.keySet().toArray(new BufferedString[_map.size()]); } /** * Converts domain values represented as BufferedStrings to UTF-8 encoding {@see BufferedString.toString()}. * If the source value is not actually in UTF-8, the characters will be represented in hexadecimal notation. * @param col user-facing index of the column to which the categoricals belong (only for logging/debugging) */ void convertToUTF8(int col) { int hexConvLeft = 10; BufferedString[] bStrs = _map.keySet().toArray(new BufferedString[_map.size()]); StringBuilder hexSB = new StringBuilder(); for (int i = 0; i < bStrs.length; i++) { String s = bStrs[i].toString(); // converts to String using UTF-8 encoding if (bStrs[i].equalsAsciiString(s)) continue; // quick check for the typical case without new object allocation & map modification if (s.contains("\uFFFD")) { // converted string contains Unicode replacement character => sanitize the (whole) string s = bStrs[i].toSanitizedString(); if (hexConvLeft-- > 0) hexSB.append(s).append(", "); if (hexConvLeft == 0) hexSB.append("..."); } int val = _map.get(bStrs[i]); _map.remove(bStrs[i]); bStrs[i] = new BufferedString(s); _map.put(bStrs[i], val); } if (hexSB.length() > 0) Log.info("Found categoricals with non-UTF-8 characters or NULL character in the " + PrettyPrint.withOrdinalIndicator(col) + " column. Converting unrecognized characters into hex: " + hexSB.toString()); } // Since this is a *concurrent* hashtable, writing it whilst its being // updated is tricky. 
If the table is NOT being updated, then all is written // as expected. If the table IS being updated we only promise to write the // Keys that existed at the time the table write began. If elements are // being deleted, they may be written anyways. If the Values are changing, a // random Value is written. // public AutoBuffer write_impl( AutoBuffer ab ) { // if( _map == null ) return ab.put1(1); // Killed map marker // ab.put1(0); // Not killed // ab.put4(maxId()); // for( BufferedString key : _map.keySet() ) // ab.put2((char)key.length()).putA1(key.getBuffer(),key.length()).put4(_map.get(key)); // return ab.put2((char)65535); // End of map marker // } // // public Categorical read_impl( AutoBuffer ab ) { // assert _map == null || _map.size()==0; // _map = null; // if( ab.get1() == 1 ) return this; // Killed? // _maxId = ab.get4(); // _map = new NonBlockingHashMap<>(); // int len; // while( (len = ab.get2()) != 65535 ) // Read until end-of-map marker // _map.put(new BufferedString(ab.getA1(len)),ab.get4()); // return this; // } }
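Categorical above is used in the two-pass parse described in its Javadoc: pass 1 collects unique levels, pass 2 only looks them up. A small sketch of that workflow (not part of the source; placed in package water.parser because the constructor and most methods are package-private):

package water.parser;

import java.util.Arrays;

public class CategoricalExample {
  public static void main(String[] args) {
    Categorical cat = new Categorical();

    // Pass 1: collect unique levels; addKey returns a provisional (unsorted) id.
    for (String s : new String[]{"red", "green", "red", "blue"})
      cat.addKey(new BufferedString(s));
    System.out.println(cat.size());                       // 3 unique levels

    // After pass 1 the domain is extracted and sorted alphabetically.
    BufferedString[] domain = cat.getColumnDomain();
    Arrays.sort(domain);

    // Pass 2: the map is used for lookups only, never updated.
    System.out.println(cat.containsKey(new BufferedString("green")));  // true
  }
}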
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/CharSkippingBufferedString.java
package water.parser; import water.MemoryManager; import water.util.ArrayUtils; import java.util.Arrays; /** * A schema over a string represented as an array of bytes. * This schema enables characters to be skipped inside the string. unlike the basic {@link BufferedString} * Skipped characters are not serialized by toString method. */ class CharSkippingBufferedString { private int[] _skipped; private int _skippedWriteIndex; private final BufferedString _bufferedString; CharSkippingBufferedString() { _skipped = new int[0]; _skippedWriteIndex = 0; _bufferedString = new BufferedString(); } protected void addChar() { _bufferedString.addChar(); } protected void removeChar() { _bufferedString.removeChar(); } protected byte[] getBuffer() { return _bufferedString.getBuffer(); } /** * * @return True if offset plus limit exceed the length of the underlying buffer. Otherwise false. */ protected boolean isOverflown(){ return _bufferedString._off + _bufferedString._len > _bufferedString.getBuffer().length; } protected void addBuff(final byte[] bits) { _bufferedString.addBuff(bits); _skipped = new int[0]; _skippedWriteIndex = 0; } /** * Marks a character in the backing array as skipped. Such character is no longer serialized when toString() method * is called on this buffer. * * @param skippedCharIndex Index of the character in the backing array to skip */ protected final void skipIndex(final int skippedCharIndex) { _bufferedString.addChar(); if (_skipped.length == 0 || _skipped[_skipped.length - 1] != -1) { _skipped = Arrays.copyOf(_skipped, Math.max(_skipped.length + 1, 1)); } _skipped[_skippedWriteIndex] = skippedCharIndex; _skippedWriteIndex++; } /** * A delegate to the underlying {@link StringBuffer}'s set() method. * * @param buf Buffer to operate with * @param off Beginning of the string (offset in the buffer) * @param len Length of the string from the offset. */ protected void set(final byte[] buf, final int off, final int len) { _skipped = new int[0]; _skippedWriteIndex = 0; _bufferedString.set(buf, off, len); } /** * Converts the current window into byte buffer to a {@link BufferedString}. The resulting new instance of {@link BufferedString} * is backed by a newly allocated byte[] buffer sized exactly to fit the desired string represented by current buffer window, * excluding the skipped characters. * * @return An instance of {@link BufferedString} containing only bytes from the original window, without skipped bytes. */ public BufferedString toBufferedString() { if (_skipped.length == 0) return _bufferedString; byte[] buf = MemoryManager.malloc1(_bufferedString._len - _skipped.length); // Length of the buffer window minus skipped chars int copyStart = _bufferedString._off; int target = 0; for (int skippedIndex : _skipped) { for (int i = copyStart; i < skippedIndex; i++) { buf[target++] = _bufferedString._buf[i]; } copyStart = skippedIndex + 1; } int windowEnd = _bufferedString._off + _bufferedString._len; for (int i = copyStart; i < windowEnd; i++) { buf[target++] = _bufferedString._buf[i]; } assert target == buf.length; return new BufferedString(buf, 0, buf.length); } /** * @return A string representation of the buffer window, excluding skipped characters */ @Override public String toString() { return toBufferedString().toString(); } }
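CharSkippingBufferedString lets the CSV scanner keep a token window while dropping individual bytes (escape characters, doubled quotes). A tiny sketch of that mechanism (not part of the source; in package water.parser since the class is package-private):

package water.parser;

public class CharSkippingExample {
  public static void main(String[] args) {
    byte[] bits = "a\\\"b".getBytes();           // bytes: a \ " b
    CharSkippingBufferedString s = new CharSkippingBufferedString();
    s.set(bits, 0, 0);    // empty window starting at offset 0
    s.addChar();          // keep 'a'  (index 0)
    s.skipIndex(1);       // skip the escape '\' at index 1
    s.addChar();          // keep '"'  (index 2)
    s.addChar();          // keep 'b'  (index 3)
    System.out.println(s.toString());            // prints: a"b
  }
}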
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/CsvParser.java
package water.parser; import org.apache.commons.lang.math.NumberUtils; import water.Key; import water.fvec.FileVec; import water.fvec.Vec; import water.util.ArrayUtils; import water.util.StringUtils; import java.io.ByteArrayOutputStream; import java.util.ArrayList; import java.util.Arrays; import static water.parser.DefaultParserProviders.CSV_INFO; public class CsvParser extends Parser { private static final byte GUESS_SEP = ParseSetup.GUESS_SEP; private static final int NO_HEADER = ParseSetup.NO_HEADER; private static final int GUESS_HEADER = ParseSetup.GUESS_HEADER; private static final int HAS_HEADER = ParseSetup.HAS_HEADER; private static final byte[] NON_DATA_LINE_MARKERS_DEFAULT = {'#'}; private final byte[] _nonDataLineMarkers; CsvParser( ParseSetup ps, Key jobKey ) { this(ps, NON_DATA_LINE_MARKERS_DEFAULT, jobKey); } CsvParser(ParseSetup ps, byte[] defaultNonDataLineMarkers, Key jobKey) { super(ps, jobKey); _nonDataLineMarkers = ps._nonDataLineMarkers != null ? ps._nonDataLineMarkers : defaultNonDataLineMarkers; } // Parse this one Chunk (in parallel with other Chunks) @SuppressWarnings("fallthrough") @Override public ParseWriter parseChunk(int cidx, final ParseReader din, final ParseWriter dout) { CharSkippingBufferedString str = new CharSkippingBufferedString(); byte[] bits = din.getChunkData(cidx); if( bits == null ) return dout; int offset = din.getChunkDataStart(cidx); // General cursor into the giant array of bytes final byte[] bits0 = bits; // Bits for chunk0 boolean firstChunk = true; // Have not rolled into the 2nd chunk byte[] bits1 = null; // Bits for chunk1, loaded lazily. int state; boolean isNa = false; boolean isAllASCII = true; // If handed a skipping offset, then it points just past the prior partial line. if( offset >= 0 ) state = WHITESPACE_BEFORE_TOKEN; else { offset = 0; // Else start skipping at the start // Starting state. Are we skipping the first (partial) line, or not? Skip // a header line, or a partial line if we're in the 2nd and later chunks. if (_setup._check_header == ParseSetup.HAS_HEADER || cidx > 0) state = SKIP_LINE; else state = POSSIBLE_EMPTY_LINE; } byte quoteChar = _setup._single_quotes ? CHAR_SINGLE_QUOTE : CHAR_DOUBLE_QUOTE; int quotes = 0; byte quoteCount = 0; long number = 0; boolean escaped = false; int exp = 0; int sgnExp = 1; boolean decimal = false; int fractionDigits = 0; int tokenStart = 0; // used for numeric token to backtrace if not successful int parsedColumnCounter = 0; // index into parsed columns only, exclude skipped columns. 
int colIdx = 0; // count each actual column in the dataset including the skipped columns byte c = bits[offset]; // skip comments for the first chunk (or if not a chunk) if( cidx == 0 ) { while (ArrayUtils.contains(_nonDataLineMarkers, c) || isEOL(c)) { while ((offset < bits.length) && (bits[offset] != CHAR_CR) && (bits[offset ] != CHAR_LF)) { // System.out.print(String.format("%c",bits[offset])); ++offset; } if ((offset + 1 < bits.length) && (bits[offset] == CHAR_CR) && (bits[offset + 1] == CHAR_LF)) ++offset; ++offset; // System.out.println(); if (offset >= bits.length) return dout; c = bits[offset]; } } dout.newLine(); final boolean forceable = dout instanceof FVecParseWriter && ((FVecParseWriter)dout)._ctypes != null && _setup._column_types != null; int colIndexNum = _keepColumns.length-1; if (_setup._parse_columns_indices==null) { // _parse_columns_indices not properly set _setup.setParseColumnIndices(_setup.getNumberColumns(), _setup.getSkippedColumns()); } int parseIndexNum = _setup._parse_columns_indices.length-1; MAIN_LOOP: while (true) { final boolean forcedCategorical = forceable && colIdx < _setup._column_types.length && _setup._column_types[_setup._parse_columns_indices[parsedColumnCounter]] == Vec.T_CAT; final boolean forcedString = forceable && colIdx < _setup._column_types.length && _setup._column_types[_setup._parse_columns_indices[parsedColumnCounter]] == Vec.T_STR; switch (state) { // --------------------------------------------------------------------- case SKIP_LINE: if (isEOL(c)) { state = EOL; } else { break; } continue MAIN_LOOP; // --------------------------------------------------------------------- case EXPECT_COND_LF: state = POSSIBLE_EMPTY_LINE; if (c == CHAR_LF) break; continue MAIN_LOOP; // --------------------------------------------------------------------- case POSSIBLE_ESCAPED_QUOTE: if (c == quotes) { quoteCount--; str.skipIndex(offset); state = STRING; break; } else if (quoteCount > 1) { state = STRING_END; str.removeChar(); quoteCount = 0; continue MAIN_LOOP; } else { state = STRING; } case STRING: if (c == quotes && !escaped) { state = COND_QUOTE; continue MAIN_LOOP; } if ((!isEOL(c) && c != CHAR_SEPARATOR) || quoteCount == 1) { if (str.getBuffer() == null && isEOL(c)) str.set(bits, offset, 0); escaped = !escaped && c == CHAR_ESCAPE; if (escaped) str.skipIndex(offset); else str.addChar(); if ((c & 0x80) == 128) //value beyond std ASCII isAllASCII = false; break; } // fallthrough to STRING_END // --------------------------------------------------------------------- case STRING_END: if ((c != CHAR_SEPARATOR) && (c == CHAR_SPACE)) break; // we have parsed the string categorical correctly if(str.isOverflown()){ // crossing chunk boundary assert str.getBuffer() != bits; str.addBuff(bits); } if( !isNa && _setup.isNA(parsedColumnCounter, str.toBufferedString())) { isNa = true; } if (!isNa && (colIdx <= colIndexNum) && _keepColumns[colIdx]) { dout.addStrCol(parsedColumnCounter, str.toBufferedString()); if (!isAllASCII) dout.setIsAllASCII(parsedColumnCounter, isAllASCII); } else { if ((colIdx <= colIndexNum) && _keepColumns[colIdx]) dout.addInvalidCol(parsedColumnCounter); isNa = false; } str.set(null, 0, 0); quotes = 0; isAllASCII = true; if ((colIdx <= colIndexNum) && _keepColumns[colIdx++] && (parsedColumnCounter < parseIndexNum)) // only increment if not at the end parsedColumnCounter++; state = SEPARATOR_OR_EOL; // fallthrough to SEPARATOR_OR_EOL // --------------------------------------------------------------------- case SEPARATOR_OR_EOL: if (c == 
CHAR_SEPARATOR) { state = WHITESPACE_BEFORE_TOKEN; break; } if (c==CHAR_SPACE) break; // fallthrough to EOL // --------------------------------------------------------------------- case EOL: if (quoteCount == 1) { //There may be a new line character inside quotes state = STRING; continue MAIN_LOOP; } else if (quoteCount > 2) { String err = "Unmatched quote char " + ((char) quotes); dout.invalidLine(new ParseWriter.ParseErr(err, cidx, dout.lineNum(), offset + din.getGlobalByteOffset())); parsedColumnCounter =0; colIdx=0; quotes = 0; } else if (colIdx != 0) { dout.newLine(); parsedColumnCounter = 0; colIdx=0; } state = (c == CHAR_CR) ? EXPECT_COND_LF : POSSIBLE_EMPTY_LINE; if( !firstChunk ) break MAIN_LOOP; // second chunk only does the first row break; // --------------------------------------------------------------------- case POSSIBLE_CURRENCY: if (((c >= '0') && (c <= '9')) || (c == '-') || (c == CHAR_DECIMAL_SEP) || (c == '+')) { state = TOKEN; } else { str.set(bits, offset - 1, 0); str.addChar(); if (c == quotes) { state = COND_QUOTE; continue MAIN_LOOP; } if ((quotes != 0) || ((!isEOL(c) && (c != CHAR_SEPARATOR)))) { state = STRING; } else { state = STRING_END; } } continue MAIN_LOOP; // --------------------------------------------------------------------- case POSSIBLE_EMPTY_LINE: if (isEOL(c)) { if (c == CHAR_CR) state = EXPECT_COND_LF; break; } if (ArrayUtils.contains(_nonDataLineMarkers, c)) { state = SKIP_LINE; break; } // fallthrough to WHITESPACE_BEFORE_TOKEN // --------------------------------------------------------------------- case WHITESPACE_BEFORE_TOKEN: if (c == CHAR_SPACE || (c == CHAR_TAB && CHAR_TAB!=CHAR_SEPARATOR)) { break; } else if (c == CHAR_SEPARATOR) { // we have empty token, store as NaN if ((colIdx <= colIndexNum) && _keepColumns[colIdx]) dout.addInvalidCol(parsedColumnCounter); if ((colIdx <= colIndexNum) && _keepColumns[colIdx++] && (parsedColumnCounter < parseIndexNum)) { parsedColumnCounter++; } state = WHITESPACE_BEFORE_TOKEN; break; } else if (isEOL(c)) { if ((colIdx <= colIndexNum) && _keepColumns[colIdx]) dout.addInvalidCol(parsedColumnCounter); state = EOL; continue MAIN_LOOP; } // fallthrough to COND_QUOTED_TOKEN // --------------------------------------------------------------------- case COND_QUOTED_TOKEN: state = TOKEN; if( CHAR_SEPARATOR!=HIVE_SEP // Only allow quoting in CSV not Hive files && c == quoteChar) { quotes = c; quoteCount++; break; } // fallthrough to TOKEN // --------------------------------------------------------------------- case TOKEN: if( dout.isString(parsedColumnCounter) ) { // Forced already to a string col? 
state = STRING; // Do not attempt a number parse, just do a string parse str.set(bits, offset, 0); continue MAIN_LOOP; } else if (((c >= '0') && (c <= '9')) || (c == '-') || (c == CHAR_DECIMAL_SEP) || (c == '+')) { state = NUMBER; number = 0; fractionDigits = 0; decimal = false; tokenStart = offset; if (c == '-') { exp = -1; break; } else if(c == '+'){ exp = 1; break; } else { exp = 1; } // fallthrough } else if (c == '$') { state = POSSIBLE_CURRENCY; break; } else { state = STRING; str.set(bits, offset, 0); continue MAIN_LOOP; } // fallthrough to NUMBER // --------------------------------------------------------------------- case NUMBER: if ((c >= '0') && (c <= '9')) { if (number >= LARGEST_DIGIT_NUMBER) state = NUMBER_SKIP; else number = (number*10)+(c-'0'); break; } else if (c == CHAR_DECIMAL_SEP) { state = NUMBER_FRACTION; fractionDigits = offset; decimal = true; break; } else if ((c == 'e') || (c == 'E')) { state = NUMBER_EXP_START; sgnExp = 1; break; } if (exp == -1) { number = -number; } exp = 0; // fallthrough to COND_QUOTED_NUMBER_END // --------------------------------------------------------------------- case COND_QUOTED_NUMBER_END: if ( c == quotes) { state = NUMBER_END; quotes = 0; quoteCount = 0; break; } // fallthrough NUMBER_END case NUMBER_END: // forced if (forcedString || forcedCategorical ) { state = STRING; offset = tokenStart - 1; str.set(bits, tokenStart, 0); break; // parse as String token now } if (c == CHAR_SEPARATOR && quotes == 0) { exp = exp - fractionDigits; if ((colIdx <= colIndexNum) && _keepColumns[colIdx]) dout.addNumCol(parsedColumnCounter,number,exp); if ((colIdx <= colIndexNum) && _keepColumns[colIdx++] && (parsedColumnCounter < parseIndexNum)) parsedColumnCounter++; // do separator state here too state = WHITESPACE_BEFORE_TOKEN; break; } else if (isEOL(c)) { exp = exp - fractionDigits; if ((colIdx <= colIndexNum) && _keepColumns[colIdx]) dout.addNumCol(parsedColumnCounter,number,exp); // do EOL here for speedup reasons parsedColumnCounter =0; colIdx=0; dout.newLine(); state = (c == CHAR_CR) ? 
EXPECT_COND_LF : POSSIBLE_EMPTY_LINE; if( !firstChunk ) break MAIN_LOOP; // second chunk only does the first row break; } else if ((c == '%')) { state = NUMBER_END; exp -= 2; break; } else if ((c != CHAR_SEPARATOR) && ((c == CHAR_SPACE) || (c == CHAR_TAB))) { state = NUMBER_END; break; } else { state = STRING; offset = tokenStart-1; str.set(bits, tokenStart, 0); break; // parse as String token now } // --------------------------------------------------------------------- case NUMBER_SKIP: if ((c >= '0') && (c <= '9')) { exp++; break; } else if (c == CHAR_DECIMAL_SEP) { state = NUMBER_SKIP_NO_DOT; break; } else if ((c == 'e') || (c == 'E')) { state = NUMBER_EXP_START; sgnExp = 1; break; } state = COND_QUOTED_NUMBER_END; continue MAIN_LOOP; // --------------------------------------------------------------------- case NUMBER_SKIP_NO_DOT: if ((c >= '0') && (c <= '9')) { break; } else if ((c == 'e') || (c == 'E')) { state = NUMBER_EXP_START; sgnExp = 1; break; } state = COND_QUOTED_NUMBER_END; continue MAIN_LOOP; // --------------------------------------------------------------------- case NUMBER_FRACTION: if ((c >= '0') && (c <= '9')) { if (number >= LARGEST_DIGIT_NUMBER) { if (decimal) fractionDigits = offset - 1 - fractionDigits; if (exp == -1) number = -number; exp = 0; state = NUMBER_SKIP_NO_DOT; } else { number = (number*10)+(c-'0'); } break; } else if ((c == 'e') || (c == 'E')) { if (decimal) fractionDigits = offset - 1 - fractionDigits; state = NUMBER_EXP_START; sgnExp = 1; break; } state = COND_QUOTED_NUMBER_END; if (decimal) fractionDigits = offset - fractionDigits-1; if (exp == -1) { number = -number; } exp = 0; continue MAIN_LOOP; // --------------------------------------------------------------------- case NUMBER_EXP_START: if (exp == -1) { number = -number; } exp = 0; if (c == '-') { sgnExp *= -1; break; } else if (c == '+'){ break; } if ((c < '0') || (c > '9')){ state = STRING; offset = tokenStart-1; str.set(bits, tokenStart, 0); break; // parse as String token now } state = NUMBER_EXP; // fall through to NUMBER_EXP // --------------------------------------------------------------------- case NUMBER_EXP: if ((c >= '0') && (c <= '9')) { exp = (exp*10)+(c-'0'); break; } exp *= sgnExp; state = COND_QUOTED_NUMBER_END; continue MAIN_LOOP; // --------------------------------------------------------------------- case COND_QUOTE: if (c == quotes) { str.addChar(); quoteCount++; state = POSSIBLE_ESCAPED_QUOTE; break; } // --------------------------------------------------------------------- default: assert (false) : " We have wrong state "+state; } // end NEXT_CHAR // System.out.print(String.format("%c",bits[offset])); ++offset; // do not need to adjust for offset increase here - the offset is set to tokenStart-1! if (offset < 0) { // Offset is negative? assert !firstChunk; // Caused by backing up from 2nd chunk into 1st chunk firstChunk = true; bits = bits0; offset += bits.length; str.set(bits, offset, 0); } else if (offset >= bits.length) { // Off end of 1st chunk? Parse into 2nd chunk // Attempt to get more data. if( firstChunk && bits1 == null ) bits1 = din.getChunkData(cidx+1); // if we can't get further we might have been the last one and we must // commit the latest guy if we had one. if( !firstChunk || bits1 == null ) { // No more data available or allowed // If we are mid-parse of something, act like we saw a LF to end the // current token. 
if ((state != EXPECT_COND_LF) && (state != POSSIBLE_EMPTY_LINE)) { c = CHAR_LF; if(state == STRING) { quoteCount = 0; // In case of a String not properly ended with quotes state = STRING_END; } continue; // MAIN_LOOP; } break; // MAIN_LOOP; // Else we are just done } // Now parsing in the 2nd chunk. All offsets relative to the 2nd chunk start. firstChunk = false; if (state == NUMBER_FRACTION) fractionDigits -= bits.length; offset -= bits.length; tokenStart -= bits.length; bits = bits1; // Set main parsing loop bits if( bits[0] == CHAR_LF && state == EXPECT_COND_LF ) break; // MAIN_LOOP; // when the first character we see is a line end } c = bits[offset]; } // end MAIN_LOOP if (colIdx == 0) dout.rollbackLine(); // If offset is still validly within the buffer, save it so the next pass // can start from there. if( offset+1 < bits.length ) { if( state == EXPECT_COND_LF && bits[offset+1] == CHAR_LF ) offset++; if( offset+1 < bits.length ) din.setChunkDataStart(cidx+1, offset+1 ); } return dout; } @Override protected int fileHasHeader(byte[] bits, ParseSetup ps) { boolean hasHdr = true; String[] lines = getFirstLines(bits, ps._single_quotes, _nonDataLineMarkers); if (lines != null && lines.length > 0) { String[] firstLine = determineTokens(lines[0], _setup._separator, _setup._single_quotes, _setup._escapechar); if (_setup._column_names != null) { for (int i = 0; hasHdr && i < firstLine.length; ++i) hasHdr = (_setup._column_names[i] == firstLine[i]) || (_setup._column_names[i] != null && _setup._column_names[i].equalsIgnoreCase(firstLine[i])); } else { // declared to have header, but no column names provided, assume header exist in all files _setup._column_names = firstLine; } } // else FIXME Throw exception return hasHdr ? ParseSetup.HAS_HEADER: ParseSetup.NO_HEADER; // consider making insensitive to quotes } // ========================================================================== /** Separators recognized by the CSV parser. You can add new separators to * this list and the parser will automatically attempt to recognize them. * In case of doubt the separators are listed in descending order of * probability, with space being the last one - space must always be the * last one as it is used if all other fails because multiple spaces can be * used as a single separator. */ public static final byte HIVE_SEP = 0x1; // '^A', Hive table column separator private static byte[] separators = new byte[] { HIVE_SEP, ',', ';', '|', '\t', ' '/*space is last in this list, because we allow multiple spaces*/ }; /** Determines the number of separators in given line. Correctly handles quoted tokens. */ private static int[] determineSeparatorCounts(String from, byte quoteChar, byte escapechar) { int[] result = new int[separators.length]; byte[] bits = StringUtils.bytesOf(from); boolean inQuote = false; boolean escaped, escaping = false; for( int bi=0; bi < bits.length; bi++ ) { byte c = bits[bi]; escaped = escaping; escaping = !escaped && ( c == escapechar || (inQuote && c == quoteChar && bi < bits.length-1 && bits[bi+1] == quoteChar) // 2 consecutive quotes inside a quote are not csv quotes ); if( c == quoteChar && !escaped && !escaping) inQuote ^= true; if( !inQuote || c == HIVE_SEP ) for( int i = 0; i < separators.length; ++i ) if( c == separators[i] ) ++result[i]; } return result; } /** Determines the tokens that are inside a line and returns them as strings * in an array. Assumes the given separator. 
*/ public static String[] determineTokens(String from, byte separator, boolean singleQuotes, byte escapechar) { final byte singleQuote = singleQuotes ? CsvParser.CHAR_SINGLE_QUOTE : CsvParser.CHAR_DOUBLE_QUOTE; return determineTokens(from, separator, singleQuote, escapechar); } public static String[] determineTokens(String from, byte separator, byte quoteChar, byte escapechar) { ArrayList<String> tokens = new ArrayList<>(); byte[] bits = StringUtils.bytesOf(from); int offset = 0; int quotes = 0; while (offset < bits.length) { while ((offset < bits.length) && (bits[offset] == CsvParser.CHAR_SPACE)) ++offset; // skip first whitespace if(offset == bits.length)break; final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); byte c = bits[offset]; boolean escaped, escaping = false; if (c == quoteChar) { quotes = c; ++offset; } while (offset < bits.length) { c = bits[offset]; escaped = escaping; escaping = !escaped && ( c == escapechar || (quotes > 0 && c == quoteChar && offset < bits.length-1 && bits[offset+1] == quoteChar) // 2 consecutive quotes inside a quote are not csv quotes ); if (c == quotes && !escaped && !escaping) { ++offset; if ((offset < bits.length) && (bits[offset] == c)) { byteArrayOutputStream.write(c); ++offset; continue; } quotes = 0; } else if( quotes == 0 && (c == separator || CsvParser.isEOL(c)) ) { break; } else { byteArrayOutputStream.write(c); ++offset; } } c = (offset == bits.length) ? CsvParser.CHAR_LF : bits[offset]; tokens.add(byteArrayOutputStream.toString()); if( CsvParser.isEOL(c) || (offset == bits.length) ) break; if (c != separator) return new String[0]; // an error ++offset; // Skip separator } // If we have trailing empty columns (split by separators) such as ",,\n" // then we did not add the final (empty) column, so the column count will // be down by 1. Add an extra empty column here if( bits.length > 0 && bits[bits.length-1] == separator && bits[bits.length-1] != CsvParser.CHAR_SPACE) tokens.add(""); return tokens.toArray(new String[tokens.size()]); } public static byte guessSeparator(String l1, String l2, boolean singleQuotes, byte escapechar) { final byte quoteChar = singleQuotes ? CsvParser.CHAR_SINGLE_QUOTE : CsvParser.CHAR_DOUBLE_QUOTE; int[] s1 = determineSeparatorCounts(l1, quoteChar, escapechar); int[] s2 = determineSeparatorCounts(l2, quoteChar, escapechar); // Now we have the counts - if both lines have the same number of // separators the we assume it is the separator. Separators are ordered by // their likelyhoods. int max = 0; for( int i = 0; i < s1.length; ++i ) { if( s1[i] == 0 ) continue; // Separator does not appear; ignore it if( s1[max] < s1[i] ) max=i; // Largest count sep on 1st line if( s1[i] == s2[i] && s1[i] >= s1[max]>>1 ) { // Sep counts are equal? And at nearly as large as the larger header sep? try { String[] t1 = determineTokens(l1, separators[i], quoteChar, escapechar); String[] t2 = determineTokens(l2, separators[i], quoteChar, escapechar); if( t1.length != s1[i]+1 || t2.length != s2[i]+1 ) continue; // Token parsing fails return separators[i]; } catch( Exception ignore ) { /*pass; try another parse attempt*/ } } } // No sep's appeared, or no sep's had equal counts on lines 1 & 2. If no // separators have same counts, the largest one will be used as the default // one. If there's no largest one, space will be used. 
if( s1[max]==0 ) max=separators.length-1; // Try last separator (space) if( s1[max]!=0 ) { String[] t1 = determineTokens(l1, separators[max], quoteChar, escapechar); String[] t2 = determineTokens(l2, separators[max], quoteChar, escapechar); if( t1.length == s1[max]+1 && t2.length == s2[max]+1 ) return separators[max]; } return GUESS_SEP; } // Guess number of columns public static int guessNcols( String[] columnNames, String[][] data ) { if( columnNames != null ) return columnNames.length; int longest = 0; // Longest line for( String[] s : data ) if( s.length > longest ) longest = s.length; if( longest == data[0].length ) return longest; // 1st line is longer than all the rest; take it // we don't have lines of same length, pick the most common length int lengths[] = new int[longest+1]; for( String[] s : data ) lengths[s.length]++; int maxCnt = 0; // Most common line length for( int i=0; i<=longest; i++ ) if( lengths[i] > lengths[maxCnt] ) maxCnt = i; return maxCnt; } /** Determines the CSV parser setup from the first few lines. Also parses * the next few lines, tossing out comments and blank lines. * * If the separator is GUESS_SEP, then it is guessed by looking at tokenization * and column count of the first few lines. * * If ncols is -1, then it is guessed similarly to the separator. * * singleQuotes is honored in all cases (and not guessed). * */ static ParseSetup guessSetup(byte[] bits, byte sep, int ncols, boolean singleQuotes, int checkHeader, String[] columnNames, byte[] columnTypes, String[][] naStrings, byte[] nonDataLineMarkers, byte escapechar) { if (nonDataLineMarkers == null) nonDataLineMarkers = NON_DATA_LINE_MARKERS_DEFAULT; int lastNewline = bits.length-1; while(lastNewline > 0 && !CsvParser.isEOL(bits[lastNewline]))lastNewline--; if(lastNewline > 0) bits = Arrays.copyOf(bits,lastNewline+1); String[] lines = getFirstLines(bits, singleQuotes, nonDataLineMarkers); if(lines.length==0 ) throw new ParseDataset.H2OParseException("No data!"); // Guess the separator, columns, & header String[] labels; final String[][] data = new String[lines.length][]; if( lines.length == 1 ) { // Ummm??? Only 1 line? if( sep == GUESS_SEP) { if (lines[0].split(",").length > 1) sep = (byte) ','; else if (lines[0].split(" ").length > 1) sep = ' '; else { //one item, guess type data[0] = new String[]{lines[0]}; byte[] ctypes = new byte[1]; String[][] domains = new String[1][]; if (NumberUtils.isNumber(data[0][0])) { ctypes[0] = Vec.T_NUM; } else { // non-numeric BufferedString str = new BufferedString(data[0][0]); if (ParseTime.isTime(str)) ctypes[0] = Vec.T_TIME; else if (ParseUUID.isUUID(str)) ctypes[0] = Vec.T_UUID; else { // give up and guess categorical ctypes[0] = Vec.T_CAT; domains[0] = new String[]{data[0][0]}; } } //FIXME should set warning message and let fall through return new ParseSetup(CSV_INFO, GUESS_SEP, singleQuotes, checkHeader, 1, null, ctypes, domains, naStrings, data, new ParseWriter.ParseErr[0],FileVec.DFLT_CHUNK_SIZE, nonDataLineMarkers, escapechar, false); } } data[0] = determineTokens(lines[0], sep, singleQuotes, escapechar); ncols = (ncols > 0) ? 
ncols : data[0].length; if( checkHeader == GUESS_HEADER) { if (ParseSetup.allStrings(data[0]) && !data[0][0].isEmpty()) { labels = data[0]; checkHeader = HAS_HEADER; } else { labels = null; checkHeader = NO_HEADER; } } else if( checkHeader == HAS_HEADER ) labels = data[0]; else labels = null; } else { // 2 or more lines // First guess the field separator by counting occurrences in first few lines if( sep == GUESS_SEP) { // first guess the separator sep = guessSeparator(lines[0], lines[1], singleQuotes, escapechar); if( sep == GUESS_SEP && lines.length > 2 ) { sep = guessSeparator(lines[1], lines[2], singleQuotes, escapechar); if( sep == GUESS_SEP) sep = guessSeparator(lines[0], lines[2], singleQuotes, escapechar); } if( sep == GUESS_SEP) sep = (byte)' '; // Bail out, go for space } // Tokenize the first few lines using the separator for( int i = 0; i < lines.length; ++i ) data[i] = determineTokens(lines[i], sep, singleQuotes, escapechar); // guess columns from tokenization ncols = guessNcols(columnNames,data); // Asked to check for a header, so see if 1st line looks header-ish if( checkHeader == HAS_HEADER || ( checkHeader == GUESS_HEADER && ParseSetup.hasHeader(data[0], data[1]))) { checkHeader = HAS_HEADER; labels = data[0]; } else { checkHeader = NO_HEADER; labels = columnNames; } // See if compatible headers if( columnNames != null && labels != null ) { if( labels.length != columnNames.length ) throw new ParseDataset.H2OParseException("Already have "+columnNames.length+" column labels, but found "+labels.length+" in this file"); else { for( int i = 0; i < labels.length; ++i ) if( !labels[i].equalsIgnoreCase(columnNames[i]) ) { throw new ParseDataset.H2OParseException("Column "+(i+1)+" label '"+labels[i]+"' does not match '"+columnNames[i]+"'"); } labels = columnNames; // Keep prior case & count in any case } } } // Assemble the setup understood so far ParseSetup resSetup = new ParseSetup(CSV_INFO, sep, singleQuotes, checkHeader, ncols, labels, null, null /*domains*/, naStrings, data, nonDataLineMarkers, escapechar, false); // now guess the types if (columnTypes == null || ncols != columnTypes.length) { int i = bits.length-1; for(; i > 0; --i) if(bits[i] == '\n') break; if(i > 0) bits = Arrays.copyOf(bits,i); // stop at the last full line CsvParser p = new CsvParser(resSetup, null); PreviewParseWriter dout = new PreviewParseWriter(resSetup._number_columns); try { p.parseChunk(0,new ByteAryData(bits,0), dout); resSetup._column_previews = dout; resSetup.addErrs(dout._errs); } catch (Throwable e) { throw new RuntimeException(e); } } else { // If user sets column type as unknown/bad, guess numeric. 
for(int i=0; i < columnTypes.length; i++) if (columnTypes[i] == Vec.T_BAD) columnTypes[i] = Vec.T_NUM; resSetup._column_types = columnTypes; resSetup._na_strings = null; } // Return the final setup return resSetup; } private static String[] getFirstLines(byte[] bits, boolean singleQuotes, byte[] nonDataLineMarkers) { // Parse up to 10 lines (skipping hash-comments & ARFF comments) String[] lines = new String[10]; // Parse 10 lines int nlines = 0; int offset = 0; boolean comment = false; while( offset < bits.length && nlines < lines.length ) { if (bits[offset] == HASHTAG) comment = true; int lineStart = offset; int quoteCount = 0; while (offset < bits.length) { if (!comment && ( (!singleQuotes && bits[offset] == CHAR_DOUBLE_QUOTE) || (singleQuotes && bits[offset] == CHAR_SINGLE_QUOTE))) quoteCount++; if (CsvParser.isEOL(bits[offset]) && quoteCount % 2 == 0){ comment = false; break; } ++offset; } int lineEnd = offset; ++offset; // For Windoze, skip a trailing LF after CR if( (offset < bits.length) && (bits[offset] == CsvParser.CHAR_LF)) ++offset; if (ArrayUtils.contains(nonDataLineMarkers, bits[lineStart])) continue; if( lineEnd > lineStart ) { String str = new String(bits, lineStart,lineEnd-lineStart).trim(); if( !str.isEmpty() ) lines[nlines++] = str; } } return Arrays.copyOf(lines, nlines); } }
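The separator guessing and tokenization above are exposed as public static helpers, so they can be exercised directly. A short sketch (not part of the source; the escape character is passed explicitly here and chosen arbitrarily):

import java.util.Arrays;
import water.parser.CsvParser;

public class CsvGuessExample {
  public static void main(String[] args) {
    String l1 = "sepal_len,sepal_wid,species";
    String l2 = "5.1,3.5,\"Iris, setosa\"";

    // Guess the separator by comparing per-separator token counts of the first two lines.
    byte sep = CsvParser.guessSeparator(l1, l2, false /*singleQuotes*/, (byte) '\\');
    System.out.println((char) sep);              // ','

    // Tokenize with the guessed separator; the quoted comma does not split the last field.
    String[] tokens = CsvParser.determineTokens(l2, sep, false /*singleQuotes*/, (byte) '\\');
    System.out.println(Arrays.toString(tokens)); // 3 tokens, the last one is "Iris, setosa"
  }
}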
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/DecryptionTool.java
package water.parser; import water.DKV; import water.Iced; import water.Key; import water.Keyed; import water.fvec.ByteVec; import water.fvec.Frame; import javax.crypto.spec.SecretKeySpec; import java.io.IOException; import java.io.InputStream; import java.lang.reflect.Constructor; import java.security.GeneralSecurityException; import java.security.KeyStore; /** * Base class for implementations of the Decryption Tool * * Decryption Tool is applied to the raw data loaded from source files and decrypts them on-the-fly. */ public abstract class DecryptionTool extends Keyed<DecryptionTool> { DecryptionTool(Key<DecryptionTool> key) { super(key); } /** * Decrypts the beginning of the file and returns its clear-text binary representation. * @param bits the first chunk of data of the datafile. The input byte array can contain zero-bytes padding (eg. case of * DEFLATE compression in Zip files, the decompressed data can be smaller than the source chunk). * The implementation of the method should discard the padding (all zero bytes at the end of the array). * @return Decrypted binary data or the input byte array if Tool is not compatible with this input. */ public abstract byte[] decryptFirstBytes(final byte[] bits); /** * Wraps the source InputStream into deciphering input stream * @param is InputStream created by ByteVec (H2O-specific behavior is expected!) * @return InputStream of decrypted data */ public abstract InputStream decryptInputStream(final InputStream is); public boolean isTransparent() { return false; } /** * Retrieves a Decryption Tool from DKV using a given key. * @param key a valid DKV key or null * @return instance of Decryption Tool for a valid key, Null Decryption tool for a null key */ public static DecryptionTool get(Key<DecryptionTool> key) { if (key == null) return new NullDecryptionTool(); DecryptionTool decrypt = DKV.getGet(key); return decrypt == null ? new NullDecryptionTool() : decrypt; } /** * Retrieves a Secret Key using a given Decryption Setup. * @param ds decryption setup * @return SecretKey */ static SecretKeySpec readSecretKey(DecryptionSetup ds) { Keyed<?> ksObject = DKV.getGet(ds._keystore_id); ByteVec ksVec = (ByteVec) (ksObject instanceof Frame ? ((Frame) ksObject).vec(0) : ksObject); InputStream ksStream = ksVec.openStream(null /*job key*/); try { KeyStore keystore = KeyStore.getInstance(ds._keystore_type); keystore.load(ksStream, ds._password); if (! keystore.containsAlias(ds._key_alias)) { throw new IllegalArgumentException("Alias for key not found"); } java.security.Key key = keystore.getKey(ds._key_alias, ds._password); return new SecretKeySpec(key.getEncoded(), key.getAlgorithm()); } catch (GeneralSecurityException e) { throw new RuntimeException("Unable to load key " + ds._key_alias + " from keystore " + ds._keystore_id, e); } catch (IOException e) { throw new RuntimeException("Failed to read keystore " + ds._keystore_id, e); } } /** * Instantiates a Decryption Tool using a given Decryption Setup and installs it in DKV. * @param ds decryption setup * @return instance of a Decryption Tool */ public static DecryptionTool make(DecryptionSetup ds) { if (ds._decrypt_tool_id == null) ds._decrypt_tool_id = Key.make(); try { Class<?> dtClass = DecryptionTool.class.getClassLoader().loadClass(ds._decrypt_impl); if (! 
DecryptionTool.class.isAssignableFrom(dtClass)) { throw new IllegalArgumentException("Class " + ds._decrypt_impl + " doesn't implement a Decryption Tool."); } Constructor<?> constructor = dtClass.getConstructor(DecryptionSetup.class); DecryptionTool dt = (DecryptionTool) constructor.newInstance(ds); DKV.put(dt); return dt; } catch (ClassNotFoundException e) { throw new RuntimeException("Unknown decrypt tool: " + ds._decrypt_impl, e); } catch (NoSuchMethodException e) { throw new RuntimeException("Invalid implementation of Decryption Tool (missing constructor).", e); } catch (Exception e) { throw new RuntimeException(e); } } /** * Blueprint of the Decryption Tool */ public static class DecryptionSetup extends Iced<DecryptionSetup> { public Key<DecryptionTool> _decrypt_tool_id; // where will be the instantiated tool installed public String _decrypt_impl = GenericDecryptionTool.class.getName(); // implementation public Key<?> _keystore_id; // where to find Java KeyStore file (Frame key or Vec key) public String _keystore_type; // what kind of KeyStore is used public String _key_alias; // what is the alias of the key in the keystore public char[] _password; // password to the keystore and to the keyentry public String _cipher_spec; // specification of the cipher (and padding) } }
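DecryptionSetup is the blueprint used to install a tool into DKV; the sketch below (not part of the source) shows only the wiring. It assumes a running H2O node and a Java keystore already uploaded under the illustrative key "my.jks"; the keystore type, alias, password and cipher spec are placeholders.

import water.Key;
import water.parser.DecryptionTool;
import water.parser.DecryptionTool.DecryptionSetup;

public class DecryptionToolExample {
  static DecryptionTool installTool() {
    DecryptionSetup ds = new DecryptionSetup();
    ds._keystore_id   = Key.make("my.jks");          // Frame/Vec key of the uploaded keystore (placeholder)
    ds._keystore_type = "JCEKS";                     // placeholder
    ds._key_alias     = "aesKey";                    // placeholder
    ds._password      = "secret".toCharArray();      // placeholder
    ds._cipher_spec   = "AES/ECB/PKCS5Padding";      // placeholder

    // Instantiates the default GenericDecryptionTool and installs it in DKV.
    DecryptionTool.make(ds);

    // Later (e.g. inside the parser) the same tool is fetched back by its key.
    return DecryptionTool.get(ds._decrypt_tool_id);
  }
}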
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/DefaultParserProviders.java
package water.parser; import java.util.List; import water.Job; import water.Key; import water.exceptions.H2OUnsupportedDataFileException; import water.fvec.ByteVec; import water.util.Log; /** * Default parsers provided by H2O. * * The parser are registered via service providers interface into * <code>{@link ParserService}</code>. */ public final class DefaultParserProviders { /** Default parser handles */ public static final ParserInfo ARFF_INFO = new ParserInfo("ARFF", 0, true); public static final ParserInfo XLS_INFO = new ParserInfo("XLS", 100, false); public static final ParserInfo XLSX_INFO = new ParserInfo("XLSX", 102, false); public static final ParserInfo SVMLight_INFO = new ParserInfo("SVMLight", 1000, true); public static final ParserInfo CSV_INFO = new ParserInfo("CSV", Integer.MAX_VALUE, true); public static final ParserInfo GUESS_INFO = new ParserInfo("GUESS", -10000, false); /** Priority of non-core parsers should begin here.*/ public static final int MAX_CORE_PRIO = 10000; public final static class ArffParserProvider extends AbstractParserProvide { @Override public ParserInfo info() { return ARFF_INFO; } @Override public Parser createParser(ParseSetup setup, Key<Job> jobKey) { return new ARFFParser(setup, jobKey); } @Override public ParseSetup guessInitSetup(ByteVec v, byte[] bits, ParseSetup ps) { return ARFFParser.guessSetup(v, bits, ps._separator, ps._single_quotes, ps._column_names, ps._na_strings, ps._nonDataLineMarkers, ps._escapechar); } } public final static class XlsParserProvider extends AbstractParserProvide { @Override public ParserInfo info() { return XLS_INFO; } @Override public Parser createParser(ParseSetup setup, Key<Job> jobKey) { return new XlsParser(setup, jobKey); } @Override public ParseSetup guessSetup(ByteVec bv, byte[] bits, byte sep, int ncols, boolean singleQuotes, int checkHeader, String[] columnNames, byte[] columnTypes, String[][] domains, String[][] naStrings) { return XlsParser.guessSetup(bits); } } public final static class SVMLightParserProvider extends AbstractParserProvide { @Override public ParserInfo info() { return SVMLight_INFO; } @Override public Parser createParser(ParseSetup setup, Key<Job> jobKey) { return new SVMLightParser(setup, jobKey); } @Override public ParseSetup guessSetup(ByteVec bv, byte[] bits, byte sep, int ncols, boolean singleQuotes, int checkHeader, String[] columnNames, byte[] columnTypes, String[][] domains, String[][] naStrings) { return SVMLightParser.guessSetup(bits); } } public final static class CsvParserProvider extends AbstractParserProvide { @Override public ParserInfo info() { return CSV_INFO; } @Override public Parser createParser(ParseSetup setup, Key<Job> jobKey) { return new CsvParser(setup, jobKey); } @Override public ParseSetup guessInitSetup(ByteVec v, byte[] bits, ParseSetup ps) { return CsvParser.guessSetup(bits, ps._separator, ps._number_columns, ps._single_quotes, ps._check_header, ps._column_names, ps._column_types, ps._na_strings, ps._nonDataLineMarkers, ps._escapechar); } } public final static class GuessParserProvider extends AbstractParserProvide { @Override public ParserInfo info() { return GUESS_INFO; } @Override protected ParseSetup guessSetup_impl(ByteVec bv, byte[] bits, ParseSetup userSetup) { List<ParserProvider> pps = ParserService.INSTANCE.getAllProviders(true); // Sort them based on priorities ParseSetup parseSetup = null; ParserProvider provider = null; for (ParserProvider pp : pps) { // Do not do recursive call if (pp == this || pp.info().equals(GUESS_INFO)) continue; // Else 
try to guess with given provider try { ParseSetup ps = pp.guessInitSetup(bv, bits, userSetup); if (ps != null) { // found a parser for the data type provider = pp; parseSetup = ps; break; } } catch (H2OUnsupportedDataFileException e) { throw e; } catch (Throwable ignore) { /*ignore failed parse attempt*/ Log.trace("Guesser failed for parser type", pp.info(), ignore); } } if (provider == null) throw new ParseDataset.H2OParseException("Cannot determine file type."); // finish parse setup & don't ignore the exceptions return provider.guessFinalSetup(bv, bits, parseSetup); } @Override public Parser createParser(ParseSetup setup, Key<Job> jobKey) { throw new UnsupportedOperationException("Guess parser provided does not know how to create a new parser! Use a specific parser!"); } } static abstract class AbstractParserProvide extends ParserProvider { @Override public ParseSetup createParserSetup(Key[] inputs, ParseSetup requiredSetup) { return requiredSetup; } } }
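The GUESS provider above works by walking every registered provider in priority order. The same ordered view can be listed directly; a sketch (not part of the source, assuming the water.parser service classes are accessible from the caller's package):

import java.util.List;
import water.parser.ParserProvider;
import water.parser.ParserService;

public class ListProvidersExample {
  static void printProviders() {
    // 'true' requests the providers ordered by priority, as in GuessParserProvider above.
    List<ParserProvider> providers = ParserService.INSTANCE.getAllProviders(true);
    for (ParserProvider pp : providers)
      System.out.println(pp.info());
  }
}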
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/FVecParseReader.java
package water.parser;

import water.fvec.Chunk;
import water.fvec.Vec;

/**
 * Parse reader that takes its data from a fluid-vec chunk.
 * @author tomasnykodym
 */
public class FVecParseReader implements ParseReader {
  final Vec _vec;
  Chunk _chk;
  int _idx;
  final long _firstLine;
  private long _goffset = 0;

  public FVecParseReader(Chunk chk){
    _chk = chk;
    _idx = _chk.cidx();
    _firstLine = chk.start();
    _vec = chk.vec();
  }

  @Override public byte[] getChunkData(int cidx) {
    if(cidx != _idx)
      _chk = cidx < _vec.nChunks()?_vec.chunkForChunkIdx(_idx = cidx):null;
    if(_chk == null)
      return null;
    _goffset = _chk.start();
    return _chk.getBytes();
  }

  @Override public int getChunkDataStart(int cidx) { return -1; }
  @Override public void setChunkDataStart(int cidx, int offset) { }
  @Override public long getGlobalByteOffset(){ return _goffset; }

  /**
   * Exposes directly the underlying chunk. This function is safe to be used only
   * in implementations of Parsers that cannot be used in a streaming context.
   * Use with caution.
   * @return underlying Chunk
   */
  public Chunk getChunk() { return _chk; }
}
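FVecParseReader is how a parser walks the raw bytes of a file-backed Vec chunk by chunk. A small sketch (not part of the source; assumes a running H2O node and an already-loaded byte Vec):

import water.fvec.Vec;
import water.parser.FVecParseReader;

public class ReaderWalkExample {
  static long countBytes(Vec vec) {
    FVecParseReader din = new FVecParseReader(vec.chunkForChunkIdx(0));
    long total = 0;
    for (int cidx = 0; cidx < vec.nChunks(); cidx++) {
      byte[] bits = din.getChunkData(cidx);   // lazily switches the reader to the requested chunk
      if (bits == null) break;
      total += bits.length;
    }
    return total;
  }
}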
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/FVecParseWriter.java
package water.parser; import water.*; import water.fvec.AppendableVec; import water.fvec.NewChunk; import water.fvec.Vec; import water.util.ArrayUtils; import java.util.Arrays; import java.util.UUID; /** Parsed data output specialized for fluid vecs. * @author tomasnykodym */ public class FVecParseWriter extends Iced implements StreamParseWriter { protected AppendableVec[] _vecs; protected transient NewChunk[] _nvs; protected transient final Categorical [] _categoricals; protected transient final byte[] _ctypes; long _nLines; int _nCols; int _col = -1; final int _cidx; final int _chunkSize; ParseErr [] _errs = new ParseErr[0]; private final Vec.VectorGroup _vg; private final Key<Job> _jobKey; private long _errCnt; int[] _parse_columns_indices; public FVecParseWriter(Vec.VectorGroup vg, int cidx, Categorical[] categoricals, byte[] ctypes, int chunkSize, AppendableVec[] avs) { this(vg, cidx, categoricals, ctypes, chunkSize, avs, null, null); } // note that if parse_columns_indices==null, it implies all columns are parsed. public FVecParseWriter(Vec.VectorGroup vg, int cidx, Categorical[] categoricals, byte[] ctypes, int chunkSize, AppendableVec[] avs, int[] parse_columns_indices, Key<Job> jobKey) { boolean ctypesShrunk = false; // Required not-null if ((parse_columns_indices!=null) && (categoricals!=null) && (parse_columns_indices.length == categoricals.length)) { // for nextChunk() calls in gzip/zip parser _ctypes = ctypes; _categoricals = categoricals; _vecs = avs; _parse_columns_indices = parse_columns_indices; int num_parse_columns=parse_columns_indices.length; _nvs = new NewChunk[num_parse_columns]; for (int i = 0; i < num_parse_columns; ++i) { _nvs[i] = avs[i].chunkForChunkIdx(cidx); } } else { if (parse_columns_indices == null) { parse_columns_indices = new int[avs.length]; _ctypes = ctypes; _categoricals = categoricals; _vecs = avs; for (int index = 0; index < avs.length; index++) parse_columns_indices[index] = index; } else { int parseColNum = parse_columns_indices.length; _ctypes = ctypes == null ? null : new byte[parseColNum]; // svmlight file can have ctypes=null _categoricals = categoricals == null ? null : new Categorical[parseColNum]; // svmlight file can have categoricals==null ctypesShrunk = (categoricals==null)? avs.length <= parse_columns_indices.length: avs.length < categoricals.length; // may not be the same if user sets skipped_columns already. _vecs = new AppendableVec[parseColNum]; for (int index = 0; index < parse_columns_indices.length; index++) { if (ctypes != null) // happens with SVMlight _ctypes[index] = ctypes[parse_columns_indices[index]]; if (categoricals != null) // happens with SVMlight _categoricals[index] = categoricals[parse_columns_indices[index]]; // categoricals calculated for all columns _vecs[index] = ctypesShrunk?avs[index]:avs[parse_columns_indices[index]]; } } _parse_columns_indices = parse_columns_indices; int num_parse_columns = parse_columns_indices.length; _nvs = new NewChunk[num_parse_columns]; for (int i = 0; i < num_parse_columns; ++i) { _nvs[i] = ctypesShrunk?avs[i].chunkForChunkIdx(cidx):avs[parse_columns_indices[i]].chunkForChunkIdx(cidx); } } _nCols = _nvs.length; // actual columns being passed, exclude skipped columns. 
_cidx = cidx; _vg = vg; _chunkSize = chunkSize; _jobKey = jobKey; FrameSizeMonitor.register(jobKey, this); } @Override public FVecParseWriter reduce(StreamParseWriter sdout) { FVecParseWriter dout = (FVecParseWriter)sdout; _nCols = Math.max(_nCols,dout._nCols); // SVMLight: max of columns if( _vecs != dout._vecs ) { if( dout._vecs.length > _vecs.length ) { // Swap longer one over the returned value AppendableVec[] tmpv = _vecs; _vecs = dout._vecs; dout._vecs = tmpv; } for(int i = 0; i < dout._vecs.length; ++i) _vecs[i].reduce(dout._vecs[i]); } _errCnt += ((FVecParseWriter) sdout)._errCnt; if(_errs.length < 20 && ((FVecParseWriter) sdout)._errs.length > 0) { _errs = ArrayUtils.append(_errs, ((FVecParseWriter) sdout)._errs); if(_errs.length > 20) _errs = Arrays.copyOf(_errs,20); } return this; } @Override public FVecParseWriter close(){ Futures fs = new Futures(); close(fs); fs.blockForPending(); return this; } @Override public FVecParseWriter close(Futures fs){ if( _nvs == null ) return this; // Might call close twice long mem = 0; for(int i=0; i < _nvs.length; i++) { _nvs[i].close(_cidx, fs); mem += _nvs[i].chk2().byteSize(); _nvs[i] = null; // free immediately, don't wait for all columns to close } FrameSizeMonitor.closed(_jobKey, this, mem); _nvs = null; // Free for GC return this; } @Override public FVecParseWriter nextChunk(){ return new FVecParseWriter(_vg, _cidx+1, _categoricals, _ctypes, _chunkSize, _vecs, _parse_columns_indices, _jobKey); } @Override public void newLine() { if(_col >= 0){ ++_nLines; for(int i = _col+1; i < _nCols; ++i) addInvalidCol(i); } _col = -1; } @Override public void addNumCol(int colIdx, long number, int exp) { if( colIdx < _nCols ) { _nvs[_col = colIdx].addNum(number, exp); if(_ctypes != null && _ctypes[colIdx] == Vec.T_BAD ) _ctypes[colIdx] = Vec.T_NUM; } } @Override public final void addInvalidCol(int colIdx) { if(colIdx < _nCols) _nvs[_col = colIdx].addNA(); } @Override public void addNAs(int colIdx, int nrows) { (_nvs[colIdx] = _vecs[colIdx].chunkForChunkIdx(_cidx)).addNAs(nrows); } @Override public boolean isString(int colIdx) { return (colIdx < _nCols) && (_ctypes[colIdx] == Vec.T_CAT || _ctypes[colIdx] == Vec.T_STR);} @Override public void addStrCol(int colIdx, BufferedString str) { if(colIdx < _nvs.length){ if(_ctypes[colIdx] == Vec.T_NUM){ // support enforced types addInvalidCol(colIdx); return; } if(_ctypes[colIdx] == Vec.T_BAD && ParseTime.isTime(str)) _ctypes[colIdx] = Vec.T_TIME; if( _ctypes[colIdx] == Vec.T_BAD && ParseUUID.isUUID(str)) _ctypes[colIdx] = Vec.T_UUID; if( _ctypes[colIdx] == Vec.T_TIME ) { long l = ParseTime.attemptTimeParse(str); if( l == Long.MIN_VALUE ) addInvalidCol(colIdx); else { addNumCol(colIdx, l, 0); // Record time in msec _nvs[_col]._timCnt++; // Count histo of time parse patterns } } else if( _ctypes[colIdx] == Vec.T_UUID ) { // UUID column? 
Only allow UUID parses UUID uuid = ParseUUID.attemptUUIDParse(str); // FIXME: what if colIdx > _nCols if( colIdx < _nCols ) _nvs[_col = colIdx].addUUID(uuid); } else if( _ctypes[colIdx] == Vec.T_STR ) { _nvs[_col = colIdx].addStr(str); } else { // categoricals if(!_categoricals[colIdx].isMapFull()) { int id = _categoricals[_col = colIdx].addKey(str); if (_ctypes[colIdx] == Vec.T_BAD && id > 1) _ctypes[colIdx] = Vec.T_CAT; if(_ctypes[colIdx] == Vec.T_CAT) { _nvs[colIdx].addNum(id, 0); // if we are sure we have a categorical column, we can only store the integer (more efficient than remembering this value was categorical) } else _nvs[colIdx].addCategorical(id); } else { // maxed out categorical map throw new ParseDataset.H2OParseException("Exceeded categorical limit on column #"+(colIdx+1)+" (using 1-based indexing). Consider reparsing this column as a string."); } } } } /** Adds double value to the column. */ @Override public void addNumCol(int colIdx, double value) { if (Double.isNaN(value) || Double.isInfinite(value)) { addInvalidCol(colIdx); } else { if( colIdx < _nCols ) { _nvs[_col = colIdx].addNumDecompose(value); if(_ctypes != null && _ctypes[colIdx] == Vec.T_BAD ) _ctypes[colIdx] = Vec.T_NUM; } } } @Override public void setColumnNames(String [] names){} @Override public final void rollbackLine() {} @Override public void invalidLine(ParseErr err) { addErr(err); newLine(); } @Override public void addError(ParseErr err) { if(_errs == null) _errs = new ParseErr[]{err}; else if(_errs.length < 20) _errs = ArrayUtils.append(_errs,err); _errCnt++; } @Override public void setIsAllASCII(int colIdx, boolean b) { if(colIdx < _nvs.length) _nvs[colIdx]._isAllASCII = b; } @Override public boolean hasErrors() { return _errs != null && _errs.length > 0; } @Override public ParseErr[] removeErrors() { ParseErr [] res = _errs; _errs = null; return res; } @Override public long lineNum() {return _nLines;} public void addErr(ParseErr err){ if(_errs.length < 20) _errs = ArrayUtils.append(_errs,err); ++_errCnt; } public NewChunk[] getNvs() { return _nvs; } }
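FVecParseWriter is the sink the parsers write into; each row is a sequence of add*Col calls followed by newLine(). A sketch of that call sequence (not part of the source; 'dout' is assumed to be constructed by the parse pipeline with at least three columns):

import water.parser.BufferedString;
import water.parser.FVecParseWriter;

public class WriterCallsExample {
  static void emitRow(FVecParseWriter dout) {
    dout.addNumCol(0, 425, -1);                    // column 0: 42.5 stored as mantissa 425, exponent -1
    dout.addStrCol(1, new BufferedString("red"));  // column 1: string/categorical token
    dout.addInvalidCol(2);                         // column 2: missing value (NA)
    dout.newLine();                                // finish the row; unwritten trailing columns become NAs
  }
}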
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/GenericDecryptionTool.java
package water.parser; import javax.crypto.Cipher; import javax.crypto.CipherInputStream; import javax.crypto.spec.SecretKeySpec; import java.io.IOException; import java.io.InputStream; import java.security.GeneralSecurityException; public class GenericDecryptionTool extends DecryptionTool { private final byte[] _encoded_key; private final String _key_algo; private final String _cipher_spec; public GenericDecryptionTool(DecryptionSetup ds) { super(ds._decrypt_tool_id); SecretKeySpec secretKey = readSecretKey(ds); _key_algo = secretKey.getAlgorithm(); _encoded_key = secretKey.getEncoded(); _cipher_spec = ds._cipher_spec; } @Override public byte[] decryptFirstBytes(final byte[] bits) { Cipher cipher = createDecipherer(); final int bs = cipher.getBlockSize(); int len = bits.length; if (((bs > 0) && (len % bs != 0)) || bits[len - 1] == 0) { while (len > 0 && bits[len - 1] == 0) len--; len = bs > 0 ? len - (len % bs) : len; } return cipher.update(bits, 0, len); } @Override public InputStream decryptInputStream(final InputStream is) { Cipher cipher = createDecipherer(); return new CipherInputStream(is, cipher) { @Override public int read(byte[] b, int off, int len) throws IOException { if (b == null) { // Back-channel read of chunk idx (delegated to the original InputStream) return is.read(null, off, len); } return super.read(b, off, len); } @Override public int available() throws IOException { int avail = super.available(); // H2O's contract with the available() method differs from the contract specified by InputStream // we need to make sure we return a positive number (if we don't have anything in the buffer - ask the original IS) return avail > 0 ? avail : is.available(); } }; } private Cipher createDecipherer() { SecretKeySpec secKeySpec = new SecretKeySpec(_encoded_key, _key_algo); try { Cipher cipher = Cipher.getInstance(_cipher_spec); cipher.init(Cipher.DECRYPT_MODE, secKeySpec); return cipher; } catch (GeneralSecurityException e) { throw new RuntimeException("Cipher initialization failed", e); } } }
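decryptFirstBytes above has to cope with chunks that are zero-padded and not block-aligned. The standalone sketch below (not part of the source) mirrors just that length-trimming rule so the arithmetic is easy to follow:

public class TrimExample {
  // Drop trailing zero padding left by the chunk loader, then round down to a
  // whole number of cipher blocks so Cipher.update() never sees a partial block.
  static int usableLength(byte[] bits, int blockSize) {
    int len = bits.length;
    if ((blockSize > 0 && len % blockSize != 0) || bits[len - 1] == 0) {
      while (len > 0 && bits[len - 1] == 0) len--;            // strip zero padding
      len = blockSize > 0 ? len - (len % blockSize) : len;    // align to block size
    }
    return len;
  }

  public static void main(String[] args) {
    byte[] chunk = new byte[100];                   // e.g. 70 ciphertext bytes + 30 bytes of zero padding
    java.util.Arrays.fill(chunk, 0, 70, (byte) 1);
    System.out.println(usableLength(chunk, 16));    // 64 = 70 rounded down to 4 AES blocks
  }
}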
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/NullDecryptionTool.java
package water.parser;

import water.Key;

import java.io.InputStream;

public class NullDecryptionTool extends DecryptionTool {

  public NullDecryptionTool() {
    super(null);
  }

  @Override
  public byte[] decryptFirstBytes(byte[] bits) {
    return bits;
  }

  @Override
  public InputStream decryptInputStream(InputStream is) {
    return is;
  }

  @Override
  public boolean isTransparent() {
    return true;
  }
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/PackedDomains.java
package water.parser;

import water.MemoryManager;
import water.util.StringUtils;

import static water.util.ArrayUtils.*;

public class PackedDomains {

  public static int sizeOf(byte[] domain) {
    return encodeAsInt(domain, 0);
  }

  public static String[] unpackToStrings(byte[] domain) {
    final int n = sizeOf(domain);
    String[] out = new String[n];
    int pos = 4;
    for (int i = 0; i < n; i++) {
      int len = encodeAsInt(domain, pos);
      pos += 4;
      out[i] = StringUtils.toString(domain, pos, len);
      pos += len;
    }
    return out;
  }

  public static byte[] pack(BufferedString[] source) {
    int len = 0;
    for (BufferedString bs : source) len += bs.length();
    byte[] data = new byte[len + (source.length + 1) * 4];
    decodeAsInt(source.length, data, 0);
    int pos = 4;
    for (BufferedString bs : source) {
      byte[] buff = bs.getBuffer();
      decodeAsInt(bs.length(), data, pos);
      pos += 4;
      for (int i = bs.getOffset(); i < bs.getOffset() + bs.length(); i++)
        data[pos++] = buff[i];
    }
    return data;
  }

  static int calcMergedSize(byte[] as, byte[] bs) {
    int shared = 0;
    int pA = 4;
    int pB = 4;
    BufferedString bsA = new BufferedString(as, 0, 0);
    BufferedString bsB = new BufferedString(bs, 0, 0);
    while ((pA < as.length) && (pB < bs.length)) {
      int sizeA = encodeAsInt(as, pA);
      bsA.setOff(pA + 4);
      bsA.setLen(sizeA);
      int sizeB = encodeAsInt(bs, pB);
      bsB.setOff(pB + 4);
      bsB.setLen(sizeB);
      int x = bsA.compareTo(bsB);
      if (x < 0) {
        pA += sizeA + 4;
      } else if (x > 0) {
        pB += sizeB + 4;
      } else {
        shared += sizeA + 4;
        pA += sizeA + 4;
        pB += sizeA + 4;
      }
    }
    return as.length + bs.length - 4 - shared;
  }

  public static byte[] merge(byte[] as, byte[] bs) {
    int size = calcMergedSize(as, bs);
    if (size == as.length) return as;
    if (size == bs.length) return bs;
    byte[] data = MemoryManager.malloc1(size);
    int shared = 0; // number of shared words
    int pos = 4;    // position in output
    int pA = 4;     // position in A
    int pB = 4;     // position in B
    while (pA < as.length && pB < bs.length) {
      int wordPos = pos;
      pos += 4;
      int wA = pA;
      int sizeA = encodeAsInt(as, pA);
      pA += 4;
      int endA = pA + sizeA;
      int wB = pB;
      int sizeB = encodeAsInt(bs, pB);
      pB += 4;
      int endB = pB + sizeB;
      int l = sizeA > sizeB ? sizeB : sizeA;
      int comp = sizeA - sizeB;
      for (int i = 0; i < l; i++) {
        int x = (0xFF & as[pA]) - (0xFF & bs[pB]);
        if (x != 0) {
          comp = x;
          break;
        }
        data[pos++] = as[pA++];
        pB++;
      }
      if ((pA == endA) && (pB == endB)) { // words were the same
        decodeAsInt(sizeA, data, wordPos);
        shared++;
      } else if (comp < 0) { // output word A
        while (pA < endA) data[pos++] = as[pA++];
        decodeAsInt(sizeA, data, wordPos);
        pB = wB;
      } else { // output word B
        while (pB < endB) data[pos++] = bs[pB++];
        decodeAsInt(sizeB, data, wordPos);
        pA = wA;
      }
    }
    while (pA < as.length) data[pos++] = as[pA++];
    while (pB < bs.length) data[pos++] = bs[pB++];
    int len = encodeAsInt(as, 0) + encodeAsInt(bs, 0) - shared;
    decodeAsInt(len, data, 0);
    return data;
  }
}
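PackedDomains serializes a sorted column domain as a leading 4-byte count followed by length-prefixed UTF-8 words, so per-node domains can be shipped between nodes and combined with a single sorted-merge pass. The sketch below reproduces only that layout using java.nio.ByteBuffer instead of ArrayUtils.encodeAsInt/decodeAsInt, so the byte order (ByteBuffer's big-endian default) may differ from H2O's helpers; it is meant to make the format readable, not to replace the implementation above.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

class PackedDomainSketch {
  // Layout: [count][len0][bytes0][len1][bytes1]...
  static byte[] pack(String[] words) {
    int total = 4;
    for (String w : words) total += 4 + w.getBytes(StandardCharsets.UTF_8).length;
    ByteBuffer bb = ByteBuffer.allocate(total);
    bb.putInt(words.length);
    for (String w : words) {
      byte[] b = w.getBytes(StandardCharsets.UTF_8);
      bb.putInt(b.length);
      bb.put(b);
    }
    return bb.array();
  }

  static String[] unpack(byte[] packed) {
    ByteBuffer bb = ByteBuffer.wrap(packed);
    String[] out = new String[bb.getInt()];
    for (int i = 0; i < out.length; i++) {
      byte[] b = new byte[bb.getInt()];
      bb.get(b);
      out[i] = new String(b, StandardCharsets.UTF_8);
    }
    return out;
  }

  public static void main(String[] args) {
    byte[] packed = pack(new String[]{"blue", "green", "red"}); // domains are kept sorted before merging
    System.out.println(java.util.Arrays.toString(unpack(packed)));
  }
}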
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/ParseDataset.java
package water.parser; import com.github.luben.zstd.ZstdInputStream; import jsr166y.CountedCompleter; import jsr166y.ForkJoinTask; import jsr166y.RecursiveAction; import water.*; import water.H2O.H2OCountedCompleter; import water.exceptions.H2OIllegalArgumentException; import water.exceptions.H2OIllegalValueException; import water.fvec.*; import water.fvec.Vec.VectorGroup; import water.nbhm.NonBlockingHashMap; import water.nbhm.NonBlockingSetInt; import water.util.ArrayUtils; import water.util.FrameUtils; import water.util.Log; import water.util.PrettyPrint; import java.io.IOException; import java.io.InputStream; import java.text.SimpleDateFormat; import java.util.*; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.zip.GZIPInputStream; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import static water.parser.DefaultParserProviders.SVMLight_INFO; public final class ParseDataset { public Job<Frame> _job; private MultiFileParseTask _mfpt; // Access to partially built vectors for cleanup after parser crash // Keys are limited to ByteVec Keys and Frames-of-1-ByteVec Keys public static Frame parse(Key okey, Key... keys) { return parse(null, okey, keys); } public static Frame parse(int[] skippedColumns, Key okey, Key... keys) { return parse(okey,keys,true, false, ParseSetup.GUESS_HEADER,skippedColumns); } public static Frame parse(Key okey, Key[] keys, boolean deleteOnDone, boolean singleQuote, int checkHeader) { return parse(okey, keys, deleteOnDone, singleQuote, checkHeader, null); } // Guess setup from inspecting the first Key only, then parse. // Suitable for e.g. testing setups, where the data is known to be sane. // NOT suitable for random user input! public static Frame parse(Key okey, Key[] keys, boolean deleteOnDone, boolean singleQuote, int checkHeader, int[] skippedColumns) { ParseSetup guessParseSetup = ParseSetup.guessSetup(keys, singleQuote, checkHeader); if (skippedColumns!=null) { guessParseSetup.setSkippedColumns(skippedColumns); guessParseSetup.setParseColumnIndices(guessParseSetup.getNumberColumns(), skippedColumns); } return parse(okey,keys,deleteOnDone,guessParseSetup); } public static Frame parse(Key okey, Key[] keys, boolean deleteOnDone, ParseSetup globalSetup) { return parse(okey,keys,deleteOnDone,globalSetup,true)._job.get(); } public static ParseDataset parse(Key okey, Key[] keys, boolean deleteOnDone, ParseSetup globalSetup, boolean blocking) { ParseDataset pds = forkParseDataset(okey, keys, globalSetup, deleteOnDone); if( blocking ) pds._job.get(); return pds; } // Allow both ByteVec keys and Frame-of-1-ByteVec static ByteVec getByteVec(Key key) { Iced ice = DKV.getGet(key); if(ice == null) throw new H2OIllegalArgumentException("Missing data","Did not find any data under key " + key); return (ByteVec)(ice instanceof ByteVec ? 
ice : ((Frame)ice).vecs()[0]); } static String [] getColumnNames(int ncols, String[] colNames) { if(colNames == null) { // no names, generate colNames = new String[ncols]; for(int i=0; i < ncols; i++ ) colNames[i] = "C" + Integer.toString(i+1); } else { // some or all names exist, fill in blanks HashSet<String> nameSet = new HashSet<>(Arrays.asList(colNames)); colNames = Arrays.copyOf(colNames, ncols); for(int i=0; i < ncols; i++ ) { if (colNames[i] == null || colNames[i].equals("")) { String tmp = "C" + Integer.toString(i+1); while (nameSet.contains(tmp)) // keep building name until unique tmp = tmp + tmp; colNames[i] = tmp; } } } return colNames; } public static Job forkParseSVMLight(final Key<Frame> dest, final Key [] keys, final ParseSetup setup) { int nchunks = 0; Vec v = null; // set the parse chunk size for files for( int i = 0; i < keys.length; ++i ) { Iced ice = DKV.getGet(keys[i]); if(ice instanceof FileVec) { if(i == 0) v = ((FileVec) ice); ((FileVec) ice).setChunkSize(setup._chunk_size); nchunks += ((FileVec) ice).nChunks(); Log.info("Parse chunk size " + setup._chunk_size); } else if(ice instanceof Frame && ((Frame)ice).vec(0) instanceof FileVec) { if(i == 0) v = ((Frame)ice).vec(0); ((FileVec) ((Frame) ice).vec(0)).setChunkSize((Frame) ice, setup._chunk_size); nchunks += (((Frame) ice).vec(0)).nChunks(); Log.info("Parse chunk size " + setup._chunk_size); } } final VectorGroup vg = v.group(); final ParseDataset pds = new ParseDataset(dest); new Frame(pds._job._result,new String[0],new Vec[0]).delete_and_lock(pds._job); // Write-Lock BEFORE returning return pds._job.start(new H2OCountedCompleter() { @Override public void compute2() { ParseDataset.parseAllKeys(pds,keys,setup,true); tryComplete(); } },nchunks); } /** * The entry-point for data set parsing. * * @param dest name for destination key * @param keys input keys * @param parseSetup a generic parser setup * @param deleteOnDone delete input data when finished * @return a new parse job */ public static ParseDataset forkParseDataset(final Key<Frame> dest, final Key[] keys, final ParseSetup parseSetup, boolean deleteOnDone) { // Get a parser specific setup // FIXME: ParseSetup should be separated into two classes - one for using via Rest API as user setup // and another as an internal parser setup to drive parsing. final ParseSetup setup = parseSetup.getFinalSetup(keys, parseSetup); HashSet<String> conflictingNames = setup.checkDupColumnNames(); for( String x : conflictingNames ) if ( x != null && !x.equals("")) throw new IllegalArgumentException("Found duplicate column name "+x); // Some quick sanity checks: no overwriting your input key, and a resource check. 
long totalParseSize=0; for( int i=0; i<keys.length; i++ ) { Key k = keys[i]; if( dest.equals(k) ) throw new IllegalArgumentException("Destination key "+dest+" must be different from all sources"); if( deleteOnDone ) for( int j=i+1; j<keys.length; j++ ) if( k==keys[j] ) throw new IllegalArgumentException("Source key "+k+" appears twice, deleteOnDone must be false"); // estimate total size in bytes totalParseSize += getByteVec(k).length(); } Log.info("Total file size: "+ PrettyPrint.bytes(totalParseSize)); // no need to set this for ORC, it is already done: if (!setup.getParseType().name().contains("ORC")) { for( int i = 0; i < keys.length; ++i ) { Iced ice = DKV.getGet(keys[i]); // set the parse chunk size for files if (ice instanceof FileVec) { ((FileVec) ice).setChunkSize(setup._chunk_size); Log.info("Parse chunk size " + setup._chunk_size); } else if (ice instanceof Frame && ((Frame) ice).vec(0) instanceof FileVec) { ((FileVec) ((Frame) ice).vec(0)).setChunkSize((Frame) ice, setup._chunk_size); Log.info("Parse chunk size " + setup._chunk_size); } } } else Log.info("Orc Parse chunk sizes may be different across files"); long memsz = H2O.CLOUD.free_mem(); if( totalParseSize > memsz*4 ) throw new IllegalArgumentException("Total input file size of "+PrettyPrint.bytes(totalParseSize)+" is much larger than total cluster memory of "+PrettyPrint.bytes(memsz)+", please use either a larger cluster or smaller data."); // Fire off the parse ParseDataset pds = new ParseDataset(dest); new Frame(pds._job._result,new String[0],new Vec[0]).delete_and_lock(pds._job); // Write-Lock BEFORE returning for( Key k : keys ) Lockable.read_lock(k,pds._job); // Read-Lock BEFORE returning ParserFJTask fjt = new ParserFJTask(pds, keys, setup, deleteOnDone); // Fire off background parse pds._job.start(fjt, totalParseSize); return pds; } // Setup a private background parse job private ParseDataset(Key<Frame> dest) { _job = new Job<>(dest, Frame.class.getName(), "Parse"); } // ------------------------------- // Simple internal class doing background parsing, with trackable Job status public static class ParserFJTask extends water.H2O.H2OCountedCompleter { final ParseDataset _pds; final Key[] _keys; final ParseSetup _setup; final boolean _deleteOnDone; public ParserFJTask( ParseDataset pds, Key[] keys, ParseSetup setup, boolean deleteOnDone) { _pds = pds; _keys = keys; _setup = setup; _deleteOnDone = deleteOnDone; } @Override public void compute2() { parseAllKeys(_pds, _keys, _setup, _deleteOnDone); tryComplete(); } // Took a crash/NPE somewhere in the parser. Attempt cleanup. @Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller){ parseCleanup(); // Can get called many tims return true; } @Override public void onCompletion(CountedCompleter caller) { if( _pds._job.stop_requested() ) parseCleanup(); _pds._mfpt = null; } private void parseCleanup() { assert !_pds._job.isStopped(); // Job still running till job.onExCompletion returns Futures fs = new Futures(); // Find & remove all partially-built output vecs & chunks. // Since this is racily called, perhaps multiple times, read _mfpt only exactly once. MultiFileParseTask mfpt = _pds._mfpt; _pds._mfpt = null; // Read once, test for null once. if (mfpt != null) mfpt.onExceptionCleanup(fs); // Assume the input is corrupt - or already partially deleted after // parsing. Nuke it all - no partial Vecs lying around. 
for (Key k : _keys) Keyed.remove(k, fs, true); Keyed.remove(_pds._job._result, fs, true); fs.blockForPending(); } } private static class CategoricalUpdateMap extends Iced { final int [][] map; public CategoricalUpdateMap(int[][] map){this.map = map;} } // -------------------------------------------------------------------------- // Top-level parser driver private static ParseDataset parseAllKeys(ParseDataset pds, Key[] fkeys, ParseSetup setup, boolean deleteOnDone) { final Job<Frame> job = pds._job; assert setup._number_columns > 0; if( setup._column_names != null && ( (setup._column_names.length == 0) || (setup._column_names.length == 1 && setup._column_names[0].isEmpty())) ) setup._column_names = null; // // FIXME: annoyingly front end sends column names as String[] {""} even if setup returned null if(setup._na_strings != null && setup._na_strings.length != setup._number_columns) setup._na_strings = null; if( fkeys.length == 0) { job.stop(); return pds; } job.update(0, "Ingesting files."); VectorGroup vg = getByteVec(fkeys[0]).group(); MultiFileParseTask mfpt = pds._mfpt = new MultiFileParseTask(vg,setup,job._key,fkeys,deleteOnDone); mfpt.doAll(fkeys); Log.trace("Done ingesting files."); if( job.stop_requested() ) return pds; final AppendableVec[] avs = mfpt.vecs(); // Calculate categorical domain // Filter down to columns with some categoricals int n = 0; int parseCols = setup._parse_columns_indices.length; boolean sameParseColumns = setup._number_columns == parseCols; // _number_columns represent number of columns parsed String[] parse_column_names; byte[] parse_column_types; int colNumbers = setup._column_types==null?setup._number_columns:setup._column_types.length;; if (setup._column_names == null) { setup._column_names = getColumnNames(colNumbers, null); } boolean typesSameParseColumns = colNumbers==parseCols; boolean namesSameparseColumns = setup._column_names.length==parseCols; if (sameParseColumns) { parse_column_names = setup._column_names; } else { parse_column_names = new String[parseCols]; parse_column_types = new byte[parseCols]; for (int cindex = 0; cindex < parseCols; cindex++) { parse_column_names[cindex] = namesSameparseColumns?setup._column_names[cindex]: setup._column_names[setup._parse_columns_indices[cindex]]; parse_column_types[cindex] = typesSameParseColumns?setup._column_types[cindex]: setup._column_types[setup._parse_columns_indices[cindex]]; } setup._column_types=parse_column_types; } setup._column_names = getColumnNames(avs.length, parse_column_names); int[] ecols2 = new int[parseCols]; for (int i = 0; i < parseCols; i++) { if (avs[i].get_type() == Vec.T_CAT) // Intended type is categorical (even though no domain has been set)? ecols2[n++] = i; } final int[] ecols = Arrays.copyOf(ecols2, n); // skipped columns are excluded already Frame fr; ParseFinalizer finalizer = ParseFinalizer.get(setup); // If we have any, go gather unified categorical domains if( n > 0 ) { if (!setup.getParseType().isDomainProvided) { // Domains are not provided via setup we need to collect them job.update(0, "Collecting categorical domains across nodes."); { GatherCategoricalDomainsTask gcdt = new GatherCategoricalDomainsTask(mfpt._cKey, ecols, mfpt._parseSetup._parse_columns_indices).doAllNodes(); //Test domains for excessive length. 
List<String> offendingColNames = new ArrayList<>(); for (int i = 0; i < ecols.length; i++) { if (gcdt.getDomainLength(i) < Categorical.MAX_CATEGORICAL_COUNT) { if( gcdt.getDomainLength(i)==0 ) avs[ecols[i]].setBad(); // The all-NA column else avs[ecols[i]].setDomain(gcdt.getDomain(i)); } else offendingColNames.add(setup._column_names[ecols[i]]); } if (offendingColNames.size() > 0) throw new H2OParseException("Exceeded categorical limit on columns "+ offendingColNames+". " + "Consider reparsing these columns as a string or skip parsing the offending columns by setting" + " the skipped_columns list in Python/R/Java APIs."); } Log.trace("Done collecting categorical domains across nodes."); } else { // Ignore offending domains for (int i = 0; i < ecols.length; i++) { avs[ecols[i]].setDomain(setup._domains[ecols[i]]); } } job.update(0, "Compressing data."); fr = finalizer.finalize(job, AppendableVec.closeAll(avs), setup, mfpt._fileChunkOffsets); fr.update(job); Log.trace("Done compressing data."); if (!setup.getParseType().isDomainProvided) { // Update categoricals to the globally agreed numbering Vec[] evecs = new Vec[ecols.length]; for( int i = 0; i < evecs.length; ++i ) evecs[i] = fr.vecs()[ecols[i]]; job.update(0, "Unifying categorical domains across nodes."); { // new CreateParse2GlobalCategoricalMaps(mfpt._cKey).doAll(evecs); // Using Dtask since it starts and returns faster than an MRTask CreateParse2GlobalCategoricalMaps[] fcdt = new CreateParse2GlobalCategoricalMaps[H2O.CLOUD.size()]; RPC[] rpcs = new RPC[H2O.CLOUD.size()]; for (int i = 0; i < fcdt.length; i++){ H2ONode[] nodes = H2O.CLOUD.members(); fcdt[i] = new CreateParse2GlobalCategoricalMaps(mfpt._cKey, fr._key, ecols, mfpt._parseSetup._parse_columns_indices); rpcs[i] = new RPC<>(nodes[i], fcdt[i]).call(); } for (RPC rpc : rpcs) rpc.get(); new UpdateCategoricalChunksTask(mfpt._cKey, mfpt._chunk2ParseNodeMap).doAll(evecs); MultiFileParseTask._categoricals.remove(mfpt._cKey); } Log.trace("Done unifying categoricals across nodes."); } } else { // No categoricals case job.update(0,"Compressing data."); fr = finalizer.finalize(job, AppendableVec.closeAll(avs), setup, mfpt._fileChunkOffsets); Log.trace("Done closing all Vecs."); } // Check for job cancellation if ( job.stop_requested() ) return pds; // SVMLight is sparse format, there may be missing chunks with all 0s, fill them in if (setup._parse_type.equals(SVMLight_INFO)) new SVFTask(fr).doAllNodes(); // Check for job cancellation if ( job.stop_requested() ) return pds; ParseWriter.ParseErr [] errs = ArrayUtils.append(setup.errs(),mfpt._errors); if(errs.length > 0) { // compute global line numbers for warnings/errs HashMap<String, Integer> fileChunkOffsets = new HashMap<>(); for (int i = 0; i < mfpt._fileChunkOffsets.length; ++i) fileChunkOffsets.put(fkeys[i].toString(), mfpt._fileChunkOffsets[i]); long[] espc = fr.anyVec().espc(); for (int i = 0; i < errs.length; ++i) { if(fileChunkOffsets.containsKey(errs[i]._file)) { int espcOff = fileChunkOffsets.get(errs[i]._file); errs[i]._gLineNum = espc[espcOff + errs[i]._cidx] + errs[i]._lineNum; errs[i]._lineNum = errs[i]._gLineNum - espc[espcOff]; } } SortedSet<ParseWriter.ParseErr> s = new TreeSet<>(new Comparator<ParseWriter.ParseErr>() { @Override public int compare(ParseWriter.ParseErr o1, ParseWriter.ParseErr o2) { long res = o1._gLineNum - o2._gLineNum; if (res == 0) res = o1._byteOffset - o2._byteOffset; if (res == 0) return o1._err.compareTo(o2._err); return (int) res < 0 ? 
-1 : 1; } }); Collections.addAll(s, errs); String[] warns = new String[s.size()]; int i = 0; for (ParseWriter.ParseErr err : s) Log.warn(warns[i++] = err.toString()); job.setWarnings(warns); } job.update(0,"Calculating data summary."); logParseResults(fr); if (setup.getForceColTypes()) { String parseType = setup.getParseType().name(); String[] originalColumnTypes = "PARQUET".equals(parseType) ? setup.getParquetColumnTypes() : setup.getOrigColumnTypes(); final int[] skippedColumns = setup.getSkippedColumns(); String[] newColumnTypes; if (skippedColumns != null) { // need to remove column types of skipped columns Set<Integer> skippedColIndices = Arrays.stream(skippedColumns).boxed().collect(Collectors.toSet()); newColumnTypes = IntStream.range(0, originalColumnTypes.length).filter(x -> !(skippedColIndices.contains(x))).mapToObj(x -> originalColumnTypes[x]).toArray(String[]::new); } else { newColumnTypes = originalColumnTypes; } if (newColumnTypes != null) { if ("PARQUET".equals(parseType)) // force change the column types specified by user forceChangeColumnTypesParquet(fr, newColumnTypes); else forceChangeColumnTypes(fr, newColumnTypes); } } // Release the frame for overwriting fr.update(job); Frame fr2 = DKV.getGet(fr._key); assert fr2._names.length == fr2.numCols(); fr.unlock(job); // Remove CSV files from H2O memory if( deleteOnDone ) for( Key k : fkeys ) { DKV.remove(k); assert DKV.get(k) == null : "Input key " + k + " not deleted during parse"; } return pds; } public static void forceChangeColumnTypesParquet(Frame fr, String[] columnTypes) { int numCols = columnTypes.length; for (int index=0; index<numCols; index++) { switch (columnTypes[index]) { case "INT32": case "INT64": if (!fr.vec(index).isInt() && !fr.vec(index).isBad()) fr.replace((index), fr.vec(index).toIntegerVec()); break; case "FLOAT": case "DOUBLE": if (fr.vec(index).isInt() && !fr.vec(index).isBad()) fr.vec(index).asDouble(); break; default: break; // no change for other types } } } private static void forceChangeColumnTypes(Frame fr, String[] columnTypes) { int numCols = columnTypes.length; for (int index=0; index<numCols; index++) { switch (columnTypes[index]) { case "int": case "long": if (!fr.vec(index).isInt() && !fr.vec(index).isBad()) fr.replace((index), fr.vec(index).toIntegerVec()).remove(); break; case "float": case "double": case "real": if (fr.vec(index).isInt() && !fr.vec(index).isBad()) fr.vec(index).asDouble(); break; default: break; // no conversion for other data types. 
} } } private static class CreateParse2GlobalCategoricalMaps extends DTask<CreateParse2GlobalCategoricalMaps> { private final Key _parseCatMapsKey; private final Key _frKey; private final int[] _ecol; private final int[] _parseColumns; private CreateParse2GlobalCategoricalMaps(Key parseCatMapsKey, Key key, int[] ecol, int[] parseColumns) { _parseCatMapsKey = parseCatMapsKey; _frKey = key; _ecol = ecol; // contains the categoricals column indices only _parseColumns = parseColumns; } @Override public void compute2() { Frame _fr = DKV.getGet(_frKey); // does not contain skipped columns // get the node local category->ordinal maps for each column from initial parse pass if( !MultiFileParseTask._categoricals.containsKey(_parseCatMapsKey) ) { tryComplete(); return; } final Categorical[] parseCatMaps = MultiFileParseTask._categoricals.get(_parseCatMapsKey); // include skipped columns int[][] _nodeOrdMaps = new int[_ecol.length][]; // create old_ordinal->new_ordinal map for each cat column for (int eColIdx = 0; eColIdx < _ecol.length; eColIdx++) { int colIdx = _parseColumns[_ecol[eColIdx]]; if (parseCatMaps[colIdx].size() != 0) { _nodeOrdMaps[eColIdx] = MemoryManager.malloc4(parseCatMaps[colIdx].maxId() + 1); Arrays.fill(_nodeOrdMaps[eColIdx], -1); //Bulk String->BufferedString conversion is slightly faster, but consumes memory final BufferedString[] unifiedDomain = _fr.vec(_ecol[eColIdx]).isCategorical()? BufferedString.toBufferedString(_fr.vec(_ecol[eColIdx]).domain()):new BufferedString[0]; //final String[] unifiedDomain = _fr.vec(colIdx).domain(); for (int i = 0; i < unifiedDomain.length; i++) { //final BufferedString cat = new BufferedString(unifiedDomain[i]); if (parseCatMaps[colIdx].containsKey(unifiedDomain[i])) { _nodeOrdMaps[eColIdx][parseCatMaps[colIdx].getTokenId(unifiedDomain[i])] = i; } } } else { Log.debug("Column " + colIdx + " was marked as categorical but categorical map is empty!"); } } // Store the local->global ordinal maps in DKV by node parse categorical key and node index DKV.put(Key.make(_parseCatMapsKey.toString() + "parseCatMapNode" + H2O.SELF.index()), new CategoricalUpdateMap(_nodeOrdMaps)); tryComplete(); } } // -------------------------------------------------------------------------- /** Task to update categorical (categorical) values to match the global numbering scheme. * Performs update in place so that values originally numbered using * node-local unordered numbering will be numbered using global numbering. 
* @author tomasnykodym */ private static class UpdateCategoricalChunksTask extends MRTask<UpdateCategoricalChunksTask> { private final Key _parseCatMapsKey; private final int [] _chunk2ParseNodeMap; private UpdateCategoricalChunksTask(Key parseCatMapsKey, int[] chunk2ParseNodeMap) { _parseCatMapsKey = parseCatMapsKey; _chunk2ParseNodeMap = chunk2ParseNodeMap; } @Override public void map(Chunk [] chks){ CategoricalUpdateMap temp = DKV.getGet(Key.make(_parseCatMapsKey.toString() + "parseCatMapNode" + _chunk2ParseNodeMap[chks[0].cidx()])); if ( temp == null || temp.map == null) throw new H2OIllegalValueException("Missing categorical update map",this); int[][] _parse2GlobalCatMaps = temp.map; //update the chunk with the new map final int cidx = chks[0].cidx(); for(int i = 0; i < chks.length; ++i) { Chunk chk = chks[i]; if (!(chk instanceof CStrChunk)) { for( int j = 0; j < chk._len; ++j){ if( chk.isNA(j) )continue; final int old = (int) chk.at8(j); if (old < 0 || (_parse2GlobalCatMaps[i] != null && old >= _parse2GlobalCatMaps[i].length)) chk.reportBrokenCategorical(i, j, old, _parse2GlobalCatMaps[i], _fr.vec(i).domain().length); if(_parse2GlobalCatMaps[i] != null && _parse2GlobalCatMaps[i][old] < 0) throw new H2OParseException("Error in unifying categorical values. This is typically " +"caused by unrecognized characters in the data.\n The problem categorical value " +"occurred in the " + PrettyPrint.withOrdinalIndicator(i+1)+ " categorical col, " +PrettyPrint.withOrdinalIndicator(chk.start() + j) +" row."); if (_parse2GlobalCatMaps[i] != null) chk.set(j, _parse2GlobalCatMaps[i][old]); } Log.trace("Updated domains for "+PrettyPrint.withOrdinalIndicator(i+1)+ " categorical column."); } chk.close(cidx, _fs); } } @Override public void postGlobal() { for (int i=0; i < H2O.CLOUD.size(); i++) DKV.remove(Key.make(_parseCatMapsKey.toString() + "parseCatMapNode" + i)); } } private static class GatherCategoricalDomainsTask extends MRTask<GatherCategoricalDomainsTask> { private final Key _k; private final int[] _catColIdxs; private byte[][] _packedDomains; private final int[] _parseColumns; private GatherCategoricalDomainsTask(Key k, int[] ccols, int[] parseColumns) { _k = k; _catColIdxs = ccols; _parseColumns = parseColumns; } @Override public void setupLocal() { if (!MultiFileParseTask._categoricals.containsKey(_k)) return; _packedDomains = new byte[_catColIdxs.length][]; final BufferedString[][] _perColDomains = new BufferedString[_catColIdxs.length][]; final Categorical[] _colCats = MultiFileParseTask._categoricals.get(_k); // still refer to all columns int i = 0; for (int col : _catColIdxs) { _colCats[_parseColumns[col]].convertToUTF8(_parseColumns[col] + 1); _perColDomains[i] = _colCats[_parseColumns[col]].getColumnDomain(); Arrays.sort(_perColDomains[i]); _packedDomains[i] = PackedDomains.pack(_perColDomains[i]); i++; } Log.trace("Done locally collecting domains on each node."); } @Override public void reduce(final GatherCategoricalDomainsTask other) { if (_packedDomains == null) { _packedDomains = other._packedDomains; } else if (other._packedDomains != null) { // merge two packed domains H2OCountedCompleter[] domtasks = new H2OCountedCompleter[_catColIdxs.length]; for (int i = 0; i < _catColIdxs.length; i++) { final int fi = i; domtasks[i] = new H2OCountedCompleter(currThrPriority()) { @Override public void compute2() { _packedDomains[fi] = PackedDomains.merge(_packedDomains[fi], other._packedDomains[fi]); tryComplete(); } }; } ForkJoinTask.invokeAll(domtasks); } Log.trace("Done merging 
domains."); } public int getDomainLength(int colIdx) { return _packedDomains == null ? 0 : PackedDomains.sizeOf(_packedDomains[colIdx]); } public String[] getDomain(int colIdx) { return _packedDomains == null ? null : PackedDomains.unpackToStrings(_packedDomains[colIdx]); } } // -------------------------------------------------------------------------- // Run once on all nodes; fill in missing zero chunks private static class SVFTask extends MRTask<SVFTask> { private final Frame _f; private SVFTask( Frame f ) { _f = f; } @Override public void setupLocal() { if( _f.numCols() == 0 ) return; Vec v0 = _f.anyVec(); ArrayList<RecursiveAction> rs = new ArrayList<RecursiveAction>(); for( int i = 0; i < v0.nChunks(); ++i ) { if( !v0.chunkKey(i).home() ) continue; final int fi = i; rs.add(new RecursiveAction() { @Override protected void compute() { // First find the nrows as the # rows of non-missing chunks; done on // locally-homed chunks only - to keep the data distribution. int nlines = 0; for( Vec vec : _f.vecs() ) { Value val = Value.STORE_get(vec.chunkKey(fi)); // Local-get only if( val != null ) { nlines = ((Chunk)val.get())._len; break; } } final int fnlines = nlines; // Now fill in appropriate-sized zero chunks for(int j = 0; j < _f.numCols(); ++j) { Vec vec = _f.vec(j); Key k = vec.chunkKey(fi); Value val = Value.STORE_get(k); // Local-get only if( val == null ) // Missing? Fill in w/zero chunk H2O.putIfMatch(k, new Value(k, new C0LChunk(0, fnlines)), null); } } }); } ForkJoinTask.invokeAll(rs); } @Override public void reduce( SVFTask drt ) {} } // -------------------------------------------------------------------------- // We want to do a standard MRTask with a collection of file-keys (so the // files are parsed in parallel across the cluster), but we want to throttle // the parallelism on each node. private static class MultiFileParseTask extends MRTask<MultiFileParseTask> { private final ParseSetup _parseSetup; // The expected column layout private final VectorGroup _vg; // vector group of the target dataset private final int _vecIdStart; // Start of available vector keys // Shared against all concurrent unrelated parses, a map to the node-local // categorical lists for each concurrent parse. private static NonBlockingHashMap<Key, Categorical[]> _categoricals = new NonBlockingHashMap<>(); // The Key used to sort out *this* parse's Categorical[] private final Key _cKey = Key.make(); // Eagerly delete Big Data private final boolean _deleteOnDone; // Mapping from Chunk# to node index holding the initial category mappings. // It is either self for all the non-parallel parses, or the Chunk-home for parallel parses. private int[] _chunk2ParseNodeMap; // Job Key, to unlock & remove raw parsed data; to report progress private final Key<Job> _jobKey; // A mapping of Key+ByteVec to rolling total Chunk counts. private final int[] _fileChunkOffsets; // OUTPUT fields: FVecParseWriter[] _dout; int _reservedKeys; private ParseWriter.ParseErr[] _errors = new ParseWriter.ParseErr[0]; MultiFileParseTask(VectorGroup vg, ParseSetup setup, Key<Job> jobKey, Key[] fkeys, boolean deleteOnDone ) { _vg = vg; _parseSetup = setup; _vecIdStart = _vg.reserveKeys(_reservedKeys = _parseSetup._parse_type.equals(SVMLight_INFO) ? 100000000 : setup._number_columns); _deleteOnDone = deleteOnDone; _jobKey = jobKey; // A mapping of Key+ByteVec to rolling total Chunk counts. 
_fileChunkOffsets = new int[fkeys.length]; int len = 0; for( int i = 0; i < fkeys.length; ++i ) { _fileChunkOffsets[i] = len; len += getByteVec(fkeys[i]).nChunks(); } // Mapping from Chunk# to cluster-node-number _chunk2ParseNodeMap = MemoryManager.malloc4(len); Arrays.fill(_chunk2ParseNodeMap, -1); } private AppendableVec [] _vecs; @Override public void postGlobal(){ Log.trace("Begin file parse cleanup."); // Compress nulls out of _dout array int n=0; for( int i=0; i<_dout.length; i++ ) if( _dout[i] != null ) _dout[n++] = _dout[i]; if( n < _dout.length ) _dout = Arrays.copyOf(_dout,n); // Fast path: only one Vec result, so never needs to have his Chunks renumbered if(_dout.length == 1) { _vecs = _dout[0]._vecs; return; } int nchunks = 0; // Count chunks across all Vecs int nCols = 0; // SVMLight special: find max columns for( FVecParseWriter dout : _dout ) { nchunks += dout._vecs[0]._tmp_espc.length; nCols = Math.max(dout._vecs.length,nCols); } // One Big Happy Shared ESPC long[] espc = MemoryManager.malloc8(nchunks); // AppendableVecs that are sized across the sum of all files. // Preallocated a bunch of Keys, but if we didn't get enough (for very // wide SVMLight) we need to get more here. if( nCols > _reservedKeys ) throw H2O.unimpl(); AppendableVec[] res = new AppendableVec[nCols]; if(_parseSetup._parse_type.equals(SVMLight_INFO)) { _parseSetup._number_columns = res.length; _parseSetup._column_types = new byte[res.length]; Arrays.fill(_parseSetup._column_types,Vec.T_NUM); } boolean columnsSkipped = nCols<_parseSetup._number_columns; for(int i = 0; i < res.length; ++i) { byte columnTypes = columnsSkipped ? _parseSetup._column_types[_parseSetup._parse_columns_indices[i]] : _parseSetup._column_types[i]; int vecIDStartPI = columnsSkipped?(_vecIdStart+_parseSetup._parse_columns_indices[i]):_vecIdStart + i; res[i] = new AppendableVec(_vg.vecKey(vecIDStartPI), espc, columnTypes, 0); } // Load the global ESPC from the file-local ESPCs for( FVecParseWriter fvpw : _dout ) { AppendableVec[] avs = fvpw._vecs; long[] file_local_espc = avs[0]._tmp_espc; // Quick assert that all partial AVs in each DOUT are sharing a common chunkOff, and common Vec Keys for( int j = 0; j < avs.length; ++j ) { assert res[j]._key.equals(avs[j]._key); assert avs[0]._chunkOff == avs[j]._chunkOff; assert file_local_espc == avs[j]._tmp_espc || Arrays.equals(file_local_espc,avs[j]._tmp_espc); } System.arraycopy(file_local_espc, 0, espc, avs[0]._chunkOff, file_local_espc.length); } _vecs = res; Log.trace("Finished file parse cleanup."); } private AppendableVec[] vecs(){ return _vecs; } @Override public void setupLocal() { _dout = new FVecParseWriter[_keys.length]; } // Fetch out the node-local Categorical[] using _cKey and _categoricals hashtable private static Categorical[] categoricals(Key cKey, int ncols) { Categorical[] categoricals = _categoricals.get(cKey); if( categoricals != null ) return categoricals; categoricals = new Categorical[ncols]; for( int i = 0; i < categoricals.length; ++i ) categoricals[i] = new Categorical(); _categoricals.putIfAbsent(cKey, categoricals); return _categoricals.get(cKey); // Re-get incase lost insertion race } // Flag all chunk categoricals as being on local (self) private void chunksAreLocal( Vec vec, int chunkStartIdx, Key key ) { for(int i = 0; i < vec.nChunks(); ++i) _chunk2ParseNodeMap[chunkStartIdx + i] = H2O.SELF.index(); // For Big Data, must delete data as eagerly as possible. 
Iced ice = DKV.get(key).get(); if( ice==vec ) { if(_deleteOnDone) vec.remove(); } else { Frame fr = (Frame)ice; if(_deleteOnDone) fr.delete(_jobKey,new Futures(), true).blockForPending(); else if( fr._key != null ) fr.unlock(_jobKey); } } private FVecParseWriter makeDout(ParseSetup localSetup, int chunkOff, int nchunks) { AppendableVec [] avs = new AppendableVec[localSetup._number_columns]; final long [] espc = MemoryManager.malloc8(nchunks); final byte[] ctypes = localSetup._column_types; // SVMLight only uses numeric types, sparsely represented as a null for(int i = 0; i < avs.length; ++i) avs[i] = new AppendableVec(_vg.vecKey(i + _vecIdStart), espc, ctypes==null ? /*SVMLight*/Vec.T_NUM : ctypes[i], chunkOff); return localSetup._parse_type.equals(SVMLight_INFO) ? new SVMLightFVecParseWriter(_vg, _vecIdStart,chunkOff, _parseSetup._chunk_size, avs, _parseSetup._parse_columns_indices, _jobKey) : new FVecParseWriter(_vg, chunkOff, categoricals(_cKey, localSetup._number_columns), localSetup._column_types, _parseSetup._chunk_size, avs, _parseSetup._parse_columns_indices, _jobKey); } // Called once per file @Override public void map( Key key ) { if( _jobKey.get().stop_requested() ) return; // FIXME: refactor parser setup to be configurable via parser object ParseSetup localSetup = (ParseSetup) _parseSetup.clone(); ByteVec vec = getByteVec(key); final int chunkStartIdx = _fileChunkOffsets[_lo]; Log.trace("Begin a map stage of a file parse with start index " + chunkStartIdx + "."); DecryptionTool decryptionTool = _parseSetup.getDecryptionTool(); final byte[] zips = decryptionTool.decryptFirstBytes(vec.getFirstBytes()); ZipUtil.Compression cpr = ZipUtil.guessCompressionMethod(zips); if (localSetup._check_header == ParseSetup.HAS_HEADER) { //check for header on local file byte[] bits = ZipUtil.unzipBytes(zips, cpr, localSetup._chunk_size); localSetup._check_header = localSetup.parser(_jobKey).fileHasHeader(bits, localSetup); } // Parse the file try { switch( cpr ) { case NONE: ParserInfo.ParseMethod pm = _parseSetup.parseMethod(_keys.length, vec); Log.info("Key " + key + " will be parsed using method " + pm + "."); if(pm == ParserInfo.ParseMethod.DistributedParse) { new DistributedParse(_vg, localSetup, _vecIdStart, chunkStartIdx, this, key, vec.nChunks()).dfork(vec).getResult(false); for( int i = 0; i < vec.nChunks(); ++i ) _chunk2ParseNodeMap[chunkStartIdx + i] = vec.chunkKey(i).home_node().index(); } else if(pm == ParserInfo.ParseMethod.StreamParse || pm == ParserInfo.ParseMethod.SequentialParse){ localSetup = ParserService.INSTANCE.getByInfo(localSetup._parse_type).setupLocal(vec,localSetup); Parser p = localSetup.parser(_jobKey); final FVecParseWriter writer = makeDout(localSetup,chunkStartIdx,vec.nChunks()); final ParseWriter dout; if (pm == ParserInfo.ParseMethod.StreamParse) { try (InputStream bvs = vec.openStream(_jobKey)) { dout = p.streamParse(decryptionTool.decryptInputStream(bvs), writer); } } else { // pm == ParserInfo.ParseMethod.SequentialParse dout = p.sequentialParse(vec, writer); } _dout[_lo] = ((FVecParseWriter) dout).close(_fs); _errors = _dout[_lo].removeErrors(); chunksAreLocal(vec,chunkStartIdx,key); } else throw H2O.unimpl(); break; case ZIP: { localSetup = ParserService.INSTANCE.getByInfo(localSetup._parse_type).setupLocal(vec,localSetup); // Zipped file; no parallel decompression; try (InputStream bvs = vec.openStream(_jobKey); InputStream dec = decryptionTool.decryptInputStream(bvs); ZipInputStream zis = new ZipInputStream(dec)) { if (ZipUtil.isZipDirectory(key)) { // 
file is a zip if multiple files zis.getNextEntry(); // first ZipEntry describes the directory } ZipEntry ze = zis.getNextEntry(); // Get the *FIRST* entry // There is at least one entry in zip file and it is not a directory. if (ze != null && !ze.isDirectory()) _dout[_lo] = streamParse(zis, localSetup, makeDout(localSetup, chunkStartIdx, vec.nChunks()), bvs); } _errors = _dout[_lo].removeErrors(); chunksAreLocal(vec,chunkStartIdx,key); break; } case GZIP: { localSetup = ParserService.INSTANCE.getByInfo(localSetup._parse_type).setupLocal(vec,localSetup); try (InputStream bvs = vec.openStream(_jobKey); InputStream dec = decryptionTool.decryptInputStream(bvs); GZIPInputStream gzis = new GZIPInputStream(dec)) { // gzipped file; no parallel decompression _dout[_lo] = streamParse(gzis, localSetup, makeDout(localSetup, chunkStartIdx, vec.nChunks()), bvs); } _errors = _dout[_lo].removeErrors(); // set this node as the one which processed all the chunks chunksAreLocal(vec,chunkStartIdx,key); break; } case ZSTD: { localSetup = ParserService.INSTANCE.getByInfo(localSetup._parse_type).setupLocal(vec, localSetup); try (InputStream bvs = vec.openStream(_jobKey); InputStream dec = decryptionTool.decryptInputStream(bvs); ZstdInputStream zstdIs = new ZstdInputStream(dec)) { _dout[_lo] = streamParse(zstdIs, localSetup, makeDout(localSetup, chunkStartIdx, vec.nChunks()), bvs); } _errors = _dout[_lo].removeErrors(); chunksAreLocal(vec, chunkStartIdx, key); break; } } Log.trace("Finished a map stage of a file parse with start index "+chunkStartIdx+"."); } catch( IOException ioe ) { throw new RuntimeException(ioe); } catch (H2OParseException pe0) { // Rebuild identical exception and stack trace, but add key to msg throw pe0.resetMsg(pe0.getMessage()+" for "+key); } } // Reduce: combine errors from across files. // Roll-up other meta data @Override public void reduce( MultiFileParseTask mfpt ) { assert this != mfpt; Log.trace("Begin a reduce stage of a file parse."); // Collect & combine columns across files if( _dout == null ) _dout = mfpt._dout; else if(_dout != mfpt._dout) _dout = ArrayUtils.append(_dout,mfpt._dout); if( _chunk2ParseNodeMap == null ) _chunk2ParseNodeMap = mfpt._chunk2ParseNodeMap; else if(_chunk2ParseNodeMap != mfpt._chunk2ParseNodeMap) { // we're sharing global array! for( int i = 0; i < _chunk2ParseNodeMap.length; ++i ) { if( _chunk2ParseNodeMap[i] == -1 ) _chunk2ParseNodeMap[i] = mfpt._chunk2ParseNodeMap[i]; else assert mfpt._chunk2ParseNodeMap[i] == -1 : Arrays.toString(_chunk2ParseNodeMap) + " :: " + Arrays.toString(mfpt._chunk2ParseNodeMap); } } if(_errors == null) _errors = mfpt._errors; else if(_errors.length < 20) { _errors = ArrayUtils.append(_errors, mfpt._errors); if(_errors.length > 20) _errors = Arrays.copyOf(_errors,20); } Log.trace("Finished a reduce stage of a file parse."); } // ------------------------------------------------------------------------ // Zipped file; no parallel decompression; decompress into local chunks, // parse local chunks; distribute chunks later. private FVecParseWriter streamParse(final InputStream is, final ParseSetup localSetup,FVecParseWriter dout, InputStream bvs) throws IOException { // All output into a fresh pile of NewChunks, one per column Parser p = localSetup.parser(_jobKey); // assume 2x inflation rate if(localSetup._parse_type.isParallelParseSupported()) p.streamParseZip(is, dout, bvs); else p.streamParse(is,dout); // Parse all internal "chunks", until we drain the zip-stream dry. Not // real chunks, just flipping between 32K buffers. 
Fills up the single // very large NewChunk. dout.close(_fs); return dout; } // ------------------------------------------------------------------------ private static class DistributedParse extends MRTask<DistributedParse> { private ParseSetup _setup; private final int _vecIdStart; private final int _startChunkIdx; // for multifile parse, offset of the first chunk in the final dataset private final VectorGroup _vg; private FVecParseWriter _dout; private final Key _cKey; // Parse-local-categoricals key private final Key<Job> _jobKey; private transient final MultiFileParseTask _outerMFPT; private transient final Key _srckey; // Source/text file to delete on done private transient NonBlockingSetInt _visited; private transient long [] _espc; final int _nchunks; DistributedParse(VectorGroup vg, ParseSetup setup, int vecIdstart, int startChunkIdx, MultiFileParseTask mfpt, Key srckey, int nchunks) { super(null); _vg = vg; _setup = setup; _vecIdStart = vecIdstart; _startChunkIdx = startChunkIdx; _outerMFPT = mfpt; _cKey = mfpt._cKey; _jobKey = mfpt._jobKey; _srckey = srckey; _nchunks = nchunks; } @Override public void setupLocal(){ super.setupLocal(); _visited = new NonBlockingSetInt(); _espc = MemoryManager.malloc8(_nchunks); _setup = ParserService.INSTANCE.getByInfo(_setup._parse_type).setupLocal(_fr.anyVec(),_setup); } @Override public void map( Chunk in ) { if( _jobKey.get().stop_requested() ) throw new Job.JobCancelledException(); AppendableVec [] avs = new AppendableVec[_setup._parse_columns_indices.length]; boolean notShrunkColumns = _setup._parse_columns_indices.length==_setup._number_columns; for (int i = 0; i < avs.length; ++i) if (_setup._column_types == null) // SVMLight which does not support skip columns anyway avs[i] = new AppendableVec(_vg.vecKey(_vecIdStart + i), _espc, Vec.T_NUM, _startChunkIdx); else avs[i] = notShrunkColumns? 
new AppendableVec(_vg.vecKey(_vecIdStart + i), _espc, _setup._column_types[i], _startChunkIdx) :new AppendableVec(_vg.vecKey(_vecIdStart + _setup._parse_columns_indices[i]), _espc, _setup._column_types[_setup._parse_columns_indices[i]], _startChunkIdx); // Break out the input & output vectors before the parse loop FVecParseReader din = new FVecParseReader(in); FVecParseWriter dout; // Get a parser Parser p = _setup.parser(_jobKey); switch(_setup._parse_type.name()) { case "ARFF": case "CSV": case "PARQUET": Categorical [] categoricals = categoricals(_cKey, _setup._number_columns); dout = new FVecParseWriter(_vg,_startChunkIdx + in.cidx(), categoricals, _setup._column_types, _setup._chunk_size, avs, _setup._parse_columns_indices, _jobKey); //TODO: use _setup._domains instead of categoricals break; case "SVMLight": dout = new SVMLightFVecParseWriter(_vg, _vecIdStart, in.cidx() + _startChunkIdx, _setup._chunk_size, avs, _setup._parse_columns_indices, _jobKey); break; case "ORC": // setup special case for ORC Categorical [] orc_categoricals = categoricals(_cKey, _setup._number_columns); dout = new FVecParseWriter(_vg, in.cidx() + _startChunkIdx, orc_categoricals, _setup._column_types, _setup._chunk_size, avs, _setup._parse_columns_indices, _jobKey); break; default: // FIXME: should not be default and creation strategy should be forwarded to ParserProvider dout = new FVecParseWriter(_vg, in.cidx() + _startChunkIdx, null, _setup._column_types, _setup._chunk_size, avs, _setup._parse_columns_indices, _jobKey); break; } if ((_setup.getParseType().name().toLowerCase().equals("svmlight") || (_setup.getParseType().name().toLowerCase().equals("avro") )) && ((_setup.getSkippedColumns() != null) && (_setup.getSkippedColumns().length >0))) throw new H2OIllegalArgumentException("Parser: skipped_columns are not supported for " + "SVMlight or Avro parsers."); if (_setup.getSkippedColumns() !=null && ((_setup.get_parse_columns_indices()==null) || (_setup.get_parse_columns_indices().length==0))) throw new H2OIllegalArgumentException("Parser: all columns in the file are skipped and no H2OFrame" + " can be returned."); // Need this to send error message to R p.parseChunk(in.cidx(), din, dout); (_dout = dout).close(_fs); Job.update(in._len, _jobKey); // Record bytes parsed // remove parsed data right away freeMem(in); } /** * This marks parsed byteVec chunks as ready to be freed. If this is the second * time a chunk has been marked, it is freed. The reason two marks are required * is that each chunk parse typically needs to read the remaining bytes of the * current row from the next chunk. Thus each task typically touches two chunks. 
* * @param in - chunk to be marked and possibly freed */ private void freeMem(Chunk in) { int cidx = in.cidx(); for(int i=0; i < 2; i++) { // iterate over this chunk and the next one cidx += i; if (!_visited.add(cidx)) { // Second visit Value v = Value.STORE_get(in.vec().chunkKey(cidx)); if (v == null || !v.isPersisted()) return; // Not found, or not on disk somewhere v.freePOJO(); // Eagerly toss from memory v.freeMem(); } } } @Override public void reduce(DistributedParse dp) { _dout.reduce(dp._dout); } @Override public void postGlobal() { _outerMFPT._dout[_outerMFPT._lo] = _dout; if(_dout.hasErrors()) { ParseWriter.ParseErr [] errs = _dout.removeErrors(); for(ParseWriter.ParseErr err:errs)err._file = FileVec.getPathForKey(_srckey).toString(); Arrays.sort(errs, new Comparator<ParseWriter.ParseErr>() { @Override public int compare(ParseWriter.ParseErr o1, ParseWriter.ParseErr o2) { return (int)(o1._byteOffset - o2._byteOffset); } }); _outerMFPT._errors = errs; } _dout = null; // Reclaim GC eagerly // For Big Data, must delete data as eagerly as possible. Value val = DKV.get(_srckey); if( val == null ) return; Iced ice = val.get(); if( ice instanceof ByteVec ) { if( _outerMFPT._deleteOnDone) ((ByteVec)ice).remove(); } else { Frame fr = (Frame)ice; if( _outerMFPT._deleteOnDone) fr.delete(_outerMFPT._jobKey,new Futures(), true).blockForPending(); else if( fr._key != null ) fr.unlock(_outerMFPT._jobKey); } } } // Find & remove all partially built output chunks & vecs Futures onExceptionCleanup(Futures fs) { int nchunks = _chunk2ParseNodeMap.length; int ncols = _parseSetup._number_columns; for( int i = 0; i < ncols; ++i ) { Key vkey = _vg.vecKey(_vecIdStart + i); Keyed.remove(vkey, fs, true); for( int c = 0; c < nchunks; ++c ) DKV.remove(Vec.chunkKey(vkey,c),fs); } cancel(true); return fs; } } // ------------------------------------------------------------------------ // Log information about the dataset we just parsed. public static void logParseResults(Frame fr) { long numRows = fr.anyVec().length(); Log.info("Parse result for " + fr._key + " (" + Long.toString(numRows) + " rows, "+Integer.toString(fr.numCols())+" columns):"); // get all rollups started in parallell, otherwise this takes ages! 
Futures fs = new Futures(); Vec[] vecArr = fr.vecs(); for(Vec v:vecArr) v.startRollupStats(fs); fs.blockForPending(); int namelen = 0; for (String s : fr.names()) namelen = Math.max(namelen, s.length()); String format = " %"+namelen+"s %7s %12.12s %12.12s %12.12s %12.12s %11s %8s %6s"; Log.info(String.format(format, "ColV2", "type", "min", "max", "mean", "sigma", "NAs", "constant", "cardinality")); SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); for( int i = 0; i < vecArr.length; i++ ) { Vec v = vecArr[i]; boolean isCategorical = v.isCategorical(); boolean isConstant = v.isConst(); String CStr = String.format("%"+namelen+"s:", fr.names()[i]); String typeStr; String minStr; String maxStr; String meanStr=""; String sigmaStr=""; switch( v.get_type() ) { case Vec.T_BAD : typeStr = "all_NA" ; minStr = ""; maxStr = ""; break; case Vec.T_UUID: typeStr = "UUID" ; minStr = ""; maxStr = ""; break; case Vec.T_STR : typeStr = "string" ; minStr = ""; maxStr = ""; break; case Vec.T_NUM : typeStr = "numeric"; minStr = String.format("%g", v.min()); maxStr = String.format("%g", v.max()); meanStr = String.format("%g", v.mean()); sigmaStr = String.format("%g", v.sigma()); break; case Vec.T_CAT : typeStr = "factor" ; minStr = v.factor(0); maxStr = v.factor(v.cardinality()-1); break; case Vec.T_TIME: typeStr = "time" ; minStr = sdf.format(v.min()); maxStr = sdf.format(v.max()); break; default: throw H2O.unimpl(); } long numNAs = v.naCnt(); String naStr = (numNAs > 0) ? String.format("%d", numNAs) : ""; String isConstantStr = isConstant ? "constant" : ""; String numLevelsStr = isCategorical ? String.format("%d", v.domain().length) : ""; boolean launchedWithHadoopJar = H2O.ARGS.launchedWithHadoopJar(); boolean printLogSeparatorToStdout = false; boolean printColumnToStdout; { // Print information to stdout for this many leading columns. final int MAX_HEAD_TO_PRINT_ON_STDOUT = 10; // Print information to stdout for this many trailing columns. final int MAX_TAIL_TO_PRINT_ON_STDOUT = 10; if (launchedWithHadoopJar) { printColumnToStdout = true; } else if (vecArr.length <= (MAX_HEAD_TO_PRINT_ON_STDOUT + MAX_TAIL_TO_PRINT_ON_STDOUT)) { // For small numbers of columns, print them all. printColumnToStdout = true; } else if (i < MAX_HEAD_TO_PRINT_ON_STDOUT) { printColumnToStdout = true; } else if (i == MAX_HEAD_TO_PRINT_ON_STDOUT) { printLogSeparatorToStdout = true; printColumnToStdout = false; } else if ((i + MAX_TAIL_TO_PRINT_ON_STDOUT) < vecArr.length) { printColumnToStdout = false; } else { printColumnToStdout = true; } } if (printLogSeparatorToStdout) { Log.info("Additional column information only sent to log file..."); } String s = String.format(format, CStr, typeStr, minStr, maxStr, meanStr, sigmaStr, naStr, isConstantStr, numLevelsStr); if (printColumnToStdout) { Log.info(s); } else { Log.trace(s); } } Log.info(FrameUtils.chunkSummary(fr).toString()); } public static class H2OParseException extends RuntimeException { public H2OParseException(String msg){super(msg);} public H2OParseException(String msg, Throwable cause){super(msg,cause);} public H2OParseException(Throwable cause){super(cause);} public H2OParseException resetMsg(String msg) { H2OParseException pe1 = new H2OParseException(msg,getCause()); pe1.setStackTrace(getStackTrace()); return pe1; } } }
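A large part of parseAllKeys is devoted to unifying categorical levels: every node parses with its own String-to-ordinal map, GatherCategoricalDomainsTask merges the sorted per-node domains into one global domain, and CreateParse2GlobalCategoricalMaps plus UpdateCategoricalChunksTask rewrite the stored local ordinals through a local-to-global map. The self-contained sketch below (hypothetical names, single JVM) shows only that renumbering idea, not the distributed tasks themselves.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeSet;

class CategoricalUnifySketch {
  public static void main(String[] args) {
    // Two nodes saw different subsets of the levels and numbered them in arrival order.
    String[] nodeA = {"red", "blue"};   // local ids 0,1 on node A
    String[] nodeB = {"green", "red"};  // local ids 0,1 on node B

    // Global domain: merged, sorted, duplicates removed.
    TreeSet<String> merged = new TreeSet<>(Arrays.asList(nodeA));
    merged.addAll(Arrays.asList(nodeB));
    String[] global = merged.toArray(new String[0]); // [blue, green, red]
    Map<String, Integer> globalId = new HashMap<>();
    for (int i = 0; i < global.length; i++) globalId.put(global[i], i);

    // Per-node remap table: localOrdinal -> globalOrdinal.
    int[] remapA = new int[nodeA.length];
    for (int i = 0; i < nodeA.length; i++) remapA[i] = globalId.get(nodeA[i]);

    // A chunk written on node A with local ids gets rewritten in place.
    int[] chunkOnA = {0, 1, 0}; // red, blue, red in node-local numbering
    for (int j = 0; j < chunkOnA.length; j++) chunkOnA[j] = remapA[chunkOnA[j]];
    System.out.println(Arrays.toString(global) + " -> " + Arrays.toString(chunkOnA)); // [2, 0, 2]
  }
}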
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/ParseFinalizer.java
package water.parser;

import water.Job;
import water.fvec.Frame;
import water.fvec.Vec;

public abstract class ParseFinalizer {

  public abstract Frame finalize(Job<Frame> job, Vec[] parsedVecs, ParseSetup setup, int[] fileChunkOffsets);

  private static final ParseFinalizer DEFAULT = new ParseFinalizer() {
    @Override
    public Frame finalize(Job<Frame> job, Vec[] parsedVecs, ParseSetup setup, int[] fileChunkOffsets) {
      return new Frame(job._result, setup._column_names, parsedVecs);
    }
  };

  public static ParseFinalizer get(ParseSetup setup) {
    if (setup._synthetic_column_names != null) {
      return new SyntheticColumnGenerator();
    } else {
      return DEFAULT;
    }
  }
}
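ParseFinalizer.get picks between a shared stateless DEFAULT instance and a SyntheticColumnGenerator depending on whether the setup carries synthetic column names. The tiny sketch below shows the same choose-a-strategy-with-a-default shape using hypothetical names only; it does not touch the Frame API.

abstract class FinalizerSketch {
  abstract String finish(String parsed);

  // Shared, stateless default: just hand back the parsed result.
  private static final FinalizerSketch DEFAULT = new FinalizerSketch() {
    @Override String finish(String parsed) { return parsed; }
  };

  static FinalizerSketch get(boolean wantsSyntheticColumns) {
    return wantsSyntheticColumns
        ? new FinalizerSketch() { @Override String finish(String parsed) { return parsed + " + synthetic cols"; } }
        : DEFAULT;
  }

  public static void main(String[] args) {
    System.out.println(get(false).finish("frame"));
    System.out.println(get(true).finish("frame"));
  }
}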
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/ParseReader.java
package water.parser;

/** Manage bulk streaming input data to the parser.  Sometimes the data comes
 *  from parallel raw byte file reads, with speculative line starts.
 *  Sometimes the data comes from an InputStream - probably a GZIP stream.
 */
public interface ParseReader {
  // Get another chunk of byte data
  byte[] getChunkData( int cidx );
  int getChunkDataStart( int cidx );
  void setChunkDataStart( int cidx, int offset );
  long getGlobalByteOffset();
}
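ParseReader abstracts the byte source as indexable chunks plus a mutable per-chunk data start, which lets a chunk parser skip the partial row already consumed by its predecessor. The toy below re-declares an equivalent interface locally so it compiles on its own and backs it with a single in-memory byte array split into fixed-size chunks; it is a sketch of the contract, not an implementation used by H2O.

import java.util.Arrays;

class InMemoryChunkReader {
  interface Reader {
    byte[] getChunkData(int cidx);
    int getChunkDataStart(int cidx);
    void setChunkDataStart(int cidx, int offset);
    long getGlobalByteOffset();
  }

  static Reader of(final byte[] data, final int chunkSize) {
    final int nChunks = (data.length + chunkSize - 1) / chunkSize;
    final int[] starts = new int[nChunks]; // speculative line starts, adjustable per chunk
    return new Reader() {
      public byte[] getChunkData(int cidx) {
        if (cidx < 0 || cidx >= nChunks) return null; // past the end: no more data
        int from = cidx * chunkSize;
        return Arrays.copyOfRange(data, from, Math.min(from + chunkSize, data.length));
      }
      public int getChunkDataStart(int cidx) { return starts[cidx]; }
      public void setChunkDataStart(int cidx, int offset) { starts[cidx] = offset; }
      public long getGlobalByteOffset() { return 0L; } // single in-memory buffer starts at byte 0
    };
  }

  public static void main(String[] args) {
    Reader r = of("a,b\n1,2\n3,4\n".getBytes(), 5);
    System.out.println(new String(r.getChunkData(0)) + "|" + new String(r.getChunkData(1)));
  }
}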
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/ParseSetup.java
package water.parser; import water.*; import water.api.schemas3.ParseSetupV3; import water.exceptions.H2OIllegalArgumentException; import water.fvec.*; import water.util.ArrayUtils; import water.util.FileUtils; import water.util.Log; import water.util.StringUtils; import java.lang.reflect.Field; import java.util.Arrays; import java.util.HashSet; import static water.parser.DefaultParserProviders.*; /** * A generic configuration and base guesser for a parser. */ public class ParseSetup extends Iced { public static final byte GUESS_SEP = -1; public static final int NO_HEADER = -1; public static final int GUESS_HEADER = 0; public static final int HAS_HEADER = 1; public static final int GUESS_COL_CNT = -1; public static final byte DEFAULT_ESCAPE_CHAR = 0; ParserInfo _parse_type; // CSV, XLS, XSLX, SVMLight, Auto, ARFF, ORC byte _separator; // Field separator, usually comma ',' or TAB or space ' ' // Whether or not single-quotes quote a field. E.g. how do we parse: // raw data: 123,'Mally,456,O'Mally // singleQuotes==True ==> 2 columns: 123 and Mally,456,OMally // singleQuotes==False ==> 4 columns: 123 and 'Mally and 456 and O'Mally boolean _single_quotes; int _check_header; // 1st row: 0: guess, +1 header, -1 data int _number_columns; // Columns to parse String[] _column_names; byte[] _column_types; // Column types int[] _skipped_columns; // column indices that are to be skipped String[][] _domains; // Domains for each column (null if numeric) String[][] _na_strings; // Strings for NA in a given column String[][] _data; // First few rows of parsed/tokenized data int[] _parse_columns_indices; // store column indices to be parsed into the final file byte[] _nonDataLineMarkers; boolean _force_col_types = false; // at end of parsing, change column type to users specified ones boolean _tz_adjust_to_local = false; String[] _orig_column_types; // copy over the original column type setup before translating to byte[] String[] _synthetic_column_names; // Columns with constant values to be added to parsed Frame String[][] _synthetic_column_values; // For each imported file contains array of values for each synthetic column byte _synthetic_column_type = Vec.T_STR; // By default, all synthetic columns are treated as strings byte _escapechar = DEFAULT_ESCAPE_CHAR; // One ASCII character used to escape other characters, by default '\' String [] _fileNames = new String[]{"unknown"}; public boolean disableParallelParse; Key<DecryptionTool> _decrypt_tool; public void setFileName(String name) {_fileNames[0] = name;} private ParseWriter.ParseErr[] _errs; public final ParseWriter.ParseErr[] errs() { return _errs;} public void addErrs(ParseWriter.ParseErr... 
errs){ _errs = ArrayUtils.append(_errs,errs); } public int _chunk_size = FileVec.DFLT_CHUNK_SIZE; // Optimal chunk size to be used store values PreviewParseWriter _column_previews = null; public String[] parquetColumnTypes; // internal parameters only public ParseSetup(ParseSetup ps) { this(ps._parse_type, ps._separator, ps._single_quotes, ps._check_header, ps._number_columns, ps._column_names, ps._column_types, ps._domains, ps._na_strings, ps._data, new ParseWriter.ParseErr[0], ps._chunk_size, ps._decrypt_tool, ps._skipped_columns, ps._nonDataLineMarkers, ps._escapechar, ps._tz_adjust_to_local); } public static ParseSetup makeSVMLightSetup(){ return new ParseSetup(SVMLight_INFO, ParseSetup.GUESS_SEP, false,ParseSetup.NO_HEADER,1,null,new byte[]{Vec.T_NUM},null,null,null, new ParseWriter.ParseErr[0], null, false); } // This method was called during guess setup, lot of things are null, like ctypes. // when it is called again, it either contains the guess column types or it will have user defined column types public ParseSetup(ParserInfo parse_type, byte sep, boolean singleQuotes, int checkHeader, int ncols, String[] columnNames, byte[] ctypes, String[][] domains, String[][] naStrings, String[][] data, ParseWriter.ParseErr[] errs, int chunkSize, byte[] nonDataLineMarkers, byte escapeChar, boolean tzAdjustToLocal) { this(parse_type, sep, singleQuotes, checkHeader, ncols, columnNames, ctypes, domains, naStrings, data, errs, chunkSize, null, null, nonDataLineMarkers, escapeChar, tzAdjustToLocal); } public ParseSetup(ParserInfo parse_type, byte sep, boolean singleQuotes, int checkHeader, int ncols, String[] columnNames, byte[] ctypes, String[][] domains, String[][] naStrings, String[][] data, ParseWriter.ParseErr[] errs, int chunkSize, Key<DecryptionTool> decrypt_tool, int[] skipped_columns, byte[] nonDataLineMarkers, byte escapeChar, boolean tzAdjustToLocal) { this(parse_type, sep, singleQuotes, checkHeader, ncols, columnNames, ctypes, domains, naStrings, data, errs, chunkSize, decrypt_tool, skipped_columns, nonDataLineMarkers, escapeChar, false, tzAdjustToLocal); } public ParseSetup(ParserInfo parse_type, byte sep, boolean singleQuotes, int checkHeader, int ncols, String[] columnNames, byte[] ctypes, String[][] domains, String[][] naStrings, String[][] data, ParseWriter.ParseErr[] errs, int chunkSize, Key<DecryptionTool> decrypt_tool, int[] skipped_columns, byte[] nonDataLineMarkers, byte escapeChar, boolean force_col_types, boolean tz_adjust_to_local) { _parse_type = parse_type; _separator = sep; _nonDataLineMarkers = nonDataLineMarkers; _single_quotes = singleQuotes; _check_header = checkHeader; _number_columns = ncols; _column_names = columnNames; _column_types = ctypes; _domains = domains; _na_strings = naStrings; _data = data; _chunk_size = chunkSize; _errs = errs; _decrypt_tool = decrypt_tool; _skipped_columns = skipped_columns; _escapechar = escapeChar; _force_col_types = force_col_types; _tz_adjust_to_local = tz_adjust_to_local; setParseColumnIndices(ncols, _skipped_columns); } public void setParseColumnIndices(int ncols, int[] skipped_columns) { if (skipped_columns != null) { int num_parse_columns = ncols - skipped_columns.length; if (num_parse_columns >= 0) { _parse_columns_indices = new int[num_parse_columns]; int counter = 0; for (int index = 0; index < ncols; index++) { if (!ArrayUtils.contains(skipped_columns, index)) { _parse_columns_indices[counter++] = index; } } } } else if (ncols > 0) { _parse_columns_indices = new int[ncols]; for (int index=0; index < ncols; index++) 
_parse_columns_indices[index] = index; } } public void setSyntheticColumns(String[] names, String[][] valueMapping, byte synthetic_column_type) { _synthetic_column_names = names; _synthetic_column_values = valueMapping; _synthetic_column_type = synthetic_column_type; } public void setParquetColumnTypes(String[] columnTypes) { parquetColumnTypes = columnTypes.clone(); } /** * Create a ParseSetup with parameters from the client. * * Typically used to guide sampling in the data * to verify chosen settings, and fill in missing settings. * * @param ps Parse setup settings from client */ public ParseSetup(ParseSetupV3 ps) { this(ps.parse_type != null ? ParserService.INSTANCE.getByName(ps.parse_type).info() : GUESS_INFO, ps.separator != 0 ? ps.separator : GUESS_SEP, ps.single_quotes, ps.check_header, GUESS_COL_CNT, ps.column_names, strToColumnTypes(ps.column_types), null, ps.na_strings, null, new ParseWriter.ParseErr[0], ps.chunk_size, ps.decrypt_tool != null ? ps.decrypt_tool.key() : null, ps.skipped_columns, ps.custom_non_data_line_markers != null ? ps.custom_non_data_line_markers.getBytes() : null, ps.escapechar, ps.force_col_types, ps.tz_adjust_to_local); this._force_col_types = ps.force_col_types; this._orig_column_types = this._force_col_types ? (ps.column_types == null ? null : ps.column_types.clone()) : null; } /** * Create a ParseSetup with all parameters except chunk size. * <p> * Typically used by file type parsers for returning final valid results * _chunk_size will be set later using results from all files. */ public ParseSetup(ParserInfo parseType, byte sep, boolean singleQuotes, int checkHeader, int ncols, String[] columnNames, byte[] ctypes, String[][] domains, String[][] naStrings, String[][] data, byte[] nonDataLineMarkers, byte escapeChar, boolean tzAdjustToLocal) { this(parseType, sep, singleQuotes, checkHeader, ncols, columnNames, ctypes, domains, naStrings, data, new ParseWriter.ParseErr[0], FileVec.DFLT_CHUNK_SIZE, nonDataLineMarkers, escapeChar, tzAdjustToLocal); } /** * Create a ParseSetup with all parameters except chunk size. * <p> * Typically used by file type parsers for returning final valid results * _chunk_size will be set later using results from all files. 
*/ public ParseSetup(ParserInfo parseType, byte sep, boolean singleQuotes, int checkHeader, int ncols, String[] columnNames, byte[] ctypes, String[][] domains, String[][] naStrings, String[][] data, byte escapeChar, boolean tzAdjustToLocal) { this(parseType, sep, singleQuotes, checkHeader, ncols, columnNames, ctypes, domains, naStrings, data, new ParseWriter.ParseErr[0], FileVec.DFLT_CHUNK_SIZE, null, escapeChar, tzAdjustToLocal); } public ParseSetup(ParserInfo parseType, byte sep, boolean singleQuotes, int checkHeader, int ncols, String[] columnNames, byte[] ctypes, String[][] domains, String[][] naStrings, String[][] data, boolean tzAdjustToLocal) { this(parseType, sep, singleQuotes, checkHeader, ncols, columnNames, ctypes, domains, naStrings, data, new ParseWriter.ParseErr[0], FileVec.DFLT_CHUNK_SIZE, null, ParseSetup.DEFAULT_ESCAPE_CHAR, tzAdjustToLocal); } public ParseSetup(ParserInfo parseType, byte sep, boolean singleQuotes, int checkHeader, int ncols, String[] columnNames, byte[] ctypes, String[][] domains, String[][] naStrings, String[][] data, ParseWriter.ParseErr[] errs, byte[] nonDataLineMarkers, boolean tzAdjustToLocal) { this(parseType, sep, singleQuotes, checkHeader, ncols, columnNames, ctypes, domains, naStrings, data, errs, FileVec.DFLT_CHUNK_SIZE, nonDataLineMarkers, ParseSetup.DEFAULT_ESCAPE_CHAR, tzAdjustToLocal); } public ParseSetup(ParserInfo parseType, byte sep, boolean singleQuotes, int checkHeader, int ncols, String[] columnNames, byte[] ctypes, String[][] domains, String[][] naStrings, String[][] data, ParseWriter.ParseErr[] errs) { this(parseType, sep, singleQuotes, checkHeader, ncols, columnNames, ctypes, domains, naStrings, data, errs, FileVec.DFLT_CHUNK_SIZE, null, ParseSetup.DEFAULT_ESCAPE_CHAR, false); } /** * Create a ParseSetup without any column information * <p> * Typically used by file type parsers for returning final invalid results */ public ParseSetup(ParserInfo parseType, byte sep, boolean singleQuotes, int checkHeader, int ncols, String[][] data, ParseWriter.ParseErr[] errs) { this(parseType, sep, singleQuotes, checkHeader, ncols, null, null, null, null, data, errs, FileVec.DFLT_CHUNK_SIZE, null, ParseSetup.DEFAULT_ESCAPE_CHAR, false); } /** * Create a default ParseSetup * * Used by Ray's schema magic */ public ParseSetup() {} public String[] getColumnNames() { return _column_names; } public int[] getSkippedColumns() { return _skipped_columns; } public int[] get_parse_columns_indices() { return _parse_columns_indices; } public String[][] getData() { return _data; } public String[] getColumnTypeStrings() { String[] types = new String[_column_types.length]; for(int i=0; i< types.length; i++) types[i] = Vec.TYPE_STR[_column_types[i]]; return types; } public String[] getOrigColumnTypes() { return _orig_column_types; } public boolean getForceColTypes() { return _force_col_types; } public boolean gettzAdjustToLocal() { return _tz_adjust_to_local; } public byte[] getColumnTypes() { return _column_types; } public static byte[] strToColumnTypes(String[] strs) { if (strs == null) return null; byte[] types = new byte[strs.length]; for(int i=0; i< types.length;i++) { switch (strs[i].toLowerCase()) { case "unknown": types[i] = Vec.T_BAD; break; case "uuid": types[i] = Vec.T_UUID; break; case "string": types[i] = Vec.T_STR; break; case "float": case "real": case "double": case "int": case "long": case "numeric": types[i] = Vec.T_NUM; break; case "categorical": case "factor": case "enum": types[i] = Vec.T_CAT; break; case "time": types[i] = Vec.T_TIME; break; 
default: types[i] = Vec.T_BAD; throw new H2OIllegalArgumentException("Provided column type "+ strs[i] + " is unknown. Cannot proceed with parse due to invalid argument."); } } return types; } /** This is a single entry-point to create a parser. * * Should be override in subclasses. */ protected Parser parser(Key jobKey) { ParserProvider pp = ParserService.INSTANCE.getByInfo(_parse_type); if (pp != null) { // fill up parquet setup here return pp.createParser(this, jobKey); } throw new H2OIllegalArgumentException("Unknown file type. Parse cannot be completed.", "Attempted to invoke a parser for ParseType:" + _parse_type + ", which doesn't exist."); } /** Return create a final parser-specific setup * for this configuration. * * @param inputKeys inputs * @param demandedSetup setup demanded by a user * * @return a parser specific setup based on demanded setup */ public final ParseSetup getFinalSetup(Key[] inputKeys, ParseSetup demandedSetup) { ParserProvider pp = ParserService.INSTANCE.getByInfo(_parse_type); if (pp != null) { ParseSetup ps = pp.createParserSetup(inputKeys, demandedSetup); if (demandedSetup._decrypt_tool != null) ps._decrypt_tool = demandedSetup._decrypt_tool; ps.setSkippedColumns(demandedSetup.getSkippedColumns()); ps.setParseColumnIndices(demandedSetup.getNumberColumns(), demandedSetup.getSkippedColumns()); // final consistent check between skipped_columns and parse_columns_indices return ps; } throw new H2OIllegalArgumentException("Unknown parser configuration! Configuration=" + this); } public int getNumberColumns() { return _number_columns; } public final DecryptionTool getDecryptionTool() { return DecryptionTool.get(_decrypt_tool); } public final String[] getParquetColumnTypes() { return parquetColumnTypes; } public final ParserInfo.ParseMethod parseMethod(int nfiles, Vec v) { boolean isEncrypted = ! getDecryptionTool().isTransparent(); return _parse_type.parseMethod(nfiles, v.nChunks(), disableParallelParse, isEncrypted); } // Set of duplicated column names HashSet<String> checkDupColumnNames() { HashSet<String> conflictingNames = new HashSet<>(); if( null==_column_names ) return conflictingNames; HashSet<String> uniqueNames = new HashSet<>(); for( String n : _column_names) if( !uniqueNames.add(n) ) conflictingNames.add(n); return conflictingNames; } @Override public String toString() { return _parse_type.toString(); } static boolean allStrings(String [] line){ BufferedString str = new BufferedString(); for( String s : line ) { try { Double.parseDouble(s); return false; // Number in 1st row guesses: No Column Header } catch (NumberFormatException e) { /*Pass - determining if number is possible*/ } str.set(s); if(ParseTime.isTime(str)) return false; if(ParseUUID.isUUID(str)) return false; } return true; } // simple heuristic to determine if we have headers: // return true iff the first line is all strings and second line has at least one number static boolean hasHeader(String[] l1, String[] l2) { return allStrings(l1) && !allStrings(l2); } /** * Used by test harnesses for simple parsing of test data. Presumes * auto-detection for file and separator types. 
* * @param fkeys Keys to input vectors to be parsed * @param singleQuote single quotes quote fields * @param checkHeader check for a header * @return ParseSetup settings from looking at all files */ public static ParseSetup guessSetup(Key[] fkeys, boolean singleQuote, int checkHeader) { return guessSetup(fkeys, new ParseSetup(GUESS_INFO, GUESS_SEP, singleQuote, checkHeader, GUESS_COL_CNT, null, new ParseWriter.ParseErr[0])); } /** * Discover the parse setup needed to correctly parse all files. * This takes a ParseSetup as guidance. Each file is examined * individually and then results merged. If a conflict exists * between any results all files are re-examined using the * best guess from the first examination. * * @param fkeys Keys to input vectors to be parsed * @param userSetup Setup guidance from user * @return ParseSetup settings from looking at all files */ public static ParseSetup guessSetup( Key[] fkeys, ParseSetup userSetup ) { //Guess setup of each file and collect results GuessSetupTsk t = new GuessSetupTsk(userSetup); t.doAll(fkeys).getResult(); //Calc chunk-size // FIXME: should be a parser specific - or at least parser should be able to override defaults Iced ice = DKV.getGet(fkeys[0]); if (ice instanceof Frame && ((Frame) ice).vec(0) instanceof UploadFileVec) { t._gblSetup._chunk_size = FileVec.DFLT_CHUNK_SIZE; } else { t._gblSetup._chunk_size = FileVec.calcOptimalChunkSize(t._totalParseSize, t._gblSetup._number_columns, t._maxLineLength, H2ORuntime.availableProcessors(), H2O.getCloudSize(), false /*use new heuristic*/, true); } return t._gblSetup; } /** * Try to determine the ParseSetup on a file by file basis * and merge results. */ public static class GuessSetupTsk extends MRTask<GuessSetupTsk> { // Input final ParseSetup _userSetup; boolean _empty = true; // Output public ParseSetup _gblSetup; public long _totalParseSize; public long _maxLineLength; String _file; /** * * @param userSetup ParseSetup to guide examination of files */ public GuessSetupTsk(ParseSetup userSetup) { _userSetup = userSetup; } /** * Runs once on each file to guess that file's ParseSetup * * For ByteVecs, UploadFileVecs, compressed files and small files, * the ParseSetup is guessed from a single DFLT_CHUNK_SIZE chunk from * the start of the file. This is because UploadFileVecs and compressed * files don't allow random sampling, small files don't need it, and * ByteVecs tend to be small. * * For larger NSFFileVecs and HDFSFileVecs 1M samples are taken at the * beginning of every 100M, and an additional sample is taken from the * last chunk of the file. The results of these samples are merged * together (and compared for consistency). * * Sampling more than the first bytes is preferred, since large data sets * with sorted columns may have all the same value in their first bytes, * making for poor type guesses. * */ @Override public void map(Key key) { _file = key.toString(); Iced ice = DKV.getGet(key); if(ice == null) throw new H2OIllegalArgumentException("Missing data","Did not find any data under key " + key); ByteVec bv = (ByteVec)(ice instanceof ByteVec ? ice : ((Frame)ice).vecs()[0]); byte[] bits; try { bits = bv.getFirstBytes(); } catch (Exception e) { throw new RuntimeException("This H2O node couldn't read data from '" + _file + "'. " + "Please make sure the file is available on all H2O nodes and/or check the working directories.", e); } Key<DecryptionTool> decryptToolKey = _userSetup._decrypt_tool != null ? 
_userSetup._decrypt_tool : H2O.defaultDecryptionTool(); DecryptionTool decrypt = DKV.getGet(decryptToolKey); if (decrypt != null) { byte[] plainBits = decrypt.decryptFirstBytes(bits); if (plainBits != bits) bits = plainBits; else decryptToolKey = null; } bits = ZipUtil.getFirstUnzippedBytes(bits); // The bits can be null if (bits != null && bits.length > 0) { _empty = false; // get file size // float decompRatio = ZipUtil.decompressionRatio(bv); // if (decompRatio > 1.0) // _totalParseSize += bv.length() * decompRatio; // estimate file size // else // avoid numerical distortion of file size when not compressed // since later calculation of chunk size and later number of chunks do not consider the // compression ratio, we should not do that here either. Quick fix proposed by Tomas. Sleek! _totalParseSize += bv.length(); // Check for supported encodings checkEncoding(bits); // Compute the max line length (to help estimate the number of bytes to read per Parse map) _maxLineLength = maxLineLength(bits); if (_maxLineLength==-1) throw new H2OIllegalArgumentException("The first 4MB of the data don't contain any line breaks. Cannot parse."); // only preview 1 DFLT_CHUNK_SIZE for ByteVecs, UploadFileVecs, compressed, and small files /* if (ice instanceof ByteVec || ((Frame)ice).vecs()[0] instanceof UploadFileVec || bv.length() <= FileVec.DFLT_CHUNK_SIZE || decompRatio > 1.0) { */ try { _gblSetup = guessSetup(bv, bits, _userSetup); _gblSetup._decrypt_tool = decryptToolKey; for(ParseWriter.ParseErr e:_gblSetup._errs) { // e._byteOffset += e._cidx*Parser.StreamData.bufSz; e._cidx = 0; e._file = _file; } } catch (ParseDataset.H2OParseException pse) { throw pse.resetMsg(pse.getMessage()+" for "+key); } /* } else { // file is aun uncompressed NFSFileVec or HDFSFileVec & larger than the DFLT_CHUNK_SIZE FileVec fv = (FileVec) ((Frame) ice).vecs()[0]; // reset chunk size to 1M (uncompressed) int chkSize = (int) ((1<<20) /decompRatio); fv.setChunkSize((Frame) ice, chkSize); // guessSetup from first chunk _gblSetup = guessSetup(fv.getPreviewChunkBytes(0), _userSetup); _userSetup._check_header = -1; // remaining chunks shouldn't check for header _userSetup._parse_type = _gblSetup._parse_type; // or guess parse type //preview 1M data every 100M int numChunks = fv.nChunks(); for (int i=100; i < numChunks;i += 100) { bits = fv.getPreviewChunkBytes(i); if (bits != null) _gblSetup = mergeSetups(_gblSetup, guessSetup(bits, _userSetup)); } // grab sample at end of file (if not done by prev loop) if (numChunks % 100 > 1){ bits = fv.getPreviewChunkBytes(numChunks - 1); if (bits != null) _gblSetup = mergeSetups(_gblSetup, guessSetup(bits, _userSetup)); } // return chunk size to DFLT fv.setChunkSize((Frame) ice, FileVec.DFLT_CHUNK_SIZE); } */ // report if multiple files exist in zip archive /* if (ZipUtil.getFileCount(bv) > 1) { if (_gblSetup._errors != null) _gblSetup._errors = Arrays.copyOf(_gblSetup._errors, _gblSetup._errors.length + 1); else _gblSetup._errors = new String[1]; _gblSetup._errors[_gblSetup._errors.length - 1] = "Only single file zip " + "archives are currently supported, only the first file has been parsed. " + "Remaining files have been ignored."; }*/ } if (_gblSetup==null) throw new RuntimeException("This H2O node couldn't find the file(s) to parse. 
Please check files and/or working directories."); _gblSetup.settzAdjustToLocal(_userSetup.gettzAdjustToLocal()); _gblSetup.setFileName(FileUtils.keyToFileName(key)); } /** * Merges ParseSetup results, conflicts, and errors from several files */ @Override public void reduce(GuessSetupTsk other) { if (other._empty) return; if (_gblSetup == null) { _empty = false; _gblSetup = other._gblSetup; assert (_gblSetup != null); return; } _gblSetup = mergeSetups(_gblSetup, other._gblSetup, _file, other._file); _totalParseSize += other._totalParseSize; _maxLineLength = Math.max(_maxLineLength, other._maxLineLength); } @Override public void postGlobal() { if (_gblSetup._column_previews != null && !_gblSetup._parse_type.equals(ARFF_INFO)) { _gblSetup._column_types = _gblSetup._column_previews.guessTypes(); if (_userSetup._na_strings == null) _gblSetup._na_strings = _gblSetup._column_previews.guessNAStrings(_gblSetup._column_types); else _gblSetup._na_strings = _userSetup._na_strings; } _gblSetup._tz_adjust_to_local = _gblSetup._tz_adjust_to_local || _userSetup._tz_adjust_to_local; // if(_gblSetup._errs != null) for(ParseWriter.ParseErr err:_gblSetup._errs) Log.warn("ParseSetup: " + err.toString()); } private ParseSetup mergeSetups(ParseSetup setupA, ParseSetup setupB, String fileA, String fileB) { // FIXME: have a merge function defined on a specific parser setup (each parser setup is responsible for merge) if (setupA == null) return setupB; if(setupA._parse_type.equals(DefaultParserProviders.SVMLight_INFO) && setupB._parse_type.equals(DefaultParserProviders.SVMLight_INFO)){ // no merging for svm light, all columns are numeric and we take the max of number of columns (it's an estimate anyways) return setupA._number_columns >= setupB._number_columns?setupA:setupB; } ParseSetup mergedSetup = setupA; mergedSetup._tz_adjust_to_local = setupA._tz_adjust_to_local || setupB._tz_adjust_to_local; mergedSetup._check_header = unifyCheckHeader(setupA._check_header, setupB._check_header); mergedSetup._separator = unifyColumnSeparators(setupA._separator, setupB._separator); if (setupA._parse_type.equals(ARFF_INFO) && setupB._parse_type.equals(CSV_INFO)) ;// do nothing parse_type and col_types are already set correctly else if (setupA._parse_type.equals(CSV_INFO) && setupB._parse_type.equals(ARFF_INFO)) { mergedSetup._parse_type = ARFF_INFO; mergedSetup._column_types = setupB._column_types; mergedSetup._nonDataLineMarkers = setupB._nonDataLineMarkers; } else if (setupA.isCompatible(setupB)) { mergedSetup._column_previews = PreviewParseWriter.unifyColumnPreviews(setupA._column_previews, setupB._column_previews); } else throw new ParseDataset.H2OParseException("File type mismatch. Cannot parse files " + setupA.file() + " and " + setupB.file() + " of type " + setupA._parse_type.name() + " and " + setupB._parse_type.name() + " as one dataset."); mergedSetup._column_names = unifyColumnNames(setupA._column_names, setupB._column_names); mergedSetup._number_columns = mergedSetup._parse_type.equals(CSV_INFO) ? 
Math.max(setupA._number_columns,setupB._number_columns):unifyColumnCount(setupA._number_columns, setupB._number_columns,mergedSetup, fileA, fileB); if (mergedSetup._data.length < PreviewParseWriter.MAX_PREVIEW_LINES) { int n = mergedSetup._data.length; int m = Math.min(PreviewParseWriter.MAX_PREVIEW_LINES, n + setupB._data.length - 1); mergedSetup._data = Arrays.copyOf(mergedSetup._data, m); System.arraycopy(setupB._data, 1, mergedSetup._data, n, m - n); } mergedSetup._errs = ArrayUtils.append(setupA._errs,setupB._errs); mergedSetup._fileNames = ArrayUtils.append(setupA._fileNames,setupB._fileNames); if(mergedSetup._errs.length > 20) mergedSetup._errs = Arrays.copyOf(mergedSetup._errs,20); return mergedSetup; } private static int unifyCheckHeader(int chkHdrA, int chkHdrB){ if (chkHdrA == GUESS_HEADER || chkHdrB == GUESS_HEADER) throw new ParseDataset.H2OParseException("Unable to determine header on a file. Not expected."); if (chkHdrA == HAS_HEADER || chkHdrB == HAS_HEADER) return HAS_HEADER; else return NO_HEADER; } private static byte unifyColumnSeparators(byte sepA, byte sepB) { if( sepA == sepB) return sepA; else if (sepA == GUESS_SEP) return sepB; else if (sepB == GUESS_SEP) return sepA; // TODO: Point out which file is problem throw new ParseDataset.H2OParseException("Column separator mismatch. One file seems to use \"" + (char) sepA + "\" and the other uses \"" + (char) sepB + "\"."); } private int unifyColumnCount(int cntA, int cntB, ParseSetup mergedSetup, String fileA, String fileB) { if (cntA == cntB) return cntA; else if (cntA == 0) return cntB; else if (cntB == 0) return cntA; else { // files contain different numbers of columns ParseWriter.ParseErr err = new ParseWriter.ParseErr(); err._err = "Incompatible number of columns, " + cntA + " != " + cntB; err._file = fileA + ", " + fileB; mergedSetup._errs = ArrayUtils.append(mergedSetup._errs,err); return Math.max(cntA,cntB); } } private static String[] unifyColumnNames(String[] namesA, String[] namesB){ if (namesA == null) return namesB; else if (namesB == null) return namesA; else { for (int i = 0; i < namesA.length; i++) { if (i > namesB.length || !namesA[i].equals(namesB[i])) { // TODO improvement: if files match except for blanks, merge? throw new ParseDataset.H2OParseException("Column names do not match between files."); } } return namesA; } } } private String file() { String [] names = _fileNames; if(names.length > 5) names = Arrays.copyOf(names,5); return Arrays.toString(names); } protected boolean isCompatible(ParseSetup setupB) { return _parse_type.equals(setupB._parse_type) && _parse_type.equals(DefaultParserProviders.SVMLight_INFO) || _number_columns == setupB._number_columns; } /** * Guess everything from a single pile-o-bits. Used in tests, or in initial * parser inspections when the user has not told us anything about separators * or headers. 
* * @param bits Initial bytes from a parse source * @return ParseSetup settings from looking at all files */ public static ParseSetup guessSetup(ByteVec bv, byte [] bits, ParseSetup userSetup) { ParserProvider pp = ParserService.INSTANCE.getByInfo(userSetup._parse_type); if (pp != null) { return pp.guessSetup(bv, bits, userSetup.toInitialSetup()); } throw new ParseDataset.H2OParseException("Cannot determine file type."); } /** * Sanitizes a user-provided Parse Setup * @return initial ParseSetup object to be passed to the ParserProvider */ private ParseSetup toInitialSetup() { return new ParseSetup(_parse_type, _separator, _single_quotes, _check_header, GUESS_COL_CNT, _column_names, _column_types, null, null, null, _nonDataLineMarkers, _escapechar, false); } /** * Cleans up the file name to make .hex name * to be used as a destination key. Eliminates * common file extensions, and replaces odd * characters. * * @param n filename to be cleaned * @return cleaned name */ public static String createHexName(String n) { // blahblahblah/myName.ext ==> myName // blahblahblah/myName.csv.ext ==> myName int sep = n.lastIndexOf(java.io.File.separatorChar); if( sep > 0 ) n = n.substring(sep+1); int dot = n.lastIndexOf('.'); while ( dot > 0 && (n.endsWith("zip") || n.endsWith("gz") || n.endsWith("csv") || n.endsWith("xls") || n.endsWith("txt") || n.endsWith("svm") || n.endsWith("orc") || n.endsWith("arff"))) { n = n.substring(0, dot); dot = n.lastIndexOf('.'); } // "2012_somedata" ==> "X2012_somedata" if( !Character.isJavaIdentifierStart(n.charAt(0)) ) n = "X"+n; // "human%Percent" ==> "human_Percent" n = StringUtils.sanitizeIdentifier(n); // "myName" ==> "myName.hex" int i = 0; String res = n + ".hex"; Key k = Key.make(res); // Renumber to handle dup names while(DKV.get(k) != null) k = Key.make(res = n + ++i + ".hex"); return res; } /** * Reject unsupported encodings * * For the curious, this is hardly a complete test, it only catches the * most polite UTF-16 cases. Switch to jChardet or guessEncoding libraries * for more robust solutions. WARNING: not all UTF-16 files * use BOM to indicate their encoding. Even worse, some datasets may be * made from disparate sources, and could used a mix that wouldn't be * detected by this. * * @param bits data to be examined for encoding */ private static final void checkEncoding(byte[] bits) { if (bits.length >= 2) { if ((bits[0] == (byte) 0xff && bits[1] == (byte) 0xfe) /* UTF-16, little endian */ || (bits[0] == (byte) 0xfe && bits[1] == (byte) 0xff) /* UTF-16, big endian */) { throw new ParseDataset.H2OParseException("UTF16 encoding detected, but is not supported."); } } } /** * Compute the longest line length in an array of bytes * @param bytes Array of bytes (containing 0 or more newlines) * @return The longest line length in the given bytes */ private static final int maxLineLength(byte[] bytes) { int start = bytes.length; int max = -1; for(int i = 0; i < bytes.length; ++i){ if(CsvParser.isEOL(bytes[i])){ int delta = i-start+1; max = Math.max(max,delta); start = i+1; } } return Math.max(max,bytes.length-start+1); } /** * Copies the common setup to another object (that is possibly and extension of the base setup). * Note: this method only copies fields directly declared in ParseSetup class, it doesn't handle * fields that are declared in classes derived from ParseSetup. 
* @param setup target setup object * @param <T> class derived from ParseSetup * @return the target setup object (for convenience) */ public <T extends ParseSetup> T copyTo(T setup) { try { for (Field field : ParseSetup.class.getDeclaredFields()) { if (! java.lang.reflect.Modifier.isStatic(field.getModifiers())) field.set(setup, field.get(this)); } return setup; } catch (IllegalAccessException e) { throw new RuntimeException(e); } } /** * Tests whether a given string represents a NA in a given column. * Note: NAs are expected to be made ONLY of ASCII (7-bit) characters, NA constants in unicode won't be recognized. * @param colIdx index of the column * @param str string to be tested for NA * @return true - string is one of the column's NAs, false otherwise */ public boolean isNA(int colIdx, BufferedString str) { if (_na_strings == null || colIdx >= _na_strings.length || _na_strings[colIdx] == null) return false; for (String naStr : _na_strings[colIdx]) if (str.equalsAsciiString(naStr)) return true; return false; } public ParserInfo getParseType() { return _parse_type; } public ParseSetup setParseType(ParserInfo parse_type) { this._parse_type = parse_type; return this; } public ParseSetup setSeparator(byte separator) { this._separator = separator; return this; } public ParseSetup setSingleQuotes(boolean single_quotes) { this._single_quotes = single_quotes; return this; } public ParseSetup setCheckHeader(int check_header) { this._check_header = check_header; return this; } public ParseSetup setNumberColumns(int number_columns) { this._number_columns = number_columns; return this; } public ParseSetup setColumnNames(String[] column_names) { this._column_names = column_names; return this; } public ParseSetup setSkippedColumns(int[] skipped_columns) { this._skipped_columns = skipped_columns; return this; } public ParseSetup setColumnTypes(byte[] column_types) { this._column_types = column_types; return this; } public ParseSetup setOrigColumnTypes(String[] col_types) { this._orig_column_types = col_types; return this; } public ParseSetup setForceColTypes(boolean force_col_types) { this._force_col_types = force_col_types; return this; } public ParseSetup settzAdjustToLocal(boolean tz_adjust_to_local) { this._tz_adjust_to_local = tz_adjust_to_local; return this; } public ParseSetup setDomains(String[][] domains) { this._domains = domains; return this; } public ParseSetup setNAStrings(String[][] na_strings) { this._na_strings = na_strings; return this; } public ParseSetup setChunkSize(int chunk_size) { this._chunk_size = chunk_size; return this; } public ParseSetup setDecryptTool(Key<DecryptionTool> decrypt_tool) { this._decrypt_tool = decrypt_tool; return this; } } // ParseSetup state class
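// --- Editor's illustrative sketch (not part of the original H2O source file above). ---
// A minimal, hedged usage example of the local helpers defined in ParseSetup: strToColumnTypes,
// the hasHeader heuristic and checkDupColumnNames only touch in-memory data, so no running H2O
// cloud is assumed here. The class and method names below (ParseSetupUsageSketch, demo) are
// hypothetical and exist only for illustration.
class ParseSetupUsageSketch {
  static void demo() {
    // Map user-facing type names onto Vec type bytes ("real" becomes T_NUM, "enum" becomes T_CAT).
    byte[] types = ParseSetup.strToColumnTypes(new String[]{"string", "real", "enum"});
    // Header heuristic: true iff the first line is all strings and the second has at least one number.
    boolean header = ParseSetup.hasHeader(new String[]{"id", "age", "city"},
                                          new String[]{"17", "42.5", "Prague"});
    // Duplicate column-name check on a hand-built setup (uses the fluent setter defined above).
    java.util.HashSet<String> dups =
        new ParseSetup().setColumnNames(new String[]{"a", "b", "a"}).checkDupColumnNames();
    System.out.println(java.util.Arrays.toString(types) + " header=" + header + " dups=" + dups);
  }
}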
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/ParseTime.java
package water.parser; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormatter; import org.joda.time.format.DateTimeFormatterBuilder; import water.MRTask; import water.util.Log; import java.util.Iterator; import java.util.Map; import java.util.Set; import java.util.TreeMap; import static water.util.StringUtils.*; public abstract class ParseTime { // Deduce if we are looking at a Date/Time value, or not. // If so, return time as msec since Jan 1, 1970 or Long.MIN_VALUE. // I tried java.util.SimpleDateFormat, but it just throws too many // exceptions, including ParseException, NumberFormatException, and // ArrayIndexOutOfBoundsException... and the Piece de resistance: a // ClassCastException deep in the SimpleDateFormat code: // "sun.util.calendar.Gregorian$Date cannot be cast to sun.util.calendar.JulianCalendar$Date" public static boolean isTime(BufferedString str) { return attemptTimeParse(str) != Long.MIN_VALUE; } private static final byte MMS[][][] = new byte[][][] { {bytesOf("jan"),bytesOf("january")}, {bytesOf("feb"),bytesOf("february")}, {bytesOf("mar"),bytesOf("march")}, {bytesOf("apr"),bytesOf("april")}, {bytesOf("may"),bytesOf("may")}, {bytesOf("jun"),bytesOf("june")}, {bytesOf("jul"),bytesOf("july")}, {bytesOf("aug"),bytesOf("august")}, {bytesOf("sep"),bytesOf("september")}, {bytesOf("oct"),bytesOf("october")}, {bytesOf("nov"),bytesOf("november")}, {bytesOf("dec"),bytesOf("december")} }; public static long attemptTimeParse( BufferedString str ) { try { long t0 = attemptYearFirstTimeParse(str); // "yyyy-MM-dd" and time if present if( t0 != Long.MIN_VALUE ) return t0; long t1 = attemptDayFirstTimeParse1(str); // "dd-MMM-yy" and time if present if( t1 != Long.MIN_VALUE ) return t1; long t2 = attemptYearMonthTimeParse(str); // "yy-MMM", not ambiguous with dd-MMM-yy because of trailing "-yy" if( t2 != Long.MIN_VALUE ) return t2; long t3 = attemptTimeOnlyParse(str); // Time if present, no date if( t3 != Long.MIN_VALUE ) return t3; long t4 = attemptDayFirstTimeParse2(str); // "dd/MM/yy" and time if present; note that this format is ambiguous if( t4 != Long.MIN_VALUE ) return t4; // Cant tell which date: 3/2/10 is } catch( org.joda.time.IllegalFieldValueException | // Not time at all org.joda.time.IllegalInstantException | // Parsed as time, but falls into e.g. a daylight-savings hour hole ArrayIndexOutOfBoundsException e) { } return Long.MIN_VALUE; } // Tries to parse "yyyy-MM[-dd] [HH:mm:ss.SSS aa]" // Tries to parse "yyyyMMdd-HH:mm:ss.SSS aa". In this form the dash and trailing time is required private static long attemptYearFirstTimeParse(BufferedString str) { final byte[] buf = str.getBuffer(); int i=str.getOffset(); final int end = i+str.length(); while( i < end && buf[i] == ' ' ) i++; if ( i < end && buf[i] == '"' ) i++; if( (end-i) < 6 ) return Long.MIN_VALUE; int yyyy=0, MM=0, dd=0; // Parse date yyyy = digit(yyyy,buf[i++]); yyyy = digit(yyyy,buf[i++]); yyyy = digit(yyyy,buf[i++]); yyyy = digit(yyyy,buf[i++]); final boolean dash = buf[i] == '-'; if( dash ) i++; MM = digit(MM,buf[i++]); // note: at this point we need guard every increment of "i" to avoid reaching outside of the buffer MM = i<end && buf[i]!='-' ? digit(MM,buf[i++]) : MM; if( MM < 1 || MM > 12 ) return Long.MIN_VALUE; if( (end-i)>=2 ) { if( dash && buf[i++] != '-' ) return Long.MIN_VALUE; dd = digit(dd, buf[i++]); dd = i < end && buf[i] >= '0' && buf[i] <= '9' ? 
digit(dd, buf[i++]) : dd; if( dd < 1 || dd > 31 ) return Long.MIN_VALUE; } else { if( !dash ) return Long.MIN_VALUE; // yyyyMM is ambiguous with plain numbers dd=1; // yyyy-MM; no day } if( dash ) { // yyyy-MM[-dd] while( i < end && buf[i] == ' ' ) i++; // optional seperator or trailing blanks if( i==end ) return new DateTime(yyyy,MM,dd,0,0,0, getTimezone()).getMillis(); } else { // yyyyMMdd-HH:mm:ss.SSS; dash AND time is now required if( i==end || buf[i++] != '-' ) return Long.MIN_VALUE; } //Parse time return parseTime(buf, i, end, yyyy, MM, dd, false); } // Tries to parse "[dd[-]]MMM[-]yy[yy][:' '][HH:mm:ss.SSS aa]" where MMM is a // text representation of the month (e.g. Jul or July). Day is optional. private static long attemptDayFirstTimeParse1(BufferedString str) { final byte[] buf = str.getBuffer(); int i=str.getOffset(); final int end = i+str.length(); while( i < end && buf[i] == ' ' ) i++; if ( i < end && buf[i] == '"' ) i++; if( (end-i) < 5 ) return Long.MIN_VALUE; int yyyy=0, MM=0, dd=0; // Parse day if( isDigit(buf[i]) ) { dd = digit(dd,buf[i++]); if( isDigit(buf[i]) ) dd = digit(dd,buf[i++]); if( dd < 1 || dd > 31 ) return Long.MIN_VALUE; if( buf[i] == '-' ) i++; } else dd = 1; // No date, assume 1st if( !isChar(buf[i]) ) return Long.MIN_VALUE; // Parse month MM = parseMonth(buf,i,end); if( MM == -1 ) return Long.MIN_VALUE; // No matching month i += (MM>>4); // Skip parsed month bytes MM &= 0xF; // 1-based month in low nybble if( end-i>=1 && buf[i] == '-' ) i++; if( end-i < 2 ) return Long.MIN_VALUE; // Parse year yyyy = digit(yyyy,buf[i++]); // 2-digit year yyyy = digit(yyyy,buf[i++]); if( end-i>=2 && buf[i] != '"' && buf[i] != ' ' && buf[i] != ':') { yyyy = digit(yyyy,buf[i++]); // 4-digit year yyyy = digit(yyyy,buf[i++]); } else { //POSIX 2004 & 2008 says 69-99 -> 1900s, 00-68 -> 2000s yyyy += (yyyy >= 69) ? 1900 : 2000; } while( i<end && buf[i] == ' ' ) i++; if( i<end && buf[i] == '"' ) i++; if( i==end ) return new DateTime(yyyy,MM,dd,0,0,0, getTimezone()).getMillis(); // Parse time if( buf[i] == ':') i++; return parseTime(buf, i, end, yyyy, MM, dd, false); } // Tries to parse "MM/dd/yy[yy][:' '][HH:mm:ss.SSS aa]" where MM is a value // from 1 to 12, and the separator is required. Note that this is ambiguous // and is defaulting to American, not European time. Example: 3/2/10 parses // as March 2, 2010 and NOT February 3, 2010. private static long attemptDayFirstTimeParse2(BufferedString str) { final byte[] buf = str.getBuffer(); int i=str.getOffset(); final int end = i+str.length(); while( i < end && buf[i] == ' ' ) i++; if ( i < end && buf[i] == '"' ) i++; if( (end-i) < 6 ) return Long.MIN_VALUE; int yyyy=0, MM=0, dd=0; // Parse date MM = digit(MM,buf[i++]); if( isDigit(buf[i]) ) MM = digit(MM,buf[i++]); if( MM < 1 || MM > 12 ) return Long.MIN_VALUE; byte sep = buf[i++]; if( sep != '-' && sep != '/' ) return Long.MIN_VALUE; dd = digit(dd,buf[i++]); if( isDigit(buf[i]) ) dd = digit(dd,buf[i++]); if( dd < 1 || dd > 31 ) return Long.MIN_VALUE; if( sep != buf[i++] ) return Long.MIN_VALUE; yyyy = digit(yyyy,buf[i++]); // 2-digit year yyyy = digit(yyyy,buf[i++]); if( end-i>=2 && isDigit(buf[i]) ) { yyyy = digit(yyyy,buf[i++]); // 4-digit year yyyy = digit(yyyy,buf[i++]); } else { //POSIX 2004 & 2008 says 69-99 -> 1900s, 00-68 -> 2000s yyyy += (yyyy >= 69) ? 
1900 : 2000; } while( i<end && buf[i] == ' ' ) i++; if( i<end && buf[i] == '"' ) i++; if( i==end ) return new DateTime(yyyy,MM,dd,0,0,0, getTimezone()).getMillis(); // Parse time if( buf[i] == ':') i++; return parseTime(buf, i, end, yyyy, MM, dd, false); } // Tries to parse "yy-MMM". Note that this is not ambiguous with dd-MMM-yy // which requires a trailing "-yy" year. private static long attemptYearMonthTimeParse(BufferedString str) { final byte[] buf = str.getBuffer(); int i=str.getOffset(); final int end = i+str.length(); while( i < end && buf[i] == ' ' ) i++; if ( i < end && buf[i] == '"' ) i++; if( (end-i) < 6 ) return Long.MIN_VALUE; int yyyy=0, MM=0; // Parse year yyyy = digit(yyyy,buf[i++]); yyyy = digit(yyyy,buf[i++]); if( buf[i++] != '-' ) return Long.MIN_VALUE; yyyy += (yyyy >= 69) ? 1900 : 2000; //POSIX 2004 & 2008 says 69-99 -> 1900s, 00-68 -> 2000s // Parse month MM = parseMonth(buf,i,end); if( MM == -1 ) return Long.MIN_VALUE; // No matching month i += (MM>>4); // Skip parsed month bytes MM &= 0xF; // 1-based month in low nybble while( i < end && buf[i] == ' ' ) i++; if( i==end ) return new DateTime(yyyy,MM,1,0,0,0, getTimezone()).getMillis(); return Long.MIN_VALUE; // Something odd } // Tries to parse time without any date. private static long attemptTimeOnlyParse(BufferedString str) { final byte[] buf = str.getBuffer(); int i=str.getOffset(); final int end = i+str.length(); while( i < end && buf[i] == ' ' ) i++; if ( i < end && buf[i] == '"' ) i++; if( end-i < 5 ) return Long.MIN_VALUE; long t1 = parseTime(buf,i,end,1970,1,1,true); // Unix Epoch dates if( t1 == Long.MIN_VALUE ) return Long.MIN_VALUE; // Remove all TZ info; return bare msec from the morning of the epoch return t1+getTimezone().getOffsetFromLocal(t1); } /** Parse textual (not numeric) month * @param buf - byte buffer containing text to parse * @param i - index of expected start of time string in buffer * @return -1 if failed parse, or (bytes_parsed<<4)|(month). One-based month * is returned in the low nybble. */ private static int parseMonth(byte[] buf, int i, int end) { int MM=0; byte[] MMM = null; OUTER: for( ; MM<MMS.length; MM++ ) { byte[][] mss = MMS[MM]; INNER: for (byte[] ms : mss) { MMM = ms; if (MMM == null) continue; if (i + MMM.length > end) continue INNER; for (int j = 0; j < MMM.length; j++) if (MMM[j] != Character.toLowerCase(buf[i + j])) continue INNER; if (i+MMM.length==end || buf[i + MMM.length] == '-' || isDigit(buf[i + MMM.length])) break OUTER; } } if( MM == MMS.length ) return -1; // No matching month MM++; // 1-based month return (MMM.length<<4)|MM; // Return two values; skip in upper bytes, month in low nybble } /** * Attempts to parse time. Expects at least: * HH:mm:ss where : or . are accepted as delimiters * Additionally the time can contain either 1 or 3 or 9 places for fractions of a second * e.g. HH:mm:ss.SSS or HH:mm:ss.SSSnnnnnn * Note that only millisecond accuracy is stored * Additionally the time can end with AM|PM. * When AM or PM is present, HH must be 1-12. When absent, HH must be 0-23. 
* If the text doesn't fit this format it returns Long.MIN_VALUE to indicate failed parse * * @param buf - byte buffer containing text to parse * @param i - index of expected start of time string in buffer * @param end - index for end of time in buffer * @param yyyy - 4 digit year * @param MM - month of year (1-12) * @param dd - day of of month (1-31) * @return long representing time in currently timezone as milliseconds since UNIX epoch * or Long.MIN_VALUE to represent failed time parse */ private static long parseTime(byte[] buf, int i, int end, int yyyy, int MM, int dd, boolean timeOnly) { int HH =0, mm=0, ss=0, SSS=0, ndots=0; HH = digit(HH,buf[i++]); HH = buf[i]>='0' && buf[i]<= '9' ? digit(HH,buf[i++]) : HH; if(HH < 0 || HH > 23 ) return Long.MIN_VALUE; if( buf[i] != ':' && buf[i] != '.' ) return Long.MIN_VALUE; if( buf[i]=='.' ) ndots++; ++i; mm = digit(mm,buf[i++]); mm = buf[i]>='0' && buf[i]<= '9' ? digit(mm,buf[i++]) : mm; if( mm < 0 || mm > 59 ) return Long.MIN_VALUE; if( i+2 >= buf.length ) return Long.MIN_VALUE; if( buf[i] != ':' && buf[i] != '.' ) return Long.MIN_VALUE; if( buf[i]=='.' ) ndots++; ++i; ss = digit(ss,buf[i++]); ss = buf[i]>='0' && buf[i]<= '9' ? digit(ss,buf[i++]) : ss; if( ss < 0 || ss > 59 ) return Long.MIN_VALUE; if( i<end && (buf[i] == ':' || buf[i] == '.' )) { if( buf[i]=='.' ) ndots++; i++; if( i<end ) SSS = digit(SSS,buf[i++]); if( i<end ) SSS = digit(SSS,buf[i++]); if( i<end ) SSS = digit(SSS,buf[i++]); if( SSS < 0 || SSS > 999 ) return Long.MIN_VALUE; while( i<end && isDigit(buf[i]) ) i++; // skip micros and nanos } if( i<end && buf[i] == '"' ) i++; if( i == end) { if( timeOnly && ndots==3 ) return Long.MIN_VALUE; // Ambiguous: tell 1.2.3.4 apart from an IP address return new DateTime(yyyy, MM, dd, HH, mm, ss, getTimezone()).getMillis() + SSS; } // extract halfday of day, if present if( buf[i] == ' ' ) { ++i; if( i==end ) return new DateTime(yyyy, MM, dd, HH, mm, ss, getTimezone()).getMillis() + SSS; } if( (buf[i] == 'A' || buf[i] == 'P') && buf[i+1] == 'M') { if (HH < 1 || HH > 12) return Long.MIN_VALUE; // convert 1-12 hours into 0-23 if (buf[i] == 'P') // PM if (HH < 12) HH += 12; else // AM if (HH == 12) HH = 0; i += 2; } else return Long.MIN_VALUE; if( i<end && buf[i] == '"' ) i++; if( i<end ) return Long.MIN_VALUE; return new DateTime(yyyy,MM,dd,HH,mm,ss,getTimezone()).getMillis()+SSS; } private static int digit( int x, int c ) { if( x < 0 || c < '0' || c > '9' ) return -1; return x*10+(c-'0'); } private static boolean isDigit(byte b) { return (b >= '0' && b <= '9'); } private static boolean isChar(byte b) { if (b < 'A' || (b >'Z' && b < 'a') || b > 'z') return false; else return true; } private static DateTimeZone _timezone = DateTimeZone.forID("UTC"); /** * Set the Time Zone on the H2O Cloud * * @param tz Timezone * @throws IllegalArgumentException if the timezone(tz) is invalid */ public static void setTimezone(final String tz) { Set<String> idSet = DateTimeZone.getAvailableIDs(); if (idSet.contains(tz)) { new MRTask() { @Override protected void setupLocal() { ParseTime._timezone = DateTimeZone.forID(tz); } }.doAllNodes(); } else { Log.err("Attempted to set unrecognized timezone: "+ tz); throw new IllegalArgumentException("Attempted to set unrecognized timezone: "+ tz); } } public static DateTimeZone getTimezone() { return _timezone == null ? 
DateTimeZone.getDefault() : _timezone; } public static String listTimezones() { DateTimeFormatter offsetFormatter = new DateTimeFormatterBuilder().appendTimeZoneOffset(null, true, 2, 4).toFormatter(); Set<String> idSet = DateTimeZone.getAvailableIDs(); Map<String, String> tzMap = new TreeMap(); Iterator<String> it = idSet.iterator(); String id, cid, offset, key, output; DateTimeZone tz; int i = 0; long millis = System.currentTimeMillis(); // collect canonical and alias IDs into a map while (it.hasNext()) { id = it.next(); tz = DateTimeZone.forID(id); cid = tz.getID(); offset = offsetFormatter.withZone(tz).print(tz.getStandardOffset(millis)); key = offset + " " + cid; if (id == cid) { // Canonical ID if (!tzMap.containsKey(key)) tzMap.put(key, ""); } else {// alias ID if (!tzMap.containsKey(key)) tzMap.put(key, ""); tzMap.put(key, tzMap.get(key) + ", " + id); } } // assemble result output = "StandardOffset CanonicalID, Aliases\n"; for (Map.Entry<String, String> e : tzMap.entrySet()) output += e.getKey() + e.getValue()+"\n"; return output; } }
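// --- Editor's illustrative sketch (not part of the original H2O source file above). ---
// A minimal, hedged example of the time-guessing entry points above. It assumes the class's
// default "UTC" timezone and that joda-time is on the classpath (H2O bundles it). The class
// and method names (ParseTimeUsageSketch, demo) are hypothetical.
class ParseTimeUsageSketch {
  static void demo() {
    BufferedString cell = new BufferedString();
    cell.set("2014-07-04 12:30:00");                       // handled by attemptYearFirstTimeParse
    System.out.println(ParseTime.isTime(cell));            // true
    System.out.println(ParseTime.attemptTimeParse(cell));  // millis since the epoch, in the cloud timezone (UTC here)
    cell.set("3/2/10");                                    // ambiguous; parsed as March 2, 2010 (US order), not Feb 3
    System.out.println(ParseTime.attemptTimeParse(cell) != Long.MIN_VALUE); // true
  }
}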
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/ParseUUID.java
package water.parser; import water.fvec.C16Chunk; import java.util.UUID; /** * Utility class for parsing UUIDs. * * This class creates a hash value of two longs from * a {@link BufferedString} containing a correct UUID. * */ public class ParseUUID { /** * Confirms whether the provided UUID is considered * valid. * * @param str * @return TRUE if str represents a valid UUID */ public static boolean isUUID(BufferedString str) { boolean res; int old = str.getOffset(); attemptUUIDParseLow(str); attemptUUIDParseHigh(str); res = str.getOffset() != -1; str.setOff(old); return res; } /** * Attempts to parse the provided {@link BufferedString} as * a UUID into hash value in two longs. * * Warning: as written, this method does modify the state * of the passed in BufferedString. * * @param str * @return A parsed UUID, or a null if parsing failed. */ public static UUID attemptUUIDParse(BufferedString str) { Long lo = attemptUUIDParseLow(str); Long hi = attemptUUIDParseHigh(str); return (str.getOffset() == -1) ? null : buildUUID(lo, hi); } private static UUID buildUUID(Long lo, Long hi) { return (lo == null || hi == null || (C16Chunk.isNA(lo, hi))) ? null : new UUID(hi, lo); } // -------------------------------- // Parse XXXXXXXX-XXXX-XXXX and return an arbitrary long, or set str.off==-1 // (and return Long.MIN_VALUE but this is a valid long return value). private static Long attemptUUIDParseLow(BufferedString str) { final byte[] buf = str.getBuffer(); int i=str.getOffset(); if( i+36 > buf.length ) return markBad(str); long lo=0; lo = get2(lo,buf,(i+=2)-2); lo = get2(lo,buf,(i+=2)-2); lo = get2(lo,buf,(i+=2)-2); lo = get2(lo,buf,(i+=2)-2); if( buf[i++]!='-' ) return markBad(str); lo = get2(lo,buf,(i+=2)-2); lo = get2(lo,buf,(i+=2)-2); if( buf[i++]!='-' ) return markBad(str); lo = get2(lo,buf,(i+=2)-2); return attemptUUIDParseEnd(str, lo, buf, i); } // Parse -XXXX-XXXXXXXXXXXX and return an arbitrary long, or set str.off==-1 // (and return null). public static Long attemptUUIDParseHigh(BufferedString str) { final byte[] buf = str.getBuffer(); int i=str.getOffset(); if ( i== -1 ) return markBad(str); long hi=0; if( buf[i++]!='-' ) return markBad(str); hi = get2(hi,buf,(i+=2)-2); hi = get2(hi,buf,(i+=2)-2); if( buf[i++]!='-' ) return markBad(str); hi = get2(hi,buf,(i+=2)-2); hi = get2(hi,buf,(i+=2)-2); hi = get2(hi,buf,(i+=2)-2); hi = get2(hi,buf,(i+=2)-2); hi = get2(hi,buf,(i+=2)-2); return attemptUUIDParseEnd(str, hi, buf, i); } private static Long attemptUUIDParseEnd(BufferedString str, long lo, byte[] buf, int i) { // Can never equal MIN_VALUE since only parsed 14 of 16 digits, unless // failed parse already. if( lo == Long.MIN_VALUE ) return markBad(str); // If the last 2 digits are 0x8000 and the first 14 are all 0's then might // legitimately parse MIN_VALUE, need to check for it special. str.setOff(i+2); // Mark as parsed if( lo == 0x80000000000000L && buf[i]=='0' && buf[i+1]=='0' ) return Long.MIN_VALUE; // Valid MIN_VALUE parse // First 14 digits are a random scramble; will never equal MIN_VALUE result // unless we have a failed parse in the last 2 digits lo = get2(lo,buf,i); return (lo == Long.MIN_VALUE || // broken UUID already, OR // too many valid UUID digits (i + 2 < buf.length && hdigit(0, buf[i + 2]) != Long.MIN_VALUE)) ? 
null : lo; } private static long get2( long x, byte[] buf, int i ) { if( x == Long.MIN_VALUE ) return x; x = hdigit(x,buf[i++]); x = hdigit(x,buf[i++]); return x; } private static long hdigit( long x, byte b ) { if( x == Long.MIN_VALUE ) return Long.MIN_VALUE; else if( b >= '0' && b <= '9' ) return (x<<4)+b-'0'; else if( b >= 'A' && b <= 'F' ) return (x<<4)+b-'A'+10; else if( b >= 'a' && b <= 'f' ) return (x<<4)+b-'a'+10; else return Long.MIN_VALUE; } private static Long markBad(BufferedString str) { str.setOff(-1); return null; } }
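// --- Editor's illustrative sketch (not part of the original H2O source file above). ---
// A minimal, hedged example of the UUID recognizer above. Note that attemptUUIDParse advances
// (or invalidates) the BufferedString offset, exactly as its javadoc warns; isUUID restores it.
// The class and method names (ParseUUIDUsageSketch, demo) are hypothetical.
class ParseUUIDUsageSketch {
  static void demo() {
    BufferedString cell = new BufferedString();
    cell.set("123e4567-e89b-12d3-a456-426614174000");
    System.out.println(ParseUUID.isUUID(cell));            // true, offset left untouched
    System.out.println(ParseUUID.attemptUUIDParse(cell));  // a java.util.UUID, or null on a failed parse
    cell.set("not-a-uuid");
    System.out.println(ParseUUID.isUUID(cell));            // false
  }
}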
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/ParseWriter.java
package water.parser; import water.Freezable; import water.Iced; /** Interface for writing results of parsing, accumulating numbers and * strings or handling invalid lines & parse errors. */ public interface ParseWriter extends Freezable { class ParseErr extends Iced implements Comparable<ParseErr>{ @Override public int compareTo(ParseErr o) { long res = _gLineNum - o._gLineNum; if (res == 0) res = _byteOffset - o._byteOffset; if (res == 0) return _err.compareTo(o._err); return res < 0 ? -1 : 1; } public ParseErr(){} public ParseErr(String file, String err) { this(err, 0, -1, -1); _file = file; } public ParseErr(String err, int cidx, long lineNum, long byteOff){ _err = err; _cidx = cidx; _lineNum = lineNum; _byteOffset = byteOff; } // as recorded during parsing String _file = "unknown"; String _err = "unknown"; long _byteOffset = -1; int _cidx = -1; long _lineNum = -1; // filled in at the end (when we know the line-counts) long _gLineNum = -1; public String toString(){ return "ParseError at file " + _file + (_gLineNum == -1?"":" at line " + _lineNum + " ( destination line " + _gLineNum + " )") + (_byteOffset == -1 ? "" : " at byte offset " + _byteOffset) + "; error = \'" + _err + "\'"; } } class UnsupportedTypeOverride extends ParseErr { public UnsupportedTypeOverride(String fileName, String origType, String targetType, String columnName){ super(fileName,"Unsupported type override (" + origType + " -> " + targetType + "). Column " + columnName + " will be parsed as " + origType); } } void setColumnNames(String [] names); // Register a newLine from the parser void newLine(); // True if already forced into a string column (skip number parsing) boolean isString(int colIdx); // Add a number column with given digits & exp void addNumCol(int colIdx, long number, int exp); // Add a number column with given digits & exp void addNumCol(int colIdx, double d); // Add an invalid / missing entry void addInvalidCol(int colIdx); void addNAs(int colIdx, int nrow); // Add a String column void addStrCol( int colIdx, BufferedString str ); // Final rolling back of partial line void rollbackLine(); // ignore the rest of the line (and report the error) void invalidLine(ParseErr err); // report an error (e.g. invalid number) void addError(ParseErr err); void setIsAllASCII(int colIdx, boolean b); boolean hasErrors(); ParseErr [] removeErrors(); long lineNum(); }
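// --- Editor's illustrative sketch (not part of the original H2O source file above). ---
// A minimal, hedged example of how a ParseErr is typically filled in: file/offset data is set
// while parsing and the global line number is patched in later, as the field comment notes.
// The class and method names (ParseWriterErrSketch, demo) are hypothetical.
class ParseWriterErrSketch {
  static void demo() {
    ParseWriter.ParseErr err = new ParseWriter.ParseErr("Invalid number", /*cidx*/ 3, /*lineNum*/ 17, /*byteOff*/ 4096);
    err._file = "train.csv";     // recorded during parsing
    err._gLineNum = 1017;        // filled in at the end, once per-chunk line counts are known
    System.out.println(err);     // formatted by the toString() defined above
  }
}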
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/Parser.java
package water.parser; import water.H2O; import water.Iced; import water.Job; import water.Key; import water.fvec.Vec; import java.io.IOException; import java.io.InputStream; import java.util.Arrays; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import static water.parser.DefaultParserProviders.GUESS_INFO; /** A collection of utility classes for parsing. * * Interfaces: * DataIn - Manage bulk streaming input data to the parser. Sometimes the data * comes from parallel raw byte file reads, with speculative line * starts. Sometimes the data comes from an InputStream - probably a * GZIP stream. * DataOut- Interface for writing results of parsing, accumulating numbers and * strings or handling invalid lines &amp; parse errors. * * static classes: * StreamData - Class implementing DataIn from a Stream (probably a GZIP stream) * InspectDataOut - Class implementing DataOut, on behalf of the GUI, for * parsing &amp; previewing the first few lines &amp; columns of a file. */ public abstract class Parser extends Iced { static final byte CHAR_TAB = '\t'; static final byte CHAR_CR = 13; static final byte CHAR_LF = 10; static final byte CHAR_SPACE = ' '; static final byte CHAR_DOUBLE_QUOTE = '"'; static final byte CHAR_SINGLE_QUOTE = '\''; // State for the CSV & SVMLight Parser's FSAs protected static final byte SKIP_LINE = 0; protected static final byte EXPECT_COND_LF = 1; protected static final byte EOL = 2; protected static final byte TOKEN = 3; protected static final byte COND_QUOTED_TOKEN = 4; protected static final byte NUMBER = 5; protected static final byte NUMBER_SKIP = 6; protected static final byte NUMBER_SKIP_NO_DOT = 7; protected static final byte NUMBER_FRACTION = 8; protected static final byte NUMBER_EXP = 9; protected static final byte NUMBER_EXP_START = 11; protected static final byte NUMBER_END = 12; protected static final byte STRING = 13; protected static final byte COND_QUOTE = 14; protected static final byte SEPARATOR_OR_EOL = 15; protected static final byte WHITESPACE_BEFORE_TOKEN = 16; protected static final byte STRING_END = 17; protected static final byte COND_QUOTED_NUMBER_END = 18; protected static final byte POSSIBLE_EMPTY_LINE = 19; protected static final byte POSSIBLE_CURRENCY = 20; protected static final byte HASHTAG = 35; protected static final byte POSSIBLE_ESCAPED_QUOTE = 36; protected final byte CHAR_DECIMAL_SEP = '.'; protected final byte CHAR_SEPARATOR; protected final byte CHAR_ESCAPE; protected static final long LARGEST_DIGIT_NUMBER = Long.MAX_VALUE/10; protected static boolean isEOL(byte c) { return (c == CHAR_LF) || (c == CHAR_CR); } public boolean[] _keepColumns; protected final ParseSetup _setup; protected final Key<Job> _jobKey; protected Parser( ParseSetup setup, Key<Job> jobKey ) { _setup = setup; CHAR_SEPARATOR = setup._separator; _jobKey = jobKey; CHAR_ESCAPE = setup._escapechar; if (_setup!=null && _setup._number_columns > 0) { _keepColumns = new boolean[_setup._number_columns]; for (int colIdx = 0; colIdx < _setup._number_columns; colIdx++) _keepColumns[colIdx] = true; if (_setup._skipped_columns!=null) { for (int colIdx : _setup._skipped_columns) if (colIdx < _setup._number_columns) _keepColumns[colIdx] = false; else throw new IllegalArgumentException("Skipped column index "+colIdx+" is illegal. 
It exceeds the actual" + " number of columns in your file."); } } } protected int fileHasHeader(byte[] bits, ParseSetup ps) { return ParseSetup.NO_HEADER; } // Parse this one Chunk (in parallel with other Chunks) protected abstract ParseWriter parseChunk(int cidx, final ParseReader din, final ParseWriter dout); // Parse the Vec sequentially writing out one chunk after another protected StreamParseWriter sequentialParse(Vec vec, StreamParseWriter dout) { throw new UnsupportedOperationException("Sequential Parsing is not supported by " + this.getClass().getName()); } protected ParseWriter streamParse( final InputStream is, final StreamParseWriter dout) throws IOException { return streamParseZip(is,dout,is); } /** * This method performs guess setup with each file. If will return true only if the number of columns/separator * found in the current file match that of files parsed earlier. In addition, it will also check for headers * within a file. However, it will only check for headers if the user has included column names in the very * first file. * * @param is * @param dout * @param din * @param cidx * @return * @throws IOException */ private boolean checkFileNHeader(final InputStream is, final StreamParseWriter dout, StreamData din, int cidx) throws IOException { byte[] headerBytes = ZipUtil.unzipForHeader(din.getChunkData(cidx), this._setup._chunk_size); ParseSetup ps = ParseSetup.guessSetup(null, headerBytes, new ParseSetup(GUESS_INFO, ParseSetup.GUESS_SEP, this._setup._single_quotes, ParseSetup.GUESS_HEADER, ParseSetup.GUESS_COL_CNT, null, null)); // check to make sure datasets in file belong to the same dataset // just check for number for number of columns/separator here. Ignore the column type, user can force it if ((this._setup._number_columns != ps._number_columns) || (this._setup._separator != ps._separator)) { String warning = "Your zip file contains a file that belong to another dataset with different " + "number of column or separator. Number of columns for files that have been parsed = "+ this._setup._number_columns + ". Number of columns in new file = "+ps._number_columns+ ". This new file is skipped and not parsed."; dout.addError(new ParseWriter.ParseErr(warning, -1, -1L, -2L)); // something is wrong return false; } else { // assume column names must appear in the first file. If column names appear in first and other // files, they will be recognized. Otherwise, if no column name ever appear in the first file, the other // column names in the other files will not be recognized. if (ps._check_header == ParseSetup.HAS_HEADER) { if (this._setup._column_names != null) { // found header in later files, only incorporate it if the column names are the same as before String[] thisColumnName = this._setup.getColumnNames(); String[] psColumnName = ps.getColumnNames(); Boolean sameColumnNames = true; for (int index = 0; index < this._setup._number_columns; index++) { if (!(thisColumnName[index].equals(psColumnName[index]))) { sameColumnNames = false; break; } } if (sameColumnNames) // only recognize current file header if it has the same column names as previous files this._setup.setCheckHeader(ps._check_header); } } else // should refresh _setup with correct check_header this._setup.setCheckHeader(ps._check_header); } return true; // everything is fine } /** * This method will try to get the next file to be parsed. It will skip over directories if encountered. 
* * @param is * @throws IOException */ private void getNextFile(final InputStream is) throws IOException { if (is instanceof java.util.zip.ZipInputStream) { ZipEntry ze = ((ZipInputStream) is).getNextEntry(); while (ze != null && ze.isDirectory()) ze = ((ZipInputStream) is).getNextEntry(); } } private class StreamInfo { int _zidx; StreamParseWriter _nextChunk; StreamInfo(int zidx, StreamParseWriter nextChunk) { this._zidx = zidx; this._nextChunk = nextChunk; } } /** * This method reads in one zip file. Before reading the file, it will check if the current file has the same * number of columns and separator type as the previous files it has parssed. If they do not match, no file will * be parsed in this case. * * @param is * @param dout * @param bvs * @param nextChunk * @param zidx * @return * @throws IOException */ private StreamInfo readOneFile(final InputStream is, final StreamParseWriter dout, InputStream bvs, StreamParseWriter nextChunk, int zidx, int fileIndex) throws IOException { int cidx = 0; StreamData din = new StreamData(is); // only check header for 2nd file onward since guess setup is already done on first file. if ((fileIndex > 0) && (!checkFileNHeader(is, dout, din, cidx))) // cidx should be the actual column index return new StreamInfo(zidx, nextChunk); // header is bad, quit now int streamAvailable = is.available(); while (streamAvailable > 0) { parseChunk(cidx++, din, nextChunk); // cidx here actually goes and get the right column chunk. streamAvailable = is.available(); // Can (also!) rollover to the next input chunk int xidx = bvs.read(null, 0, 0); // Back-channel read of chunk index if (xidx > zidx) { // Advanced chunk index of underlying ByteVec stream? zidx = xidx; // Record advancing of chunk nextChunk.close(); // Match output chunks to input zipfile chunks if (dout != nextChunk) { dout.reduce(nextChunk); if (_jobKey != null && _jobKey.get().stop_requested()) break; } nextChunk = nextChunk.nextChunk(); } } parseChunk(cidx, din, nextChunk); return new StreamInfo(zidx, nextChunk); } // ------------------------------------------------------------------------ // Zipped file; no parallel decompression; decompress into local chunks, // parse local chunks; distribute chunks later. protected ParseWriter streamParseZip( final InputStream is, final StreamParseWriter dout, InputStream bvs ) throws IOException { // All output into a fresh pile of NewChunks, one per column if (!_setup._parse_type.isParallelParseSupported) throw H2O.unimpl(); StreamParseWriter nextChunk = dout; int zidx = bvs.read(null, 0, 0); // Back-channel read of chunk index assert zidx == 1; int fileIndex = 0; // count files being passed. 0 is first file, 1 is second and so on... 
StreamInfo streamInfo = new StreamInfo(zidx, nextChunk); while (is.available() > 0) { // loop over all files in zip file streamInfo = readOneFile(is, dout, bvs, streamInfo._nextChunk, streamInfo._zidx, fileIndex++); // read one file in // streamInfo = readOneFile(is, dout, bvs, nextChunk, streamInfo._zidx, fileIndex++); // read one file in if (is.available() <= 0) { // done reading one file, get the next one or quit if at the end getNextFile(is); } } streamInfo._nextChunk.close(); bvs.close(); is.close(); if( dout != nextChunk ) dout.reduce(nextChunk); return dout; } final static class ByteAryData implements ParseReader { private final byte [] _bits; public int _off; final long _globalOffset; public ByteAryData(byte [] bits, long globalOffset){ _bits = bits; _globalOffset = globalOffset; } @Override public byte[] getChunkData(int cidx) { return cidx == 0?_bits:null; } @Override public int getChunkDataStart(int cidx) {return -1;} @Override public void setChunkDataStart(int cidx, int offset) { if(cidx == 0) _off = offset; } @Override public long getGlobalByteOffset() {return _globalOffset;} } /** Class implementing DataIns from a Stream (probably a GZIP stream) * Implements a classic double-buffer reader. */ final static class StreamData implements ParseReader { final int bufSz; final transient InputStream _is; private byte[] _bits0; private byte[] _bits1; private int _cidx0=-1, _cidx1=-1; // Chunk #s private int _coff0=-1, _coff1=-1; // Last used byte in a chunk protected StreamData(InputStream is){this(is,64*1024);} protected StreamData(InputStream is, int bufSz){ _is = is; this.bufSz = bufSz; _bits0 = new byte[bufSz]; _bits1 = new byte[bufSz]; } long _gOff; @Override public byte[] getChunkData(int cidx) { if( cidx == _cidx0 ) return _bits0; _gOff = _bits0.length; if( cidx == _cidx1 ) return _bits1; assert cidx==_cidx0+1 || cidx==_cidx1+1; byte[] bits = _cidx0<_cidx1 ? _bits0 : _bits1; _gOff += bits.length; if( _cidx0<_cidx1 ) { _cidx0 = cidx; _coff0 = -1; } else { _cidx1 = cidx; _coff1 = -1; } // Read as much as the buffer will hold int off=0; try { while( off < bits.length ) { int len = _is.read(bits,off,bits.length-off); if( len == -1 ) break; off += len; } assert off == bits.length || _is.available() <= 0; } catch( IOException ioe ) { throw new RuntimeException(ioe); } if( off == bits.length ) return bits; // Final read is short; cache the short-read byte[] bits2 = (off == 0) ? null : Arrays.copyOf(bits,off); if( _cidx0==cidx ) _bits0 = bits2; else _bits1 = bits2; return bits2; } @Override public int getChunkDataStart(int cidx) { if( _cidx0 == cidx ) return _coff0; if( _cidx1 == cidx ) return _coff1; return 0; } @Override public void setChunkDataStart(int cidx, int offset) { if( _cidx0 == cidx ) _coff0 = offset; if( _cidx1 == cidx ) _coff1 = offset; } @Override public long getGlobalByteOffset() { return 0; } } }
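The parseChunk/ParseReader/ParseWriter contract above is what every concrete parser has to honor: pull the raw bytes for a chunk index from the reader, tokenize them, and emit columns and rows into the writer. Below is a minimal sketch of such a subclass; the class name SkeletonParser, its one-string-column-per-line behavior, and its handling of the trailing partial line are illustrative assumptions and not part of h2o-core.

package water.parser;

import water.Key;

// Illustrative sketch only (not in h2o-core): a parser that turns every
// newline-terminated line of a chunk into a single string column.
class SkeletonParser extends Parser {
  SkeletonParser(ParseSetup setup, Key jobKey) { super(setup, jobKey); }

  @Override
  protected ParseWriter parseChunk(int cidx, final ParseReader din, final ParseWriter dout) {
    byte[] bits = din.getChunkData(cidx);
    if (bits == null) return dout;           // no data for this chunk index
    int start = 0;
    for (int i = 0; i < bits.length; i++) {
      if (bits[i] == '\n') {                 // end of line -> emit one row
        dout.addStrCol(0, new BufferedString(new String(bits, start, i - start)));
        dout.newLine();
        start = i + 1;
      }
    }
    // A real parser carries a trailing partial line over into the next chunk
    // (compare StreamData above); this sketch simply drops it.
    return dout;
  }
}

A driver in the same package could feed such a parser an in-memory ByteAryData the same way SVMLightParser.guessSetup does further below.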
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/ParserInfo.java
package water.parser; import water.H2O; import water.Iced; /** * A lightweight handle with basic information about parser. */ public class ParserInfo extends Iced<ParserInfo> { /** Name of parser */ final String name; /** Priority of the parser which is used by guesser */ final int prior; /** Does this parser support parallel parse. */ final boolean isParallelParseSupported; /** Does this parser support stream parse. */ final boolean isStreamParseSupported; /** Does this parser support sequential parse. */ final boolean isSequentialParseSupported; /** Does this parser need post update of vector categoricals. */ final boolean isDomainProvided; public ParserInfo(String name, int prior, boolean isParallelParseSupported, boolean isStreamParseSupported, boolean isSequentialParseSupported, boolean isDomainProvided) { this.name = name; this.prior = prior; this.isParallelParseSupported = isParallelParseSupported; this.isStreamParseSupported = isStreamParseSupported; this.isSequentialParseSupported = isSequentialParseSupported; this.isDomainProvided = isDomainProvided; } public ParserInfo(String name, int prior, boolean isParallelParseSupported, boolean isStreamParseSupported, boolean isDomainProvided) { this(name, prior, isParallelParseSupported, isStreamParseSupported, false, isDomainProvided); } public ParserInfo(String name, int prior, boolean isParallelParseSupported, boolean isDomainProvided) { this(name, prior, isParallelParseSupported, true, false, isDomainProvided); } public ParserInfo(String name, int prior, boolean isParallelParseSupported) { this(name, prior, isParallelParseSupported, false); } /** Get name for this parser */ public String name() { return name; } /** Get order priority for this parser. */ public int priority() { return prior; } // TOO_MANY_KEYS_COUNT specifies when to disable parallel parse. We want to cover a scenario when // we are working with too many keys made of small files - in this case the distributed parse // doesn't work well because of the way chunks are distributed to nodes. We should switch to a local // parse to make sure the work is uniformly distributed across the whole cluster. public static final int TOO_MANY_KEYS_COUNT = 128; // A file is considered to be small if it can fit into <SMALL_FILE_NCHUNKS> number of chunks. public static final int SMALL_FILE_NCHUNKS = 10; public enum ParseMethod {StreamParse, DistributedParse, SequentialParse} public ParseMethod parseMethod(int nfiles, int nchunks, boolean disableParallelParse, boolean isEncrypted){ if (isEncrypted) { if (! isStreamParseSupported()) throw new UnsupportedOperationException("Parser " + name + " doesn't support encrypted files."); return ParseMethod.StreamParse; } if(isLocalParseSupported()) { if (disableParallelParse || !isParallelParseSupported() || (nfiles > TOO_MANY_KEYS_COUNT && (nchunks <= SMALL_FILE_NCHUNKS))) return getLocalParseMethod(); } if(isParallelParseSupported()) return ParseMethod.DistributedParse; throw H2O.unimpl(); } /** Does the parser support parallel parse? */ public boolean isParallelParseSupported() { return isParallelParseSupported; } /** Does the parser support stream parse? 
*/ public boolean isStreamParseSupported() { return isStreamParseSupported; } public boolean isSequentialParseSupported() { return isSequentialParseSupported; } private boolean isLocalParseSupported() { return isStreamParseSupported() || isSequentialParseSupported(); } private ParseMethod getLocalParseMethod() { if (isStreamParseSupported()) return ParseMethod.StreamParse; if (isSequentialParseSupported()) return ParseMethod.SequentialParse; throw new UnsupportedOperationException("Local parse not supported."); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } ParserInfo that = (ParserInfo) o; return name.equals(that.name); } @Override public int hashCode() { return name.hashCode(); } @Override public String toString() { return "ParserInfo{" + "name='" + name + '\'' + ", prior=" + prior + ", isParallelParseSupported=" + isParallelParseSupported + '}'; } }
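To make the parseMethod heuristic concrete, here is a small standalone sketch; the parser name "demo", the priority, and the file/chunk counts are made up. Many small keys push the parse to the local stream path, while a few large inputs stay distributed.

import water.parser.ParserInfo;
import water.parser.ParserInfo.ParseMethod;

public class ParseMethodDemo {
  public static void main(String[] args) {
    // Parallel + stream capable parser; name and priority are illustrative.
    ParserInfo info = new ParserInfo("demo", 100, true, true, false);

    // 200 keys of tiny files (<= SMALL_FILE_NCHUNKS chunks each) -> local StreamParse.
    ParseMethod m1 = info.parseMethod(200, 5, false, false);

    // 3 keys spanning 1000 chunks -> DistributedParse.
    ParseMethod m2 = info.parseMethod(3, 1000, false, false);

    System.out.println(m1 + " / " + m2); // StreamParse / DistributedParse
  }
}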
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/ParserProvider.java
package water.parser; import water.Job; import water.Key; import water.fvec.ByteVec; import water.fvec.Vec; /** * Generic Parser provider. */ public abstract class ParserProvider { /** Technical information for this parser */ public abstract ParserInfo info(); /** Create a new parser */ public abstract Parser createParser(ParseSetup setup, Key<Job> jobKey); /** * * @param v optional ByteVec (can be null if extracting eg. from a compressed file) * @param bits first bytes of the file * @param userSetup user specified setup * @return derived ParseSetup */ public final ParseSetup guessSetup(ByteVec v, byte[] bits, ParseSetup userSetup) { return guessSetup_impl(v, bits, userSetup); } /** * Actual implementation of the guessSetup method. Should almost never be overridden (the only * exception is the GuessParserProvider). * @param v * @param bits * @param userSetup * @return */ protected ParseSetup guessSetup_impl(ByteVec v, byte[] bits, ParseSetup userSetup) { ParseSetup ps = guessInitSetup(v, bits, userSetup); return guessFinalSetup(v, bits, ps).settzAdjustToLocal(userSetup._tz_adjust_to_local); } /** * Constructs initial ParseSetup from a given user setup * * Any exception thrown by this method will signal that this ParserProvider doesn't support * the input data. * * Parsers of data formats that provide metadata (eg. a binary file formats like Parquet) should use the * file metadata to identify the parse type and possibly other properties of the ParseSetup * that can be determined just from the metadata itself. The goal should be perform the least amount of operations * to correctly determine the ParseType (any exception means - format is not supported!). * * Note: Some file formats like CSV don't provide any metadata. In that case this method can return the final * ParseSetup. * * @param v optional ByteVec * @param bits first bytes of the file * @param userSetup user specified setup * @return null if this Provider cannot provide a parser for this file, otherwise an instance of ParseSetup * with correct setting of ParseSetup._parse_type */ public ParseSetup guessInitSetup(ByteVec v, byte[] bits, ParseSetup userSetup) { return guessSetup(v, bits, userSetup._separator, userSetup._number_columns, userSetup._single_quotes, userSetup._check_header, userSetup._column_names, userSetup._column_types, userSetup._domains, userSetup._na_strings); } /** * Finalizes ParseSetup created by {@see guessInitSetup} using data read from a given ByteVec/bits. * * @param v optional ByteVec * @param bits first bytes of the file * @param ps parse setup as created by {@see guessInitSetup} * @return fully initialized ParseSetup */ public ParseSetup guessFinalSetup(ByteVec v, byte[] bits, ParseSetup ps) { return ps; // by default assume the setup is already finalized } /** Returns parser setup of throws exception if input is not recognized */ public ParseSetup guessSetup(ByteVec v, byte[] bits, byte sep, int ncols, boolean singleQuotes, int checkHeader, String[] columnNames, byte[] columnTypes, String[][] domains, String[][] naStrings) { throw new UnsupportedOperationException("Not implemented. This method is kept only for backwards compatibility. " + "Override methods guessInitSetup & guessFinalSetup if you are implementing a new parser."); } /** Create a parser specific setup. 
* * Useful if the parser needs a single * @param inputs input keys * @param requiredSetup user-given parser setup * @return parser specific setup */ public abstract ParseSetup createParserSetup(Key[] inputs, ParseSetup requiredSetup); /** * Executed exactly once per-file-per-node during parse. * Do any file-related non-distributed setup here. E.g. an ORC reader creates a node-shared instance of a (non-serializable) Reader. * @param v * @param setup */ public ParseSetup setupLocal(Vec v, ParseSetup setup){ return setup;} }
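A hedged sketch of what a third-party provider might look like; the name "SKELETON", the priority, and the SkeletonParser class (from the sketch after Parser above) are assumptions. Discovery happens via ServiceLoader, so the class also has to be listed in META-INF/services/water.parser.ParserProvider.

package water.parser;

import water.Job;
import water.Key;

// Hypothetical provider wired to the SkeletonParser sketch shown earlier.
public class SkeletonParserProvider extends ParserProvider {
  private static final ParserInfo SKELETON_INFO =
      new ParserInfo("SKELETON", 10000, /*parallel*/ false, /*domainProvided*/ false);

  @Override public ParserInfo info() { return SKELETON_INFO; }

  @Override public Parser createParser(ParseSetup setup, Key<Job> jobKey) {
    return new SkeletonParser(setup, jobKey);
  }

  @Override public ParseSetup createParserSetup(Key[] inputs, ParseSetup requiredSetup) {
    return requiredSetup; // nothing format-specific to add for this sketch
  }
}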
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/ParserService.java
package water.parser; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.Iterator; import java.util.List; import java.util.ServiceLoader; /** * Service to manage optional implementation of parsers. */ public final class ParserService { public static ParserService INSTANCE = new ParserService(); /** Service loader. * * Based on JavaDoc of SPI: "Instances of this class are not safe for use by multiple concurrent threads." - all usages of the loader * are protected by synchronized block. */ private final ServiceLoader<ParserProvider> loader; public ParserService() { loader = ServiceLoader.load(ParserProvider.class); } /** Return list of all parser providers sorted based on priority. */ public List<ParserProvider> getAllProviders() { return getAllProviders(true); } /** * Returns all parser providers sorted based on priority if required. * * @param sort * @return */ synchronized public List<ParserProvider> getAllProviders(boolean sort) { List<ParserProvider> providers = new ArrayList<>(); for(ParserProvider pp : loader) { providers.add(pp); } if (sort) { Collections.sort(providers, PARSER_PROVIDER_COMPARATOR); } return providers; } synchronized public String[] getAllProviderNames(boolean sort) { List<ParserProvider> providers = getAllProviders(sort); String[] names = new String[providers.size()]; int i = 0; for (ParserProvider pp : providers) { names[i++] = pp.info().name(); } return names; } public ParserProvider getByInfo(ParserInfo info) { return getByName(info.name()); } synchronized public ParserProvider getByName(String name) { if (name != null) for (ParserProvider pp : loader) { if (pp.info().name().equalsIgnoreCase(name)) { return pp; } } return null; } private static Comparator<ParserProvider> PARSER_PROVIDER_COMPARATOR = new Comparator<ParserProvider>() { @Override public int compare(ParserProvider o1, ParserProvider o2) { int x = o1.info().prior; int y = o2.info().prior; // Cannot use Integer.compare(int, int) since it is available from Java7 and also cannot // use `-` for comparison return (x < y) ? -1 : ((x == y) ? 0 : 1); } }; }
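Because providers are looked up through ServiceLoader, the set of registered parsers can be enumerated at runtime. A minimal sketch follows; the name passed to getByName is an assumption, and the actual names depend on which providers are on the classpath.

import water.parser.ParserProvider;
import water.parser.ParserService;

public class ListParsers {
  public static void main(String[] args) {
    // Providers come back sorted by ParserInfo.prior (ascending).
    for (ParserProvider pp : ParserService.INSTANCE.getAllProviders()) {
      System.out.println(pp.info().name() + " priority=" + pp.info().priority());
    }
    // Lookup is case-insensitive; "SVMLight" is an assumed provider name.
    ParserProvider svm = ParserService.INSTANCE.getByName("SVMLight");
    System.out.println(svm == null ? "not registered" : svm.info().toString());
  }
}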
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/PreviewParseWriter.java
package water.parser; import water.Futures; import water.H2O; import water.Iced; import water.fvec.Vec; import water.util.ArrayUtils; import water.util.IcedHashMap; /** Class implementing ParseWriter, on behalf ParseSetup * to examine the contents of a file for guess the column types. */ public class PreviewParseWriter extends Iced implements StreamParseWriter { protected final static int MAX_PREVIEW_COLS = 100; protected final static int MAX_PREVIEW_LINES = 10; protected int _nlines; protected int _ncols; protected int _invalidLines; private String [] _colNames; protected String [][] _data = new String[MAX_PREVIEW_LINES][]; private IcedHashMap<String,String>[] _domains; //used in leiu of a HashSet int [] _nnums; int [] _nstrings; int [] _ndates; int [] _nUUID; int [] _nzeros; int [] _nempty; protected ParseErr [] _errs = new ParseErr[0]; protected PreviewParseWriter() {} protected PreviewParseWriter(int ncols) { setColumnCount(ncols); } String[] colNames() { return _colNames; } @Override public void setColumnNames(String[] names) { _colNames = names; _data[0] = names; ++_nlines; setColumnCount(names.length); } private void setColumnCount(int n) { // initialize if (_ncols == 0 && n > 0) { _ncols = n; _nzeros = new int[n]; _nstrings = new int[n]; _nUUID = new int[n]; _ndates = new int[n]; _nnums = new int[n]; _nempty = new int[n]; _domains = new IcedHashMap[n]; for(int i = 0; i < n; ++i) _domains[i] = new IcedHashMap<>(); for(int i =0; i < MAX_PREVIEW_LINES; i++) _data[i] = new String[n]; } /*else if (n > _ncols) { // resize _nzeros = Arrays.copyOf(_nzeros, n); _nstrings = Arrays.copyOf(_nstrings, n); _nUUID = Arrays.copyOf(_nUUID, n); _ndates = Arrays.copyOf(_ndates, n); _nnums = Arrays.copyOf(_nnums, n); _nempty = Arrays.copyOf(_nempty, n); _domains = Arrays.copyOf(_domains, n); for (int i=_ncols; i < n; i++) _domains[i] = new HashSet<String>(); for(int i =0; i < MAX_PREVIEW_LINES; i++) _data[i] = Arrays.copyOf(_data[i], n); _ncols = n; }*/ } @Override public void newLine() { ++_nlines; } @Override public boolean isString(int colIdx) { return false; } @Override public void addNumCol(int colIdx, long number, int exp) { if(colIdx < _ncols) { if (number == 0) ++_nzeros[colIdx]; else ++_nnums[colIdx]; if (_nlines < MAX_PREVIEW_LINES) _data[_nlines][colIdx] = Double.toString(water.util.PrettyPrint.pow10(number,exp)); } } @Override public void addNumCol(int colIdx, double d) { if(colIdx < _ncols) { if (d == 0) ++_nzeros[colIdx]; else ++_nnums[colIdx]; if (_nlines < MAX_PREVIEW_LINES) _data[_nlines][colIdx] = Double.toString(d); } } @Override public void addInvalidCol(int colIdx) { if(colIdx < _ncols) { ++_nempty[colIdx]; if (_nlines < MAX_PREVIEW_LINES) _data[_nlines][colIdx] = "NA"; } } @Override public void addNAs(int colIdx, int nrow) { throw H2O.unimpl(); } @Override public void addStrCol(int colIdx, BufferedString str) { if(colIdx < _ncols) { // Check for time if (ParseTime.isTime(str)) { ++_ndates[colIdx]; return; } //Check for UUID if(ParseUUID.isUUID(str)) { ++_nUUID[colIdx]; return; } //Add string to domains list for later determining string, NA, or categorical ++_nstrings[colIdx]; _domains[colIdx].put(str.toString(),""); if (_nlines < MAX_PREVIEW_LINES) _data[_nlines][colIdx] = str.toString(); } } @Override public void rollbackLine() {--_nlines;} @Override public void setIsAllASCII(int colIdx, boolean b) {} public byte[] guessTypes() { byte[] types = new byte[_ncols]; for (int i = 0; i < _ncols; ++i) { IcedHashMap<String, String> sourceDomain = _domains[i]; IDomain domain = new 
IDomain() { public int size() { return sourceDomain.size(); } public boolean contains(String value) { return sourceDomain.containsKey(value); } }; types[i] = PreviewParseWriter.guessType( _nlines, _nnums[i], _nstrings[i], _ndates[i], _nUUID[i], _nzeros[i], _nempty[i], domain); } return types; } public interface IDomain { int size(); boolean contains(String value); } public static byte guessType( int nlines, int nnums, int nstrings, int ndates, int nUUID, int nzeros, int nempty, IDomain domain) { int nonemptyLines = nlines - nempty - 1; //During guess, some columns may be shorted one line based on 4M boundary //Very redundant tests, but clearer and not speed critical // is it clearly numeric? if ((nnums + nzeros) >= ndates && (nnums + nzeros) >= nUUID && nnums >= nstrings) { // 0s can be an NA among categoricals, ignore return Vec.T_NUM; } // All same string or empty? if (domain.size() == 1 && ndates==0 ) { // Obvious NA, or few instances of the single string, declare numeric // else categorical return (domain.contains("NA") || domain.contains("na") || domain.contains("Na") || domain.contains("N/A") || nstrings < nnums+nzeros) ? Vec.T_NUM : Vec.T_CAT; } // with NA, but likely numeric if (domain.size() <= 1 && (nnums + nzeros) > ndates + nUUID) { return Vec.T_NUM; } // Datetime if (ndates > nUUID && ndates > (nnums + nzeros) && (ndates > nstrings || domain.size() <= 1)) { return Vec.T_TIME; } // UUID if (nUUID > ndates && nUUID > (nnums + nzeros) && (nUUID > nstrings || domain.size() <= 1)) { return Vec.T_UUID; } // Strings, almost no dups if (nstrings > ndates && nstrings > nUUID && nstrings > (nnums + nzeros) && domain.size() >= 0.95 * nstrings) { return Vec.T_STR; } // categorical or string? // categorical with 0s for NAs if (nzeros > 0 && ((nzeros + nstrings) >= nonemptyLines) //just strings and zeros for NA (thus no empty lines) && (domain.size() <= 0.95 * nstrings)) { // not all unique strings return Vec.T_CAT; } // categorical mixed with numbers if (nstrings >= (nnums + nzeros) // mostly strings && (domain.size() <= 0.95 * nstrings)) { // but not all unique return Vec.T_CAT; } // All guesses failed return Vec.T_NUM; } public String[][] guessNAStrings(byte[] types) { //For now just catch 0's as NA in categoricals String[][] naStrings = new String[_ncols][]; boolean empty = true; for (int i = 0; i < _ncols; ++i) { int nonemptyLines = _nlines - _nempty[i] - 1; //During guess, some columns may be shorted one line (based on 4M boundary) if (types[i] == Vec.T_CAT && _nzeros[i] > 0 && ((_nzeros[i] + _nstrings[i]) >= nonemptyLines) //just strings and zeros for NA (thus no empty lines) && (_domains[i].size() <= 0.95 * _nstrings[i])) { // not all unique strings naStrings[i] = new String[1]; naStrings[i][0] = "0"; empty = false; } } if (empty) return null; else return naStrings; } public static PreviewParseWriter unifyColumnPreviews(PreviewParseWriter prevA, PreviewParseWriter prevB) { if (prevA == null) return prevB; else if (prevB == null) return prevA; else { //sanity checks if (prevA._ncols != prevB._ncols) throw new ParseDataset.H2OParseException("Files conflict in number of columns. " + prevA._ncols + " vs. 
" + prevB._ncols + "."); prevA._nlines += prevB._nlines; prevA._invalidLines += prevB._invalidLines; for (int i = 0; i < prevA._ncols; i++) { prevA._nnums[i] += prevB._nnums[i]; prevA._nstrings[i] += prevB._nstrings[i]; prevA._ndates[i] += prevB._ndates[i]; prevA._nUUID[i] += prevB._nUUID[i]; prevA._nzeros[i] += prevB._nzeros[i]; prevA._nempty[i] += prevB._nempty[i]; if (prevA._domains[i] != null) { if (prevB._domains[i] != null) for(String s:prevB._domains[i].keySet()) prevA._domains[i].put(s,""); } else if (prevB._domains[i] != null) prevA._domains = prevB._domains; } } return prevA; } @Override public void invalidLine(ParseErr err) { addError(err); ++_invalidLines; } @Override public void addError(ParseErr err) { if(_errs == null) _errs = new ParseErr[]{err}; else if(_errs.length < 20) _errs = ArrayUtils.append(_errs,err); } @Override public boolean hasErrors() {return _errs != null && _errs.length > 0;} @Override public ParseErr[] removeErrors() {return _errs;} @Override public long lineNum() {return _nlines;} @Override public StreamParseWriter nextChunk() {throw H2O.unimpl();} @Override public StreamParseWriter reduce(StreamParseWriter dout) {throw H2O.unimpl();} @Override public StreamParseWriter close() {return this;} @Override public StreamParseWriter close(Futures fs) {return this;} }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/SVMLightFVecParseWriter.java
package water.parser; import water.Futures; import water.Job; import water.Key; import water.fvec.AppendableVec; import water.fvec.NewChunk; import water.fvec.Vec; import java.util.Arrays; // -------------------------------------------------------- public class SVMLightFVecParseWriter extends FVecParseWriter { protected final Vec.VectorGroup _vg; int _vecIdStart; public SVMLightFVecParseWriter( Vec.VectorGroup vg, int vecIdStart, int cidx, int chunkSize, AppendableVec[] avs, int[] parse_columns_indices, Key<Job> jobKey ){ super(vg, cidx, null, null, chunkSize, avs, parse_columns_indices, jobKey); _vg = vg; _vecIdStart = vecIdStart; int numParseCols = parse_columns_indices.length; _nvs = new NewChunk[numParseCols]; for(int i = 0; i < numParseCols; ++i) _nvs[i] = new NewChunk(_vecs[_parse_columns_indices[i]], _cidx, true); _col = 0; } @Override public void addNumCol(int colIdx, long number, int exp) { assert colIdx >= _col; if(colIdx >= _vecs.length) addColumns(colIdx+1); _nvs[colIdx].addZeros((int)_nLines - _nvs[colIdx]._len); _nvs[colIdx].addNum(number, exp); _col = colIdx+1; } @Override public void newLine() { ++_nLines; _col = 0; } @Override public void addStrCol(int idx, BufferedString str){addInvalidCol(idx);} @Override public boolean isString(int idx){return false;} @Override public FVecParseWriter close(Futures fs) { if (_nvs != null) { for(NewChunk nc:_nvs) { nc.addZeros((int) _nLines - nc._len); assert nc._len == _nLines:"incompatible number of lines after parsing chunk, " + _nLines + " != " + nc._len; } } _nCols = _nvs == null ? 0 : _nvs.length; return super.close(fs); } private void addColumns(int newColCnt){ int oldColCnt = _vecs.length; if(newColCnt > oldColCnt){ _nvs = Arrays.copyOf(_nvs, newColCnt); _vecs = Arrays.copyOf(_vecs , newColCnt); for(int i = oldColCnt; i < newColCnt; ++i) { _vecs[i] = new AppendableVec(_vg.vecKey(i+_vecIdStart),_vecs[0]._tmp_espc,Vec.T_NUM,_vecs[0]._chunkOff); _nvs[i] = new NewChunk(_vecs[i], _cidx, true); } _nCols = newColCnt; } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/SVMLightParser.java
package water.parser; import java.util.Arrays; import water.Key; import water.fvec.Vec; import water.util.PrettyPrint; import static water.parser.DefaultParserProviders.SVMLight_INFO; class SVMLightParser extends Parser { private static final byte SKIP_TOKEN = 21; private static final byte INVALID_NUMBER = 22; private static final byte QID0 = 23; private static final byte QID1 = 24; // line global states private static final int TGT = 1; private static final int COL = 2; private static final int VAL = 3; SVMLightParser( ParseSetup ps, Key jobkey ) { super(ps, jobkey); } /** Try to parse the bytes as svm light format, return a ParseSetupHandler with type * SVMLight if the input is in svm light format, throw an exception otherwise. */ public static ParseSetup guessSetup(byte [] bits) { int lastNewline = bits.length-1; while(lastNewline > 0 && !CsvParser.isEOL(bits[lastNewline]))lastNewline--; if(lastNewline > 0) bits = Arrays.copyOf(bits,lastNewline+1); SVMLightParser p = new SVMLightParser(new ParseSetup(SVMLight_INFO, ParseSetup.GUESS_SEP, false,ParseSetup.GUESS_HEADER,ParseSetup.GUESS_COL_CNT, null,null,null,null,null, false), null); SVMLightInspectParseWriter dout = new SVMLightInspectParseWriter(); p.parseChunk(0,new ByteAryData(bits,0), dout); if (dout._ncols > 0 && dout._nlines > 0 && dout._nlines > dout._invalidLines) return new ParseSetup(SVMLight_INFO, ParseSetup.GUESS_SEP, false,ParseSetup.NO_HEADER,dout._ncols,null,dout.guessTypes(),null,null,dout._data, dout.removeErrors()); else throw new ParseDataset.H2OParseException("Could not parse file as an SVMLight file."); } public static byte[] col_types(int ncols) { byte[] res = new byte[ncols]; Arrays.fill(res,Vec.T_NUM); return res; } final boolean isWhitespace(byte c){return c == ' ' || c == '\t';} @SuppressWarnings("fallthrough") @Override public final ParseWriter parseChunk(int cidx, final ParseReader din, final ParseWriter dout) { BufferedString _str = new BufferedString(); byte[] bits = din.getChunkData(cidx); if( bits == null ) return dout; final byte[] bits0 = bits; // Bits for chunk0 boolean firstChunk = true; // Have not rolled into the 2nd chunk byte[] bits1 = null; // Bits for chunk1, loaded lazily. int offset = 0; // General cursor into the giant array of bytes // Starting state. Are we skipping the first (partial) line, or not? Skip // a header line, or a partial line if we're in the 2nd and later chunks. int lstate = (cidx > 0)? SKIP_LINE : WHITESPACE_BEFORE_TOKEN; int gstate = TGT; long number = 0; int zeros = 0; int exp = 0; int sgnExp = 1; boolean decimal = false; int fractionDigits = 0; int colIdx = 0; byte c = bits[offset]; // skip comments for the first chunk (or if not a chunk) if( cidx == 0 ) { while (c == '#') { while ((offset < bits.length) && (bits[offset] != CHAR_CR) && (bits[offset ] != CHAR_LF)) ++offset; if ((offset+1 < bits.length) && (bits[offset] == CHAR_CR) && (bits[offset+1] == CHAR_LF)) ++offset; ++offset; if (offset >= bits.length) return dout; c = bits[offset]; } } MAIN_LOOP: while (true) { NEXT_CHAR: switch (lstate) { // --------------------------------------------------------------------- case SKIP_LINE: if (!isEOL(c)) break; // fall through case EOL: if (colIdx != 0) { colIdx = 0; if(lstate != SKIP_LINE) dout.newLine(); } if( !firstChunk ) break MAIN_LOOP; // second chunk only does the first row lstate = (c == CHAR_CR) ? 
EXPECT_COND_LF : POSSIBLE_EMPTY_LINE; gstate = TGT; break; // --------------------------------------------------------------------- case EXPECT_COND_LF: lstate = POSSIBLE_EMPTY_LINE; if (c == CHAR_LF) break; continue MAIN_LOOP; // --------------------------------------------------------------------- // --------------------------------------------------------------------- // --------------------------------------------------------------------- case POSSIBLE_EMPTY_LINE: if (isEOL(c)) { if (c == CHAR_CR) lstate = EXPECT_COND_LF; break; } lstate = WHITESPACE_BEFORE_TOKEN; // fallthrough to WHITESPACE_BEFORE_TOKEN // --------------------------------------------------------------------- case WHITESPACE_BEFORE_TOKEN: if (isWhitespace(c)) break; if (isEOL(c)){ lstate = EOL; continue MAIN_LOOP; } // fallthrough to TOKEN case TOKEN: if (((c >= '0') && (c <= '9')) || (c == '-') || (c == CHAR_DECIMAL_SEP) || (c == '+')) { lstate = NUMBER; number = 0; fractionDigits = 0; decimal = false; if (c == '-') { exp = -1; break; } else if(c == '+'){ exp = 1; break; } else { exp = 1; } // fallthrough } else if(c == 'q'){ lstate = QID0; } else { // failed, skip the line String err = "Unexpected character, expected number or qid, got '" + new String(Arrays.copyOfRange(bits, offset,Math.min(bits.length,offset+5))) + "...'"; dout.invalidLine(new ParseWriter.ParseErr(err,cidx,dout.lineNum(),offset + din.getGlobalByteOffset())); lstate = SKIP_LINE; continue MAIN_LOOP; } // fallthrough to NUMBER // --------------------------------------------------------------------- case NUMBER: if ((c >= '0') && (c <= '9')) { number = (number*10)+(c-'0'); if (number >= LARGEST_DIGIT_NUMBER) lstate = INVALID_NUMBER; break; } else if (c == CHAR_DECIMAL_SEP) { lstate = NUMBER_FRACTION; fractionDigits = offset; decimal = true; break; } else if ((c == 'e') || (c == 'E')) { lstate = NUMBER_EXP_START; sgnExp = 1; break; } if (exp == -1) { number = -number; } exp = 0; // fallthrough NUMBER_END case NUMBER_END: exp = exp - fractionDigits; switch(gstate){ case COL: if(c == ':'){ if(exp == 0 && number >= colIdx && (int)number == number){ colIdx = (int)number; gstate = VAL; lstate = WHITESPACE_BEFORE_TOKEN; } else { // wrong col Idx, just skip the token and try to continue // col idx is either too small (according to spec, cols must come in strictly increasing order) // or too small (col ids currently must fit into int) String err; if(number <= colIdx) err = "Columns come in non-increasing sequence. Got " + number + " after " + colIdx + ". Rest of the line is skipped."; else if(exp != 0) err = "Got non-integer as column id: " + PrettyPrint.pow10(number,exp) + ". Rest of the line is skipped."; else err = "column index out of range, " + number + " does not fit into integer." 
+ " Rest of the line is skipped."; dout.invalidLine(new ParseWriter.ParseErr(err,cidx,dout.lineNum(),offset + din.getGlobalByteOffset())); lstate = SKIP_LINE; } } else { // we're probably out of sync, skip the rest of the line String err = "Unexpected character after column id: " + c; dout.invalidLine(new ParseWriter.ParseErr(err,cidx,dout.lineNum(),offset + din.getGlobalByteOffset())); lstate = SKIP_LINE; } break NEXT_CHAR; case TGT: case VAL: dout.addNumCol(colIdx++,number,exp); lstate = WHITESPACE_BEFORE_TOKEN; gstate = COL; continue MAIN_LOOP; } // --------------------------------------------------------------------- case NUMBER_FRACTION: if(c == '0'){ ++zeros; break; } if ((c > '0') && (c <= '9')) { if (number < LARGEST_DIGIT_NUMBER) { number = (number*PrettyPrint.pow10i(zeros+1))+(c-'0'); } else { String err = "number " + number + " is out of bounds."; dout.invalidLine(new ParseWriter.ParseErr(err,cidx,dout.lineNum(),offset + din.getGlobalByteOffset())); lstate = SKIP_LINE; } zeros = 0; break; } else if ((c == 'e') || (c == 'E')) { if (decimal) fractionDigits = offset - zeros - 1 - fractionDigits; lstate = NUMBER_EXP_START; sgnExp = 1; zeros = 0; break; } lstate = NUMBER_END; if (decimal) fractionDigits = offset - zeros - fractionDigits-1; if (exp == -1) { number = -number; } exp = 0; zeros = 0; continue MAIN_LOOP; // --------------------------------------------------------------------- case NUMBER_EXP_START: if (exp == -1) { number = -number; } exp = 0; if (c == '-') { sgnExp *= -1; break; } else if (c == '+'){ break; } if ((c < '0') || (c > '9')){ lstate = INVALID_NUMBER; continue MAIN_LOOP; } lstate = NUMBER_EXP; // fall through to NUMBER_EXP // --------------------------------------------------------------------- case NUMBER_EXP: if ((c >= '0') && (c <= '9')) { exp = (exp*10)+(c-'0'); break; } exp *= sgnExp; lstate = NUMBER_END; continue MAIN_LOOP; // --------------------------------------------------------------------- case INVALID_NUMBER: if(gstate == TGT) { // invalid tgt -> skip the whole row lstate = SKIP_LINE; String err = "invalid number (expecting target)"; dout.invalidLine(new ParseWriter.ParseErr(err,cidx,dout.lineNum(),offset + din.getGlobalByteOffset())); continue MAIN_LOOP; } if(gstate == VAL){ // add invalid value and skip until whitespace or eol dout.addInvalidCol(colIdx++); gstate = COL; } case QID0: if(c == 'i'){ lstate = QID1; break; } else { lstate = SKIP_TOKEN; break; } case QID1: if(c == 'd'){ lstate = SKIP_TOKEN; // skip qid for now break; } else { // TODO report an error lstate = SKIP_TOKEN; break; } // fall through case SKIP_TOKEN: if(isEOL(c)) lstate = EOL; else if(isWhitespace(c)) lstate = WHITESPACE_BEFORE_TOKEN; break; default: assert (false) : " We have wrong state "+lstate; } // end NEXT_CHAR ++offset; // do not need to adjust for offset increase here - the offset is set to tokenStart-1! if (offset < 0) { // Offset is negative? assert !firstChunk; // Caused by backing up from 2nd chunk into 1st chunk firstChunk = true; bits = bits0; offset += bits.length; _str.set(bits,offset,0); } else if (offset >= bits.length) { // Off end of 1st chunk? Parse into 2nd chunk // Attempt to get more data. if( firstChunk && bits1 == null ){ bits1 = din.getChunkData(cidx+1); // linePrefix = new String(Arrays.copyOfRange(bits, linestart, bits.length)); } // if we can't get further we might have been the last one and we must // commit the latest guy if we had one. 
if( !firstChunk || bits1 == null ) { // No more data available or allowed // If we are mid-parse of something, act like we saw a LF to end the // current token. if ((lstate != EXPECT_COND_LF) && (lstate != POSSIBLE_EMPTY_LINE)) { c = CHAR_LF; continue; } break; // Else we are just done } // Now parsing in the 2nd chunk. All offsets relative to the 2nd chunk start. firstChunk = false; if (lstate == NUMBER_FRACTION) fractionDigits -= bits.length; offset -= bits.length; bits = bits1; // Set main parsing loop bits if( bits[0] == CHAR_LF && lstate == EXPECT_COND_LF ) break; // when the first character we see is a line end } c = bits[offset]; } // end MAIN_LOOP return dout; } // -------------------------------------------------------- // Used for previewing datasets. // Fill with zeros not NAs, and grow columns on-demand. private static class SVMLightInspectParseWriter extends PreviewParseWriter { public SVMLightInspectParseWriter() { for (int i = 0; i < MAX_PREVIEW_LINES;++i) _data[i] = new String[MAX_PREVIEW_COLS]; for (String[] datum : _data) Arrays.fill(datum, "0"); } // Expand columns on-demand @Override public void addNumCol(int colIdx, long number, int exp) { _ncols = Math.max(_ncols,colIdx); if(colIdx < MAX_PREVIEW_COLS && _nlines < MAX_PREVIEW_LINES) _data[_nlines][colIdx] = Double.toString(PrettyPrint.pow10(number,exp)); } @Override public void addNumCol(int colIdx, double d) { _ncols = Math.max(_ncols,colIdx); if(colIdx < MAX_PREVIEW_COLS && _nlines < MAX_PREVIEW_LINES) _data[_nlines][colIdx] = Double.toString(d); } public byte[] guessTypes() { return col_types(_ncols); } } }
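A hedged sketch of driving the SVMLight format guess directly; since SVMLightParser is package-private the sketch has to live in water.parser, and the sample bytes are made up (expected, though not guaranteed here, to be accepted as SVMLight).

package water.parser;

import java.nio.charset.StandardCharsets;

// Illustrative only: guessSetup throws ParseDataset.H2OParseException if the
// bytes do not look like SVMLight data.
public class SvmLightGuessDemo {
  public static void main(String[] args) {
    byte[] bits = "1 1:0.5 3:2\n0 2:1.5 7:0.25\n".getBytes(StandardCharsets.US_ASCII);
    ParseSetup setup = SVMLightParser.guessSetup(bits);
    System.out.println("columns guessed: " + setup._number_columns);
  }
}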
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/StreamParseWriter.java
package water.parser;

import water.Futures;

public interface StreamParseWriter extends ParseWriter {
  StreamParseWriter nextChunk();
  StreamParseWriter reduce(StreamParseWriter dout);
  StreamParseWriter close();
  StreamParseWriter close(Futures fs);
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/SyntheticColumnGenerator.java
package water.parser;

import water.Job;
import water.MRTask;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;

public class SyntheticColumnGenerator extends ParseFinalizer {

  @Override
  public Frame finalize(Job<Frame> job, Vec[] parsedVecs, ParseSetup setup, int[] fileChunkOffsets) {
    Vec[] withSynth = new Vec[parsedVecs.length + setup._synthetic_column_names.length];
    System.arraycopy(parsedVecs, 0, withSynth, 0, parsedVecs.length);
    for (int synthIdx = 0; synthIdx < setup._synthetic_column_names.length; synthIdx++) {
      withSynth[parsedVecs.length + synthIdx] = parsedVecs[0].makeCon(Vec.T_STR);
    }
    new SyntheticColumnGeneratorTask(setup, fileChunkOffsets).doAll(withSynth);
    if (Vec.T_CAT == setup._synthetic_column_type) {
      for (int synthIdx = 0; synthIdx < setup._synthetic_column_names.length; synthIdx++) {
        Vec originalSyntheticVec = withSynth[parsedVecs.length + synthIdx];
        withSynth[parsedVecs.length + synthIdx] = withSynth[parsedVecs.length + synthIdx].toCategoricalVec();
        originalSyntheticVec.remove();
      }
    }
    return new Frame(job._result, mergeColumnNames(setup), withSynth);
  }

  private String[] mergeColumnNames(ParseSetup parseSetup) {
    String[] names = new String[parseSetup._column_names.length + parseSetup._synthetic_column_names.length];
    System.arraycopy(parseSetup._column_names, 0, names, 0, parseSetup._column_names.length);
    System.arraycopy(parseSetup._synthetic_column_names, 0, names, parseSetup._column_names.length, parseSetup._synthetic_column_names.length);
    return names;
  }

  static class SyntheticColumnGeneratorTask extends MRTask<SyntheticColumnGeneratorTask> {
    private final ParseSetup _setup;
    private final int[] _fileChunkOffsets;

    SyntheticColumnGeneratorTask(ParseSetup setup, int[] fileChunkOffsets) {
      _setup = setup;
      _fileChunkOffsets = fileChunkOffsets;
    }

    @Override
    public void map(Chunk[] cs) {
      int synColCnt = _setup._synthetic_column_names.length;
      for (int colIdx = 0; colIdx < synColCnt; colIdx++) {
        int fileIdx = findFileIndexForChunk(cs[0].cidx());
        String colValue = _setup._synthetic_column_values[fileIdx][colIdx];
        for (int row = 0; row < cs[0]._len; row++) {
          cs[cs.length - synColCnt + colIdx].set(row, colValue);
        }
      }
    }

    private int findFileIndexForChunk(int cidx) {
      for (int i = 0; i < _fileChunkOffsets.length; i++) {
        if (_fileChunkOffsets[i] <= cidx && (i+1 == _fileChunkOffsets.length || _fileChunkOffsets[i+1] > cidx)) {
          return i;
        }
      }
      throw new RuntimeException("Failed to find file for chunk index " + cidx);
    }
  }
}
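The only non-obvious piece above is findFileIndexForChunk, which maps a global chunk index back to the file it came from by scanning the cumulative per-file chunk offsets. The same loop is restated standalone below for illustration; the offsets array is made up.

public class ChunkToFileDemo {
  // Same search as SyntheticColumnGeneratorTask.findFileIndexForChunk, restated standalone.
  static int fileIndexForChunk(int[] fileChunkOffsets, int cidx) {
    for (int i = 0; i < fileChunkOffsets.length; i++) {
      boolean lastFile = (i + 1 == fileChunkOffsets.length);
      if (fileChunkOffsets[i] <= cidx && (lastFile || fileChunkOffsets[i + 1] > cidx))
        return i;
    }
    throw new RuntimeException("Failed to find file for chunk index " + cidx);
  }

  public static void main(String[] args) {
    int[] offsets = {0, 4, 9};                         // file 0 starts at chunk 0, file 1 at 4, file 2 at 9
    System.out.println(fileIndexForChunk(offsets, 5)); // 1
    System.out.println(fileIndexForChunk(offsets, 9)); // 2
  }
}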
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/XlsParser.java
package water.parser; import java.io.*; import java.util.Arrays; import java.util.HashMap; import java.util.ArrayList; import water.Key; import water.H2O; import water.util.UnsafeUtils; import static water.parser.DefaultParserProviders.XLS_INFO; class XlsParser extends Parser { XlsParser( ParseSetup ps, Key jobKey ) { super(ps, jobKey); } @Override protected ParseWriter parseChunk(int cidx, final ParseReader din, final ParseWriter dout) { throw H2O.unimpl(); } // A Stream, might be a Zip stream private InputStream _is; // The unpacked data. We expect we can fully hold the unzipped data. private byte[] _buf; private int _lim; // What was read so-far // Simple offset / lim over the underlying buffer private class Buf { final byte[] _buf; byte[] _bbuf; int _off, _lim; Buf( byte[] buf, int off, int size ) throws IOException { _buf = _bbuf = buf; _off = off; _lim = off+size; readAtLeast(_lim); } Buf( Buf B, int off, int size ) { _buf = _bbuf = B._bbuf; _off = off; _lim = off+size; assert _lim <= _buf.length; } void concat( int off, int size ) throws IOException { readAtLeast(off+size); if( _off == _lim ) { // Empty Buf, so concat is really assign _off = off; _lim = off+size; return; } if( off == _lim ) { // Adjacent, so just extend _lim += size; return; } _bbuf = Arrays.copyOfRange(_bbuf,_off,_lim+size); _lim = _lim-_off+size; _off = 0; System.arraycopy(_buf,off,_bbuf,_lim-size,size); } char get1(int pos ) { assert _off+pos+1<_lim; return (char)_bbuf[_off+pos]; } int get2( int pos ) { assert _off+pos+2<_lim; return UnsafeUtils.get2(_bbuf, _off + pos); } int get4( int pos ) { assert _off+pos+4<_lim; return UnsafeUtils.get4(_bbuf,_off+pos); } double get8d( int pos ) { assert _off+pos+8<_lim; return UnsafeUtils.get8d(_bbuf,_off+pos); } String getStr( int pos, int len ) { return new String(_bbuf,_off+pos,len); } } // Read & keep in _buf from the unpacked stream at least 'lim' bytes. // Toss a range-check if the stream runs dry too soon. 
private void readAtLeast(int lim) throws IOException{ if( lim <= _lim ) return; // Already read at least if( _buf == null ) _buf = new byte[0]; if( lim > _buf.length ) { // Need to grow buffer int oldlen = _buf.length, newlen = oldlen; if( newlen==0 ) newlen=1024; while( newlen < lim ) newlen<<=1; _buf = Arrays.copyOf(_buf,newlen); } // Now read/unzip until lim int x; while( _lim < lim && (x = _is.read(_buf,_lim,_buf.length-_lim)) != -1 ) _lim += x; if( _lim < lim ) throw new java.lang.ArrayIndexOutOfBoundsException("not an XLS file: reading at "+lim+" but file is only "+_lim+" bytes"); } // Wrapper to fetch an int at a random offset private int get4( int pos ) throws IOException { readAtLeast(pos+4); return UnsafeUtils.get4(_buf,pos); } /** Try to parse the bytes as XLS format */ public static ParseSetup guessSetup( byte[] bytes ) { XlsParser p = new XlsParser(new ParseSetup(XLS_INFO, ParseSetup.GUESS_SEP, false, ParseSetup.GUESS_HEADER, ParseSetup.GUESS_COL_CNT, null, null, null, null, null, false), null); p._buf = bytes; // No need to copy already-unpacked data; just use it directly p._lim = bytes.length; PreviewParseWriter dout = new PreviewParseWriter(); try{ p.streamParse(new ByteArrayInputStream(bytes), dout); } catch(IOException e) { throw new RuntimeException(e); } if (dout._ncols > 0 && dout._nlines > 0 && dout._nlines > dout._invalidLines) return new ParseSetup(XLS_INFO, ParseSetup.GUESS_SEP, false, dout.colNames()==null?ParseSetup.NO_HEADER:ParseSetup.HAS_HEADER,dout._ncols, dout.colNames(), dout.guessTypes(),null,null,dout._data, false); else throw new ParseDataset.H2OParseException("Could not parse file as an XLS file."); } /** Ported to Java from excel_reader2.php. * Found at: http://code.google.com/p/php-excel-reader/downloads/detail?name=php-excel-reader-2.21.zip&can=2&q= * * Originally developed by Vadim Tkachenko under the name PHPExcelReader. * (http://sourceforge.net/projects/phpexcelreader) * Based on the Java version by Andy Khan (http://www.andykhan.com). Now * maintained by David Sanders. Reads only Biff 7 and Biff 8 formats. * * PHP versions 4 and 5 * * LICENSE: This source file is subject to version 3.0 of the PHP license * that is available through the world-wide-web at the following URI: * http://www.php.net/license/3_0.txt. If you did not receive a copy of * the PHP License and are unable to obtain it through the web, please * send a note to license@php.net so we can mail you a copy immediately. 
*/ private static final int NUM_BIG_BLOCK_DEPOT_BLOCKS_POS = 0x2c; private static final int SMALL_BLOCK_DEPOT_BLOCK_POS = 0x3c; private static final int ROOT_START_BLOCK_POS = 0x30; private static final int BIG_BLOCK_SIZE = 0x200; private static final int SMALL_BLOCK_SIZE = 0x40; private static final int EXTENSION_BLOCK_POS = 0x44; private static final int NUM_EXTENSION_BLOCK_POS = 0x48; private static final int PROPERTY_STORAGE_BLOCK_SIZE = 0x80; private static final int BIG_BLOCK_DEPOT_BLOCKS_POS = 0x4c; private static final int SMALL_BLOCK_THRESHOLD = 0x1000; // property storage offsets private static final int SIZE_OF_NAME_POS = 0x40; private static final int TYPE_POS = 0x42; private static final int START_BLOCK_POS = 0x74; private static final int SIZE_POS = 0x78; private static final byte[] IDENTIFIER_OLE = new byte[] { (byte)0xd0,(byte)0xcf,(byte)0x11,(byte)0xe0,(byte)0xa1,(byte)0xb1,(byte)0x1a,(byte)0xe1 }; // Breakdown of the OLE structure private int _numBigBlockDepotBlocks; private int _sbdStartBlock; private int _rootStartBlock; private int _extensionBlock; private int _numExtensionBlocks; private int[] _bigBlockChain; private int[] _smallBlockChain; private ArrayList<Props> _props = new ArrayList<>(); private static class Props { final String _name; final int _type, _startBlock, _size; Props( String name, int type, int startBlock, int size ) { _name = name; _type = type; _startBlock = startBlock; _size = size; } } private Props _wrkbook, _rootentry; @Override public ParseWriter streamParse( final InputStream is, final StreamParseWriter dout) throws IOException { _is = is; // Check for magic first readAtLeast(IDENTIFIER_OLE.length); for( int i=0; i<IDENTIFIER_OLE.length; i++ ) if( _buf[i] != IDENTIFIER_OLE[i] ) throw new ParseDataset.H2OParseException("Not a valid XLS file, lacks correct starting bits (aka magic number)."); _numBigBlockDepotBlocks = get4(NUM_BIG_BLOCK_DEPOT_BLOCKS_POS); _sbdStartBlock = get4(SMALL_BLOCK_DEPOT_BLOCK_POS); _rootStartBlock = get4(ROOT_START_BLOCK_POS); _extensionBlock = get4(EXTENSION_BLOCK_POS); _numExtensionBlocks = get4(NUM_EXTENSION_BLOCK_POS); int pos = BIG_BLOCK_DEPOT_BLOCKS_POS; int bbdBlocks = _numExtensionBlocks == 0 ? 
_numBigBlockDepotBlocks : (BIG_BLOCK_SIZE - BIG_BLOCK_DEPOT_BLOCKS_POS)/4; final int[] bigBlockDepotBlocks = new int[bbdBlocks]; for( int i = 0; i < bbdBlocks; i++ ) bigBlockDepotBlocks[i] = get4((pos+=4)-4); for( int j = 0; j < _numExtensionBlocks; j++ ) { pos = (_extensionBlock + 1) * BIG_BLOCK_SIZE; final int blocksToRead = Math.min(_numBigBlockDepotBlocks - bbdBlocks, BIG_BLOCK_SIZE / 4 - 1); for( int i = bbdBlocks; i < bbdBlocks + blocksToRead; i++ ) bigBlockDepotBlocks[i] = get4((pos+=4)-4); bbdBlocks += blocksToRead; if( bbdBlocks < _numBigBlockDepotBlocks ) _extensionBlock = get4(pos); } // readBigBlockDepot int index = 0; _bigBlockChain = new int[1]; for( int i = 0; i < _numBigBlockDepotBlocks; i++ ) { pos = (bigBlockDepotBlocks[i] + 1) * BIG_BLOCK_SIZE; for( int j = 0 ; j < BIG_BLOCK_SIZE / 4; j++ ) { _bigBlockChain[index++] = get4((pos+=4)-4); if( index==_bigBlockChain.length ) _bigBlockChain = Arrays.copyOf(_bigBlockChain,index<<1); } } // readSmallBlockDepot(); index = 0; int sbdBlock = _sbdStartBlock; int[] smallBlockChain = new int[1]; while( sbdBlock != -2 ) { pos = (sbdBlock + 1) * BIG_BLOCK_SIZE; for( int j = 0; j < BIG_BLOCK_SIZE / 4; j++ ) { smallBlockChain[index++] = get4((pos+=4)-4); if( index==smallBlockChain.length ) smallBlockChain = Arrays.copyOf(smallBlockChain,index<<1); } sbdBlock = _bigBlockChain[sbdBlock]; } // Read workbook & root entries __readPropertySets(__readData(_rootStartBlock)); // Read the workbook - this holds all the csv data Buf data = getWorkBook(); // Parse the workbook boolean res = parseWorkbook(data,dout); if( !res ) throw new IOException("not an XLS file"); return dout; } private Buf __readData(int block) throws IOException { Buf data = new Buf(_buf,0,0); while( block != -2 ) { int pos = (block + 1) * BIG_BLOCK_SIZE; data.concat(pos, BIG_BLOCK_SIZE); block = _bigBlockChain[block]; } return data; } // Find the workbook & root entries private void __readPropertySets(Buf entry) { int offset = 0; while( offset < entry._lim ) { Buf d = new Buf(entry, offset, PROPERTY_STORAGE_BLOCK_SIZE); int nameSize = d.get2(SIZE_OF_NAME_POS); int type = d._bbuf[TYPE_POS]; int startBlock = d.get4(START_BLOCK_POS); int size = d.get4(SIZE_POS); String name = ""; for( int i = 0; i < nameSize ; i+=2 ) name += (char)d.get2(i); name = name.replaceAll("\0", ""); // remove trailing nul (C string?) 
Props p = new Props(name,type,startBlock,size); _props.add(p); if( name.equalsIgnoreCase("workbook") || name.equalsIgnoreCase("book") ) _wrkbook = p; if( name.equals("Root Entry") ) _rootentry = p; offset += PROPERTY_STORAGE_BLOCK_SIZE; } } private Buf getWorkBook() throws IOException { if( _wrkbook._size < SMALL_BLOCK_THRESHOLD ) { Buf rootdata = __readData(_rootentry._startBlock); Buf streamData = new Buf(rootdata,0,0); int block = _wrkbook._startBlock; while( block != -2 ) { int pos = block * SMALL_BLOCK_SIZE; streamData.concat(pos, SMALL_BLOCK_SIZE); block = _smallBlockChain[block]; } return streamData; } else { int numBlocks = _wrkbook._size / BIG_BLOCK_SIZE; if( _wrkbook._size % BIG_BLOCK_SIZE != 0 ) numBlocks++; Buf streamData = new Buf(_buf,0,0); if( numBlocks == 0 ) return streamData; int block = _wrkbook._startBlock; while( block != -2 ) { int pos = (block + 1) * BIG_BLOCK_SIZE; streamData.concat(pos, BIG_BLOCK_SIZE); block = _bigBlockChain[block]; } return streamData; } } private static final int SPREADSHEET_EXCEL_READER_BIFF8 = 0x600; private static final int SPREADSHEET_EXCEL_READER_BIFF7 = 0x500; private static final int SPREADSHEET_EXCEL_READER_WORKBOOKGLOBALS = 0x5; private static final int SPREADSHEET_EXCEL_READER_WORKSHEET = 0x10; private static final int SPREADSHEET_EXCEL_READER_TYPE_BOF = 0x809; private static final int SPREADSHEET_EXCEL_READER_TYPE_EOF = 0x0a; private static final int SPREADSHEET_EXCEL_READER_TYPE_BOUNDSHEET = 0x85; private static final int SPREADSHEET_EXCEL_READER_TYPE_DIMENSION = 0x200; private static final int SPREADSHEET_EXCEL_READER_TYPE_ROW = 0x208; private static final int SPREADSHEET_EXCEL_READER_TYPE_DBCELL = 0xd7; private static final int SPREADSHEET_EXCEL_READER_TYPE_FILEPASS = 0x2f; private static final int SPREADSHEET_EXCEL_READER_TYPE_NOTE = 0x1c; private static final int SPREADSHEET_EXCEL_READER_TYPE_TXO = 0x1b6; private static final int SPREADSHEET_EXCEL_READER_TYPE_RK = 0x7e; private static final int SPREADSHEET_EXCEL_READER_TYPE_RK2 = 0x27e; private static final int SPREADSHEET_EXCEL_READER_TYPE_MULRK = 0xbd; private static final int SPREADSHEET_EXCEL_READER_TYPE_MULBLANK = 0xbe; private static final int SPREADSHEET_EXCEL_READER_TYPE_INDEX = 0x20b; private static final int SPREADSHEET_EXCEL_READER_TYPE_SST = 0xfc; private static final int SPREADSHEET_EXCEL_READER_TYPE_EXTSST = 0xff; private static final int SPREADSHEET_EXCEL_READER_TYPE_CONTINUE = 0x3c; private static final int SPREADSHEET_EXCEL_READER_TYPE_LABEL = 0x204; private static final int SPREADSHEET_EXCEL_READER_TYPE_LABELSST = 0xfd; private static final int SPREADSHEET_EXCEL_READER_TYPE_NUMBER = 0x203; private static final int SPREADSHEET_EXCEL_READER_TYPE_NAME = 0x18; private static final int SPREADSHEET_EXCEL_READER_TYPE_ARRAY = 0x221; private static final int SPREADSHEET_EXCEL_READER_TYPE_STRING = 0x207; private static final int SPREADSHEET_EXCEL_READER_TYPE_FORMULA = 0x406; private static final int SPREADSHEET_EXCEL_READER_TYPE_FORMULA2 = 0x6; private static final int SPREADSHEET_EXCEL_READER_TYPE_FORMAT = 0x41e; private static final int SPREADSHEET_EXCEL_READER_TYPE_XF = 0xe0; private static final int SPREADSHEET_EXCEL_READER_TYPE_BOOLERR = 0x205; private static final int SPREADSHEET_EXCEL_READER_TYPE_FONT = 0x0031; private static final int SPREADSHEET_EXCEL_READER_TYPE_PALETTE = 0x0092; private static final int SPREADSHEET_EXCEL_READER_TYPE_UNKNOWN = 0xffff; private static final int SPREADSHEET_EXCEL_READER_TYPE_NINETEENFOUR = 0x22; private static final int 
SPREADSHEET_EXCEL_READER_TYPE_MERGEDCELLS = 0xE5; private static final int SPREADSHEET_EXCEL_READER_UTCOFFSETDAYS = 25569; private static final int SPREADSHEET_EXCEL_READER_UTCOFFSETDAYS1904 = 24107; private static final int SPREADSHEET_EXCEL_READER_MSINADAY = 86400; private static final int SPREADSHEET_EXCEL_READER_TYPE_HYPER = 0x01b8; private static final int SPREADSHEET_EXCEL_READER_TYPE_COLINFO = 0x7d; private static final int SPREADSHEET_EXCEL_READER_TYPE_DEFCOLWIDTH = 0x55; private static final int SPREADSHEET_EXCEL_READER_TYPE_STANDARDWIDTH = 0x99; private static final String SPREADSHEET_EXCEL_READER_DEF_NUM_FORMAT = "%s"; // Excel spreadsheet specific stuff private int _version; private boolean _nineteenFour; private String[] _formatRecords = new String[1]; private ArrayList<String> _sst = new ArrayList<>(); private ArrayList<Sheet> _boundsheets = new ArrayList<>(); private static class XF { final int _indexCode; enum Type { Date, Number, Other } final Type _type; XF( int code, Type type ) { _indexCode = code; _type = type; } } private ArrayList<XF> _xfRecords = new ArrayList<>(); /** List of default date formats used by Excel */ private static HashMap<Integer,String> DATEFORMATS = new HashMap<>(); static { DATEFORMATS.put(0xe,"m/d/Y"); DATEFORMATS.put(0xf,"M-d-Y"); DATEFORMATS.put(0x10,"d-M"); DATEFORMATS.put(0x11,"M-Y"); DATEFORMATS.put(0x12,"h:i a"); DATEFORMATS.put(0x13,"h:i:s a"); DATEFORMATS.put(0x14,"H:i"); DATEFORMATS.put(0x15,"H:i:s"); DATEFORMATS.put(0x16,"d/m/Y H:i"); DATEFORMATS.put(0x2d,"i:s"); DATEFORMATS.put(0x2e,"H:i:s"); DATEFORMATS.put(0x2f,"i:s.S"); } /** Default number formats used by Excel */ private static HashMap<Integer,String> NUMBERFORMATS = new HashMap<>(); static { NUMBERFORMATS.put(0x1 ,"0"); NUMBERFORMATS.put(0x2 ,"0.00"); NUMBERFORMATS.put(0x3 ,"#,##0"); NUMBERFORMATS.put(0x4 ,"#,##0.00"); NUMBERFORMATS.put(0x5 ,"$#,##0;($#,##0)"); NUMBERFORMATS.put(0x6 ,"$#,##0;[Red]($#,##0)"); NUMBERFORMATS.put(0x7 ,"$#,##0.00;($#,##0.00)"); NUMBERFORMATS.put(0x8 ,"$#,##0.00;[Red]($#,##0.00)"); NUMBERFORMATS.put(0x9 ,"0%"); NUMBERFORMATS.put(0xa ,"0.00%"); NUMBERFORMATS.put(0xb ,"0.00E+00"); NUMBERFORMATS.put(0x25,"#,##0;(#,##0)"); NUMBERFORMATS.put(0x26,"#,##0;[Red](#,##0)"); NUMBERFORMATS.put(0x27,"#,##0.00;(#,##0.00)"); NUMBERFORMATS.put(0x28,"#,##0.00;[Red](#,##0.00)"); NUMBERFORMATS.put(0x29,"#,##0;(#,##0)"); // Not exactly NUMBERFORMATS.put(0x2a,"$#,##0;($#,##0)"); // Not exactly NUMBERFORMATS.put(0x2b,"#,##0.00;(#,##0.00)"); // Not exactly NUMBERFORMATS.put(0x2c,"$#,##0.00;($#,##0.00)"); // Not exactly NUMBERFORMATS.put(0x30,"##0.0E+0"); } /** * Parse a workbook */ private boolean parseWorkbook(Buf data, final ParseWriter dout) { int pos = 0; int code = data.get2(pos); int length = data.get2(pos+2); int version = data.get2(pos+4); int substreamType = data.get2(pos+6); _version = version; if( version != SPREADSHEET_EXCEL_READER_BIFF8 && version != SPREADSHEET_EXCEL_READER_BIFF7 ) return false; if( substreamType != SPREADSHEET_EXCEL_READER_WORKBOOKGLOBALS ) return false; pos += length + 4; code = data.get2(pos); length = data.get2(pos+2); while( code != SPREADSHEET_EXCEL_READER_TYPE_EOF ) { switch( code ) { case SPREADSHEET_EXCEL_READER_TYPE_SST: { int spos = pos + 4; int limitpos = spos + length; int uniqueStrings = data.get4(spos+4); spos += 8; for( int i = 0; i < uniqueStrings; i++ ) { // Read in the number of characters if (spos == limitpos) { int conlength = data.get2(spos+2); spos += 4; limitpos = spos + conlength; } int numChars = data.get2(spos); spos 
+= 2; int optionFlags = data.get1(spos); spos++; boolean asciiEncoding = ((optionFlags & 0x01) == 0); boolean extendedString = ( (optionFlags & 0x04) != 0); // See if string contains formatting information boolean richString = ( (optionFlags & 0x08) != 0); int formattingRuns=0; if( richString ) // Read in the crun formattingRuns = data.get2((spos+=2)-2); int extendedRunLength=0; if( extendedString ) // Read in cchExtRst extendedRunLength = data.get4((spos+=4)-4); String retstr = null; int len = (asciiEncoding)? numChars : numChars*2; if( spos + len < limitpos ) { retstr = data.getStr((spos+=len)-len, len); } else { // found continue retstr = data.getStr(spos, limitpos - spos); int bytesRead = limitpos - spos; int charsLeft = numChars - ((asciiEncoding) ? bytesRead : (bytesRead / 2)); spos = limitpos; while (charsLeft > 0) { int opcode = data.get2(spos); int conlength = data.get2(spos+2); if( opcode != 0x3c ) return false; spos += 4; limitpos = spos + conlength; int option = data.get1(spos); spos += 1; // if (asciiEncoding && (option == 0)) { // len = min(charsLeft, limitpos - spos); // min(charsLeft, conlength); // retstr .= substr(data, spos, len); // charsLeft -= len; // asciiEncoding = true; // } // elseif (!asciiEncoding && (option != 0)) { // len = min(charsLeft * 2, limitpos - spos); // min(charsLeft, conlength); // retstr .= substr(data, spos, len); // charsLeft -= len/2; // asciiEncoding = false; // } // elseif (!asciiEncoding && (option == 0)) { // // Bummer - the string starts off as Unicode, but after the // // continuation it is in straightforward ASCII encoding // len = min(charsLeft, limitpos - spos); // min(charsLeft, conlength); // for (j = 0; j < len; j++) { // retstr .= data[spos + j].chr(0); // } // charsLeft -= len; // asciiEncoding = false; // } // else{ // newstr = ''; // for (j = 0; j < strlen(retstr); j++) { // newstr = retstr[j].chr(0); // } // retstr = newstr; // len = min(charsLeft * 2, limitpos - spos); // min(charsLeft, conlength); // retstr .= substr(data, spos, len); // charsLeft -= len/2; // asciiEncoding = false; // } // spos += len; throw H2O.unimpl(); } } retstr = (asciiEncoding) ? retstr : __encodeUTF16(retstr); if (richString) spos += 4 * formattingRuns; // For extended strings, skip over the extended string data if (extendedString) spos += extendedRunLength; _sst.add(retstr); } break; } case SPREADSHEET_EXCEL_READER_TYPE_FILEPASS: return false; case SPREADSHEET_EXCEL_READER_TYPE_NAME: break; case SPREADSHEET_EXCEL_READER_TYPE_FORMAT: { String formatString = version == SPREADSHEET_EXCEL_READER_BIFF8 ? data.getStr(pos+9, data.get2(pos+6)*(data.get1(pos+8) == 0 ? 1 : 2)) : data.getStr(pos+7, data.get1(pos+6)*2); int indexCode = data.get2(pos+4); while( indexCode >= _formatRecords.length ) _formatRecords = Arrays.copyOf(_formatRecords,_formatRecords.length<<1); _formatRecords[indexCode] = formatString; break; } case SPREADSHEET_EXCEL_READER_TYPE_FONT: break; // While the original php file parsed the font here, H2O just wants the data case SPREADSHEET_EXCEL_READER_TYPE_PALETTE: break; // While the original php file parsed the color palaette info here, H2O just wants the data case SPREADSHEET_EXCEL_READER_TYPE_XF: { // While the original php file parsed the extensive formatting info // here, H2O just wants the data. 
Limit to figuring out if excel thinks // this is a date-formatted field or not int indexCode = data.get2(pos+6); XF.Type t=null; if( DATEFORMATS.containsKey(indexCode) ) t = XF.Type.Date; else if( NUMBERFORMATS.containsKey(indexCode) ) t = XF.Type.Number; else if( indexCode < _formatRecords.length && _formatRecords[indexCode] != null ) t = XF.Type.Other; _xfRecords.add(new XF(indexCode,t)); break; } case SPREADSHEET_EXCEL_READER_TYPE_NINETEENFOUR: _nineteenFour = data.get1(pos+4) == 1; break; case SPREADSHEET_EXCEL_READER_TYPE_BOUNDSHEET: int recOffset = data.get4(pos+4); int recLength = data.get1(pos+10); String recName = version == SPREADSHEET_EXCEL_READER_BIFF8 ? data.getStr(pos+12, recLength*(data.get1(pos+11) == 0 ? 1 : 2)) : data.getStr(pos+11, recLength); _boundsheets.add(new Sheet(data,dout,recName,recOffset)); break; default: // nothing; ignore this block typed } pos += length + 4; code = data.get2(pos); length = data.get2(pos+2); } // Parse all Sheets, although honestly H2O probably only wants the 1st sheet for( Sheet sheet : _boundsheets ) sheet.parse(); return true; } // ------------------------------ // A single Excel Sheet private class Sheet { final String _name; final Buf _data; final int _offset; final ParseWriter _dout; int _numRows, _numCols; String[] _labels; int _currow = 0; double[] _ds; Sheet( Buf data, ParseWriter dout, String name, int offset ) { _data = data; _dout = dout; _name = name; _offset = offset; } // Get the next row spec - and thus cleanup the prior row int row(int spos) { int row = _data.get2(spos); if( row < _currow ) throw new RuntimeException("XLS file but rows running backwards"); return doRow(row); } int doRow(int row) { // Once we're done with row 0, look at the collection of Strings on this // row. If all columns have a String, declare it a label row. Else, // inject the partial Strings as categoricals. if( row > _currow && _currow == 0 ) { // Moving off of row 0 boolean header=true; for( String s : _labels ) header &= (s!=null); // All strings? if( header ) { // It's a header row _dout.setColumnNames(_labels.clone()); Arrays.fill(_labels,null); // Dont reuse them labels as categoricals _currow=1; // Done with this row } } // Advance to the next row while( _currow < row ) { _currow++; // Next row internally // Forward collected row to _dout. 
for( int i=0; i<_ds.length; i++ ) { if( _labels[i] != null ) { _dout.addStrCol(i,new BufferedString(_labels[i])); _labels[i] = null; } else { _dout.addNumCol(i,_ds[i]); _ds[i] = Double.NaN; } } _dout.newLine(); // And advance dout a line } return row; } boolean parse() { // read BOF int spos = _offset; int code = _data.get2(spos); int length = _data.get2(spos+2); int version = _data.get2(spos + 4); if( (version != SPREADSHEET_EXCEL_READER_BIFF8) && (version != SPREADSHEET_EXCEL_READER_BIFF7) ) return false; int substreamType = _data.get2(spos + 6); if( substreamType != SPREADSHEET_EXCEL_READER_WORKSHEET ) return false; spos += length + 4; String recType = null; while(true) { code = _data.get1(spos); if( code != SPREADSHEET_EXCEL_READER_TYPE_EOF) { code = _data.get2(spos); length = _data.get2(spos+2); recType = null; spos += 4; } switch( code ) { case SPREADSHEET_EXCEL_READER_TYPE_DIMENSION: if( _numRows == 0 && _numCols == 0 ) { if( length == 10 || version == SPREADSHEET_EXCEL_READER_BIFF7 ) { _numRows = _data.get2(spos+ 2); _numCols = _data.get2(spos+ 6); } else { _numRows = _data.get2(spos+ 4); _numCols = _data.get2(spos+10); } _labels = new String[_numCols]; _ds = new double[_numCols]; Arrays.fill(_ds,Double.NaN); } break; case SPREADSHEET_EXCEL_READER_TYPE_MERGEDCELLS: break; // While the original php file parsed merged-cells here, H2O just wants the _data case SPREADSHEET_EXCEL_READER_TYPE_RK: case SPREADSHEET_EXCEL_READER_TYPE_RK2: { int row = row(spos); int col = _data.get2(spos+2); double d = _GetIEEE754(_data.get4(spos+6)); if( isDate(_data, spos) ) throw H2O.unimpl(); _ds[col] = d; break; } case SPREADSHEET_EXCEL_READER_TYPE_LABELSST: { int row = row(spos); int col = _data.get2(spos+2); int index = _data.get4(spos+6); _labels[col] = _sst.get(index); // Set label break; } case SPREADSHEET_EXCEL_READER_TYPE_MULRK: { int row = row(spos); int colFirst= _data.get2(spos+2); int colLast = _data.get2(spos+length-2); int columns = colLast - colFirst + 1; int tmppos = spos+4; for( int i = 0; i < columns; i++ ) { double numValue = _GetIEEE754(_data.get4(tmppos + 2)); if( isDate( _data, tmppos-4) ) throw H2O.unimpl(); tmppos += 6; _ds[colFirst+i] = numValue; } break; } case SPREADSHEET_EXCEL_READER_TYPE_NUMBER: { int row = row(spos); int col = _data.get2(spos+2); double d = _data.get8d(spos+6); if( isDate(_data,spos) ) throw H2O.unimpl(); _ds[col] = d; break; } case SPREADSHEET_EXCEL_READER_TYPE_MULBLANK: { int row = row(spos); int col = _data.get2(spos+2); int cols= (length / 2) - 3; for( int c = 0; c < cols; c++ ) { if( isDate( _data, spos+(c*2)) ) throw H2O.unimpl(); _ds[col+c] = 0; } break; } case SPREADSHEET_EXCEL_READER_TYPE_FORMULA: case SPREADSHEET_EXCEL_READER_TYPE_FORMULA2: throw H2O.unimpl(); //row = ord(_data[spos]) | ord(_data[spos+1])<<8; //column = ord(_data[spos+2]) | ord(_data[spos+3])<<8; //if ((ord(_data[spos+6])==0) && (ord(_data[spos+12])==255) && (ord(_data[spos+13])==255)) { // //String formula. Result follows in a STRING record // // This row/col are stored to be referenced in that record // // http://code.google.com/p/php-excel-reader/issues/detail?id=4 // previousRow = row; // previousCol = column; //} elseif ((ord(_data[spos+6])==1) && (ord(_data[spos+12])==255) && (ord(_data[spos+13])==255)) { // //Boolean formula. 
Result is in +2; 0=false,1=true // // http://code.google.com/p/php-excel-reader/issues/detail?id=4 // if (ord(this->_data[spos+8])==1) { // this->addcell(row, column, "TRUE"); // } else { // this->addcell(row, column, "FALSE"); // } //} elseif ((ord(_data[spos+6])==2) && (ord(_data[spos+12])==255) && (ord(_data[spos+13])==255)) { // //Error formula. Error code is in +2; //} elseif ((ord(_data[spos+6])==3) && (ord(_data[spos+12])==255) && (ord(_data[spos+13])==255)) { // //Formula result is a null string. // this->addcell(row, column, ''); //} else { // // result is a number, so first 14 bytes are just like a _NUMBER record // tmp = unpack("ddouble", substr(_data, spos + 6, 8)); // It machine machine dependent // if (this->isDate(spos)) { // numValue = tmp['double']; // } // else { // numValue = this->createNumber(spos); // } // info = this->_getCellDetails(spos,numValue,column); // this->addcell(row, column, info['string'], info); //} //break; case SPREADSHEET_EXCEL_READER_TYPE_BOOLERR: throw H2O.unimpl(); //row = ord(_data[spos]) | ord(_data[spos+1])<<8; //column = ord(_data[spos+2]) | ord(_data[spos+3])<<8; //string = ord(_data[spos+6]); //this->addcell(row, column, string); //break; case SPREADSHEET_EXCEL_READER_TYPE_STRING: throw H2O.unimpl(); //// http://code.google.com/p/php-excel-reader/issues/detail?id=4 //if (version == SPREADSHEET_EXCEL_READER_BIFF8){ // // Unicode 16 string, like an SST record // xpos = spos; // numChars =ord(_data[xpos]) | (ord(_data[xpos+1]) << 8); // xpos += 2; // optionFlags =ord(_data[xpos]); // xpos++; // asciiEncoding = ((optionFlags &0x01) == 0) ; // extendedString = ((optionFlags & 0x04) != 0); // // See if string contains formatting information // richString = ((optionFlags & 0x08) != 0); // if (richString) { // // Read in the crun // formattingRuns =ord(_data[xpos]) | (ord(_data[xpos+1]) << 8); // xpos += 2; // } // if (extendedString) { // // Read in cchExtRst // extendedRunLength =this->_GetInt4d(this->_data, xpos); // xpos += 4; // } // len = (asciiEncoding)?numChars : numChars*2; // retstr =substr(_data, xpos, len); // xpos += len; // retstr = (asciiEncoding)? 
retstr : this->_encodeUTF16(retstr); //} //elseif (version == SPREADSHEET_EXCEL_READER_BIFF7){ // // Simple byte string // xpos = spos; // numChars =ord(_data[xpos]) | (ord(_data[xpos+1]) << 8); // xpos += 2; // retstr =substr(_data, xpos, numChars); //} //this->addcell(previousRow, previousCol, retstr); //break; case SPREADSHEET_EXCEL_READER_TYPE_ROW: break; // While the original php file parsed the row info here, H2O just wants the _data case SPREADSHEET_EXCEL_READER_TYPE_DBCELL: break; case SPREADSHEET_EXCEL_READER_TYPE_LABEL: throw H2O.unimpl(); //row = ord(_data[spos]) | ord(_data[spos+1])<<8; //column = ord(_data[spos+2]) | ord(_data[spos+3])<<8; //this->addcell(row, column, substr(_data, spos + 8, ord(_data[spos + 6]) | ord(_data[spos + 7])<<8)); //break; case SPREADSHEET_EXCEL_READER_TYPE_EOF: // Push out the final row doRow(_currow+1); return true; case SPREADSHEET_EXCEL_READER_TYPE_HYPER: throw H2O.unimpl(); //// Only handle hyperlinks to a URL //row = ord(this->_data[spos]) | ord(this->_data[spos+1])<<8; //row2 = ord(this->_data[spos+2]) | ord(this->_data[spos+3])<<8; //column = ord(this->_data[spos+4]) | ord(this->_data[spos+5])<<8; //column2 = ord(this->_data[spos+6]) | ord(this->_data[spos+7])<<8; //linkData = Array(); //flags = ord(this->_data[spos + 28]); //udesc = ""; //ulink = ""; //uloc = 32; //linkData['flags'] = flags; //if ((flags & 1) > 0 ) { // is a type we understand // // is there a description ? // if ((flags & 0x14) == 0x14 ) { // has a description // uloc += 4; // descLen = ord(this->_data[spos + 32]) | ord(this->_data[spos + 33]) << 8; // udesc = substr(this->_data, spos + uloc, descLen * 2); // uloc += 2 * descLen; // } // ulink = this->read16bitstring(this->_data, spos + uloc + 20); // if (udesc == "") { // udesc = ulink; // } //} //linkData['desc'] = udesc; //linkData['link'] = this->_encodeUTF16(ulink); //for (r=row; r<=row2; r++) { // for (c=column; c<=column2; c++) { // this['cellsInfo'][r+1][c+1]['hyperlink'] = linkData; // } //} //break; case SPREADSHEET_EXCEL_READER_TYPE_DEFCOLWIDTH: break; // Set default column width case SPREADSHEET_EXCEL_READER_TYPE_STANDARDWIDTH: break; // While the original php file parsed the standard width here, H2O just wants the _data case SPREADSHEET_EXCEL_READER_TYPE_COLINFO: break; // While the original php file parsed the column info here, H2O just wants the _data default: break; } spos += length; } } } boolean isDate( Buf data, int spos ) { int xfindex = data.get2(spos+4); return _xfRecords.get(xfindex)._type == XF.Type.Date; } static double _GetIEEE754(long rknum) { double value; if( (rknum & 0x02) != 0) { value = rknum >> 2; } else { //mmp // I got my info on IEEE754 encoding from // http://research.microsoft.com/~hollasch/cgindex/coding/ieeefloat.html // The RK format calls for using only the most significant 30 bits of the // 64 bit floating point value. The other 34 bits are assumed to be 0 // So, we use the upper 30 bits of rknum as follows... int exp = (int)((rknum & 0x7ff00000L) >> 20); long mantissa = (0x100000 | (rknum & 0x000ffffc)); value = mantissa / Math.pow( 2 , (20- (exp - 1023))); if( ((rknum & 0x80000000) >> 31) != 0 ) value *= -1; //end of changes by mmp } if( (rknum & 0x01) != 0 ) value /= 100; return value; } // Ignore all encodings private String __encodeUTF16( String s ) { return s; } }
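The RK cell records handled above pack a number into 32 bits: bit 0 means "divide the result by 100", bit 1 means "the payload is a 30-bit integer", and otherwise the payload is the top 30 bits of an IEEE-754 double. A minimal standalone sketch of that decoding rule, for illustration only (the class and method names below are hypothetical and not part of the parser):

public class RkDecodeSketch {
  /** Decode a 32-bit Excel RK value: bit 0 = scaled by 100, bit 1 = integer payload,
   *  otherwise the upper 30 bits of an IEEE-754 double (per the reader's comments above). */
  static double decodeRk(long rknum) {
    double value;
    if ((rknum & 0x02) != 0) {
      value = rknum >> 2;                        // integer payload stored in the upper 30 bits
    } else {
      int exp = (int) ((rknum & 0x7ff00000L) >> 20);
      long mantissa = 0x100000 | (rknum & 0x000ffffc);
      value = mantissa / Math.pow(2, 20 - (exp - 1023));
      if (((rknum & 0x80000000L) >> 31) != 0) value = -value;
    }
    if ((rknum & 0x01) != 0) value /= 100;       // scaled-by-100 flag
    return value;
  }

  public static void main(String[] args) {
    System.out.println(decodeRk((100L << 2) | 0x02));   // integer RK -> 100.0
    System.out.println(decodeRk((12345L << 2) | 0x03)); // integer RK with /100 flag -> 123.45
  }
}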
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/parser/ZipUtil.java
package water.parser; import com.github.luben.zstd.ZstdInputStream; import water.DKV; import water.Iced; import water.Key; import water.exceptions.H2OIllegalArgumentException; import water.fvec.ByteVec; import water.fvec.FileVec; import water.fvec.Frame; import water.util.Log; import water.util.UnsafeUtils; import java.io.ByteArrayInputStream; import java.io.EOFException; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.Enumeration; import java.util.zip.*; import static water.fvec.FileVec.getPathForKey; public abstract class ZipUtil { public enum Compression { NONE, ZIP, GZIP, ZSTD } public static int ZSTD_MAGIC = 0xFD2FB528; /** * This method will attempt to read the few bytes off a file which will in turn be used * to guess what kind of parsers we should use to parse the file. * * @param bits * @return */ static byte[] getFirstUnzippedBytes(byte[] bits) { ZipUtil.Compression guessedCompression = guessCompressionMethod(bits); return unzipBytes(bits, guessedCompression, FileVec.DFLT_CHUNK_SIZE); } public static boolean isCompressed(ByteVec bv) { byte[] bits = bv.getFirstBytes(); ZipUtil.Compression guessedCompression = guessCompressionMethod(bits); return guessedCompression != Compression.NONE; } /** * This method check if the input argument is a zip directory containing files. * * @param key * @return true if bv is a zip directory containing files, false otherwise. */ static boolean isZipDirectory(Key key) { Iced ice = DKV.getGet(key); if (ice == null) throw new H2OIllegalArgumentException("Missing data", "Did not find any data under " + "key " + key); ByteVec bv = (ByteVec) (ice instanceof ByteVec ? ice : ((Frame) ice).vecs()[0]); return isZipDirectory(bv); } static boolean isZipDirectory(ByteVec bv) { byte[] bits = bv.getFirstBytes(); ZipUtil.Compression compressionMethod = guessCompressionMethod(bits); try { if (compressionMethod == Compression.ZIP) { ByteArrayInputStream bais = new ByteArrayInputStream(bits); ZipInputStream zis = new ZipInputStream(bais); ZipEntry ze = zis.getNextEntry(); // Get the *FIRST* entry boolean isDir = ze.isDirectory(); zis.close(); // There is at least one entry in zip file and it is not a directory. return isDir; } } catch (IOException e) { Log.err(e); } return false; } static ArrayList<String> getFileNames(ByteVec bv) { ArrayList<String> fileList = new ArrayList<String>(); if (bv instanceof FileVec) { String strPath = getPathForKey(((FileVec) bv)._key); try { ZipFile zipFile = new ZipFile(strPath); Enumeration<? extends ZipEntry> entries = zipFile.entries(); while (entries.hasMoreElements()) { ZipEntry entry = entries.nextElement(); if (!entry.isDirectory()) {// add file to list to parse if not a directory. fileList.add(entry.getName()); } } zipFile.close(); } catch (IOException e) { Log.err(e); } } return fileList; } /** * When a file is a zip file that contains multiple files, this method will return the decompression ratio. * * @param bv * @return */ static float getDecompressionRatio(ByteVec bv) { long totalSize = 0L; long totalCompSize = 0L; if (bv instanceof FileVec) { String strPath = getPathForKey(((FileVec) bv)._key); try { ZipFile zipFile = new ZipFile(strPath); Enumeration<? extends ZipEntry> entries = zipFile.entries(); while (entries.hasMoreElements()) { ZipEntry entry = entries.nextElement(); if (!entry.isDirectory()) {// add file to list to parse if not a directory. 
totalSize = totalSize + entry.getSize(); totalCompSize = totalCompSize + entry.getCompressedSize(); } } zipFile.close(); } catch (IOException e) { Log.err(e); } } if (totalCompSize == 0) // something is wrong. Return no compression. return 1; else return totalSize/totalCompSize; } static Compression guessCompressionMethod(byte [] bits) { // Look for ZIP magic if( bits.length > ZipFile.LOCHDR && UnsafeUtils.get4(bits, 0) == ZipFile.LOCSIG ) return Compression.ZIP; if( bits.length > 2 && (UnsafeUtils.get2(bits,0)&0xffff) == GZIPInputStream.GZIP_MAGIC ) return Compression.GZIP; if (bits.length >= 4 && UnsafeUtils.get4(bits, 0) == ZSTD_MAGIC) return Compression.ZSTD; return Compression.NONE; } static float decompressionRatio(ByteVec bv) { byte[] zips = bv.getFirstBytes(); ZipUtil.Compression cpr = ZipUtil.guessCompressionMethod(zips); if (cpr == Compression.NONE ) return 1; // no compression else if (cpr == Compression.ZIP) { ByteArrayInputStream bais = new ByteArrayInputStream(zips); ZipInputStream zis = new ZipInputStream(bais); ZipEntry ze = null; // Get the *FIRST* entry try { ze = zis.getNextEntry(); boolean isDir = ze.isDirectory(); if (isDir) { return getDecompressionRatio(bv); } else { byte[] bits = ZipUtil.unzipBytes(zips, cpr, FileVec.DFLT_CHUNK_SIZE); return bits.length / zips.length; } } catch (IOException e) { Log.err(e); } } else { byte[] bits = ZipUtil.unzipBytes(zips, cpr, FileVec.DFLT_CHUNK_SIZE); return bits.length / zips.length; } return 1; } static byte[] unzipBytes( byte[] bs, Compression cmp, int chkSize ) { if( cmp == Compression.NONE ) return bs; // No compression // Wrap the bytes in a stream ByteArrayInputStream bais = new ByteArrayInputStream(bs); InputStream is = null; try { if (cmp == Compression.ZIP) { ZipInputStream zis = new ZipInputStream(bais); ZipEntry ze = zis.getNextEntry(); // Get the *FIRST* entry // There is at least one entry in zip file and it is not a directory. if (ze == null || ze.isDirectory()) zis.getNextEntry(); // read the next entry which should be a file is = zis; } else if (cmp == Compression.ZSTD) { is = new ZstdInputStream(bais); } else { assert cmp == Compression.GZIP; is = new GZIPInputStream(bais); } // If reading from a compressed stream, estimate we can read 2x uncompressed bs = new byte[bs.length * 2]; // Now read from the compressed stream int off = 0; while (off < bs.length) { int len = is.read(bs, off, bs.length - off); if (len < 0) break; off += len; if (off == bs.length) { // Dataset is uncompressing alot! Need more space... if (bs.length >= chkSize) break; // Already got enough bs = Arrays.copyOf(bs, bs.length * 2); } } } catch (EOFException eof) { // EOF Exception happens for data with low compression factor (eg. DEFLATE method) // There is generally no way to avod this exception, we have to ignore it here Log.trace(eof); } catch( IOException ioe ) { throw Log.throwErr(ioe); } finally { try { if( is != null ) is.close(); } catch( IOException ignore ) { } } return bs; } /** * This method will read a compressed zip file and return the uncompressed bits so that we can * check the beginning of the file and make sure it does not contain the column names. 
* * @param bs * @param chkSize * @return */ static byte[] unzipForHeader( byte[] bs, int chkSize ) { ByteArrayInputStream bais = new ByteArrayInputStream(bs); ZipInputStream zis = new ZipInputStream(bais); InputStream is = zis; try { int off = 0; while( off < bs.length ) { int len = is.read(bs, off, bs.length - off); if (len < 0) break; off += len; if( off == bs.length ) { // Dataset is uncompressing alot! Need more space... if( bs.length >= chkSize ) break; // Already got enough bs = Arrays.copyOf(bs, bs.length * 2); } } } catch (IOException e) { Log.err(e); } try { is.close(); } catch (IOException e) { Log.err(e); } return bs; } }
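A minimal sketch of the sniff-then-inflate flow above: gzip a tiny CSV in memory, let the magic-number check identify it, and inflate it back. It assumes the sketch lives in the water.parser package (guessCompressionMethod and unzipBytes are package-private) and that the payload fits in one chunk; note the returned buffer may be zero-padded because unzipBytes over-allocates.

package water.parser;

import java.io.ByteArrayOutputStream;
import java.util.zip.GZIPOutputStream;
import water.fvec.FileVec;

public class ZipUtilSketch {
  public static void main(String[] args) throws Exception {
    // Gzip a small payload entirely in memory
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (GZIPOutputStream gos = new GZIPOutputStream(bos)) {
      gos.write("a,b,c\n1,2,3\n".getBytes());
    }
    byte[] compressed = bos.toByteArray();

    // Magic-number sniffing should report GZIP, and unzipBytes should inflate it back
    ZipUtil.Compression cmp = ZipUtil.guessCompressionMethod(compressed);
    byte[] plain = ZipUtil.unzipBytes(compressed, cmp, FileVec.DFLT_CHUNK_SIZE);
    System.out.println(cmp + " -> " + new String(plain).trim());
  }
}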
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/persist/EagerPersistBase.java
package water.persist; import water.H2O; import water.Key; import water.Value; import java.io.IOException; import java.net.URI; /** * Parent class for Persist implementations that load data eagerly * (data is loaded at import time, as opposed to parse time). */ public abstract class EagerPersistBase extends Persist { /* ********************************************* */ /* UNIMPLEMENTED methods (inspired by PersistS3) */ /* ********************************************* */ @Override public Key uriToKey(URI uri) { throw new UnsupportedOperationException(); } // Store Value v to disk. @Override public void store(Value v) { if( !v._key.home() ) return; throw H2O.unimpl(); // VA only } @Override public void delete(Value v) { throw H2O.unimpl(); } @Override public void cleanUp() { throw H2O.unimpl(); /* user-mode swapping not implemented */ } @Override public byte[] load(Value v) throws IOException { throw H2O.unimpl(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/persist/Persist.java
package water.persist; import java.io.*; import java.net.URI; import java.util.*; import water.*; import water.fvec.Vec; import water.util.Log; /** Abstract class describing various persistence targets. * <p><ul> * <li>{@link #store(Value v)} - Store a Value, using storage space.</li> * <li>{@link #load(Value v)} - Load a previously stored Value.</li> * <li>{@link #delete(Value v)} - Free storage from a previously store Value.</li> * </ul> * This class is used to implement both user-mode swapping, and the initial * load of files - typically raw text for parsing. */ public abstract class Persist { /** Store a Value into persistent storage, consuming some storage space. */ abstract public void store(Value v) throws IOException; /** Load a previously stored Value */ abstract public byte[] load(Value v) throws IOException; public byte[] load(Key k, long skip, int max) throws IOException { throw new UnsupportedOperationException( "Persist Backend " + this.getClass().getSimpleName() + " doesn't support direct data read."); } /** Reclaim space from a previously stored Value */ abstract public void delete(Value v); /** Usable storage space, or -1 for unknown */ public long getUsableSpace() { return /*UNKNOWN*/-1; } /** Total storage space, or -1 for unknown */ public long getTotalSpace() { return /*UNKNOWN*/-1; } /** Transform given uri into file vector holding file name. */ abstract public Key uriToKey(URI uri) throws IOException; /** Delete persistent storage on startup and shutdown */ abstract public void cleanUp(); /** * Calculate typeahead matches for src * * @param filter Source string to match for typeahead * @param limit Max number of entries to return * @return List of matches */ abstract public List<String> calcTypeaheadMatches(String filter, int limit); abstract public void importFiles(String path, String pattern, ArrayList<String> files, ArrayList<String> keys, ArrayList<String> fails, ArrayList<String> dels); // The filename can be either byte encoded if it starts with % followed by // a number, or is a normal key name with special characters encoded in // special ways. // It is questionable whether we need this because the only keys we have on // ice are likely to be Chunks static String getIceName(Value v) { return getIceName(v._key); } static String getIceName(Key k) { return getIceDirectory(k) + File.separator + key2Str(k); } static String getIceDirectory(Key key) { if( !key.isChunkKey() ) return "not_a_Chunk"; // Reverse Chunk key generation return key2Str(key.getVecKey()); } // Verify bijection of key/file-name mappings. protected static String key2Str(Key k) { String s = key2Str_impl(k); Key x; assert (x = str2Key_impl(s)).equals(k) : "bijection fail " + k + " <-> " + s + " <-> " + x; return s; } // Verify bijection of key/file-name mappings. 
static Key str2Key(String s) { Key k = str2Key_impl(s); assert key2Str_impl(k).equals(s) : "bijection fail " + s + " <-> " + k; return k; } // Convert a Key to a suitable filename string private static String key2Str_impl(Key k) { // check if we are system key StringBuilder sb = new StringBuilder(k._kb.length / 2 + 4); int i = 0; if( k._kb[0] < 32 ) { // System keys: hexalate all the leading non-ascii bytes sb.append('%'); int j = k._kb.length - 1; // Backwards scan for 1st non-ascii while( j >= 0 && k._kb[j] >= 32 && k._kb[j] < 128 ) j--; for( ; i <= j; i++ ) { byte b = k._kb[i]; int nib0 = ((b >>> 4) & 15) + '0'; if( nib0 > '9' ) nib0 += 'A' - 10 - '0'; int nib1 = ((b >>> 0) & 15) + '0'; if( nib1 > '9' ) nib1 += 'A' - 10 - '0'; sb.append((char) nib0).append((char) nib1); } sb.append('%'); } // Escape the special bytes from 'i' to the end return escapeBytes(k._kb, i, sb).toString(); } private static StringBuilder escapeBytes(byte[] bytes, int i, StringBuilder sb) { for( ; i < bytes.length; i++ ) { char b = (char)bytes[i], c=0; switch( b ) { case '%': c='%'; break; case '.': c='d'; break; case '/': c='s'; break; case ':': c='c'; break; case '"': c='q'; break; case '>': c='g'; break; case '<': c='l'; break; case '\\':c='b'; break; case '\0':c='z'; break; } if( c!=0 ) sb.append('%').append(c); else sb.append(b); } return sb; } // Convert a filename string to a Key private static Key str2Key_impl(String s) { String key = s; byte[] kb = new byte[(key.length() - 1) / 2]; int i = 0, j = 0; if( (key.length() > 2) && (key.charAt(0) == '%') && (key.charAt(1) >= '0') && (key.charAt(1) <= '9') ) { // Dehexalate until '%' for( i = 1; i < key.length(); i += 2 ) { if( key.charAt(i) == '%' ) break; char b0 = (char) (key.charAt(i ) - '0'); if( b0 > 9 ) b0 += '0' + 10 - 'A'; char b1 = (char) (key.charAt(i + 1) - '0'); if( b1 > 9 ) b1 += '0' + 10 - 'A'; kb[j++] = (byte) ((b0 << 4) | b1); // De-hexelated byte } i++; // Skip the trailing '%' } // a normal key - ASCII with special characters encoded after % sign for( ; i < key.length(); ++i ) { byte b = (byte) key.charAt(i); if( b == '%' ) { switch( key.charAt(++i) ) { case '%': b = '%'; break; case 'c': b = ':'; break; case 'd': b = '.'; break; case 'g': b = '>'; break; case 'l': b = '<'; break; case 'q': b = '"'; break; case 's': b = '/'; break; case 'b': b = '\\'; break; case 'z': b = '\0'; break; default: Log.warn("Invalid format of filename " + s + " at index " + i); } } if( j >= kb.length ) kb = Arrays.copyOf(kb, Math.max(2, j * 2)); kb[j++] = b; } // now in kb we have the key name return Key.make(Arrays.copyOf(kb, j)); } // ------------------------------- // Node Persistent Storage helpers // ------------------------------- public final static class PersistEntry implements Comparable<PersistEntry> { public PersistEntry(String name, long size, long timestamp) { _name = name; _size = size; _timestamp_millis = timestamp; } public final String _name; public final long _size; public final long _timestamp_millis; @Override public int compareTo(PersistEntry persistEntry) { return _name.compareTo(persistEntry._name); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; PersistEntry that = (PersistEntry) o; return _size == that._size && _timestamp_millis == that._timestamp_millis && _name.equals(that._name); } @Override public int hashCode() { return Objects.hash(_name, _size, _timestamp_millis); } } public String getHomeDirectory() { throw new RuntimeException("Not implemented"); } 
public PersistEntry[] list(String path) { throw new RuntimeException("Not implemented"); } public boolean exists(String path) { throw new RuntimeException("Not implemented"); } public String getParent(String path) { throw new RuntimeException("Not implemented"); } public boolean isDirectory(String path) { throw new RuntimeException("Not implemented"); } public long length(String path) { throw new RuntimeException("Not implemented"); } public InputStream open(String path) { throw new RuntimeException("Not implemented"); } /** * Creates a seekable Hadoop implementation of InputStream (FSDataInputStream) * * h2o-core doesn't depend on Hadoop libraries and can thus not declare the return type specifically * * @param path any H2O-3 allowed path * @return instance of FSDataInputStream */ public InputStream openSeekable(String path) { throw new RuntimeException("Not implemented"); } /** * Indicates whether this Persist backend can natively support Seekable InputStreams * * @return true, if openSeekable can be safely called */ public boolean isSeekableOpenSupported() { return false; } /** * Creates a Seekable InputStream for a given Vec * * @param vec any Vec in theory typically a ByteVec/FileVec * @return instance of FSDataInputStream */ public InputStream wrapSeekable(Vec vec) { throw new RuntimeException("Not implemented"); } public boolean mkdirs(String path) { throw new RuntimeException("Not implemented"); } public boolean rename(String fromPath, String toPath) { throw new RuntimeException("Not implemented"); } /** * Create a new file and return OutputStream for writing. * * The method creates all directories which does not exists on the * referenced path. * * @param path persist layer specific path * @param overwrite overwrite destination file * @return output stream * * @throws water.api.FSIOException in case of underlying FS error */ public OutputStream create(String path, boolean overwrite) { throw new RuntimeException("Not implemented"); } public boolean delete(String path) { throw new RuntimeException("Not implemented"); } /** Returns true if the persist layer understands given path. */ public boolean canHandle(String path) { throw new RuntimeException("Not implemented"); } }
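The key2Str / str2Key pair above round-trips key names through the local filesystem by escaping special bytes as two-character '%' codes. A tiny standalone restatement of just that escape table, for illustration only (it deliberately omits the hexalation of leading system-key bytes, and the class name is hypothetical):

public class KeyEscapeSketch {
  /** Escape the characters the filename mapping above treats specially:
   *  '%', '.', '/', ':', '"', '>', '<', '\\' and '\0'. */
  static String escape(String keyName) {
    StringBuilder sb = new StringBuilder();
    for (char b : keyName.toCharArray()) {
      char c = 0;
      switch (b) {
        case '%':  c = '%'; break;
        case '.':  c = 'd'; break;
        case '/':  c = 's'; break;
        case ':':  c = 'c'; break;
        case '"':  c = 'q'; break;
        case '>':  c = 'g'; break;
        case '<':  c = 'l'; break;
        case '\\': c = 'b'; break;
        case '\0': c = 'z'; break;
      }
      if (c != 0) sb.append('%').append(c); else sb.append(b);
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    System.out.println(escape("dir/file.csv"));  // prints dir%sfile%dcsv
  }
}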
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/persist/PersistEagerHTTP.java
package water.persist; import water.Key; import water.util.FrameUtils; import water.util.Log; import java.util.ArrayList; import java.util.Collections; import java.util.List; public class PersistEagerHTTP extends EagerPersistBase { @Override public void importFiles(String path, String pattern, /*OUT*/ ArrayList<String> files, ArrayList<String> keys, ArrayList<String> fails, ArrayList<String> dels) { try { Key destination_key = FrameUtils.eagerLoadFromHTTP(path); files.add(path); keys.add(destination_key.toString()); } catch (Exception e) { Log.err("Loading from `" + path + "` failed.", e); fails.add(path); } } @Override public List<String> calcTypeaheadMatches(String filter, int limit) { return Collections.emptyList(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/persist/PersistFS.java
package water.persist; import java.io.*; import java.net.URI; import java.util.ArrayList; import water.*; import water.api.FSIOException; import water.fvec.NFSFileVec; import water.util.Log; /** * Persistence backend using local file system. */ public final class PersistFS extends Persist { final File _root; final File _dir; PersistFS(File root) { _root = root; _dir = new File(root, "ice" + H2O.API_PORT); //deleteRecursive(_dir); // Make the directory as-needed root.mkdirs(); if( !(root.isDirectory() && root.canRead() && root.canWrite()) ) H2O.die("ice_root not a read/writable directory"); } public void cleanUp() { deleteRecursive(_dir); } private static void deleteRecursive(File path) { if( !path.exists() ) return; if( path.isDirectory() ) for (File f : path.listFiles()) deleteRecursive(f); path.delete(); } /** * Get destination file where value is stored * * @param v any value from K/V * @return location of file where value is/could be stored */ public File getFile(Value v) { return new File(_dir, getIceName(v)); } @Override public byte[] load(Value v) throws IOException { File f = getFile(v); if( f.length() < v._max ) { // Should be fully on disk... // or it's a racey delete of a spilled value assert !v.isPersisted() : f.length() + " " + v._max + " " + v._key; return null; // No value } try (FileInputStream s = new FileInputStream(f)) { AutoBuffer ab = new AutoBuffer(s.getChannel(), true, Value.ICE); byte[] b = ab.getA1(v._max); ab.close(); return b; } } // Store Value v to disk. @Override public void store(Value v) throws IOException { assert !v.isPersisted(); File dirs = new File(_dir, getIceDirectory(v._key)); if (!dirs.mkdirs() && !dirs.exists()) throw new java.io.IOException("mkdirs failed making " + dirs); try (FileOutputStream s = new FileOutputStream(getFile(v))) { byte[] m = v.memOrLoad(); // we are not single threaded anymore if (m != null && m.length != v._max) { Log.warn("Value size mismatch? 
" + v._key + " byte[].len=" + m.length + " v._max=" + v._max); v._max = m.length; // Implies update of underlying POJO, then re-serializing it without K/V storing it } new AutoBuffer(s.getChannel(), false, Value.ICE).putA1(m, m.length).close(); } catch (AutoBuffer.AutoBufferException abe) { throw abe._ioe; } } @Override public boolean delete(String path) { return new File(URI.create(path)).delete(); } @Override public void delete(Value v) { getFile(v).delete(); // Silently ignore errors // Attempt to delete empty containing directory new File(_dir, getIceDirectory(v._key)).delete(); } @Override public long getUsableSpace() { return _root.getUsableSpace(); } @Override public long getTotalSpace() { return _root.getTotalSpace(); } @Override public Key uriToKey(URI uri) { return NFSFileVec.make(new File(uri.toString()))._key; } @Override public ArrayList<String> calcTypeaheadMatches(String src, int limit) { assert false; return new ArrayList<>(); } @Override public void importFiles(String path, String pattern, ArrayList<String> files, ArrayList<String> keys, ArrayList<String> fails, ArrayList<String> dels) { assert false; } @Override public OutputStream create(String path, boolean overwrite) { File f; boolean windowsPath = path.matches("^[a-zA-Z]:.*$"); if (windowsPath) { f = new File(path); } else { f = new File(URI.create(path)); } if (f.exists() && !overwrite) throw new FSIOException(path, "File already exists"); try { if (!f.getParentFile().exists()) { // Shortcut since we know that this is local FS f.getParentFile().mkdirs(); } return new FileOutputStream(f, false); } catch (IOException e) { throw new FSIOException(path, e); } } @Override public PersistEntry[] list(String path) { File f = new File(URI.create(path)); if (f.isFile()) { return new PersistEntry[] { getPersistEntry(f) }; } else if (f.isDirectory()) { File[] files = f.listFiles(); PersistEntry[] entries = new PersistEntry[files.length]; for (int i = 0; i < files.length; i++) { entries[i] = getPersistEntry(files[i]); } return entries; } throw H2O.unimpl(); } @Override public InputStream open(String path) { try { File f = new File(URI.create(path)); return new FileInputStream(f); } catch (FileNotFoundException e) { throw new FSIOException(path, "File not found"); } catch (Exception e) { throw new FSIOException(path, e); } } @Override public boolean mkdirs(String path) { return new File(URI.create(path)).mkdirs(); } @Override public boolean exists(String path) { return new File(URI.create(path)).exists(); } @Override public String getParent(String path) { return new File(URI.create(path)).getParentFile().toURI().toString(); } @Override public boolean isDirectory(String path) { return new File(URI.create(path)).isDirectory(); } private PersistEntry getPersistEntry(File f) { return new PersistEntry(f.getName(), f.length(), f.lastModified()); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/persist/PersistH2O.java
package water.persist; import org.apache.commons.io.IOUtils; import water.Key; import water.util.FrameUtils; import water.util.Log; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.net.URL; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.stream.Collectors; public class PersistH2O extends EagerPersistBase { public static final String SCHEME = "h2o"; public static final String PREFIX = SCHEME + "://"; private static final String H2O_RESOURCE_PATH = "/extdata/"; private static final List<String> CONTENTS; static { List<String> contents = Collections.emptyList(); try { contents = readExtDataContents(); } catch (IOException e) { Log.trace(e); } CONTENTS = Collections.unmodifiableList(contents); } @Override public void importFiles(String path, String pattern, /*OUT*/ ArrayList<String> files, ArrayList<String> keys, ArrayList<String> fails, ArrayList<String> dels) { try { URL resourceURL = pathToURL(path); if (resourceURL == null) { fails.add(path); Log.err("Resource '" + path + "' is not available in H2O."); return; } Key<?> destination_key = FrameUtils.eagerLoadFromURL(path, resourceURL); files.add(path); keys.add(destination_key.toString()); } catch (Exception e) { Log.err("Loading from `" + path + "` failed.", e); fails.add(path); } } static URL pathToURL(String path) { String[] pathItems = path.split("://", 2); if (!SCHEME.equalsIgnoreCase(pathItems[0])) { throw new IllegalArgumentException("Path is expected to start with '" + PREFIX + "', got '" + path + "'."); } URL resource = PersistH2O.class.getResource(H2O_RESOURCE_PATH + pathItems[1]); if (resource == null) { resource = PersistH2O.class.getResource(H2O_RESOURCE_PATH + pathItems[1] + ".csv"); } return resource; } @Override public List<String> calcTypeaheadMatches(String filter, int limit) { return CONTENTS.stream() .filter(it -> it.startsWith(filter)) .collect(Collectors.toList()); } static List<String> readExtDataContents() throws IOException { List<String> contents = new ArrayList<>(); InputStream is = PersistH2O.class.getResourceAsStream("/extdata.list"); if (is == null) { return Collections.emptyList(); } try (BufferedReader reader = new BufferedReader(new InputStreamReader(is))) { String line; while ((line = reader.readLine()) != null) contents.add(PREFIX + line); } finally { IOUtils.closeQuietly(is); } return contents; } }
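Because the h2o:// resource list is built from a classpath resource at class-load time, typeahead over it should not need a running cluster. A small sketch, assuming h2o-core (and its commons-io dependency) is on the classpath and that water.util.Log can be used outside a cluster; the printed list may be empty if no /extdata.list resource is bundled.

import java.util.List;
import water.persist.PersistH2O;

public class H2OResourcesSketch {
  public static void main(String[] args) {
    // Typeahead over the built-in h2o:// resources; entries have the form h2o://<name>
    List<String> matches = new PersistH2O().calcTypeaheadMatches(PersistH2O.PREFIX, 10);
    matches.forEach(System.out::println);
  }
}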
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/persist/PersistHex.java
package water.persist; import water.DKV; import water.H2O; import water.Key; import water.Value; import water.fvec.C1NChunk; import java.io.*; import java.net.URI; import java.util.ArrayList; import java.util.List; import java.util.Objects; public class PersistHex extends Persist { static final String HEX_PATH_PREFIX = PersistManager.Schemes.HEX + "://"; Key<?> fromHexPath(String path) { Key<?> key = Key.make(path.substring(HEX_PATH_PREFIX.length())); if (! key.isChunkKey()) { throw new IllegalArgumentException("Only Chunk keys are supported for HEX schema"); } return key; } InputStream open(Key<?> key) { Objects.requireNonNull(key); byte[] bytes = ((C1NChunk) DKV.getGet(key)).getBytes(); return new ByteArrayInputStream(bytes); } @Override public InputStream open(String path) { return open(fromHexPath(path)); } @Override public OutputStream create(String path, boolean overwrite) { Key<?> ck = fromHexPath(path); return new ByteChunkOutputStream(ck); } private static class ByteChunkOutputStream extends ByteArrayOutputStream { private final Key<?> _chunkKey; public ByteChunkOutputStream(Key<?> chunkKey) { super(); _chunkKey = chunkKey; } @Override public void close() throws IOException { super.close(); byte[] myBytes = toByteArray(); DKV.put(_chunkKey, new Value(_chunkKey, new C1NChunk(myBytes))); } } /* ********************************************* */ /* UNIMPLEMENTED methods (inspired by PersistS3) */ /* ********************************************* */ @Override public List<String> calcTypeaheadMatches(String filter, int limit) { throw H2O.unimpl(); } @Override public void importFiles(String path, String pattern, ArrayList<String> files, ArrayList<String> keys, ArrayList<String> fails, ArrayList<String> dels) { throw H2O.unimpl(); } @Override public Key uriToKey(URI uri) { throw H2O.unimpl(); } // Store Value v to disk. @Override public void store(Value v) { if( !v._key.home() ) return; throw H2O.unimpl(); // VA only } @Override public void delete(Value v) { throw H2O.unimpl(); } @Override public void cleanUp() { throw H2O.unimpl(); } @Override public byte[] load(Value v) { throw H2O.unimpl(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/persist/PersistManager.java
package water.persist; import water.*; import water.api.FSIOException; import water.api.HDFSIOException; import water.exceptions.H2OIllegalArgumentException; import water.fvec.FileVec; import water.fvec.Vec; import water.parser.BufferedString; import water.util.FileUtils; import water.util.Log; import water.persist.Persist.PersistEntry; import water.util.fp.Function2; import java.io.*; import java.net.HttpURLConnection; import java.net.URI; import java.net.URL; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicLong; import java.util.regex.Pattern; import java.util.regex.Matcher; import static water.H2O.OptArgs.SYSTEM_PROP_PREFIX; /** * One true persistence manager which hides the implementations from H2O. * In particular, HDFS support or S3 support may or may not exist depending * on what is on the classpath. */ public class PersistManager { public static final int MAX_BACKENDS = 8; public static final int VALUE_DRIVE = MAX_BACKENDS; /** Property which enable HDFS as default fallback persistent layer. For example, * if swift fs is regirestered properly under HDFS and user specifies swift based URI, the persist * layer forwards the request through HDFS API. */ private static final String PROP_ENABLE_HDFS_FALLBACK = SYSTEM_PROP_PREFIX + "persist.enable.hdfs.fallback"; private static final String PROP_FORCE_HDFS_FOR_S3 = SYSTEM_PROP_PREFIX + "persist.enable.hdfs.for.s3"; /** Persistence schemes; used as file prefixes eg "hdfs://some_hdfs_path/some_file" */ public interface Schemes { String FILE = "file"; String HDFS = "hdfs"; String S3 = "s3"; String S3N = "s3n"; String S3A = "s3a"; String GCS = "gs"; String NFS = "nfs"; String HEX = "hex"; } public static class PersistStatsEntry { public PersistStatsEntry() { store_count = new AtomicLong(); store_bytes = new AtomicLong(); delete_count = new AtomicLong(); load_count = new AtomicLong(); load_bytes = new AtomicLong(); } public AtomicLong store_count; public AtomicLong store_bytes; public AtomicLong delete_count; public AtomicLong load_count; public AtomicLong load_bytes; } private Persist[] I; private PersistHex HEX = new PersistHex(); // not part of I because it cannot be a backend for DKV private PersistH2O persistH2O = new PersistH2O(); private PersistStatsEntry[] stats; public PersistStatsEntry[] getStats() { return stats; } boolean isS3Path(String path) { String s = path.toLowerCase(); return s.startsWith("s3:"); } public boolean isHdfsPath(String path) { if (isS3Path(path) && !forceHdfsForS3()) { return false; // S3 will only be handled by HDFS Persist if it is enabled } String s = path.toLowerCase(); return s.startsWith("hdfs:") || s.startsWith("s3:") || s.startsWith("s3n:") || s.startsWith("s3a:") || s.startsWith("maprfs:") || useHdfsAsFallback() && I[Value.HDFS] != null && I[Value.HDFS].canHandle(path); } private void validateHdfsConfigured() { if (hdfsNotConfigured()) { throw new H2OIllegalArgumentException("HDFS and S3A support is not configured"); } } private boolean hdfsNotConfigured() { return I[Value.HDFS] == null; } public boolean isGcsPath(String path) { return path.toLowerCase().startsWith("gs://"); } public boolean isHexPath(String path) { return path.toLowerCase().startsWith(Schemes.HEX + "://"); } public String toHexPath(Key<?> key) { if (! 
key.isChunkKey()) { throw new IllegalArgumentException("Only Chunk keys are supported for HEX schema"); } return PersistHex.HEX_PATH_PREFIX + key; } public PersistManager(URI iceRoot) { I = new Persist[MAX_BACKENDS + 1]; stats = new PersistStatsEntry[I.length]; for (int i = 0; i < stats.length; i++) { stats[i] = new PersistStatsEntry(); } if (iceRoot == null) { Log.err("ice_root must be specified. Exiting."); H2O.exit(1); } Persist ice = null; boolean windowsPath = iceRoot.toString().matches("^[a-zA-Z]:.*"); if (windowsPath) { ice = new PersistFS(new File(iceRoot.toString())); } else if ((iceRoot.getScheme() == null) || Schemes.FILE.equals(iceRoot.getScheme())) { ice = new PersistFS(new File(iceRoot.getPath())); } else if( Schemes.HDFS.equals(iceRoot.getScheme()) ) { Log.err("HDFS ice_root not yet supported. Exiting."); H2O.exit(1); // I am not sure anyone actually ever does this. // H2O on Hadoop launches use local disk for ice root. // This has a chance to work, but turn if off until it gets tested. // // try { // Class klass = Class.forName("water.persist.PersistHdfs"); // java.lang.reflect.Constructor constructor = klass.getConstructor(new Class[]{URI.class}); // ice = (Persist) constructor.newInstance(iceRoot); // } catch (Exception e) { // Log.err("Could not initialize HDFS"); // throw new RuntimeException(e); // } } I[Value.ICE] = ice; I[Value.NFS] = new PersistNFS(); try { Class klass = Class.forName("water.persist.PersistHTTP"); java.lang.reflect.Constructor constructor = klass.getConstructor(); I[Value.HTTP] = (Persist) constructor.newInstance(); Log.info("Subsystem for distributed import from HTTP/HTTPS successfully initialized"); } catch (Throwable ignore) { I[Value.HTTP] = new PersistEagerHTTP(); Log.info("Distributed HTTP import not available (import from HTTP/HTTPS will be eager)"); } try { Class klass = Class.forName("water.persist.PersistHdfs"); java.lang.reflect.Constructor constructor = klass.getConstructor(); I[Value.HDFS] = (Persist) constructor.newInstance(); Log.info("HDFS subsystem successfully initialized"); } catch (Throwable ignore) { Log.info("HDFS subsystem not available"); } try { Class klass = Class.forName("water.persist.PersistS3"); java.lang.reflect.Constructor constructor = klass.getConstructor(); I[Value.S3] = (Persist) constructor.newInstance(); Log.info("S3 subsystem successfully initialized"); } catch (Throwable ignore) { Log.info("S3 subsystem not available"); } try { Class klass = Class.forName("water.persist.PersistGcs"); java.lang.reflect.Constructor constructor = klass.getConstructor(); I[Value.GCS] = (Persist) constructor.newInstance(); Log.info("GCS subsystem successfully initialized"); } catch (Throwable ignore) { Log.info("GCS subsystem not available"); } try { Class<?> klass = Class.forName("water.persist.PersistDrive"); java.lang.reflect.Constructor<?> constructor = klass.getConstructor(); I[VALUE_DRIVE] = (Persist) constructor.newInstance(); Log.info("Drive subsystem successfully initialized"); } catch (Throwable ignore) { Log.info("Drive subsystem not available"); } } public void store(int backend, Value v) throws IOException { stats[backend].store_count.incrementAndGet(); I[backend].store(v); } public void delete(int backend, Value v) { stats[backend].delete_count.incrementAndGet(); I[backend].delete(v); } public byte[] load(int backend, Value v) throws IOException { stats[backend].load_count.incrementAndGet(); byte[] arr = I[backend].load(v); stats[backend].load_bytes.addAndGet(arr.length); return arr; } public byte[] load(int backend, 
Key k, long skip, int max) throws IOException { stats[backend].load_count.incrementAndGet(); byte[] arr = I[backend].load(k, skip, max); stats[backend].load_bytes.addAndGet(arr.length); return arr; } /** Get the current Persist flavor for user-mode swapping. */ public Persist getIce() { return I[Value.ICE]; } /** Convert given URI into a specific H2O key representation. * * The representation depends on persistent backend, since it will * deduce file location from the key content. * * The method will look at scheme of URI and based on it, it will * ask a backend to provide a conversion to a key (i.e., URI with scheme * 'hdfs' will be forwared to HDFS backend). * * @param uri file location * @return a key encoding URI * @throws IOException in the case of uri conversion problem * @throws water.exceptions.H2OIllegalArgumentException in case of unsupported scheme */ public final Key anyURIToKey(URI uri) throws IOException { Key ikey; String scheme = uri.getScheme(); if ("s3".equals(scheme)) { ikey = I[Value.S3].uriToKey(uri); } else if ("hdfs".equals(scheme)) { ikey = I[Value.HDFS].uriToKey(uri); } else if ("s3n".equals(scheme) || "s3a".equals(scheme)) { ikey = I[Value.HDFS].uriToKey(uri); } else if ("gs".equals(scheme)) { ikey = I[Value.GCS].uriToKey(uri); } else if ("file".equals(scheme) || scheme == null) { ikey = I[Value.NFS].uriToKey(uri); } else if (useHdfsAsFallback() && I[Value.HDFS].canHandle(uri.toString())) { ikey = I[Value.HDFS].uriToKey(uri); } else { throw new H2OIllegalArgumentException("Unsupported schema '" + scheme + "' for given uri " + uri); } return ikey; } private static boolean httpUrlExists(String URLName){ try { HttpURLConnection con = (HttpURLConnection) new URL(URLName).openConnection(); con.setInstanceFollowRedirects(false); con.setRequestMethod("HEAD"); return (con.getResponseCode() == HttpURLConnection.HTTP_OK); } catch (Exception e) { return false; } } /** * Calculate typeahead matches for src * * @param filter Source string to match for typeahead * @param limit Max number of entries to return * @return List of matches */ public List<String> calcTypeaheadMatches(String filter, int limit) { filter = filter.trim(); if (filter.isEmpty()) { return Collections.emptyList(); } String s = filter.toLowerCase(); if (s.startsWith("http:") || s.startsWith("https:")) { if (httpUrlExists(filter)) { ArrayList<String> arrayList = new ArrayList<>(); arrayList.add(filter); return arrayList; } else { return new ArrayList<>(); } } else if(s.startsWith(PersistH2O.PREFIX)) { return persistH2O.calcTypeaheadMatches(filter, limit); } else if(s.startsWith("s3://")) { return I[Value.S3].calcTypeaheadMatches(filter, limit); } else if(s.startsWith("gs://")) { return I[Value.GCS].calcTypeaheadMatches(filter, limit); } else if(s.startsWith("drive://")) { return I[VALUE_DRIVE].calcTypeaheadMatches(filter, limit); } else if (s.startsWith("hdfs:") || s.startsWith("s3n:") || s.startsWith("s3a:") || s.startsWith("maprfs:") || useHdfsAsFallback() && I[Value.HDFS] != null && I[Value.HDFS].canHandle(s)) { if (I[Value.HDFS] == null) { throw new H2OIllegalArgumentException("HDFS, S3, S3N, and S3A support is not configured"); } return I[Value.HDFS].calcTypeaheadMatches(filter, limit); } return I[Value.NFS].calcTypeaheadMatches(filter, limit); } public void importFiles(String[] paths, String pattern, ArrayList<String> files, ArrayList<String> keys, ArrayList<String> fails, ArrayList<String> dels) { if (paths.length == 1) { importFiles(paths[0], pattern, files, keys, fails, dels); return; } ImportFilesTask 
importFilesTask = new ImportFilesTask(paths, pattern); H2O.submitTask(new LocalMR(importFilesTask, paths.length)).join(); ImportFilesTask.addAllTo(importFilesTask._pFiles, files); ImportFilesTask.addAllTo(importFilesTask._pKeys, keys); ImportFilesTask.addAllTo(importFilesTask._pFails, fails); ImportFilesTask.addAllTo(importFilesTask._pDels, dels); } private static class ImportFilesTask extends MrFun<ImportFilesTask> { private final String[] _paths; private final String _pattern; BufferedString[][] _pFiles; BufferedString[][] _pKeys; BufferedString[][] _pFails; BufferedString[][] _pDels; public ImportFilesTask(String[] paths, String pattern) { _paths = paths; _pattern = pattern; _pFiles = new BufferedString[paths.length][]; _pKeys = new BufferedString[paths.length][]; _pFails = new BufferedString[paths.length][]; _pDels = new BufferedString[paths.length][]; } @Override protected void map(int t) { ArrayList<String> pFiles = new ArrayList<>(); ArrayList<String> pKeys = new ArrayList<>(); ArrayList<String> pFails = new ArrayList<>(); ArrayList<String> pDels = new ArrayList<>(); H2O.getPM().importFiles(_paths[t], _pattern, pFiles, pKeys, pFails, pDels); _pFiles[t] = toArray(pFiles); _pKeys[t] = toArray(pKeys); _pFails[t] = toArray(pFails); _pDels[t] = toArray(pDels); } private static BufferedString[] toArray(List<String> ls) { BufferedString[] bss = new BufferedString[ls.size()]; int i = 0; for (String s : ls) { bss[i++] = new BufferedString(s); } return bss; } private static void addAllTo(BufferedString[][] bssAry, ArrayList<String> target) { for (BufferedString[] bss : bssAry) { for (BufferedString bs : bss) target.add(bs.toString()); } } } /** * From a path produce a list of files and keys for parsing. * * Use as follows: * * ArrayList<String> files = new ArrayList(); * ArrayList<String> keys = new ArrayList(); * ArrayList<String> fails = new ArrayList(); * ArrayList<String> dels = new ArrayList(); * importFiles(importFiles.path, files, keys, fails, dels); * * @param path (Input) Path to import data from * @param pattern (Input) Regex pattern to match files by * @param files (Output) List of files found * @param keys (Output) List of keys corresponding to files * @param fails (Output) List of failed files which mismatch among nodes * @param dels (Output) I don't know what this is */ public void importFiles(String path, String pattern, ArrayList<String> files, ArrayList<String> keys, ArrayList<String> fails, ArrayList<String> dels) { URI uri = FileUtils.getURI(path); String scheme = uri.getScheme(); if (scheme == null || "file".equals(scheme)) { I[Value.NFS].importFiles(path, pattern, files, keys, fails, dels); } else if ("http".equals(scheme) || "https".equals(scheme)) { I[Value.HTTP].importFiles(path, pattern, files, keys, fails, dels); } else if (PersistH2O.SCHEME.equals(scheme)) { persistH2O.importFiles(path, pattern, files, keys, fails, dels); } else if ("s3".equals(scheme)) { if (I[Value.S3] == null) throw new H2OIllegalArgumentException("S3 support is not configured"); I[Value.S3].importFiles(path, pattern, files, keys, fails, dels); } else if ("gs".equals(scheme)) { if (I[Value.GCS] == null) throw new H2OIllegalArgumentException("GCS support is not configured"); I[Value.GCS].importFiles(path, pattern, files, keys, fails, dels); } else if ("drive".equals(scheme)) { if (I[VALUE_DRIVE] == null) throw new H2OIllegalArgumentException("Drive support is not configured"); I[VALUE_DRIVE].importFiles(path, pattern, files, keys, fails, dels); } else if ("hdfs".equals(scheme) || 
"s3n:".equals(scheme) || "s3a:".equals(scheme) || "maprfs:".equals(scheme) || (useHdfsAsFallback() && I[Value.HDFS] != null && I[Value.HDFS].canHandle(path))) { if (I[Value.HDFS] == null) throw new H2OIllegalArgumentException("HDFS, S3N, and S3A support is not configured"); I[Value.HDFS].importFiles(path, pattern, files, keys, fails, dels); } if (pattern != null && !pattern.isEmpty()) filterFiles((prefix, elements) -> matchPattern(prefix, elements, pattern), path, files, keys, fails); filterMetadataFiles(path, files, keys, fails); } private void filterMetadataFiles(String path, ArrayList<String> files, ArrayList<String> keys, ArrayList<String> fails) { filterFiles(new DeltaLakeMetadataFilter(), path, files, keys, fails); } static class DeltaLakeMetadataFilter implements Function2<String, ArrayList<String>, ArrayList<String>> { private static final String DELTA_LOG_DIRNAME = "_delta_log"; @Override public ArrayList<String> apply(String unused, ArrayList<String> ids) { ArrayList<String> filteredIds = new ArrayList<>(ids.size()); Exception firstFailure = null; int failureCount = 0; for (String id : ids) { try { URI uri = URI.create(id); String path = uri.getPath(); if (path != null) { String[] segments = path.split("/"); if (segments.length > 1 && DELTA_LOG_DIRNAME.equalsIgnoreCase(segments[segments.length - 2])) continue; } } catch (Exception e) { failureCount++; firstFailure = firstFailure == null ? e : firstFailure; Log.trace("Cannot create uri", e); } filteredIds.add(id); } if (firstFailure != null) { Log.warn("There were " + failureCount + " failures during file filtering (only the first one logged)", firstFailure); } return filteredIds; } } private void filterFiles(Function2<String, ArrayList<String>, ArrayList<String>> matcher, String path, ArrayList<String> files, ArrayList<String> keys, ArrayList<String> fails) { files.retainAll(matcher.apply(path, files)); //New files ArrayList after matching pattern of choice List<String> retainKeys = matcher.apply(path, keys); if (retainKeys.size() != keys.size()) { Futures fs = new Futures(); @SuppressWarnings("unchecked") List<String> removed = ((List<String>) keys.clone()); removed.removeAll(retainKeys); for (String r : removed) Keyed.remove(Key.make(r), fs, true); fs.blockForPending(); keys.retainAll(retainKeys); //New keys ArrayList after matching pattern of choice } //New fails ArrayList after matching pattern of choice. 
Only show failures that match pattern if (!fails.isEmpty()) { fails.retainAll(matcher.apply(path, fails)); } } // ------------------------------- // Node Persistent Storage helpers // ------------------------------- // Reads public String getHdfsHomeDirectory() { if (I[Value.HDFS] == null) { return null; } return I[Value.HDFS].getHomeDirectory(); } public PersistEntry[] list(String path) { if (isHdfsPath(path)) { validateHdfsConfigured(); PersistEntry[] arr = I[Value.HDFS].list(path); return arr; } else if (isGcsPath(path)) { return I[Value.GCS].list(path); } else if (isS3Path(path)) { return I[Value.S3].list(path); } File dir = new File(path); File[] files = dir.listFiles(); if (files == null) { return new PersistEntry[0]; } ArrayList<PersistEntry> arr = new ArrayList<>(); for (File f : files) { PersistEntry entry = new PersistEntry(f.getName(), f.length(), f.lastModified()); arr.add(entry); } return arr.toArray(new PersistEntry[arr.size()]); } public boolean exists(String path) { if (isHdfsPath(path)) { validateHdfsConfigured(); boolean b = I[Value.HDFS].exists(path); return b; } else if (isGcsPath(path)) { return I[Value.GCS].exists(path); } else if (isS3Path(path)) { return I[Value.S3].exists(path); } File f = new File(path); return f.exists(); } /** * Checks whether a given path is either an empty directory or it doesn't yet exist. * This is trivial if the filesystem where the path leads is distributed. * If we are working with a local filesystem we need to make sure that this property * is satisfied on all the nodes. * @param path path we want to check * @return true the path is an empty or non-existent directory everywhere, false otherwise */ public boolean isEmptyDirectoryAllNodes(final String path) { if (isHdfsPath(path)) { validateHdfsConfigured(); if (! I[Value.HDFS].exists(path)) return true; if (! I[Value.HDFS].isDirectory(path)) return false; PersistEntry[] content = I[Value.HDFS].list(path); return (content == null) || (content.length == 0); } else if (isS3Path(path)) { PersistEntry[] content = I[Value.S3].list(path); return content.length == 0; } return new CheckLocalDirTask(path).doAllNodes()._result; } /** * Check whether given path represents a writable directory. If such path does not exist * it will try to create the directory and if successful it is safe to assume that such * directory is writable. * @param path Path to check if a writable directory * @return true if given path is a writable directory, false otherwise */ public boolean isWritableDirectory(String path) { URI pathAsUri = FileUtils.getURI(path); Persist persist = getPersistForURI(pathAsUri); String pathUriStr = pathAsUri.toString(); if (persist.isDirectory(pathUriStr)) { return isDirectoryWritable(persist, path); } else if (persist.exists(pathUriStr)) { // exists but its not a directory return false; } else { String existingParent = getExistingParent(persist, pathUriStr); if (existingParent != null) { return isDirectoryWritable(persist, existingParent); } else { return false; } } } /* Check that a directory is writable by creating and deleting a file. 
*/ private boolean isDirectoryWritable(Persist persist, String path) { OutputStream os = null; try { String testFileUriStr = FileUtils.getURI(path + "/.h2oWriteCheck").toString(); os = persist.create(testFileUriStr, true); os.close(); persist.delete(testFileUriStr); return true; } catch (IOException | HDFSIOException | FSIOException e) { return false; } finally { FileUtils.close(os); } } private String getExistingParent(Persist persist, String path) { String parent = persist.getParent(path); if (parent == null) { return null; } else if (persist.exists(parent)) { return parent; } else { return getExistingParent(persist, parent); } } private static class CheckLocalDirTask extends MRTask<CheckLocalDirTask> { String _path; // OUT boolean _result; CheckLocalDirTask(String _path) { this._path = _path; } @Override public void reduce(CheckLocalDirTask mrt) { _result = _result && mrt._result; } @Override protected void setupLocal() { File f = new File(_path); if (! f.exists()) _result = true; else if (f.isDirectory()) { File[] content = f.listFiles(); _result = (content != null) && (content.length == 0); } else _result = false; } } public long length(String path) { if (isHdfsPath(path)) { validateHdfsConfigured(); return I[Value.HDFS].length(path); } else if (isGcsPath(path)) { return I[Value.GCS].length(path); } else if (isS3Path(path)) { return I[Value.S3].length(path); } File f = new File(path); if (! f.exists()) { throw new IllegalArgumentException("File not found (" + path + ")"); } return f.length(); } public InputStream open(String path) { if (isHdfsPath(path)) { validateHdfsConfigured(); return I[Value.HDFS].open(path); } else if (isGcsPath(path)) { return I[Value.GCS].open(path); } else if (isHexPath(path)) { return HEX.open(path); } else if (isS3Path(path)) { return I[Value.S3].open(path); } try { File f = new File(path); return new FileInputStream(f); } catch (FileNotFoundException e) { throw new IllegalArgumentException("File not found (" + path + ")"); } catch (Exception e) { throw new RuntimeException(e); } } /** * Opens a {@link Vec} in seekable implementation of an {@link InputStream}. * * @param vec An instance of {@link Vec} to open * @return A seekable instanceo of {@link InputStream}, never null. * @throws IOException When the underlying resource does not allow seekable resource creation and all fallback solutions * failed. Or on reading error. */ public InputStream openSeekable(final Vec vec) throws IOException { if (vec instanceof FileVec) { FileVec fileVec = (FileVec) vec; final String path = fileVec.getPath(); final Persist p = I[fileVec.getBackend()]; if (p != null) { if (p.isSeekableOpenSupported()) { return p.openSeekable(path); } } else if (isHdfsPath(path)) { validateHdfsConfigured(); return I[Value.HDFS].openSeekable(path); } } // fallback if (hdfsNotConfigured()) { throw new IllegalArgumentException(String.format( "Failed to open Vec '%s' for reading. " + "Persistence backend doesn't provide implementation of a Seekable InputStream and HDFS fallback is not available.", vec._key)); } Log.debug("Persist doesn't provide openSeekable. 
Falling back to HDFS wrapper (VecDataInputStream)."); return I[Value.HDFS].wrapSeekable(vec); } // Writes public boolean mkdirs(String path) { if (isHdfsPath(path)) { validateHdfsConfigured(); boolean b = I[Value.HDFS].mkdirs(path); return b; } else if (isGcsPath(path)){ return I[Value.GCS].mkdirs(path); } else if (isS3Path(path)) { return I[Value.S3].mkdirs(path); } File f = new File(path); boolean b = f.mkdirs(); return b; } public boolean rename(String fromPath, String toPath) { if (isHdfsPath(fromPath) || isHdfsPath(toPath)) { validateHdfsConfigured(); boolean b = I[Value.HDFS].rename(fromPath, toPath); return b; } if (isGcsPath(fromPath) || isGcsPath(toPath)) { return I[Value.GCS].rename(fromPath, toPath); } if (isS3Path(fromPath) || isS3Path(toPath)) { return I[Value.S3].rename(fromPath, toPath); } File f = new File(fromPath); File t = new File(toPath); boolean b = f.renameTo(t); return b; } public OutputStream create(String path, boolean overwrite) { if (isHdfsPath(path)) { validateHdfsConfigured(); return I[Value.HDFS].create(path, overwrite); } else if (isGcsPath(path)) { return I[Value.GCS].create(path, overwrite); } else if (isHexPath(path)) { return HEX.create(path, overwrite); } if (isS3Path(path)) { return I[Value.S3].create(path, overwrite); } try { if (! overwrite) { File f = new File(path); if (f.exists()) { throw new IllegalArgumentException("File already exists (" + path + ")"); } } return new BufferedOutputStream(new FileOutputStream(path)); } catch (Exception e) { throw new RuntimeException(e); } } public boolean delete(String path) { if (isHdfsPath(path)) { validateHdfsConfigured(); boolean b = I[Value.HDFS].delete(path); return b; } else if (isGcsPath(path)) { return I[Value.GCS].delete(path); } else if (isS3Path(path)) { return I[Value.S3].delete(path); } File f = new File(path); boolean b = f.delete(); return b; } public Persist getPersistForURI(URI uri) { String scheme = uri.getScheme(); boolean windowsPath = scheme.matches("^[a-zA-Z]$"); if (windowsPath) { return I[Value.ICE]; } if (scheme != null) { switch (scheme) { case Schemes.FILE: return I[Value.ICE]; // Local FS case Schemes.HDFS: case Schemes.S3N: case Schemes.S3A: return I[Value.HDFS]; case Schemes.S3: return I[Value.S3]; case Schemes.GCS: return I[Value.GCS]; default: if (useHdfsAsFallback() && I[Value.HDFS] != null && I[Value.HDFS].canHandle(uri.toString())) { return I[Value.HDFS]; } else { throw new IllegalArgumentException("Cannot find persist manager for scheme " + scheme); } } } else { return I[Value.ICE]; } } /** * Returns true when path matches file_deny_glob input argument * * @param path path to a file * @return boolean */ public boolean isFileAccessDenied(String path) { if (isHdfsPath(path) || isGcsPath(path) || isS3Path(path)) { return false; } File f = new File(FileUtils.getURI(path)); return H2O.ARGS.file_deny_glob.matches(f.toPath().normalize()); } /** * Finds all entries in the list that matches the regex * @param prefix The substring to extract before pattern matching * @param fileList The list of strings to check * @param matchStr The regular expression to use on the string after prefix * @return list containing the matching entries */ private ArrayList<String> matchPattern(String prefix, ArrayList<String> fileList, String matchStr){ ArrayList<String> result = new ArrayList<>(); Pattern pattern = Pattern.compile(matchStr); for (String s : fileList) { Matcher matcher = pattern.matcher(afterPrefix(s,prefix)); if (matcher.find()) { result.add(s); } } return result; } /** * Returns the part 
of the string that occurs after the last occurrence of the substring * @param wholeString A string that needs to be subsetted * @param substring The substring to extract * @return string after substring */ private static String afterPrefix(String wholeString, String substring) { // Returns a substring containing all characters after a string. int posSubstring = wholeString.lastIndexOf(substring); if (posSubstring == -1) { return ""; } int adjustedPosSubstring = posSubstring + substring.length(); if (adjustedPosSubstring >= wholeString.length()) { return ""; } return wholeString.substring(adjustedPosSubstring); } /** Should HDFS persist layer be used as default persist layer * for unknown URL schema. * @return true if HDFS should handle unknown URL schema. */ static boolean useHdfsAsFallback() { return System.getProperty(PROP_ENABLE_HDFS_FALLBACK, "true").equals("true"); } static boolean forceHdfsForS3() { return Boolean.parseBoolean(System.getProperty(PROP_FORCE_HDFS_FOR_S3, "false")); } }
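Not part of the original file: the import filter above applies matchPattern together with afterPrefix, so the import path is stripped from each entry before the regular expression is tested and a pattern only has to describe the part of the name after that prefix. A minimal standalone sketch of that behaviour follows; the paths, the /data/imports/ prefix and the .csv pattern are made-up example values, not anything from H2O.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical demo of the prefix-then-regex filtering used by importFiles/matchPattern.
public class ImportFilterSketch {
  // Mirrors afterPrefix: keep only the part after the last occurrence of the prefix.
  static String afterPrefix(String whole, String prefix) {
    int pos = whole.lastIndexOf(prefix);
    if (pos == -1) return "";
    int start = pos + prefix.length();
    return start >= whole.length() ? "" : whole.substring(start);
  }

  public static void main(String[] args) {
    String prefix = "/data/imports/";                 // assumed import root
    ArrayList<String> files = new ArrayList<>(Arrays.asList(
        "/data/imports/train.csv", "/data/imports/test.csv", "/data/imports/readme.txt"));
    Pattern p = Pattern.compile(".*\\.csv$");         // assumed user-supplied pattern
    ArrayList<String> kept = new ArrayList<>();
    for (String f : files) {
      Matcher m = p.matcher(afterPrefix(f, prefix));  // strip prefix, then match, as in matchPattern
      if (m.find()) kept.add(f);
    }
    System.out.println(kept);  // the two .csv entries survive; readme.txt is dropped
  }
}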
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/persist/PersistNFS.java
package water.persist;

import java.io.*;
import java.net.URI;
import java.nio.channels.FileChannel;
import java.util.ArrayList;

import water.*;
import water.exceptions.H2OFileAccessDeniedException;
import water.exceptions.H2ONotFoundArgumentException;
import water.fvec.NFSFileVec;
import water.util.FileIntegrityChecker;
import water.util.FileUtils;
import water.util.Log;

// Persistence backend for network file system.
// Just for loading or storing files.
//
// @author cliffc
public final class PersistNFS extends Persist {
  static final String KEY_PREFIX = "nfs:" + File.separator;
  static final int KEY_PREFIX_LENGTH = KEY_PREFIX.length();

  // file implementation -------------------------------------------------------

  /**
   * Key from file
   */
  public static Key decodeFile(File f) {
    return Key.make(KEY_PREFIX + f.toString());
  }

  // Returns the file for given key.
  private static File getFileForKey(Key k) {
    final int off = k._kb[0] == Key.CHK ? water.fvec.Vec.KEY_PREFIX_LEN : 0;
    assert new String(k._kb, off, KEY_PREFIX_LENGTH).equals(KEY_PREFIX) : "Not an NFS key: " + k;
    String s = new String(k._kb, KEY_PREFIX_LENGTH + off, k._kb.length - (KEY_PREFIX_LENGTH + off));
    return new File(s);
  }

  public void cleanUp() { }

  /**
   * InputStream from a NFS-based Key
   */
  public static InputStream openStream(Key k) throws IOException {
    return new FileInputStream(getFileForKey(k));
  }

  @Override
  public byte[] load(Value v) throws IOException {
    assert v.isPersisted();
    // Convert a file chunk into a long-offset from the base file.
    Key<?> k = v._key;
    final long skip = k.isChunkKey() ? water.fvec.NFSFileVec.chunkOffset(k) : 0;
    try (FileInputStream s = new FileInputStream(getFileForKey(k));
         FileChannel fc = s.getChannel()) {
      fc.position(skip);
      AutoBuffer ab = new AutoBuffer(fc, true, Value.NFS);
      byte[] b = ab.getA1(v._max);
      ab.close();
      return b;
    }
  }

  @Override
  public void store(Value v) {
    // Only the home node does persistence on NFS
    if (!v._key.home()) return;
    // A perhaps useless cutout: the upper layers should test this first.
    if (v.isPersisted()) return;
    try {
      File f = getFileForKey(v._key);
      if (!f.mkdirs()) throw new IOException("Unable to create directory " + f);
      try (FileOutputStream s = new FileOutputStream(f)) {
        byte[] m = v.memOrLoad();
        assert (m == null || m.length == v._max); // Assert not saving partial files
        if (m != null)
          new AutoBuffer(s.getChannel(), false, Value.NFS).putA1(m, m.length).close();
      }
    } catch (IOException e) {
      Log.err(e);
    }
  }

  @Override
  public void delete(Value v) {
    throw H2O.fail();
  }

  @Override
  public Key uriToKey(URI uri) {
    return NFSFileVec.make(uri.getScheme() == null ? new File(uri.toString()) : new File(uri))._key;
  }

  @Override
  public ArrayList<String> calcTypeaheadMatches(String filter, int limit) {
    File base = null;
    String filterPrefix = "";
    if (limit == 0) limit--;
    if (!filter.isEmpty()) {
      File file = new File(filter);
      if (file.isDirectory()) {
        base = file;
      } else {
        base = file.getParentFile();
        filterPrefix = file.getName().toLowerCase();
      }
    }
    if (base == null) base = new File(".");

    ArrayList<String> array = new ArrayList<>();
    File[] files = base.listFiles();
    if (files != null) {
      for (File file : files) {
        if (file.isHidden()) continue;
        if (file.getName().toLowerCase().startsWith(filterPrefix))
          array.add(file.getPath());
        if (array.size() == limit) break; // When limit == -1, check all files/directories in directory for matches
      }
    }
    return array;
  }

  @Override
  public void importFiles(String path, String pattern,
                          ArrayList<String> files, ArrayList<String> keys,
                          ArrayList<String> fails, ArrayList<String> dels) {
    File f = new File(FileUtils.getURI(path));
    if (H2O.ARGS.file_deny_glob.matches(f.toPath().normalize())) {
      throw new H2OFileAccessDeniedException("File " + path + " access denied");
    }
    if( !f.exists() ) throw new H2ONotFoundArgumentException("File " + path + " does not exist");
    FileIntegrityChecker.check(f).syncDirectory(files,keys,fails,dels);
  }
}
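Not part of the original file: a minimal usage sketch of the key convention above, assuming a local H2O node has been started and that the hypothetical file /tmp/example.csv exists. decodeFile produces the "nfs:"-prefixed Key that NFSFileVec.make later builds on; everything else here is illustrative.

import java.io.File;
import water.H2O;
import water.Key;
import water.fvec.NFSFileVec;
import water.persist.PersistNFS;

// Sketch only: illustrates the "nfs:"-prefixed key convention and the NFSFileVec entry point.
public class PersistNFSSketch {
  public static void main(String[] args) {
    H2O.main(new String[]{});                 // assumption: a single local node is enough for a demo
    // (a real program would also wait for the local cloud to finish forming)
    File f = new File("/tmp/example.csv");    // hypothetical file; must exist on this machine
    Key fileKey = PersistNFS.decodeFile(f);   // "nfs:" + File.separator + the file's path
    System.out.println("file key: " + fileKey);
    NFSFileVec vec = NFSFileVec.make(f);      // lazily backed by the same file, one byte per element
    System.out.println("length in bytes: " + vec.length());
  }
}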
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/persist/S3ClientFactory.java
package water.persist;

public interface S3ClientFactory {
  <T> T getOrMakeClient(String bucket, Object configuration);
}
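Not part of H2O: a hypothetical implementation sketch of the S3ClientFactory interface above, showing one plausible contract (one cached client object per bucket). The CachingS3ClientFactory and FakeS3Client names are invented for illustration; a real factory would construct an actual S3 client from the supplied configuration.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import water.persist.S3ClientFactory;

// Hypothetical factory: hands out one cached client object per bucket name.
public class CachingS3ClientFactory implements S3ClientFactory {

  // Stand-in client type; a real factory would build an AWS SDK client here instead.
  public static final class FakeS3Client {
    final String bucket;
    FakeS3Client(String bucket) { this.bucket = bucket; }
  }

  private final Map<String, Object> clients = new ConcurrentHashMap<>();

  @Override
  @SuppressWarnings("unchecked")
  public <T> T getOrMakeClient(String bucket, Object configuration) {
    // 'configuration' is ignored in this sketch; a real implementation would use it when building the client.
    return (T) clients.computeIfAbsent(bucket, FakeS3Client::new);
  }
}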
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/Assembly.java
package water.rapids;

import water.H2O;
import water.Key;
import water.Keyed;
import water.fvec.Frame;
import water.rapids.transforms.Transform;

/**
 * Assemblies are essentially Pipelines.
 * TODO: add in/out col names, in/out col types
 */
public class Assembly extends Keyed<Assembly> {
  private Transform[] _steps;

  public Assembly(Key key, Transform[] steps) {
    super(key);
    _steps = steps;
  }

  public String[] names() {
    String[] names = new String[_steps.length];
    for (int i = 0; i < names.length; ++i)
      names[i] = _steps[i].name();
    return names;
  }

  public Transform[] steps() { return _steps; }

  public Frame fit(Frame f) {
    for (Transform step: _steps)
      f = step.fitTransform(f);
    return f;
  }

  public String toJava(String pojoName) {
    if (pojoName == null) pojoName = "GeneratedMungingPojo";
    StringBuilder sb = new StringBuilder(
            "import hex.genmodel.GenMunger;\n"+
            "import hex.genmodel.easy.RowData;\n\n" +
            "public class " + pojoName + " extends GenMunger {\n"+
            " public " + pojoName + "() {\n"+
            " _steps = new Step[" + _steps.length + "];\n"
    );
    int i=0;
    for (Transform step: _steps)
      sb.append(" _steps[").append(i++).append("] = new ").append(step.name()).append("();\n");
    sb.append(" }\n");
    for (Transform step: _steps)
      sb.append(step.genClass());
    sb.append("}\n");
    return sb.toString();
  }

  @Override protected long checksum_impl() { throw H2O.unimpl(); }
}
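Not part of the original file: a minimal sketch of how a caller might use an already-built Assembly, assuming one was previously stored in the DKV under assemblyKey and an input Frame under frameKey (both hypothetical). Only methods shown above (fit, names, toJava) are used.

import water.DKV;
import water.Key;
import water.fvec.Frame;
import water.rapids.Assembly;

// Sketch only: applies a previously built munging pipeline and prints its generated POJO source.
public class AssemblySketch {
  public static Frame applyPipeline(Key<Assembly> assemblyKey, Key<Frame> frameKey) {
    Assembly assembly = DKV.getGet(assemblyKey);  // assumed to have been published earlier
    Frame input = DKV.getGet(frameKey);           // assumed input frame
    Frame transformed = assembly.fit(input);      // runs each Transform's fitTransform in order
    System.out.println("steps: " + String.join(", ", assembly.names()));
    System.out.println(assembly.toJava("GeneratedMungingPojo"));
    return transformed;
  }
}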
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/BinaryMerge.java
package water.rapids; // Since we have a single key field in H2O (different to data.table), bmerge() becomes a lot simpler (no // need for recursion through join columns) with a downside of transfer-cost should we not need all the key. import water.*; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.NewChunk; import water.fvec.Vec; import water.parser.BufferedString; import water.util.ArrayUtils; import water.util.Log; import java.math.BigInteger; import java.util.Arrays; import static java.math.BigInteger.ONE; import static water.rapids.SingleThreadRadixOrder.getSortedOXHeaderKey; class BinaryMerge extends DTask<BinaryMerge> { long _numRowsInResult=0; // returned to caller, so not transient int _chunkSizes[]; // TODO: only _chunkSizes.length is needed by caller, so return that length only double _timings[]; private transient long _ret1st[/*n2GB*/][]; // The row number of the first right table's index key that matches private transient long _retLen[/*n2GB*/][]; // How many rows does it match to? final FFSB _leftSB, _riteSB; final boolean _onlyLeftFrame; // denote if only left frame is available which implies sorting. private transient KeyOrder _leftKO, _riteKO; private final int _numJoinCols; private transient long _leftFrom; private transient int _retBatchSize; // no need to match batchsize of RadixOrder. private final boolean _allLeft, _allRight; private boolean[] _stringCols; private boolean[] _intCols; final long _mergeId; // does any left row match to more than 1 right row? If not, can allocate // and loop more efficiently, and mark the resulting key'd frame with a // 'unique' index. // TODO: implement private transient boolean _oneToManyMatch = false; // Data which is duplicated left and rite, but only one copy is needed // per-map. This data is made in the constructor and shallow-copy shared // around the cluster. static class FFSB extends Iced<FFSB> { private final Frame _frame; private final Vec _vec; // fast lookups to save repeated calls to node.index() which calls // binarysearch within it. private final int _chunkNode[]; // Chunk homenode index final int _msb; private final int _shift; private final BigInteger _base[]; // the col.min() of each column in the key private final int _fieldSizes[]; // the widths of each column in the key private final int _keySize; // the total width in bytes of the key, sum of field sizes FFSB( Frame frame, int msb, int shift, int fieldSizes[], BigInteger base[]) { assert -1<=msb && msb<=255; // left ranges from 0 to 255, right from -1 to 255 _frame = frame; _msb = msb; _shift = shift; _fieldSizes = fieldSizes; _keySize = ArrayUtils.sum(fieldSizes); _base = base; // Create fast lookups to go from chunk index to node index of that chunk Vec vec = _vec = frame.anyVec(); _chunkNode = vec==null ? null : MemoryManager.malloc4(vec.nChunks()); if( vec == null ) return; // Zero-columns for Sort for( int i=0; i<_chunkNode.length; i++ ) _chunkNode[i] = vec.chunkKey(i).home_node().index(); } long min() { return BigInteger.valueOf(((long)_msb) << _shift).add(_base[0].subtract(ONE)).longValue(); } long max() { return BigInteger.valueOf(((long)_msb+1) << _shift).add(_base[0].subtract(ONE).subtract(ONE)).longValue(); } } // In X[Y], 'left'=i and 'right'=x BinaryMerge(FFSB leftSB, FFSB riteSB, boolean allLeft, long mergeId) { assert riteSB._msb!=-1 || allLeft; _mergeId = mergeId; _leftSB = leftSB; _riteSB = riteSB; _onlyLeftFrame = (_leftSB._frame.numCols() > 0 && _riteSB._frame.numCols()==0); // the number of columns in the key i.e. 
length of _leftFieldSizes and _riteSB._fieldSizes _numJoinCols = Math.min(_leftSB._fieldSizes.length, _riteSB._fieldSizes.length); _allLeft = allLeft; _allRight = false; // TODO: pass through int columnsInResult = (_leftSB._frame == null?0:_leftSB._frame.numCols()) + (_riteSB._frame == null?0:_riteSB._frame.numCols())-_numJoinCols; _stringCols = MemoryManager.mallocZ(columnsInResult); _intCols = MemoryManager.mallocZ(columnsInResult); // check left frame first if (_leftSB._frame!=null) { for (int col=0; col <_numJoinCols; col++) { if (_leftSB._frame.vec(col).isInt()) _intCols[col] = true; } for (int col = _numJoinCols; col < _leftSB._frame.numCols(); col++) { if (_leftSB._frame.vec(col).isString()) _stringCols[col] = true; if( _leftSB._frame.vec(col).isInt() ) _intCols[col] = true; } } // check right frame next if (_riteSB._frame != null) { int colOffset = _leftSB._frame==null?0:_leftSB._frame.numCols()-_numJoinCols; for (int col = _numJoinCols; col < _riteSB._frame.numCols(); col++) { if (_riteSB._frame.vec(col).isString()) _stringCols[col + colOffset] = true; if( _riteSB._frame.vec(col).isInt() ) _intCols[col+colOffset] = true; } } } @Override public void compute2() { _timings = MemoryManager.malloc8d(20); long t0 = System.nanoTime(); SingleThreadRadixOrder.OXHeader leftSortedOXHeader = DKV.getGet(getSortedOXHeaderKey(/*left=*/true, _leftSB._msb, _mergeId)); if (leftSortedOXHeader == null) { if( !_allRight ) { tryComplete(); return; } throw H2O.unimpl(); // TODO pass through _allRight and implement } _leftKO = new KeyOrder(leftSortedOXHeader, _mergeId); SingleThreadRadixOrder.OXHeader rightSortedOXHeader = DKV.getGet(getSortedOXHeaderKey(/*left=*/false, _riteSB._msb, _mergeId)); //if (_riteSB._msb==-1) assert _allLeft && rightSortedOXHeader == null; // i.e. it's known nothing on right can join if (rightSortedOXHeader == null) { if( !_allLeft ) { tryComplete(); return; } // enables general case code to run below without needing new special case code rightSortedOXHeader = new SingleThreadRadixOrder.OXHeader(0, 0, 0); } _riteKO = new KeyOrder(rightSortedOXHeader, _mergeId); // get left batches _leftKO.initKeyOrder(_leftSB._msb,/*left=*/true); final long leftN = leftSortedOXHeader._numRows; // number of leftframe rows to fetch for leftMSB assert leftN >= 1; // get right batches _riteKO.initKeyOrder(_riteSB._msb, /*left=*/false); final long rightN = rightSortedOXHeader._numRows; _timings[0] += (System.nanoTime() - t0) / 1e9; // Now calculate which subset of leftMSB and which subset of rightMSB we're // joining here by going into the detail of the key values present rather // than the extents of the range (the extents themselves may not be // present). // We see where the right extents occur in the left keys present; and if // there is an overlap we find the full extent of the overlap on the left // side (nothing less). // We only _need_ do this for left outer join otherwise we'd end up with // too many no-match left rows. // We'll waste allocating the retFirst and retLen vectors though if only a // small overlap is needed, so for that reason it's useful to restrict size // of retFirst and retLen even for inner join too. 
// Find left and right MSB extents in terms of the key boundaries they represent // _riteSB._msb==-1 indicates that no right MSB should be looked at final long leftMin = _leftSB.min(); // the minimum possible key value in this bucket final long leftMax = _leftSB.max(); // the maximum possible key value in this bucket // if _riteSB._msb==-1 then the values in riteMin and riteMax here are redundant and not used final long riteMin = _riteSB._msb==-1 ? -1 : _riteSB.min(); // the minimum possible key value in this bucket final long riteMax = _riteSB._msb==-1 ? -1 : _riteSB.max(); // the maximum possible key value in this bucket // _leftFrom and leftTo refers to the row indices to perform merging/search for each MSB value _leftFrom = (_riteSB._msb==-1 || leftMin>=riteMin || (_allLeft && _riteSB._msb==0 )) ? -1 : bsearchLeft(riteMin, /*retLow*/true , leftN); long leftTo = (_riteSB._msb==-1 || leftMax<=riteMax || (_allLeft && _riteSB._msb==255)) ? leftN : bsearchLeft(riteMax, /*retLow*/false, leftN); // The (_allLeft && rightMSB==0) part is to include those keys in that // leftMSB just below the right base. They won't be caught by rightMSBs to // the left because there are no more rightMSBs below 0. Only when // _allLeft do we need to create NA match for them. They must be created // in the same MSB/MSB pair along with the keys that may match the very // lowest right keys, because stitching assumes unique MSB/MSB pairs. long retSize = leftTo - _leftFrom - 1; // since leftTo and leftFrom are 1 outside the extremes assert retSize >= 0; // retSize is number of rows to include in final merged frame if (retSize==0) { tryComplete(); return; } // nothing can match, even when allLeft _retBatchSize = 1048576; // must set to be the same from RadixOrder.java int retNBatch = (int)((retSize - 1) / _retBatchSize + 1); int retLastSize = (int)(retSize - (retNBatch - 1) * _retBatchSize); _ret1st = new long[retNBatch][]; _retLen = new long[retNBatch][]; for( int b=0; b<retNBatch; b++) { _ret1st[b] = MemoryManager.malloc8(b==retNBatch-1 ? retLastSize : _retBatchSize); _retLen[b] = MemoryManager.malloc8(b==retNBatch-1 ? retLastSize : _retBatchSize); } // always look at the whole right bucket. Even though in types -1 and 1, // we know range is outside so nothing should match. if types -1 and 1 do // occur, they only happen for leftMSB 0 and 255, and will quickly resolve // to no match in the right bucket via bmerge t0 = System.nanoTime(); bmerge_r(_leftFrom, leftTo, -1, rightN); _timings[1] += (System.nanoTime() - t0) / 1e9; if (_allLeft) { assert _leftKO.numRowsToFetch() == retSize; } else { long tt = 0; for( long[] retFirstx : _ret1st ) // i.e. sum(_ret1st>0) in R for( long rF : retFirstx ) tt += (rF > 0) ? 
1 : 0; // TODO: change to tt.privateAssertMethod() containing the loop above to // avoid that loop when asserts are off, or accumulate the tt // inside the merge_r, somehow assert tt <= retSize; assert _leftKO.numRowsToFetch() == tt; } if (_numRowsInResult > 0) createChunksInDKV(); // TODO: set 2 Frame and 2 int[] to NULL at the end of compute2 to save // some traffic back, but should be small and insignificant // TODO: recheck transients or null out here before returning tryComplete(); } // Holder for Key & Order info private static class KeyOrder { public final transient long _batchSize; private final transient byte _key [/*n2GB*/][/*i mod 2GB * _keySize*/]; private final transient long _order[/*n2GB*/][/*i mod 2GB * _keySize*/]; private final transient long _perNodeNumRowsToFetch[]; private final transient long _mergeId; KeyOrder( SingleThreadRadixOrder.OXHeader sortedOXHeader , long bTime) { _batchSize = sortedOXHeader._batchSize; final int nBatch = sortedOXHeader._nBatch; _key = new byte[nBatch][]; _order = new long[nBatch][]; _perNodeNumRowsToFetch = new long[H2O.CLOUD.size()]; _mergeId = bTime; } void initKeyOrder( int msb, boolean isLeft ) { for( int b=0; b<_key.length; b++ ) { Value v = DKV.get(SplitByMSBLocal.getSortedOXbatchKey(isLeft, msb, b, _mergeId)); SplitByMSBLocal.OXbatch ox = v.get(); //mem version (obtained from remote) of the Values gets turned into POJO version v.freeMem(); //only keep the POJO version of the Value _key [b] = ox._x; _order[b] = ox._o; } } long numRowsToFetch() { return ArrayUtils.sum(_perNodeNumRowsToFetch); } // Do a mod/div long _order array lookup long at8order( long idx ) { return _order[(int)(idx / _batchSize)][(int)(idx % _batchSize)]; } long[][] fillPerNodeRows( int i, final int batchSizeLong) { // final int batchSizeLong = 256*1024*1024 / 16; // 256GB DKV limit / sizeof(UUID) if( _perNodeNumRowsToFetch[i] <= 0 ) return null; int nbatch = (int) ((_perNodeNumRowsToFetch[i] - 1) / batchSizeLong + 1); // TODO: wrap in class to avoid this boiler plate assert nbatch >= 1; int lastSize = (int) (_perNodeNumRowsToFetch[i] - (nbatch - 1) * batchSizeLong); assert lastSize > 0; long[][] res = new long[nbatch][]; for( int b = 0; b < nbatch; b++ ) res[b] = MemoryManager.malloc8(b==nbatch-1 ? lastSize : batchSizeLong); return res; } } // TODO: specialize keycmp for cases when no join column contains NA (very // very often) and make this totally branch free; i.e. without the two `==0 ? :` private int keycmp(byte xss[][], long xi, byte yss[][], long yi) { // Must be passed a left key and a right key to avoid call overhead of // extra arguments. Only need left to left for equality only and that's // optimized in leftKeyEqual below. byte xbatch[] = xss[(int)(xi / _leftKO._batchSize)]; byte ybatch[] = yss[(int)(yi / _riteKO._batchSize)]; int xoff = (int)(xi % _leftKO._batchSize) * _leftSB._keySize; int yoff = (int)(yi % _riteKO._batchSize) * _riteSB._keySize; long xval=0, yval=0; // We avoid the NewChunk compression because we want finer grain // compression than 1,2,4 or 8 bytes types. In particular, a range just // greater than 4bn can use 5 bytes rather than 8 bytes; a 38% RAM saving // over the wire in that possibly common case. Note this is tight and // almost branch free. 
int i=0; while( i<_numJoinCols && xval==yval ) { // TODO: pass i in to start at a later key column, when known int xlen = _leftSB._fieldSizes[i]; int ylen = _riteSB._fieldSizes[i]; xval = xbatch[xoff] & 0xFFL; while (xlen>1) { xval <<= 8; xval |= xbatch[++xoff] & 0xFFL; xlen--; } xoff++; yval = ybatch[yoff] & 0xFFL; while (ylen>1) { yval <<= 8; yval |= ybatch[++yoff] & 0xFFL; ylen--; } yoff++; xval = xval==0 ? Long.MIN_VALUE : updateVal(xval,_leftSB._base[i]); yval = yval==0 ? Long.MIN_VALUE : updateVal(yval,_riteSB._base[i]); i++; } // The magnitude of the difference is used for limiting staleness in a // rolling join, capped at Integer.MAX|(MIN+1). Roll's type is chosen to // be int so staleness can't be requested over int's limit. // Same return value as strcmp in C. <0 => xi<yi. long diff = xval-yval; // could overflow even in long; e.g. joining to a prevailing NA, or very large gaps O(2^62) if (BigInteger.valueOf(xval).subtract(BigInteger.valueOf(yval)).bitLength() > 64) Log.warn("Overflow in BinaryMerge.java"); // detects overflow if (xval>yval) { // careful not diff>0 here due to overflow return( (diff<0 | diff>Integer.MAX_VALUE ) ? Integer.MAX_VALUE : (int)diff); } else { return( (diff>0 | diff<Integer.MIN_VALUE+1) ? Integer.MIN_VALUE+1 : (int)diff); } } private long updateVal(Long oldVal, BigInteger baseD) { // we know oldVal is not zero BigInteger xInc = baseD.add(BigInteger.valueOf(oldVal).subtract(ONE)); if (xInc.bitLength() > 64) { Log.warn("Overflow in BinaryMerge.java"); return oldVal; // should have died sooner or later } else return xInc.longValue(); } // binary search to the left MSB in the 1st column only private long bsearchLeft(long x, boolean returnLow, long upp) { long low = -1; while (low < upp - 1) { long mid = low + (upp - low) / 2; byte keyBatch[] = _leftKO._key[(int)(mid / _leftKO._batchSize)]; int off = (int)(mid % _leftKO._batchSize) * _leftSB._keySize; int len = _leftSB._fieldSizes[0]; long val = keyBatch[off] & 0xFFL; while( len>1 ) { val <<= 8; val |= keyBatch[++off] & 0xFFL; len--; } val = val==0 ? Long.MIN_VALUE : updateVal(val,_leftSB._base[0]); if (x<val || (x==val && returnLow)) { upp = mid; } else { low = mid; } } return returnLow ? low : upp; } // Must be passed two leftKeys only. // Optimized special case for the two calling points; see usages in bmerge_r below. private boolean leftKeyEqual(byte x[][], long xi, long yi) { byte xbatch[] = x[(int)(xi / _leftKO._batchSize)]; byte ybatch[] = x[(int)(yi / _leftKO._batchSize)]; int xoff = (int)(xi % _leftKO._batchSize) * _leftSB._keySize; int yoff = (int)(yi % _leftKO._batchSize) * _leftSB._keySize; int i=0; while (i<_leftSB._keySize && xbatch[xoff++] == ybatch[yoff++]) i++; return(i==_leftSB._keySize); } private void bmerge_r(long lLowIn, long lUppIn, long rLowIn, long rUppIn) { // TODO: parallel each of the 256 bins long lLow = lLowIn, lUpp = lUppIn, rLow = rLowIn, rUpp = rUppIn; long mid, tmpLow, tmpUpp; // i.e. (lLow+lUpp)/2 but being robust to one day in the future someone // somewhere overflowing long; e.g. 
32 exabytes of 1-column ints long lr = lLow + (lUpp - lLow) / 2; while (rLow < rUpp - 1) { mid = rLow + (rUpp - rLow) / 2; int cmp = keycmp(_leftKO._key, lr, _riteKO._key, mid); // -1, 0 or 1, like strcmp if (cmp < 0) { rUpp = mid; } else if (cmp > 0) { rLow = mid; } else { // rKey == lKey including NA == NA // branch mid to find start and end of this group in this column // TODO?: not if mult=first|last and col<ncol-1 tmpLow = mid; tmpUpp = mid; while (tmpLow < rUpp - 1) { mid = tmpLow + (rUpp - tmpLow) / 2; if (keycmp(_leftKO._key, lr, _riteKO._key, mid) == 0) tmpLow = mid; else rUpp = mid; } while (rLow < tmpUpp - 1) { mid = rLow + (tmpUpp - rLow) / 2; if (keycmp(_leftKO._key, lr, _riteKO._key, mid) == 0) tmpUpp = mid; else rLow = mid; } break; } } // rLow and rUpp now surround the group in the right table. // The left table key may (unusually, and not recommended, but sometimes needed) be duplicated. // Linear search outwards from left row. // Most commonly, the first test shows this left key is unique. // This saves (i) re-finding the matching rows in the right for all the // dup'd left and (ii) recursive bounds logic gets awkward if other left // rows can find the same right rows // Related to 'allow.cartesian' in data.table. // TODO: if index stores attribute that it is unique then we don't need // this step. However, each of these while()s would run at most once in // that case, which may not be worth optimizing. tmpLow = lr + 1; // TODO: these while's could be rolled up inside leftKeyEqual saving call overhead while (tmpLow<lUpp && leftKeyEqual(_leftKO._key, tmpLow, lr)) tmpLow++; lUpp = tmpLow; tmpUpp = lr - 1; while (tmpUpp>lLow && leftKeyEqual(_leftKO._key, tmpUpp, lr)) tmpUpp--; lLow = tmpUpp; // lLow and lUpp now surround the group in the left table. If left key is unique then lLow==lr-1 and lUpp==lr+1. assert lUpp - lLow >= 2; // if value found, rLow and rUpp surround it, unlike standard binary search where rLow falls on it long len = rUpp - rLow - 1; // TODO - we don't need loop here :) Why does perNodeNumRightRowsToFetch increase so much? if (len > 0 || _allLeft) { long t0 = System.nanoTime(); if (len > 1) _oneToManyMatch = true; _numRowsInResult += Math.max(1,len) * (lUpp-lLow-1); // 1 for NA row when _allLeft for (long j = lLow + 1; j < lUpp; j++) { // usually iterates once only for j=lr, but more than once if there are dup keys in left table // may be a range of left dup'd join-col values, but we need to fetch // each one since the left non-join columns are likely not dup'd and // may be the reason for the cartesian join long t00 = System.nanoTime(); // TODO could loop through batches rather than / and % wastefully long globalRowNumber = _leftKO.at8order(j); _timings[17] += (System.nanoTime() - t00)/1e9; t00 = System.nanoTime(); int chkIdx = _leftSB._vec.elem2ChunkIdx(globalRowNumber); //binary search in espc _timings[15] += (System.nanoTime() - t00)/1e9; // the key is the same within this left dup range, but still need to fetch left non-join columns _leftKO._perNodeNumRowsToFetch[_leftSB._chunkNode[chkIdx]]++; if (len==0) continue; // _allLeft must be true if len==0 // TODO: initial MSB splits should split down to small enough chunk // size - but would that require more passes and if so, how long? Code // simplification benefits would be welcome! long outLoc = j - (_leftFrom + 1); // outOffset is 0 here in the standard scaling up high cardinality test // outBatchSize can be different, and larger since known to be 8 bytes // per item, both retFirst and retLen. 
(Allowing 8 byte here seems // wasteful, actually.) final int jb2 = (int)(outLoc/_retBatchSize); final int jo2 = (int)(outLoc%_retBatchSize); // TODO - take outside the loop. However when we go deep-msb, this'll go away. // rLow surrounds row, so +1. Then another +1 for 1-based // row-number. 0 (default) means nomatch and saves extra set to -1 for // no match. Could be significant in large edge cases by not needing // to write at all to _ret1st if it has no matches. _ret1st[jb2][jo2] = rLow + 2; _retLen[jb2][jo2] = len; } // if we have dup'd left row, we only need to fetch the right rows once // for the first dup. Those should then be recycled locally later. for (long i=0; i<len; i++) { long loc = rLow+1+i; long t00 = System.nanoTime(); // TODO could loop through batches rather than / and % wastefully long globalRowNumber = _riteKO.at8order(loc); _timings[18] += (System.nanoTime() - t00)/1e9; t00 = System.nanoTime(); int chkIdx = _riteSB._vec.elem2ChunkIdx(globalRowNumber); //binary search in espc _timings[16] += (System.nanoTime() - t00)/1e9; // just count the number per node. So we can allocate arrays precisely // up front, and also to return early to use in case of memory errors // or other distribution problems _riteKO._perNodeNumRowsToFetch[_riteSB._chunkNode[chkIdx]]++; } _timings[14] += (System.nanoTime() - t0)/1e9; } // TODO: check assumption that retFirst and retLength are initialized to 0, for case of no match // Now branch (and TODO in parallel) to merge below and merge above // '|| _allLeft' is needed here in H2O (but not data.table) for the // _leftKO._perNodeNumRowsToFetch above to populate and pass the assert near // the end of the compute2() above. if (lLow > lLowIn && (rLow > rLowIn || _allLeft)) // '|| _allLeft' is needed here in H2O (but not data.table) bmerge_r(lLowIn, lLow+1, rLowIn, rLow+1); if (lUpp < lUppIn && (rUpp < rUppIn || _allLeft)) bmerge_r(lUpp-1, lUppIn, rUpp-1, rUppIn); // We don't feel tempted to reduce the global _ansN here and make a global // frame, since we want to process each MSB l/r combo individually without // allocating them all. Since recursive, no more code should be here (it // would run too much) } private void createChunksInDKV() { // Collect all matches // Create the final frame (part) for this MSB combination // Cannot use a List<Long> as that's restricted to 2Bn items and also isn't an Iced datatype long t0 = System.nanoTime(), t1; final int cloudSize = H2O.CLOUD.size(); final long perNodeRightRows[][][] = new long[cloudSize][][]; final long perNodeLeftRows [][][] = new long[cloudSize][][]; // Allocate memory to split this MSB combn's left and right matching rows // into contiguous batches sent to the nodes they reside on for( int i = 0; i < cloudSize; i++ ) { perNodeRightRows[i] = _riteKO.fillPerNodeRows(i, (int) _riteKO._batchSize); perNodeLeftRows [i] = _leftKO.fillPerNodeRows(i, (int) _leftKO._batchSize); } _timings[2] += ((t1=System.nanoTime()) - t0) / 1e9; t0=t1; // Loop over _ret1st and _retLen and populate the batched requests for // each node helper. _ret1st and _retLen are the same shape final long perNodeRightLoc[] = MemoryManager.malloc8(cloudSize); final long perNodeLeftLoc [] = MemoryManager.malloc8(cloudSize); chunksPopulatePerNode(perNodeLeftLoc,perNodeLeftRows,perNodeRightLoc,perNodeRightRows); _timings[3] += ((t1=System.nanoTime()) - t0) / 1e9; t0=t1; // Create the chunks for the final frame from this MSB pair. // 16 bytes for each UUID (biggest type). Enum will be long (8). 
TODO: How is non-Enum 'string' handled by H2O? final int batchSizeUUID = _retBatchSize; final int nbatch = (int) ((_numRowsInResult-1)/batchSizeUUID +1); // TODO: wrap in class to avoid this boiler plate assert nbatch >= 1; final int lastSize = (int) (_numRowsInResult - (nbatch-1)*batchSizeUUID); assert lastSize > 0; final int numLeftCols = _leftSB._frame.numCols(); final int numColsInResult = _leftSB._frame.numCols() + _riteSB._frame.numCols() - _numJoinCols; final double[][][] frameLikeChunks = new double[numColsInResult][nbatch][]; //TODO: compression via int types final long[][][] frameLikeChunksLongs = new long[numColsInResult][nbatch][]; //TODO: compression via int types BufferedString[][][] frameLikeChunks4Strings = new BufferedString[numColsInResult][nbatch][]; // cannot allocate before hand _chunkSizes = new int[nbatch]; final GetRawRemoteRows grrrsLeft[][] = new GetRawRemoteRows[cloudSize][]; final GetRawRemoteRows grrrsRite[][] = new GetRawRemoteRows[cloudSize][]; if (_onlyLeftFrame) { // sorting only long[] resultLeftLocPrevlPrevf = new long[4]; // element 0 store resultLoc, element 1 store leftLoc resultLeftLocPrevlPrevf[1] = _leftFrom; // sweep through left table along the sorted row locations. resultLeftLocPrevlPrevf[0] = 0; resultLeftLocPrevlPrevf[2] = -1; resultLeftLocPrevlPrevf[3] = -1; for (int b = 0; b < nbatch; b++) { allocateFrameLikeChunks(b, nbatch, lastSize, batchSizeUUID, frameLikeChunks, frameLikeChunks4Strings, frameLikeChunksLongs, numColsInResult); // Now loop through _ret1st and _retLen and populate chunksPopulateRetFirst(perNodeLeftRows, resultLeftLocPrevlPrevf, b, numColsInResult, numLeftCols, perNodeLeftLoc, grrrsLeft, frameLikeChunks, frameLikeChunks4Strings, frameLikeChunksLongs); _timings[10] += ((t1 = System.nanoTime()) - t0) / 1e9; t0 = t1; // compress all chunks and store them chunksCompressAndStore(b, numColsInResult, frameLikeChunks, frameLikeChunks4Strings, frameLikeChunksLongs); if (nbatch > 1) { cleanUpMemory(grrrsLeft, b); // clean up memory used by grrrsLeft and grrrsRite } } } else { // merging for (int b = 0; b < nbatch; b++) { // allocate all frameLikeChunks in one shot allocateFrameLikeChunks(b, nbatch, lastSize, batchSizeUUID, frameLikeChunks, frameLikeChunks4Strings, frameLikeChunksLongs, numColsInResult); // allocate Frame is ok _timings[6] += ((t1 = System.nanoTime()) - t0) / 1e9; t0 = t1; // all this time is expected to be in [5] } _timings[4] += ((t1 = System.nanoTime()) - t0) / 1e9; t0 = t1; chunksGetRawRemoteRows(perNodeLeftRows, perNodeRightRows, grrrsLeft, grrrsRite); // need this one chunksPopulateRetFirst(batchSizeUUID, numColsInResult, numLeftCols, perNodeLeftLoc, grrrsLeft, perNodeRightLoc, grrrsRite, frameLikeChunks, frameLikeChunks4Strings, frameLikeChunksLongs); _timings[10] += ((t1 = System.nanoTime()) - t0) / 1e9; t0 = t1; chunksCompressAndStoreO(nbatch, numColsInResult, frameLikeChunks, frameLikeChunks4Strings, frameLikeChunksLongs); } _timings[11] += (System.nanoTime() - t0) / 1e9; } // compress all chunks and store them private void chunksCompressAndStoreO(final int nbatch, final int numColsInResult, final double[][][] frameLikeChunks, BufferedString[][][] frameLikeChunks4String, long[][][] frameLikeChunksLong) { // compress all chunks and store them Futures fs = new Futures(); for (int col = 0; col < numColsInResult; col++) { if (this._stringCols[col]) { for (int b = 0; b < nbatch; b++) { NewChunk nc = new NewChunk(null, 0); for (int index = 0; index < frameLikeChunks4String[col][b].length; index++) 
nc.addStr(frameLikeChunks4String[col][b][index]); Chunk ck = nc.compress(); DKV.put(getKeyForMSBComboPerCol(_leftSB._msb, _riteSB._msb, col, b, _mergeId), ck, fs, true); frameLikeChunks4String[col][b] = null; //free mem as early as possible (it's now in the store) } } else if( _intCols[col] ) { for (int b = 0; b < nbatch; b++) { NewChunk nc = new NewChunk(null,-1); for(long l: frameLikeChunksLong[col][b]) { if( l==Long.MIN_VALUE ) nc.addNA(); else nc.addNum(l, 0); } Chunk ck = nc.compress(); DKV.put(getKeyForMSBComboPerCol(_leftSB._msb, _riteSB._msb, col, b, _mergeId), ck, fs, true); frameLikeChunksLong[col][b] = null; //free mem as early as possible (it's now in the store) } } else { for (int b = 0; b < nbatch; b++) { Chunk ck = new NewChunk(frameLikeChunks[col][b]).compress(); DKV.put(getKeyForMSBComboPerCol(_leftSB._msb, _riteSB._msb, col, b, _mergeId), ck, fs, true); frameLikeChunks[col][b] = null; //free mem as early as possible (it's now in the store) } } } fs.blockForPending(); } private void allocateFrameLikeChunks(final int b, final int nbatch, final int lastSize, final int batchSizeUUID, final double[][][] frameLikeChunks, final BufferedString[][][] frameLikeChunks4Strings, final long[][][] frameLikeChunksLongs, final int numColsInResult) { for (int col = 0; col < numColsInResult; col++) { // allocate memory for frameLikeChunks for this batch if (this._stringCols[col]) { frameLikeChunks4Strings[col][b] = new BufferedString[_chunkSizes[b] = (b == nbatch - 1 ? lastSize : batchSizeUUID)]; } else if (this._intCols[col]) { frameLikeChunksLongs[col][b] = MemoryManager.malloc8(_chunkSizes[b] = (b == nbatch - 1 ? lastSize : batchSizeUUID)); Arrays.fill(frameLikeChunksLongs[col][b], Long.MIN_VALUE); } else { frameLikeChunks[col][b] = MemoryManager.malloc8d(_chunkSizes[b] = (b == nbatch - 1 ? lastSize : batchSizeUUID)); Arrays.fill(frameLikeChunks[col][b], Double.NaN); // NA by default to save filling with NA for nomatches when allLeft } } } // Loop over _ret1st and _retLen and populate the batched requests for // each node helper. _ret1st and _retLen are the same shape private void chunksPopulatePerNode( final long perNodeLeftLoc[], final long perNodeLeftRows[][][], final long perNodeRightLoc[], final long perNodeRightRows[][][] ) { final int batchSizeLong = _retBatchSize; // 256GB DKV limit / sizeof(UUID) long prevf = -1, prevl = -1; // TODO: hop back to original order here for [] syntax. long leftLoc=_leftFrom; // sweep through left table along the sorted row locations. for (int jb=0; jb<_ret1st.length; ++jb) { // jb = j batch for (int jo=0; jo<_ret1st[jb].length; ++jo) { // jo = j offset leftLoc++; // to save jb*_ret1st[0].length + jo; long f = _ret1st[jb][jo]; // TODO: take _ret1st[jb] outside inner loop long l = _retLen[jb][jo]; if (f==0) { // left row matches to no right row assert l == 0; // doesn't have to be 0 (could be 1 already if allLeft==true) but currently it should be, so check it if (!_allLeft) continue; // now insert the left row once and NA for the right columns i.e. 
left outer join } { // new scope so 'row' can be declared in the for() loop below and registerized (otherwise 'already defined in this scope' in that scope) // Fetch the left rows and mark the contiguous from-ranges each left row should be recycled over // TODO: when single node, not needed // TODO could loop through batches rather than / and % wastefully long row = _leftKO.at8order(leftLoc); // global row number of matched row in left frame int chkIdx = _leftSB._vec.elem2ChunkIdx(row); //binary search in espc int ni = _leftSB._chunkNode[chkIdx]; // node index long pnl = perNodeLeftLoc[ni]++; // pnl = per node location perNodeLeftRows[ni][(int)(pnl/batchSizeLong)][(int)(pnl%batchSizeLong)] = row; // ask that node for global row number row } if (f==0) continue; assert l > 0; if (prevf == f && prevl == l) continue; // don't re-fetch the same matching rows (cartesian). We'll repeat them locally later. prevf = f; prevl = l; for (int r=0; r<l; r++) { // locate the corresponding matching row in right frame long loc = f+r-1; // -1 because these are 0-based where 0 means no-match and 1 refers to the first row // TODO: could take / and % outside loop in cases where it doesn't span a batch boundary long row = _riteKO.at8order(loc); // right frame global row number that matches left frame // find the owning node for the row, using local operations here int chkIdx = _riteSB._vec.elem2ChunkIdx(row); //binary search in espc int ni = _riteSB._chunkNode[chkIdx]; // TODO Split to an if() and batch and offset separately long pnl = perNodeRightLoc[ni]++; // pnl = per node location. perNodeRightRows[ni][(int)(pnl/batchSizeLong)][(int)(pnl%batchSizeLong)] = row; // ask that node for global row number row } } } // TODO assert that perNodeRite and Left are exactly equal to the number // expected and allocated. Arrays.fill(perNodeLeftLoc ,0); // clear for reuse below Arrays.fill(perNodeRightLoc,0); // denotes number of rows fetched } private void chunksGetRawRemoteRows(final long perNodeLeftRows[][][], final long perNodeRightRows[][][], GetRawRemoteRows grrrsLeft[][], GetRawRemoteRows grrrsRite[][]) { RPC<GetRawRemoteRows> grrrsRiteRPC[][] = new RPC[H2O.CLOUD.size()][]; RPC<GetRawRemoteRows> grrrsLeftRPC[][] = new RPC[H2O.CLOUD.size()][]; // Launch remote tasks left and right for( H2ONode node : H2O.CLOUD._memary ) { final int ni = node.index(); final int bUppRite = perNodeRightRows[ni] == null ? 0 : perNodeRightRows[ni].length; final int bUppLeft = perNodeLeftRows[ni] == null ? 0 : perNodeLeftRows[ni].length; grrrsRiteRPC[ni] = new RPC[bUppRite]; grrrsLeftRPC[ni] = new RPC[bUppLeft]; grrrsRite[ni] = new GetRawRemoteRows[bUppRite]; grrrsLeft[ni] = new GetRawRemoteRows[bUppLeft]; for (int b = 0; b < bUppRite; b++) { // TODO try again now with better surrounding method // Arrays.sort(perNodeRightRows[ni][b]); Simple quick test of fetching in monotonic order. Doesn't seem to help so far. grrrsRiteRPC[ni][b] = new RPC<>(node, new GetRawRemoteRows(_riteSB._frame, perNodeRightRows[ni][b])).call(); } for (int b = 0; b < bUppLeft; b++) { // Arrays.sort(perNodeLeftRows[ni][b]); grrrsLeftRPC[ni][b] = new RPC<>(node, new GetRawRemoteRows(_leftSB._frame, perNodeLeftRows[ni][b])).call(); } } for( H2ONode node : H2O.CLOUD._memary ) { // TODO: just send and wait for first batch on each node and then .get() next batch as needed. int ni = node.index(); final int bUppRite = perNodeRightRows[ni] == null ? 
0 : perNodeRightRows[ni].length; for (int b = 0; b < bUppRite; b++) _timings[5] += (grrrsRite[ni][b] = grrrsRiteRPC[ni][b].get()).timeTaken; final int bUppLeft = perNodeLeftRows[ni] == null ? 0 : perNodeLeftRows[ni].length; for (int b = 0; b < bUppLeft; b++) _timings[5] += (grrrsLeft[ni][b] = grrrsLeftRPC[ni][b].get()).timeTaken; } } // Get Raw Remote Rows private void chunksGetRawRemoteRows(final long perNodeLeftRows[][][], GetRawRemoteRows grrrsLeft[][], int batchNumber) { RPC<GetRawRemoteRows> grrrsRiteRPC[][] = new RPC[H2O.CLOUD.size()][]; RPC<GetRawRemoteRows> grrrsLeftRPC[][] = new RPC[H2O.CLOUD.size()][]; // Launch remote tasks left and right for( H2ONode node : H2O.CLOUD._memary ) { final int ni = node.index(); final int bUppLeft = perNodeLeftRows[ni] == null ? 0 : perNodeLeftRows[ni].length; // denote nbatch grrrsLeftRPC[ni] = new RPC[bUppLeft]; grrrsLeft[ni] = new GetRawRemoteRows[bUppLeft]; if (batchNumber < bUppLeft) { grrrsLeftRPC[ni][batchNumber] = new RPC<>(node, new GetRawRemoteRows(_leftSB._frame, perNodeLeftRows[ni][batchNumber])).call(); } } for( H2ONode node : H2O.CLOUD._memary ) { // TODO: just send and wait for first batch on each node and then .get() next batch as needed. int ni = node.index(); final int bUppLeft = perNodeLeftRows[ni] == null ? 0 : perNodeLeftRows[ni].length; if (batchNumber < bUppLeft) _timings[5] += (grrrsLeft[ni][batchNumber] = grrrsLeftRPC[ni][batchNumber].get()).timeTaken; } } // Now loop through _ret1st and _retLen and populate private void chunksPopulateRetFirst(final long perNodeLeftRows[][][], long[] resultLeftLocPrevlPrevf, final int jb, final int numColsInResult, final int numLeftCols, final long perNodeLeftLoc[], final GetRawRemoteRows grrrsLeft[][], final double[][][] frameLikeChunks, BufferedString[][][] frameLikeChunks4String, final long[][][] frameLikeChunksLong) { // 16 bytes for each UUID (biggest type). Enum will be long (8). // TODO: How is non-Enum 'string' handled by H2O? final int batchSizeUUID = _retBatchSize; // number of rows per chunk to fit in 256GB DKV limit. // TODO: hop back to original order here for [] syntax. long resultLoc = resultLeftLocPrevlPrevf[0]; long leftLoc = resultLeftLocPrevlPrevf[1]; long prevl = resultLeftLocPrevlPrevf[2]; long prevf = resultLeftLocPrevlPrevf[3]; if (jb < _ret1st.length) { for (int jo=0; jo<_ret1st[jb].length; ++jo) { // jo = j offset leftLoc++; // to save jb*_ret1st[0].length + jo; long f = _ret1st[jb][jo]; // TODO: take _ret1st[jb] outside inner loop long l = _retLen[jb][jo]; if (f==0 && !_allLeft) continue; // f==0 => left row matches to no right row // else insert the left row once and NA for the right columns i.e. left outer join // Fetch the left rows and recycle it if more than 1 row in the right table is matched to. // TODO could loop through batches rather than / and % wastefully long row = _leftKO.at8order(leftLoc); // TODO should leftOrder and retFirst/retLen have the same batch size to make this easier? // TODO Can we not just loop through _leftKO._order only? Why jb and jo too through int chkIdx = _leftSB._vec.elem2ChunkIdx(row); //binary search in espc int ni = _leftSB._chunkNode[chkIdx]; long pnl = perNodeLeftLoc[ni]++; // pnl = per node location. 
TODO: batch increment this rather than int b = (int)(pnl / batchSizeUUID); // however, the batch number of remote nodes may not match with final batch number int o = (int)(pnl % batchSizeUUID); if (grrrsLeft[ni]==null || grrrsLeft[ni][b] == null || grrrsLeft[ni][b]._chk==null) { // fetch chunk from remote nodes chunksGetRawRemoteRows(perNodeLeftRows, grrrsLeft, b); } long[][] chksLong= grrrsLeft[ni][b]._chkLong; double[][] chks = grrrsLeft[ni][b]._chk; BufferedString[][] chksString = grrrsLeft[ni][b]._chkString; final int l1 = Math.max((int)l,1); for (int rep = 0; rep < l1; rep++) { long a = resultLoc + rep; // TODO: loop into batches to save / and % for each repeat and still // cater for crossing multiple batch boundaries int whichChunk = (int) (a / batchSizeUUID); // this actually points to batch number int offset = (int) (a % batchSizeUUID); for (int col=0; col<chks.length; col++) { // copy over left frame to frameLikeChunks if (this._stringCols[col]) { frameLikeChunks4String[col][whichChunk][offset] = chksString[col][o]; } else if (this._intCols[col]) { frameLikeChunksLong[col][whichChunk][offset] = chksLong[col][o]; } else { frameLikeChunks[col][whichChunk][offset] = chks[col][o]; // colForBatch.atd(row); } } } if (f==0) { resultLoc++; continue; } // no match so just one row (NA for right table) to advance over assert l > 0; if (prevf == f && prevl == l) { // just copy from previous batch in the result (populated by for() // below). Contiguous easy in-cache copy (other than batches). for (int r=0; r<l; r++) { // TODO: loop into batches to save / and % for each repeat and // still cater for crossing multiple batch boundaries int toChunk = (int) (resultLoc / batchSizeUUID); int toOffset = (int) (resultLoc % batchSizeUUID); int fromChunk = (int) ((resultLoc - l) / batchSizeUUID); int fromOffset = (int) ((resultLoc - l) % batchSizeUUID); for (int col=0; col<numColsInResult-numLeftCols; col++) { int colIndex = numLeftCols + col; if (this._stringCols[colIndex]) { frameLikeChunks4String[colIndex][toChunk][toOffset] = frameLikeChunks4String[colIndex][fromChunk][fromOffset]; } else if (this._intCols[colIndex]) { frameLikeChunksLong[colIndex][toChunk][toOffset] = frameLikeChunksLong[colIndex][fromChunk][fromOffset]; } else { frameLikeChunks[colIndex][toChunk][toOffset] = frameLikeChunks[colIndex][fromChunk][fromOffset]; } } resultLoc++; } continue; } prevf = f; prevl = l; } } resultLeftLocPrevlPrevf[0] = resultLoc; resultLeftLocPrevlPrevf[1] = leftLoc; resultLeftLocPrevlPrevf[2] = prevl; resultLeftLocPrevlPrevf[3] = prevf; } // Now loop through _ret1st and _retLen and populate private void chunksPopulateRetFirst(final int batchSizeUUID, final int numColsInResult, final int numLeftCols, final long perNodeLeftLoc[], final GetRawRemoteRows grrrsLeft[][], final long perNodeRightLoc[], final GetRawRemoteRows grrrsRite[][], final double[][][] frameLikeChunks, BufferedString[][][] frameLikeChunks4String, final long[][][] frameLikeChunksLong) { // 16 bytes for each UUID (biggest type). Enum will be long (8). // TODO: How is non-Enum 'string' handled by H2O? long resultLoc=0; // sweep upwards through the final result, filling it in // TODO: hop back to original order here for [] syntax. long leftLoc=_leftFrom; // sweep through left table along the sorted row locations. 
long prevf = -1, prevl = -1; for (int jb=0; jb<_ret1st.length; ++jb) { // jb = j batch for (int jo=0; jo<_ret1st[jb].length; ++jo) { // jo = j offset leftLoc++; // to save jb*_ret1st[0].length + jo; long f = _ret1st[jb][jo]; // TODO: take _ret1st[jb] outside inner loop long l = _retLen[jb][jo]; if (f==0 && !_allLeft) continue; // f==0 => left row matches to no right row // else insert the left row once and NA for the right columns i.e. left outer join // Fetch the left rows and recycle it if more than 1 row in the right table is matched to. // TODO could loop through batches rather than / and % wastefully long row = _leftKO.at8order(leftLoc); // TODO should leftOrder and retFirst/retLen have the same batch size to make this easier? // TODO Can we not just loop through _leftKO._order only? Why jb and jo too through int chkIdx = _leftSB._vec.elem2ChunkIdx(row); //binary search in espc int ni = _leftSB._chunkNode[chkIdx]; long pnl = perNodeLeftLoc[ni]++; // pnl = per node location. TODO: batch increment this rather than int b = (int)(pnl / batchSizeUUID); int o = (int)(pnl % batchSizeUUID); long[][] chksLong= grrrsLeft[ni][b]._chkLong; double[][] chks = grrrsLeft[ni][b]._chk; BufferedString[][] chksString = grrrsLeft[ni][b]._chkString; final int l1 = Math.max((int)l,1); for (int rep = 0; rep < l1; rep++) { long a = resultLoc + rep; // TODO: loop into batches to save / and % for each repeat and still // cater for crossing multiple batch boundaries int whichChunk = (int) (a / batchSizeUUID); int offset = (int) (a % batchSizeUUID); for (int col=0; col<chks.length; col++) { // copy over left frame to frameLikeChunks if (this._stringCols[col]) { if (chksString[col][o] != null) frameLikeChunks4String[col][whichChunk][offset] = chksString[col][o]; } else if( _intCols[col] ) { frameLikeChunksLong[col][whichChunk][offset] = chksLong[col][o]; } else frameLikeChunks[col][whichChunk][offset] = chks[col][o]; // colForBatch.atd(row); } } if (f==0) { resultLoc++; continue; } // no match so just one row (NA for right table) to advance over assert l > 0; if (prevf == f && prevl == l) { // just copy from previous batch in the result (populated by for() // below). Contiguous easy in-cache copy (other than batches). 
for (int r=0; r<l; r++) { // TODO: loop into batches to save / and % for each repeat and // still cater for crossing multiple batch boundaries int toChunk = (int) (resultLoc / batchSizeUUID); int toOffset = (int) (resultLoc % batchSizeUUID); int fromChunk = (int) ((resultLoc - l) / batchSizeUUID); int fromOffset = (int) ((resultLoc - l) % batchSizeUUID); for (int col=0; col<numColsInResult-numLeftCols; col++) { int colIndex = numLeftCols + col; if (this._stringCols[colIndex]) { frameLikeChunks4String[colIndex][toChunk][toOffset] = frameLikeChunks4String[colIndex][fromChunk][fromOffset]; } else if( _intCols[colIndex] ) { frameLikeChunksLong[colIndex][toChunk][toOffset] = frameLikeChunksLong[colIndex][fromChunk][fromOffset]; } else { frameLikeChunks[colIndex][toChunk][toOffset] = frameLikeChunks[colIndex][fromChunk][fromOffset]; } } resultLoc++; } continue; } prevf = f; prevl = l; for (int r=0; r<l; r++) { // TODO: loop into batches to save / and % for each repeat and still // cater for crossing multiple batch boundaries int whichChunk = (int) (resultLoc / batchSizeUUID); int offset = (int) (resultLoc % batchSizeUUID); long loc = f+r-1; // -1 because these are 0-based where 0 means no-match and 1 refers to the first row // TODO: could take / and % outside loop in cases where it doesn't span a batch boundary row = _riteKO.at8order(loc); // find the owning node for the row, using local operations here chkIdx = _riteSB._vec.elem2ChunkIdx(row); //binary search in espc ni = _riteSB._chunkNode[chkIdx]; pnl = perNodeRightLoc[ni]++; // pnl = per node location. // TODO Split to an if() and batch and offset separately chks = grrrsRite[ni][(int)(pnl / batchSizeUUID)]._chk; chksLong = grrrsRite[ni][(int)(pnl / batchSizeUUID)]._chkLong; chksString = grrrsRite[ni][(int)(pnl / batchSizeUUID)]._chkString; o = (int)(pnl % batchSizeUUID); for (int col=0; col<numColsInResult-numLeftCols; col++) { // TODO: this only works for numeric columns (not for UUID, strings, etc.) int colIndex = numLeftCols + col; if (this._stringCols[colIndex]) { if (chksString[_numJoinCols + col][o]!=null) frameLikeChunks4String[colIndex][whichChunk][offset] = chksString[_numJoinCols + col][o]; // colForBatch.atd(row); } else if( _intCols[colIndex] ) { frameLikeChunksLong[colIndex][whichChunk][offset] = chksLong[_numJoinCols + col][o]; } else frameLikeChunks[colIndex][whichChunk][offset] = chks[_numJoinCols + col][o]; // colForBatch.atd(row); } resultLoc++; } } } } private void cleanUpMemory(GetRawRemoteRows[][] grrr, int batchIdx) { if (grrr != null) { int nodeNum = grrr.length; for (int nodeIdx = 0; nodeIdx < nodeNum; nodeIdx++) { int batchLimit = Math.min(batchIdx + 1, grrr[nodeIdx].length); if ((grrr[nodeIdx] != null) && (grrr[nodeIdx].length > 0)) { for (int bIdx = 0; bIdx < batchLimit; bIdx++) { // clean up memory int chkLen = grrr[nodeIdx][bIdx] == null ? 0 : (grrr[nodeIdx][bIdx]._chk == null ? 
0 : grrr[nodeIdx][bIdx]._chk.length); for (int cindex = 0; cindex < chkLen; cindex++) { grrr[nodeIdx][bIdx]._chk[cindex] = null; grrr[nodeIdx][bIdx]._chkString[cindex] = null; grrr[nodeIdx][bIdx]._chkLong[cindex] = null; } if (chkLen > 0) { grrr[nodeIdx][bIdx]._chk = null; grrr[nodeIdx][bIdx]._chkString = null; grrr[nodeIdx][bIdx]._chkLong = null; } } } } } } // compress all chunks and store them private void chunksCompressAndStore(final int b, final int numColsInResult, final double[][][] frameLikeChunks, BufferedString[][][] frameLikeChunks4String, final long[][][] frameLikeChunksLong) { // compress all chunks and store them Futures fs = new Futures(); for (int col = 0; col < numColsInResult; col++) { if (this._stringCols[col]) { NewChunk nc = new NewChunk(null, 0); for (int index = 0; index < frameLikeChunks4String[col][b].length; index++) nc.addStr(frameLikeChunks4String[col][b][index]); Chunk ck = nc.compress(); DKV.put(getKeyForMSBComboPerCol(_leftSB._msb, _riteSB._msb, col, b, _mergeId), ck, fs, true); frameLikeChunks4String[col][b] = null; //free mem as early as possible (it's now in the store) } else if( _intCols[col] ) { NewChunk nc = new NewChunk(null,-1); for(long l: frameLikeChunksLong[col][b]) { if( l==Long.MIN_VALUE ) nc.addNA(); else nc.addNum(l, 0); } Chunk ck = nc.compress(); DKV.put(getKeyForMSBComboPerCol(_leftSB._msb, _riteSB._msb, col, b, _mergeId), ck, fs, true); frameLikeChunksLong[col][b] = null; //free mem as early as possible (it's now in the store) } else { Chunk ck = new NewChunk(frameLikeChunks[col][b]).compress(); DKV.put(getKeyForMSBComboPerCol(_leftSB._msb, _riteSB._msb, col, b, _mergeId), ck, fs, true); frameLikeChunks[col][b] = null; //free mem as early as possible (it's now in the store) } } fs.blockForPending(); } static Key getKeyForMSBComboPerCol(/*Frame leftFrame, Frame rightFrame,*/ int leftMSB, int rightMSB, int col /*final table*/, int batch, long mergeId) { return Key.make("__binary_merge__Chunk_for_col" + col + "_batch" + batch // + rightFrame._key.toString() + "_joined_with" + leftFrame._key.toString() + "_leftSB._msb" + leftMSB + "_riteSB._msb" + rightMSB + "_" + mergeId, Key.HIDDEN_USER_KEY, false, SplitByMSBLocal.ownerOfMSB(rightMSB==-1 ? leftMSB : rightMSB) ); //TODO home locally } static class GetRawRemoteRows extends DTask<GetRawRemoteRows> { Frame _fr; long[/*rows*/] _rows; //which rows to fetch from remote node, non-null on the way to remote, null on the way back double[/*col*/][] _chk; //null on the way to remote node, non-null on the way back BufferedString[][] _chkString; long[/*col*/][] _chkLong; double timeTaken; GetRawRemoteRows(Frame fr, long[] rows) { _rows = rows; _fr = fr; } private static long[][] malloc8A(int m, int n) { long [][] res = new long[m][]; for(int i = 0; i < m; ++i) res[i] = MemoryManager.malloc8(n); return res; } @Override public void compute2() { assert(_rows!=null); assert(_chk ==null); long t0 = System.nanoTime(); _chk = MemoryManager.malloc8d(_fr.numCols(),_rows.length); // TODO: should this be transposed in memory? _chkLong = malloc8A(_fr.numCols(), _rows.length); _chkString = new BufferedString[_fr.numCols()][_rows.length]; int cidx[] = MemoryManager.malloc4(_rows.length); int offset[] = MemoryManager.malloc4(_rows.length); Vec anyVec = _fr.anyVec(); assert anyVec != null; for (int row=0; row<_rows.length; row++) { cidx[row] = anyVec.elem2ChunkIdx(_rows[row]); // binary search of espc array. 
TODO: sort input row numbers to avoid offset[row] = (int)(_rows[row] - anyVec.espc()[cidx[row]]); } Chunk c[] = new Chunk[anyVec.nChunks()]; for (int col=0; col<_fr.numCols(); col++) { Vec v = _fr.vec(col); for (int i=0; i<c.length; i++) c[i] = v.chunkKey(i).home() ? v.chunkForChunkIdx(i) : null; // grab a chunk here if (v.isString()) { for (int row = 0; row < _rows.length; row++) { // copy string and numeric columns _chkString[col][row] = c[cidx[row]].atStr(new BufferedString(), offset[row]); // _chkString[col][row] store by reference here } } else if( v.isInt() ) { for (int row = 0; row < _rows.length; row++) { // extract info from chunks to one place _chkLong[col][row] = (c[cidx[row]].isNA(offset[row])) ? Long.MIN_VALUE : c[cidx[row]].at8(offset[row]); } } else { for (int row = 0; row < _rows.length; row++) { // extract info from chunks to one place _chk[col][row] = c[cidx[row]].atd(offset[row]); } } } // tell remote node to fill up Chunk[/*batch*/][/*rows*/] // perNodeRows[node] has perNodeRows[node].length batches of row numbers to fetch _rows=null; _fr=null; assert(_chk != null && _chkLong != null); timeTaken = (System.nanoTime() - t0) / 1e9; tryComplete(); } } }
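// Illustrative sketch (not part of the H2O sources): GetRawRemoteRows.compute2 above maps each
// requested global row number to a chunk via anyVec.elem2ChunkIdx (a binary search over the
// Vec's espc offsets) and then takes the in-chunk offset as row - espc[chunk]. The sketch below
// shows that lookup in isolation; the class name and the sample espc values are hypothetical.
class RowLookupSketch {
  /** espc[i] = global row at which chunk i starts; espc[espc.length - 1] = total row count. */
  static int chunkIdx(long[] espc, long row) {
    int lo = 0, hi = espc.length - 2;              // espc.length - 2 is the last valid chunk index
    while (lo < hi) {
      int mid = (lo + hi + 1) >>> 1;
      if (espc[mid] <= row) lo = mid; else hi = mid - 1;
    }
    return lo;
  }

  public static void main(String[] args) {
    long[] espc = {0, 4, 9, 15};                   // three chunks of sizes 4, 5 and 6 (hypothetical)
    long row = 10;
    int cidx = chunkIdx(espc, row);
    long offset = row - espc[cidx];                // same arithmetic as offset[row] above
    System.out.println("row " + row + " -> chunk " + cidx + ", offset " + offset); // chunk 2, offset 1
  }
}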
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/Env.java
package water.rapids; import hex.ObjectConsistencyChecker; import water.*; import water.fvec.Frame; import water.rapids.ast.*; import water.rapids.ast.params.AstConst; import water.rapids.ast.prims.advmath.*; import water.rapids.ast.prims.assign.*; import water.rapids.ast.prims.filters.dropduplicates.AstDropDuplicates; import water.rapids.ast.prims.internal.AstRunTool; import water.rapids.ast.prims.math.*; import water.rapids.ast.prims.matrix.*; import water.rapids.ast.prims.misc.*; import water.rapids.ast.prims.models.AstPerfectAUC; import water.rapids.ast.prims.models.AstSegmentModelsAsFrame; import water.rapids.ast.prims.models.*; import water.rapids.ast.prims.mungers.*; import water.rapids.ast.prims.operators.*; import water.rapids.ast.prims.reducers.*; import water.rapids.ast.prims.repeaters.*; import water.rapids.ast.prims.search.*; import water.rapids.ast.prims.string.*; import water.rapids.ast.prims.testing.AstSetReadForbidden; import water.rapids.ast.prims.time.*; import water.rapids.ast.prims.timeseries.*; import water.rapids.vals.ValFrame; import water.rapids.vals.ValFun; import water.rapids.vals.ValKeyed; import water.rapids.vals.ValModel; import java.io.Closeable; import java.util.ArrayList; import java.util.HashMap; /** * Execute a set of instructions in the context of an H2O cloud. * <p/> * An Env (environment) object is a classic stack of values used during * execution of an AstRoot. The stack is hidden in the normal Java execution * stack and is not explicit. * <p/> * For efficiency, reference counting is employed to recycle objects already * in use rather than creating copies upon copies (a la R). When a Frame is * `pushed` on to the stack, its reference count is incremented by 1. When a * Frame is `popped` off of the stack, its reference count is decremented by * 1. When the reference count is 0, the Env instance will dispose of the * object. All objects live and die by the Env's that create them. That * means that any object not created by an Env instance shalt not be * DKV.removed. * <p/> * Therefore, the Env class is a stack of values + an API for reference counting. */ public class Env extends Iced { /** * for DEVELOPMENT/TESTING only - can be used to enable DKV object consistency check, this check is generally expensive * and should only be used when hunting down bugs or for running tests on CI */ static final boolean DEV_CHECK_OBJECT_CONSISTENCY = H2O.getSysBoolProperty("rapids.checkObjectConsistency", false); // Session holds the ref-counts across multiple executions. public final Session _ses; // Current lexical scope lookup public AstFunction _scope; // Frames that are alive in mid-execution; usually because we have evaluated // some first expression and need to hang onto it while evaluating the next // expression. 
private ArrayList<Frame> _stk = new ArrayList<>(); // Built-in constants, checked before other namespace lookups happen private static final HashMap<String, AstPrimitive> PRIMS = new HashMap<>(); // Built-in primitives, done after other namespace lookups happen private static final HashMap<String, AstParameter> CONSTS = new HashMap<>(); static void init(AstPrimitive ast) { PRIMS.put(ast.str(), ast); } static void init(AstPrimitive ast, String name) { PRIMS.put(name, ast); } static { // Constants CONSTS.put("FALSE", AstConst.FALSE); CONSTS.put("False", AstConst.FALSE); CONSTS.put("false", AstConst.FALSE); CONSTS.put("TRUE", AstConst.TRUE); CONSTS.put("True", AstConst.TRUE); CONSTS.put("true", AstConst.TRUE); CONSTS.put("NaN", AstConst.NAN); CONSTS.put("NA", AstConst.NAN); CONSTS.put("nan", AstConst.NAN); CONSTS.put("PI", AstConst.PI); CONSTS.put("Pi", AstConst.PI); CONSTS.put("null", null); // Standard math functions init(new AstAbs()); init(new AstAcos()); init(new AstAcosh()); init(new AstAsin()); init(new AstAsinh()); init(new AstAtan()); init(new AstAtanh()); init(new AstCeiling()); init(new AstCos()); init(new AstCosh()); init(new AstCosPi()); init(new AstDiGamma()); init(new AstExp()); init(new AstExpm1()); init(new AstFloor()); init(new AstGamma()); init(new AstLGamma()); init(new AstLog()); init(new AstLog1P()); init(new AstLog2()); init(new AstLog10()); init(new AstNoOp()); init(new AstNot()); init(new AstNot(), "!!"); init(new AstRound()); init(new AstSgn()); init(new AstSignif()); init(new AstSin()); init(new AstSinh()); init(new AstSinPi()); init(new AstSqrt()); init(new AstTan()); init(new AstTanh()); init(new AstTanPi()); init(new AstTriGamma()); init(new AstTrunc()); // Math binary operators init(new AstAnd()); init(new AstDiv()); init(new AstEq()); init(new AstGe()); init(new AstGt()); init(new AstIntDiv()); init(new AstIntDivR()); init(new AstLAnd()); init(new AstLe()); init(new AstLOr()); init(new AstLt()); init(new AstMod()); init(new AstModR()); init(new AstMul()); init(new AstNe()); init(new AstOr()); init(new AstPlus()); init(new AstPow()); init(new AstSub()); init(new AstIfElse()); init(new AstIfElse()); // this one is ternary // Reducers init(new AstAll()); init(new AstAny()); init(new AstAnyNa()); init(new AstCumMax()); init(new AstCumMin()); init(new AstCumProd()); init(new AstCumSum()); init(new AstMad()); init(new AstMax()); init(new AstMaxNa()); init(new AstMean()); init(new AstMedian()); init(new AstMin()); init(new AstMinNa()); init(new AstNaCnt()); init(new AstProd()); init(new AstProdNa()); init(new AstSdev()); init(new AstSum()); init(new AstSumAxis()); init(new AstSumNa()); init(new AstTopN()); // top N% // Time init(new AstAsDate()); init(new AstDay()); init(new AstDayOfWeek()); init(new AstGetTimeZone()); init(new AstHour()); init(new AstListTimeZones()); init(new AstMillis()); init(new AstMinute()); init(new AstMktime()); init(new AstMoment()); init(new AstMonth()); init(new AstSecond()); init(new AstSetTimeZone()); init(new AstWeek()); init(new AstYear()); // Time Series init(new AstDiffLag1()); init(new AstIsax()); // Advanced Math init(new AstCorrelation()); init(new AstDistance()); init(new AstHist()); init(new AstFillNA()); init(new AstImpute()); init(new AstKFold()); init(new AstMode()); init(new AstSkewness()); init(new AstKurtosis()); init(new AstModuloKFold()); init(new AstQtile()); init(new AstRunif()); init(new AstSort()); init(new AstStratifiedKFold()); init(new AstStratifiedSplit()); init(new AstTable()); init(new AstUnique()); init(new 
AstVariance()); init(new AstTfIdf()); // Generic data mungers init(new AstAnyFactor()); init(new AstApply()); init(new AstAsFactor()); init(new AstAsCharacter()); init(new AstAsNumeric()); init(new AstCBind()); init(new AstColNames()); init(new AstColPySlice()); init(new AstColSlice()); init(new AstCut()); init(new AstDdply()); init(new AstFilterNaCols()); init(new AstFlatten()); init(new AstGetrow()); init(new AstGroup()); init(new AstGroupedPermute()); init(new AstIsCharacter()); init(new AstIsFactor()); init(new AstIsNa()); init(new AstIsNumeric()); init(new AstLevels()); init(new AstAppendLevels()); init(new AstMelt()); init(new AstMerge()); init(new AstNaOmit()); init(new AstColumnsByType()); init(new AstNcol()); init(new AstNLevels()); init(new AstNrow()); init(new AstRBind()); init(new AstReLevel()); init(new AstRelevelByFreq()); init(new AstRename()); init(new AstRowSlice()); init(new AstScale()); init(new AstScale.AstScaleInPlace()); init(new AstSetDomain()); init(new AstSetLevel()); init(new AstPivot()); init(new AstRankWithinGroupBy()); // provide ranking withing groupby groups sorted after certain columns // Assignment; all of these lean heavily on Copy-On-Write optimizations. init(new AstAppend()); // Add a column init(new AstAssign()); // Overwrite a global init(new AstRectangleAssign()); // Overwrite a rectangular slice init(new AstRm()); // Remove a frame, but maintain internal sharing init(new AstTmpAssign()); // Create a new immutable tmp frame // Matrix functions init(new AstTranspose()); init(new AstMMult()); // String functions init(new AstCountMatches()); init(new AstCountSubstringsWords()); init(new AstEntropy()); init(new AstLStrip()); init(new AstGrep()); init(new AstReplaceAll()); init(new AstReplaceFirst()); init(new AstRStrip()); init(new AstStrLength()); init(new AstStrSplit()); init(new AstTokenize()); init(new AstSubstring()); init(new AstToLower()); init(new AstToUpper()); init(new AstTrim()); init(new AstStrDistance()); // Miscellaneous init(new AstComma()); init(new AstLs()); init(new AstSetProperty()); init(new AstPerfectAUC()); // Test Support init(new AstSetReadForbidden()); // Search init(new AstMatch()); init(new AstWhich()); init(new AstWhichMax()); init(new AstWhichMin()); // Repeaters init(new AstRepLen()); init(new AstSeq()); init(new AstSeqLen()); // Segment Models init(new AstSegmentModelsAsFrame()); // Models init(new AstModelResetThreshold()); init(new AstTestJavaScoring()); init(new AstPermutationVarImp()); init(new AstFairnessMetrics()); // Make Leaderboard init(new AstMakeLeaderboard()); // Filters init(new AstDropDuplicates()); // For internal use only init(new AstRunTool()); // generate result frame init(new AstResultFrame()); // generate transform frame init(new AstTransformFrame()); // Custom (eg. algo-specific) for (AstPrimitive prim : PrimsService.INSTANCE.getAllPrims()) init(prim); } public Env(Session ses) { _ses = ses; } public int sp() { return _stk.size(); } private Frame peek(int x) { return _stk.get(sp() + x); } // Deletes dead Frames & forces good stack cleanliness at opcode end. One // per Opcode implementation. Track frames that are alive mid-execution, but // dead at Opcode end. public StackHelp stk() { return new StackHelp(); } public class StackHelp implements Closeable { final int _sp = sp(); // Push & track. Called on every Val that spans a (nested) exec call. // Used to track Frames with lifetimes spanning other AstRoot executions. 
public Val track(Val v) { if (v instanceof ValFrame) track(v.getFrame()); return v; } public Frame track(Frame fr) { _stk.add(sp(), new Frame(fr._names, fr.vecs().clone())); // Push and track a defensive copy return fr; } // Pop-all and remove dead. If a Frame was not "tracked" above, then if it // goes dead it will leak on function exit. If a Frame is returned from a // function and not declared "returning", any Vecs it shares with Frames // that are dying in this opcode will be deleted out from under it. @Override public void close() { Futures fs = null; int sp = sp(); while (sp > _sp) { Frame fr = _stk.remove(--sp); // Pop and stop tracking fs = _ses.downRefCnt(fr, fs); // Refcnt -1 all Vecs, and delete if zero refs } if (fs != null) fs.blockForPending(); } // Pop last element and lower refcnts - but do not delete. Lifetime is // responsibility of the caller. public Val untrack(Val vfr) { if (!vfr.isFrame()) return vfr; Frame fr = vfr.getFrame(); _ses.addRefCnt(fr, -1); // Lower counts, but do not delete on zero return vfr; } } // If an opcode is returning a Frame, it must call "returning(frame)" to // track the returned Frame. Otherwise shared input Vecs who's last use is // in this opcode will get deleted as the opcode exits - even if they are // shared in the returning output Frame. public <V extends Val> V returning(V val) { if (val instanceof ValFrame) _ses.addRefCnt(val.getFrame(), 1); return val; } // ---- // Variable lookup public Val lookup(String id) { // Lexically scoped functions first Val val = _scope == null ? null : _scope.lookup(id); if (val != null) return val; // disallow TRUE/FALSE/NA to be overwritten by keys in the DKV... just way way saner this way if (CONSTS.containsKey(id)) { return CONSTS.get(id).exec(this); } // Now the DKV Value value = DKV.get(Key.make(expand(id))); if (value != null) { if (DEV_CHECK_OBJECT_CONSISTENCY) { // executed for every id => expensive => should only be enabled in test mode new ObjectConsistencyChecker(value._key).doAllNodes(); } if (value.isFrame()) return addGlobals(value.get()); if (value.isModel()) return new ValModel(value.get()); Object other = value.get(); if (other instanceof Keyed) return new ValKeyed((Keyed) other); // Only understand Frames/Models/Keyed right now throw new IllegalArgumentException("DKV name lookup of " + id + " yielded an instance of type " + value.className() + ", but only Frame, Model & Keyed are supported"); } // Now the built-ins AstPrimitive ast = PRIMS.get(id); if (ast != null) return new ValFun(ast); throw new IllegalArgumentException("Name lookup of '" + id + "' failed"); } public String expand(String id) { return id.startsWith("$")? id.substring(1) + "~" + _ses.id() : id; } // Add these Vecs to the global list, and make a new defensive copy of the // frame - so we can hack it without changing the global frame view. ValFrame addGlobals(Frame fr) { _ses.addGlobals(fr); return new ValFrame(new Frame(fr._names.clone(), fr.vecs().clone())); } /* * Utility & Cleanup */ @Override public String toString() { String s = "{"; for (int i = 0, sp = sp(); i < sp; i++) s += peek(-sp + i).toString() + ","; return s + "}"; } }
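// Illustrative sketch (not part of the H2O sources): Env.StackHelp above tracks Frames pushed
// during one opcode and releases them when the helper is closed, unless they were explicitly
// untracked. The standalone sketch below shows the same scope-based, try-with-resources idea
// without any H2O types; all names here are hypothetical.
import java.util.ArrayDeque;
import java.util.Deque;

class ScopeSketch implements AutoCloseable {
  private final Deque<AutoCloseable> _tracked = new ArrayDeque<>();

  <T extends AutoCloseable> T track(T resource) { _tracked.push(resource); return resource; }

  <T extends AutoCloseable> T untrack(T resource) { _tracked.remove(resource); return resource; }

  @Override public void close() throws Exception {
    while (!_tracked.isEmpty()) _tracked.pop().close();  // release everything still tracked
  }

  public static void main(String[] args) throws Exception {
    AutoCloseable temp = () -> System.out.println("temp released");
    AutoCloseable kept = () -> System.out.println("kept released");
    try (ScopeSketch scope = new ScopeSketch()) {
      scope.track(temp);
      scope.untrack(scope.track(kept));  // "kept" becomes the caller's responsibility again
      // when the scope closes, only "temp" is still tracked and gets released here
    }
  }
}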
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/Merge.java
package water.rapids; import water.*; import water.fvec.*; import water.util.Log; import java.math.BigInteger; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.atomic.AtomicLong; import static java.math.BigInteger.ZERO; import static java.math.BigInteger.ONE; import static water.rapids.SingleThreadRadixOrder.getSortedOXHeaderKey; public class Merge { public static int ASCENDING = 1; public static int DESCENDING = -1; public static final int MEM_MULTIPLIER = 30; // multiplier applied to batchsize. Determined by experiment. public static final int OPTIMAL_BATCHSIZE = 1048576; private static final AtomicLong _mergeSeq = new AtomicLong(0); public static Frame sort(final Frame fr, int col) { return sort(fr, new int[]{col}); } public static Frame sort(final Frame fr, int[] cols) { int numCol = cols.length; int[] ascending = new int[numCol]; Arrays.fill(ascending,1); return sort(fr, cols, ascending); // default is to sort in ascending order } // Radix-sort a Frame using the given columns as keys. // This is a fully distributed and parallel sort. // It is not currently an in-place sort, so the data is doubled and a sorted copy is returned. public static Frame sort(final Frame fr, int[] cols, int[] ascending) { if( cols.length==0 ) // Empty key list return fr; // Return original frame for( int col : cols ) if( col < 0 || col >= fr.numCols() ) throw new IllegalArgumentException("Column "+col+" is out of range of "+fr.numCols()); // All identity ID maps int id_maps[][] = new int[cols.length][]; for( int i=0; i<cols.length; i++ ) { Vec vec = fr.vec(cols[i]); if( vec.isCategorical() ) { String[] domain = vec.domain(); id_maps[i] = new int[domain.length]; for( int j=0; j<domain.length; j++ ) id_maps[i][j] = j; } } return Merge.merge(fr, new Frame(new Vec[0]), cols, new int[0], true/*allLeft*/, id_maps, ascending, new int[0]); } public static Frame merge(final Frame leftFrame, final Frame riteFrame, final int leftCols[], final int riteCols[], boolean allLeft, int[][] id_maps) { int[] ascendingL, ascendingR; if (leftCols != null && leftCols.length>0) { ascendingL = new int[leftCols.length]; Arrays.fill(ascendingL, 1); } else ascendingL = new int[0]; if (riteCols != null && riteCols.length > 0) { ascendingR = new int[riteCols.length]; Arrays.fill(ascendingR, 1); } else ascendingR = new int[0]; return merge(leftFrame, riteFrame, leftCols, riteCols, allLeft, id_maps, ascendingL, ascendingR); } // single-threaded driver logic. Merge left and right frames based on common columns. public static Frame merge(final Frame leftFrame, final Frame riteFrame, final int leftCols[], final int riteCols[], boolean allLeft, int[][] id_maps, int[] ascendingL, int[] ascendingR) { if (allLeft && (riteFrame.numRows()==0)) return sortOnly(leftFrame, leftCols, id_maps, ascendingL); final boolean hasRite = riteCols.length > 0; // if there are NaN or null values in the rite frames in the merge columns, it is decided by Matt Dowle to not // include those rows in the final merged frame. Hence, I am going to first remove the na rows in the // mergedCols. boolean naPresent = false; // true if there are nas in merge columns if (riteFrame != null) { for (int colidx : riteCols) if (riteFrame.vec(colidx).naCnt() > 0) { naPresent = true; break; } } Frame rightFrame = naPresent ? 
new RemoveNAsTask(riteCols) .doAll(riteFrame.types(), riteFrame).outputFrame(riteFrame.names(), riteFrame.domains()) : riteFrame; // map missing levels to -1 (rather than increasing slots after the end) // for now to save a deep branch later for (int i=0; i<id_maps.length; i++) { // id_maps is for leftFrame. if (id_maps[i] == null) continue; // id_maps -1 represent leftFrame levels not found in riteFrame assert id_maps[i].length >= leftFrame.vec(leftCols[i]).max()+1 :"Left frame cardinality is higher than right frame! Switch frames and change merge directions to get " + "around this restriction."; if( !hasRite ) continue; int right_max = (int)rightFrame.vec(riteCols[i]).max(); for (int j=0; j<id_maps[i].length; j++) { assert id_maps[i][j] >= 0; if (id_maps[i][j] > right_max) id_maps[i][j] = -1; // map enum levels of left frame to -1 if not found in rite frame } } // Running 3 consecutive times on an idle cluster showed that running left // and right in parallel was a little slower (97s) than one by one (89s). // empty frame will come back with base = Long.MIN_VALUE (-9223372036854775808). // TODO: retest in future final long mergeId = nextMergeId(); RadixOrder leftIndex = createIndex(true ,leftFrame,leftCols,id_maps, ascendingL, mergeId); RadixOrder riteIndex = createIndex(false,rightFrame,riteCols,id_maps, ascendingR, mergeId); // TODO: start merging before all indexes had been created. Use callback? boolean leftFrameEmpty = (leftFrame.numRows()==0); boolean riteFrameEmpty = (riteFrame.numRows()==0); Log.info("Making BinaryMerge RPC calls ... "); long t0 = System.nanoTime(); ArrayList<BinaryMerge> bmList = new ArrayList<>(); Futures fs = new Futures(); final int leftShift = leftFrameEmpty?-1:leftIndex._shift[0]; final BigInteger leftBase = leftFrameEmpty?ZERO:leftIndex._base[0]; final int riteShift = riteFrameEmpty?-1:riteIndex._shift[0]; final BigInteger riteBase = riteFrameEmpty?ZERO : riteIndex._base [0]; // initialize for double columns, may not be used.... long leftMSBfrom = riteBase.subtract(leftBase).shiftRight(leftShift).longValue(); // calculate the MSB or base differences between rite and left base boolean riteBaseExceedsleftBase=riteFrameEmpty?false:riteBase.compareTo(leftBase)>0; // true if rite base minimum value exceeds left base minimum value // deal with the left range below the right minimum, if any if (riteBaseExceedsleftBase) { // left base starts at lower value than rite frame // deal with the range of the left below the start of the right, if any assert leftMSBfrom >= 0; if (leftMSBfrom>255) { // The left range ends before the right range starts. So every left row is a no-match to the right leftMSBfrom = 256; // so that the loop below runs for all MSBs (0-255) to fetch the left rows only } // run the merge for the whole lefts that end before the first right. 
// The overlapping one with the right base is dealt with inside if (allLeft) { // no need to iterate from 0 to 255, only need to go from leftbase MSB to min(max leftMSB, ritebaseMSB) for (int leftMSB = 0; leftMSB < leftMSBfrom; leftMSB++) { // grab only left frame and add to final merged frame BinaryMerge bm = new BinaryMerge(new BinaryMerge.FFSB(leftFrame, leftMSB, leftShift, leftIndex._bytesUsed, leftIndex._base), new BinaryMerge.FFSB(rightFrame,/*rightMSB*/-1, riteShift, riteIndex._bytesUsed, riteIndex._base), true, mergeId); bmList.add(bm); fs.add(new RPC<>(SplitByMSBLocal.ownerOfMSB(leftMSB), bm).call()); } } } else { // completely ignore right MSBs below the left base assert leftMSBfrom <= 0; // rite frame starts with lower or equal base than right leftMSBfrom = 0; } BigInteger rightS = BigInteger.valueOf(256L<<riteShift); // get max value of key values possible, power of 2 only long leftMSBto = leftFrameEmpty?0:riteBase.add(rightS).subtract(ONE).subtract(leftBase).shiftRight(leftShift).longValue(); // deal with the left range above the right maximum, if any. For doubles, -1 from shift to avoid negative outcome boolean leftRangeAboveRightMax = leftIndex._isCategorical[0]? leftBase.add(BigInteger.valueOf(256L<<leftShift)).compareTo(riteBase.add(rightS)) > 0: leftBase.add(BigInteger.valueOf(256L<<leftShift)).compareTo(riteBase.add(rightS)) >= 0; if (leftRangeAboveRightMax) { // left and rite frames have no overlap and left frame base is higher than rite max assert leftMSBto <= 255; if (leftMSBto<0) { // The left range starts after the right range ends. So every left row // is a no-match to the right leftMSBto = -1; // all MSBs (0-255) need to fetch the left rows only } // run the merge for the whole lefts that start after the last right if (allLeft) { // not worthy restricting length here unless store column max. for (int leftMSB = (int) leftMSBto + 1; leftMSB <= 255; leftMSB++) { BinaryMerge bm = new BinaryMerge(new BinaryMerge.FFSB(leftFrame, leftMSB, leftShift, leftIndex._bytesUsed, leftIndex._base), new BinaryMerge.FFSB(rightFrame,/*rightMSB*/-1, riteShift, riteIndex._bytesUsed, riteIndex._base), true, mergeId); bmList.add(bm); fs.add(new RPC<>(SplitByMSBLocal.ownerOfMSB(leftMSB), bm).call()); } } } else if (!leftFrameEmpty){ // completely ignore right MSBs after the right peak assert leftMSBto >= 255; leftMSBto = 255; } // the overlapped region; i.e. between [ max(leftMin,rightMin), min(leftMax, rightMax) ] // when right frame is empty, the leftMSBto will be 9223372036854775808 and hence the code will // stall here. I have changed the way leftMSBto in order to avoid this problem. assert leftMSBfrom >= 0; assert leftMSBto <= 255; for (int leftMSB = (int) leftMSBfrom; leftMSB <= leftMSBto; leftMSB++) { // calculate the key values at the bin extents: [leftFrom,leftTo] in terms of keys long leftFrom = leftFrameEmpty ? 0 : ((((long) leftMSB) << leftShift) - 1 + leftBase.longValue()); // -1 for leading NA spot long leftTo = leftFrameEmpty ? 0 : (((((long) leftMSB + 1) << leftShift) - 1 + leftBase.longValue()) - 1); // -1 for leading NA spot and another -1 to get last of previous bin // which right bins do these left extents occur in (could span multiple, and fall in the middle) long temprightMSB = (leftFrom - (riteFrameEmpty ? 0 : riteBase.longValue()) + 1) >> riteShift; // direct casting to int can give wrong values int rightMSBfrom = temprightMSB < 0 ? 0 : (int) temprightMSB; // +1 again for the leading NA spot temprightMSB = (leftTo - (riteFrameEmpty ? 
0 : riteBase.longValue()) + 1) >> riteShift; int rightMSBto = temprightMSB < 0 ? 0 : (int) temprightMSB; // the non-matching part of this region will have been dealt with above when allLeft==true if (rightMSBfrom < 0) rightMSBfrom = 0; assert rightMSBfrom <= 255; if (rightMSBto > 255) rightMSBto = 255; assert rightMSBto >= rightMSBfrom; for (int rightMSB = rightMSBfrom; rightMSB <= rightMSBto; rightMSB++) { BinaryMerge bm = new BinaryMerge(new BinaryMerge.FFSB(leftFrame, leftMSB, leftShift, leftIndex._bytesUsed, leftIndex._base), new BinaryMerge.FFSB(rightFrame, rightMSB, riteShift, riteIndex._bytesUsed, riteIndex._base), allLeft, mergeId); bmList.add(bm); // TODO: choose the bigger side to execute on (where that side of index // already is) to minimize transfer. within BinaryMerge it will // recalculate the extents in terms of keys and bsearch for them within // the (then local) both sides H2ONode node = SplitByMSBLocal.ownerOfMSB(rightMSB); fs.add(new RPC<>(node, bm).call()); } } Log.debug("took: " + String.format("%.3f", (System.nanoTime() - t0) / 1e9) +" seconds."); t0 = System.nanoTime(); Log.info("Sending BinaryMerge async RPC calls in a queue ... "); fs.blockForPending(); Log.debug("took: " + (System.nanoTime() - t0) / 1e9+" seconds."); Log.debug("Removing DKV keys of left and right index. ... "); // TODO: In future we won't delete but rather persist them as index on the table // Explicitly deleting here (rather than Arno's cleanUp) to reveal if we're not removing keys early enough elsewhere t0 = System.nanoTime(); for (int msb=0; msb<256; msb++) { for (int isLeft=0; isLeft<2; isLeft++) { Key k = getSortedOXHeaderKey(isLeft!=0, msb, mergeId); SingleThreadRadixOrder.OXHeader oxheader = DKV.getGet(k); DKV.remove(k); if (oxheader != null) { for (int b=0; b<oxheader._nBatch; ++b) { k = SplitByMSBLocal.getSortedOXbatchKey(isLeft!=0, msb, b, mergeId); DKV.remove(k); } } } } Log.debug("took: " + (System.nanoTime() - t0)/1e9+" seconds."); Log.info("Allocating and populating chunk info (e.g. size and batch number) ..."); t0 = System.nanoTime(); long ansN = 0; int numChunks = 0; for( BinaryMerge thisbm : bmList ) if( thisbm._numRowsInResult > 0 ) { numChunks += thisbm._chunkSizes.length; ansN += thisbm._numRowsInResult; } long chunkSizes[] = new long[numChunks]; int chunkLeftMSB[] = new int[numChunks]; // using too much space repeating the same value here, but, limited int chunkRightMSB[] = new int[numChunks]; int chunkBatch[] = new int[numChunks]; int k = 0; for( BinaryMerge thisbm : bmList ) { if (thisbm._numRowsInResult == 0) continue; int thisChunkSizes[] = thisbm._chunkSizes; for (int j=0; j<thisChunkSizes.length; j++) { chunkSizes[k] = thisChunkSizes[j]; chunkLeftMSB [k] = thisbm._leftSB._msb; chunkRightMSB[k] = thisbm._riteSB._msb; chunkBatch[k] = j; k++; } } Log.debug("took: " + (System.nanoTime() - t0) / 1e9+" seconds."); // Now we can stitch together the final frame from the raw chunks that were // put into the store Log.info("Allocating and populated espc ..."); t0 = System.nanoTime(); long espc[] = new long[chunkSizes.length+1]; int i=0; long sum=0; for (long s : chunkSizes) { espc[i++] = sum; sum+=s; } espc[espc.length-1] = sum; Log.debug("took: " + (System.nanoTime() - t0) / 1e9+" seconds."); assert(sum==ansN); Log.info("Allocating dummy vecs/chunks of the final frame ..."); t0 = System.nanoTime(); int numJoinCols = hasRite ? 
leftIndex._bytesUsed.length : 0; int numLeftCols = leftFrame.numCols(); int numColsInResult = numLeftCols + rightFrame.numCols() - numJoinCols ; final byte[] types = new byte[numColsInResult]; final String[][] doms = new String[numColsInResult][]; final String[] names = new String[numColsInResult]; for (int j=0; j<numLeftCols; j++) { types[j] = leftFrame.vec(j).get_type(); doms[j] = leftFrame.domains()[j]; names[j] = leftFrame.names()[j]; } for (int j=0; j<rightFrame.numCols()-numJoinCols; j++) { types[numLeftCols + j] = rightFrame.vec(j+numJoinCols).get_type(); doms[numLeftCols + j] = rightFrame.domains()[j+numJoinCols]; names[numLeftCols + j] = rightFrame.names()[j+numJoinCols]; } Key<Vec> key = Vec.newKey(); Vec[] vecs = new Vec(key, Vec.ESPC.rowLayout(key, espc)).makeCons(numColsInResult, 0, doms, types); Log.debug("took: " + (System.nanoTime() - t0) / 1e9+" seconds."); Log.info("Finally stitch together by overwriting dummies ..."); t0 = System.nanoTime(); Frame fr = new Frame(names, vecs); ChunkStitcher ff = new ChunkStitcher(chunkSizes, chunkLeftMSB, chunkRightMSB, chunkBatch, mergeId); ff.doAll(fr); Log.debug("took: " + (System.nanoTime() - t0) / 1e9+" seconds"); return fr; } public static List<SortCombine> gatherSameMSBRows(Frame leftFrame, long mergeId) { long t0 = System.nanoTime(); List<SortCombine> bmList = new ArrayList<SortCombine>(); Futures fs = new Futures(); for (int leftMSB=0; leftMSB<=255; leftMSB++) { // For each MSB, gather sorted rows with same MSB into one spot SingleThreadRadixOrder.OXHeader leftSortedOXHeader = DKV.getGet(getSortedOXHeaderKey(/*left=*/true, leftMSB, mergeId)); if (leftSortedOXHeader != null) { SortCombine bm = new SortCombine(new SortCombine.FFSB(leftFrame, leftMSB), leftSortedOXHeader, mergeId); bmList.add(bm); fs.add(new RPC<>(SplitByMSBLocal.ownerOfMSB(leftMSB), bm).call()); } } Log.debug("took: " + String.format("%.3f", (System.nanoTime() - t0) / 1e9)+" seconds."); Log.debug("Removing DKV keys of left index. ... "); // finished gather the sorted rows per MSB, remove used objects t0 = System.nanoTime(); Log.info("Sending BinaryMerge async RPC calls in a queue ... "); fs.blockForPending(); Log.debug("took: " + (System.nanoTime() - t0) / 1e9+" seconds."); t0 = System.nanoTime(); // now that we have collected sorted columns for each MSB, remove info that are no longer needed for (int msb=0; msb<256; msb++) { for (int isLeft=0; isLeft<2; isLeft++) { Key k = getSortedOXHeaderKey(isLeft!=0, msb, mergeId); SingleThreadRadixOrder.OXHeader oxheader = DKV.getGet(k); DKV.remove(k); if (oxheader != null) { for (int b=0; b<oxheader._nBatch; ++b) { k = SplitByMSBLocal.getSortedOXbatchKey(isLeft!=0, msb, b, mergeId); DKV.remove(k); } } } } Log.debug("took: " + (System.nanoTime() - t0)/1e9+" seconds."); return bmList; } public static class RemoveNAsTask extends MRTask<RemoveNAsTask> { private final int[] _columns; public RemoveNAsTask(int ... 
_columns) { this._columns = _columns; } private void copyRow(int row, Chunk[] cs, NewChunk[] ncs) { for (int i = 0; i < cs.length; ++i) { if (cs[i] instanceof CStrChunk) ncs[i].addStr(cs[i], row); else if (cs[i] instanceof C16Chunk) ncs[i].addUUID(cs[i], row); else if (cs[i].hasFloat()) ncs[i].addNum(cs[i].atd(row)); else ncs[i].addNum(cs[i].at8(row), 0); } } @Override public void map(Chunk[] cs, NewChunk[] ncs) { boolean noNA = true; for (int row = 0; row < cs[0]._len; ++row) { noNA = true; for (int col : _columns) { if (cs[col].isNA(row)) { noNA = false; break; } } if (noNA) copyRow(row, cs, ncs); } } } public static long allocateChunk(List<SortCombine> bmList, long chunkSizes[], int chunkLeftMSB[], int chunkRightMSB[], int chunkBatch[]) { Log.info("Allocating and populating chunk info (e.g. size and batch number) ..."); Long t0 = System.nanoTime(); long ansN = 0; int numChunks = 0; for( SortCombine thisbm : bmList ) if( thisbm._numRowsInResult > 0 ) { numChunks += thisbm._chunkSizes.length; ansN += thisbm._numRowsInResult; } chunkSizes = new long[numChunks]; chunkLeftMSB = new int[numChunks]; // using too much space repeating the same value here, but, limited chunkRightMSB = new int[numChunks]; // leave it alone so as not to re-write chunkStitcher, fill with -1 Arrays.fill(chunkRightMSB, -1); chunkBatch = new int[numChunks]; int k = 0; for( SortCombine thisbm : bmList ) { if (thisbm._numRowsInResult == 0) continue; int thisChunkSizes[] = thisbm._chunkSizes; for (int j=0; j<thisChunkSizes.length; j++) { chunkSizes[k] = thisChunkSizes[j]; chunkLeftMSB [k] = thisbm._leftSB._msb; chunkBatch[k] = j; k++; } } Log.debug("took: " + (System.nanoTime() - t0) / 1e9 +" seconds."); return ansN; } public static Frame allocatePopulateChunk(List<SortCombine> bmList, Frame leftFrame, long ansN, long chunkSizes[], int chunkLeftMSB[], int chunkRightMSB[], int chunkBatch[], long mergeId) { // Now we can stitch together the final frame from the raw chunks that were // put into the store Log.info("Allocating and populated espc ..."); long t0 = System.nanoTime(); long espc[] = new long[chunkSizes.length+1]; int i=0; long sum=0; for (long s : chunkSizes) { espc[i++] = sum; sum+=s; } espc[espc.length-1] = sum; Log.debug("took: " + (System.nanoTime() - t0) / 1e9+" seconds"); assert(sum==ansN); Log.info("Allocating dummy vecs/chunks of the final frame ..."); t0 = System.nanoTime(); int numLeftCols = leftFrame.numCols(); int numColsInResult = numLeftCols; final byte[] types = new byte[numColsInResult]; final String[][] doms = new String[numColsInResult][]; final String[] names = new String[numColsInResult]; for (int j=0; j<numLeftCols; j++) { types[j] = leftFrame.vec(j).get_type(); doms[j] = leftFrame.domains()[j]; names[j] = leftFrame.names()[j]; } Key<Vec> key = Vec.newKey(); Vec[] vecs = new Vec(key, Vec.ESPC.rowLayout(key, espc)).makeCons(numColsInResult, 0, doms, types); Log.debug("took: " + (System.nanoTime() - t0) / 1e9+" seconds"); Log.info("Finally stitch together by overwriting dummies ..."); t0 = System.nanoTime(); Frame fr = new Frame(names, vecs); ChunkStitcher ff = new ChunkStitcher(chunkSizes, chunkLeftMSB, chunkRightMSB, chunkBatch, mergeId); ff.doAll(fr); Log.debug("took: " + (System.nanoTime() - t0) / 1e9+" seconds."); return fr; } public static Frame sortOnly(final Frame leftFrame, final int leftCols[], int[][] id_maps, int[] ascendingL) { final long t0 = System.nanoTime(); final long mergeId = nextMergeId(); createIndex(true, leftFrame, leftCols, id_maps, ascendingL, mergeId); // sort the 
columns. Log.info("Making BinaryMerge RPC calls ... "); List<SortCombine> bmList = gatherSameMSBRows(leftFrame, mergeId); // For each MSB, gather sorted rows with same MSB into one spot Log.info("Allocating and populating chunk info (e.g. size and batch number) ..."); long ansN = 0; int numChunks = 0; for (SortCombine thisbm : bmList) if (thisbm._numRowsInResult > 0) { numChunks += thisbm._chunkSizes.length; ansN += thisbm._numRowsInResult; } long chunkSizes[] = new long[numChunks]; int chunkLeftMSB[] = new int[numChunks]; // using too much space repeating the same value here, but, limited int chunkRightMSB[] = new int[numChunks]; // leave it alone so as not to re-write chunkStitcher, fill with -1 Arrays.fill(chunkRightMSB, -1); int chunkBatch[] = new int[numChunks]; int k = 0; for (SortCombine thisbm : bmList) { if (thisbm._numRowsInResult == 0) continue; int thisChunkSizes[] = thisbm._chunkSizes; for (int j = 0; j < thisChunkSizes.length; j++) { chunkSizes[k] = thisChunkSizes[j]; chunkLeftMSB[k] = thisbm._leftSB._msb; chunkBatch[k] = j; k++; } } Log.debug("took: " + (System.nanoTime() - t0) / 1e9 + " seconds."); long finalRowNumber = allocateChunk(bmList, chunkSizes, chunkLeftMSB, chunkRightMSB, chunkBatch); Log.info("Populate chunks and form final sorted frame ..."); return allocatePopulateChunk(bmList, leftFrame, finalRowNumber, chunkSizes, chunkLeftMSB, chunkRightMSB, chunkBatch, mergeId); } private static RadixOrder createIndex(boolean isLeft, Frame fr, int[] cols, int[][] id_maps, int[] ascending, long mergeId) { Log.info("Creating "+(isLeft ? "left" : "right")+" index ..."); long t0 = System.nanoTime(); RadixOrder idxTask = new RadixOrder(fr, isLeft, cols, id_maps, ascending, mergeId); H2O.submitTask(idxTask); // each of those launches an MRTask idxTask.join(); Log.debug("*** Creating "+(isLeft ? "left" : "right")+" index took: " + (System.nanoTime() - t0) / 1e9 + " seconds ***"); return idxTask; } static class ChunkStitcher extends MRTask<ChunkStitcher> { final long _chunkSizes[]; final int _chunkLeftMSB[]; final int _chunkRightMSB[]; final int _chunkBatch[]; final long _mergeId; ChunkStitcher(long[] chunkSizes, int[] chunkLeftMSB, int[] chunkRightMSB, int[] chunkBatch, long mergeId ) { _chunkSizes = chunkSizes; _chunkLeftMSB = chunkLeftMSB; _chunkRightMSB= chunkRightMSB; _chunkBatch = chunkBatch; _mergeId = mergeId; } @Override public void map(Chunk[] cs) { int chkIdx = cs[0].cidx(); Futures fs = new Futures(); for (int i=0;i<cs.length;++i) { Key destKey = cs[i].vec().chunkKey(chkIdx); assert(cs[i].len() == _chunkSizes[chkIdx]); Key k = BinaryMerge.getKeyForMSBComboPerCol(_chunkLeftMSB[chkIdx], _chunkRightMSB[chkIdx], i, _chunkBatch[chkIdx], _mergeId); Chunk ck = DKV.getGet(k); DKV.put(destKey, ck, fs, /*don't cache*/true); DKV.remove(k); } fs.blockForPending(); } } static long nextMergeId() { return H2O.runOnLeaderNode(new NextSeqRunnable())._seq; } private static class NextSeqRunnable extends H2O.RemoteRunnable<NextSeqRunnable> { private long _seq; @Override public void run() { _seq = Merge._mergeSeq.incrementAndGet(); } } }
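// Illustrative sketch (not part of the H2O sources): Merge above stitches the result together by
// turning the per-chunk result sizes into a cumulative espc layout (espc[i] = rows before chunk i,
// last entry = total rows) before allocating the dummy Vecs. The helper below shows just that
// construction; the class name and the sample sizes are hypothetical.
class EspcSketch {
  static long[] buildEspc(long[] chunkSizes) {
    long[] espc = new long[chunkSizes.length + 1];
    long sum = 0;
    for (int i = 0; i < chunkSizes.length; i++) {
      espc[i] = sum;                 // chunk i starts at the running total of earlier chunk sizes
      sum += chunkSizes[i];
    }
    espc[chunkSizes.length] = sum;   // final entry is the total row count of the merged frame
    return espc;
  }

  public static void main(String[] args) {
    long[] espc = buildEspc(new long[]{4, 5, 6});
    System.out.println(java.util.Arrays.toString(espc)); // prints [0, 4, 9, 15]
  }
}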
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/PermutationVarImp.java
package water.rapids; import hex.*; import water.Futures; import water.H2O; import water.H2OError; import water.exceptions.H2OFailException; import water.fvec.Frame; import water.fvec.Vec; import water.util.*; import java.util.*; import java.util.concurrent.*; import java.util.stream.IntStream; import static water.util.RandomUtils.getRNG; /** * Permutation Variable (feature) importance measures the increase in the prediction error of the model after permuting * the variables' values, which breaks the relationship between the variables and the true outcome. * https://christophm.github.io/interpretable-ml-book/feature-importance.html * <p> * Calculate permutation variables importance, by shuffling randomly each variable of the training Frame, * scoring the model with the newly created frame using One At a Time approach * and Morris method; creating TwoDimTable with relative, scaled, and percentage value * TwoDimTable with mean of the absolute value, and standard deviation of all features importance */ public class PermutationVarImp { private final Model _model; private final Frame _inputFrame; /** * Constructor that stores the model, frame * * @param model trained model * @param fr training frame */ public PermutationVarImp(Model model, Frame fr) { if (fr.numRows() < 2) throw new IllegalArgumentException("Frame must contain more than 1 rows to be used in permutation variable importance!"); if (!ArrayUtils.contains(fr.names(), model._parms._response_column)) { throw new IllegalArgumentException("Frame must contain the response column for the use in permutation variable importance!"); } _model = model; _inputFrame = fr; } /** * Returns the metric (loss function) selected by the user (mse is default) * * @throws IllegalArgumentException if metric could not be loaded */ private static double getMetric(ModelMetrics mm, String metric) { assert mm != null; double metricValue = ModelMetrics.getMetricFromModelMetric(mm, metric); if (Double.isNaN(metricValue)) throw new IllegalArgumentException("Model doesn't support the metric following metric " + metric); return metricValue; } private String inferAndValidateMetric(String metric) { Set allowed_metrics = ModelMetrics.getAllowedMetrics(_model._key); metric = metric.toLowerCase(); if (metric.equals("auto")) { if (_model._output._training_metrics instanceof ModelMetricsBinomial) metric = "auc"; else if (_model._output._training_metrics instanceof ModelMetricsRegression) metric = "rmse"; else if (_model._output._training_metrics instanceof ModelMetricsMultinomial) metric = "logloss"; else throw new IllegalArgumentException("Unable to infer metric. Please specify metric for permutation variable importance."); } if (!allowed_metrics.contains(metric)) throw new IllegalArgumentException("Permutation Variable Importance doesn't support " + metric + " for model " + _model._key); return metric; } /** * Used for shuffling the next feature asynchronously while the previous one is being evaluated. 
*/ private Future<Vec> precomputeShuffledVec(ExecutorService executor, Frame fr, HashSet<String> featuresToCompute, String[] variables, int currentFeature, long seed) { for (int f = currentFeature + 1; f < fr.numCols(); f++) { if (!featuresToCompute.contains(variables[f])) continue; int finalF = f; return executor.submit( () -> VecUtils.shuffleVec(fr.vec(finalF), seed) ); } return null; } Map<String, Double> calculatePermutationVarImp(String metric, long n_samples, final String[] features, long seed) { // Use random seed if set to -1 if (-1 == seed) seed = new Random().nextLong(); if (n_samples == 1) throw new IllegalArgumentException("Unable to permute one row. Please set n_samples to higher value or to -1 to use the whole dataset."); final String[] variables = _inputFrame.names(); HashSet<String> featuresToCompute = new HashSet<>(Arrays.asList((null != features && features.length > 0) ? features : variables)); featuresToCompute.removeAll(Arrays.asList(_model._parms.getNonPredictors())); if (_model._parms._ignored_columns != null) featuresToCompute.removeAll(Arrays.asList(_model._parms._ignored_columns)); final Frame fr; if (n_samples > 1) { if (n_samples > 1000 || _model._parms._weights_column != null) { fr = MRUtils.sampleFrame(_inputFrame, n_samples, _model._parms._weights_column, seed); } else { fr = MRUtils.sampleFrameSmall(_inputFrame, (int) n_samples, seed); } } else { fr = _inputFrame; } _model.score(fr).remove(); final double origMetric = getMetric(ModelMetrics.getFromDKV(_model, fr), metric); ExecutorService executor = Executors.newSingleThreadExecutor(); Vec shuffledFeature = null; Future<Vec> shuffledFeatureFuture = precomputeShuffledVec(executor, fr, featuresToCompute, variables, -1, seed); HashMap<String, Double> result = new HashMap<>(); try { for (int f = 0; f < fr.numCols(); f++) { if (!featuresToCompute.contains(variables[f])) continue; // shuffle values of feature assert shuffledFeatureFuture != null; shuffledFeature = shuffledFeatureFuture.get(); shuffledFeatureFuture = precomputeShuffledVec(executor, fr, featuresToCompute, variables, f, seed); final Vec origFeature = fr.replace(f, shuffledFeature); // score the model again and compute diff _model.score(fr).remove(); // save the difference for the given variable result.put(variables[f], Math.abs(getMetric(ModelMetrics.getFromDKV(_model, fr), metric) - origMetric)); // return the original data fr.replace(f, origFeature); shuffledFeature.remove(); shuffledFeature = null; } } catch (InterruptedException | ExecutionException e) { throw new RuntimeException("Unable to calculate the permutation variable importance.", e); } finally { if (null != fr && fr != _inputFrame) fr.remove(); if (null != shuffledFeature) shuffledFeature.remove(); if (null != shuffledFeatureFuture) shuffledFeatureFuture.cancel(true); executor.shutdownNow(); } return result; } /** * Get PermutationVarImp * * @param metric Metric to use to calculate the variable (feature) importance * @param n_samples Number of samples to use to calculate the variable (feature) importance; Use -1 to use the whole frame * @param features Features to evaluate * @param seed Seed for random generator * @return TwoDimTable of Permutation Feature Importance scores */ public TwoDimTable getPermutationVarImp(String metric, final long n_samples, final String[] features, long seed) { metric = inferAndValidateMetric(metric); Map<String, Double> varImps = calculatePermutationVarImp(metric, n_samples, features, seed); String[] names = new String[varImps.size()]; double[] importance = 
new double[varImps.size()]; int i = 0; for(Map.Entry<String, Double> entry : varImps.entrySet()) { names[i] = entry.getKey(); importance[i++] = entry.getValue(); } // Create TwoDimTable having (Relative + Scaled + percentage) importance return ModelMetrics.calcVarImp(importance, names); } /** * Get Repeated Permutation Variable Importance * * @param metric Metric to use to calculate the variable (feature) importance * @param n_samples Number of samples to use to calculate the variable (feature) importance; Use -1 to use the whole frame * @param n_repeats Number of repeats * @param features Features to evaluate * @param seed Seed for random generator * @return TwoDimTable of Permutation Feature Importance scores */ public TwoDimTable getRepeatedPermutationVarImp(String metric, final long n_samples, final int n_repeats ,final String[] features, long seed) { metric = inferAndValidateMetric(metric); Map<String, Double>[] varImps = new HashMap[n_repeats]; for (int i = 0; i < n_repeats; i++) { varImps[i] = calculatePermutationVarImp(metric, n_samples, features, (seed == -1 ? -1 : seed + i)); } String[] names = new String[varImps[0].size()]; // One row per feature, one column per PVI evaluation double[/* features */][/* repeats */] importance = new double[varImps[0].size()][n_repeats]; List<Map.Entry<String, Double>> sortedFeatures = new ArrayList<>(varImps[0].entrySet()); sortedFeatures.sort(Map.Entry.comparingByValue(Collections.reverseOrder())); int i = 0; for(Map.Entry<String, Double> entry : sortedFeatures) { names[i] = entry.getKey(); for (int j = 0; j < n_repeats; j++) { importance[i][j] = varImps[j].get(entry.getKey()); } i++; } return new TwoDimTable( "Repeated Permutation Variable Importance", null, names, IntStream.range(0, n_repeats).mapToObj((run) -> "Run "+(run+1)).toArray(String[]::new), IntStream.range(0, n_repeats).mapToObj((run) -> "double").toArray(String[]::new), null, "Variable", new String[names.length][], importance ); } /** * Get PermutationVarImp * * @param metric Metric to use to calculate the feature importance * @return TwoDimTable of Permutation Feature Importance scores */ public TwoDimTable getPermutationVarImp(String metric) { return getPermutationVarImp(metric, -1, null, -1); } }
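// Illustrative sketch (not part of the H2O sources): calculatePermutationVarImp above shuffles one
// column at a time, rescores the model, and records the absolute change in the chosen metric.
// The sketch below reduces that loop to plain Java arrays and a generic scorer; the Scorer
// interface and the column-major data layout are hypothetical stand-ins for Model/Frame scoring.
import java.util.Random;

class PermImpSketch {
  interface Scorer { double score(double[][] columns); }     // returns the metric on the given data

  static double[] importance(double[][] columns, Scorer scorer, long seed) {
    double baseline = scorer.score(columns);
    double[] imp = new double[columns.length];
    Random rng = new Random(seed);
    for (int c = 0; c < columns.length; c++) {
      double[] original = columns[c].clone();
      shuffle(columns[c], rng);                              // break the column/target relationship
      imp[c] = Math.abs(scorer.score(columns) - baseline);   // bigger change => more important column
      columns[c] = original;                                 // restore the original values
    }
    return imp;
  }

  static void shuffle(double[] a, Random rng) {              // in-place Fisher-Yates shuffle
    for (int i = a.length - 1; i > 0; i--) {
      int j = rng.nextInt(i + 1);
      double t = a[i]; a[i] = a[j]; a[j] = t;
    }
  }

  public static void main(String[] args) {
    double[][] cols = { {1, 2, 3, 4, 5}, {5, 5, 5, 5, 5} };  // column-major toy data
    Scorer scorer = data -> -Math.abs(data[0][0] - 1);       // a toy metric that only uses column 0
    System.out.println(java.util.Arrays.toString(importance(cols, scorer, 42)));
  }
}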
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/PrimsService.java
package water.rapids;

import water.rapids.ast.AstPrimitive;

import java.util.*;

/**
 * PrimsService manages access to non-core Rapids primitives.
 * This includes algorithm-specific Rapids primitives and 3rd-party Rapids primitives.
 */
class PrimsService {
  static PrimsService INSTANCE = new PrimsService();

  private final ServiceLoader<AstPrimitive> _loader;

  private PrimsService() {
    _loader = ServiceLoader.load(AstPrimitive.class);
  }

  /**
   * Locates all available non-core primitives of the Rapids language.
   * @return list of Rapids primitives
   */
  synchronized List<AstPrimitive> getAllPrims() {
    List<AstPrimitive> prims = new ArrayList<>();
    for (AstPrimitive prim : _loader) {
      prims.add(prim);
    }
    return prims;
  }
}
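// Illustrative sketch (not part of the H2O sources): PrimsService above discovers extra Rapids
// primitives through java.util.ServiceLoader, which instantiates every provider listed under
// META-INF/services for the requested interface. The sketch below shows the same mechanism with
// a hypothetical plugin interface.
import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;

interface PluginSketch { String name(); }

class PluginRegistrySketch {
  static List<PluginSketch> loadAll() {
    List<PluginSketch> found = new ArrayList<>();
    for (PluginSketch p : ServiceLoader.load(PluginSketch.class)) {
      found.add(p);   // one instance per provider entry found on the classpath
    }
    return found;
  }

  public static void main(String[] args) {
    for (PluginSketch p : loadAll()) System.out.println("discovered: " + p.name());
  }
}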
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/rapids/RadixCount.java
package water.rapids; import water.*; import water.fvec.Chunk; import water.util.MathUtils; import java.math.BigInteger; import static java.math.BigInteger.ONE; import static java.math.BigInteger.ZERO; class RadixCount extends MRTask<RadixCount> { static class Long2DArray extends Iced { Long2DArray(int len) { _val = new long[len][]; } long _val[][]; } private Long2DArray _counts; private final int _shift; private final int _col; private final BigInteger _base; // used to determine the unique DKV names since DF._key is null now and // before only an RTMP name anyway private final boolean _isLeft; private final int _id_maps[][]; private final int _ascending; final long _mergeId; RadixCount(boolean isLeft, BigInteger base, int shift, int col, int id_maps[][], int ascending, long mergeId) { _isLeft = isLeft; _base = base; _col = col; _shift = shift; _id_maps = id_maps; _ascending = ascending; _mergeId = mergeId; } // make a unique deterministic key as a function of frame, column and node // make it homed to the owning node. Add current system time to make it not // repeatable. This can cause problem if sort is used in cross-validation /*** * make a unique deterministic key as a function of frame, column and node make it homed to the owning node. * Add current system time to make it not repeatable. This can cause problem if sort is used in cross-validation */ static Key getKey(boolean isLeft, int col, long mergeId, H2ONode node) { return Key.make("__radix_order__MSBNodeCounts_col" + col + "_node" + node.index() + "_" + mergeId + (isLeft ? "_LEFT" : "_RIGHT")); // Each node's contents is different so the node number needs to be in the key // TODO: need the biggestBit in here too, that the MSB is offset from } @Override protected void setupLocal() { _counts = new Long2DArray(_fr.anyVec().nChunks()); } @Override public void map( Chunk chk ) { long tmp[] = _counts._val[chk.cidx()] = new long[256]; boolean isIntVal = chk.vec().isCategorical() || chk.vec().isInt(); // TODO: assert chk instanceof integer or enum; -- but how since many if (chk.vec().isCategorical()) { assert _id_maps[0].length > 0; assert _base.compareTo(ZERO)==0; if (chk.vec().naCnt() == 0) { for (int r=0; r<chk._len; r++) { int ctrVal = _isLeft?BigInteger.valueOf(_id_maps[0][(int)chk.at8(r)]+1).shiftRight(_shift).intValue() :BigInteger.valueOf((int)chk.at8(r)+1).shiftRight(_shift).intValue(); tmp[ctrVal]++; } } else { for (int r=0; r<chk._len; r++) { if (chk.isNA(r)) tmp[0]++; else { int ctrVal = _isLeft?BigInteger.valueOf(_id_maps[0][(int)chk.at8(r)]+1).shiftRight(_shift).intValue() :BigInteger.valueOf((int)chk.at8(r)+1).shiftRight(_shift).intValue(); tmp[ctrVal]++; } } } } else if (!(_isLeft && chk.vec().isCategorical())) { if (chk.vec().naCnt() == 0) { // no NAs in column // There are no NA in this join column; hence branch-free loop. Most // common case as should never really have NA in join columns. for (int r = 0; r < chk._len; r++) { // note that 0th bucket here is for rows to exclude from merge result long ctrVal = isIntVal ? BigInteger.valueOf(chk.at8(r)*_ascending).subtract(_base).add(ONE).shiftRight(_shift).longValue(): MathUtils.convertDouble2BigInteger(_ascending*chk.atd(r)).subtract(_base).add(ONE).shiftRight(_shift).longValue(); tmp[(int) ctrVal]++; // ctrVal is the MSB value of chk.at8(r) } } else { // contains NAs in column // There are some NA in the column so have to branch. TODO: warn user // NA are present in join column for (int r=0; r<chk._len; r++) { if (chk.isNA(r)) tmp[0]++; else { long ctrVal = isIntVal ? 
BigInteger.valueOf(_ascending*chk.at8(r)).subtract(_base).add(ONE).shiftRight(_shift).longValue(): MathUtils.convertDouble2BigInteger(_ascending*chk.atd(r)).subtract(_base).add(ONE).shiftRight(_shift).longValue(); tmp[(int) ctrVal]++; } // Done - we will join NA to NA as data.table does // TODO: allow NA-to-NA join to be turned off. Do that in bmerge as a simple low-cost switch. // Note that NA and the minimum may well both be in MSB 0 but most of // the time we will not have NA in join columns } } } } @Override protected void closeLocal() { DKV.put(getKey(_isLeft, _col, _mergeId, H2O.SELF), _counts, _fs, true); // just the MSB counts per chunk on this node. Most of this spine will be empty here. // TODO: could condense to just the chunks on this node but for now, leave sparse. // We'll use this sparse spine right now on this node and the reduce happens on _o and _x later } }
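// Illustrative sketch (not part of the H2O sources): RadixCount.map above histograms each chunk's
// keys into 256 MSB buckets using (key - base + 1) >> shift, with NAs counted in bucket 0 (which
// the smallest keys may share, as the comment in RadixCount notes). The standalone sketch below
// shows that bucketing; the class name and sample data are hypothetical, with NAs modeled as null.
class MsbCountSketch {
  static long[] countMsbs(Long[] keys, long base, int shift) {
    long[] counts = new long[256];
    for (Long k : keys) {
      if (k == null) counts[0]++;                        // NA rows go to the reserved bucket 0
      else counts[(int) ((k - base + 1) >> shift)]++;    // same arithmetic as RadixCount.map
    }
    return counts;
  }

  public static void main(String[] args) {
    Long[] keys = {100L, 4200L, null, 60000L};
    long base = 100;                                     // the column minimum
    int shift = 8;                                       // chosen so (max - base + 1) >> shift <= 255
    long[] counts = countMsbs(keys, base, shift);
    for (int msb = 0; msb < counts.length; msb++)
      if (counts[msb] > 0) System.out.println("MSB " + msb + ": " + counts[msb]);
  }
}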