index (int64)
repo_id (string)
file_path (string)
content (string)
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/C8Chunk.java
package water.fvec; import water.*; /** * The empty-compression function, where data is in 'long's. */ public class C8Chunk extends Chunk { protected static final long _NA = Long.MIN_VALUE; C8Chunk( byte[] bs ) { _mem=bs; _start = -1; _len = _mem.length>>3; } @Override protected final long at8_impl( int i ) { long res = UDP.get8(_mem,i<<3); if( res == _NA ) throw new IllegalArgumentException("at8 but value is missing"); return res; } @Override protected final double atd_impl( int i ) { long res = UDP.get8(_mem,i<<3); return res == _NA?Double.NaN:res; } @Override protected final boolean isNA_impl( int i ) { return UDP.get8(_mem,i<<3)==_NA; } @Override boolean set_impl(int idx, long l) { return false; } @Override boolean set_impl(int i, double d) { return false; } @Override boolean set_impl(int i, float f ) { return false; } @Override boolean setNA_impl(int idx) { UDP.set8(_mem,(idx<<3),_NA); return true; } @Override boolean hasFloat() { return false; } @Override public AutoBuffer write(AutoBuffer bb) { return bb.putA1(_mem,_mem.length); } @Override public C8Chunk read(AutoBuffer bb) { _mem = bb.bufClose(); _start = -1; _len = _mem.length>>3; assert _mem.length == _len<<3; return this; } @Override NewChunk inflate_impl(NewChunk nc) { nc.set_len(nc.set_sparseLen(0)); for( int i=0; i< len(); i++ ) if(isNA0(i))nc.addNA(); else nc.addNum(at80(i),0); return nc; } }
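The at8_impl/atd_impl logic above boils down to reading the i-th 8-byte slot and mapping the Long.MIN_VALUE sentinel to NA. Below is a minimal standalone sketch of that decoding, assuming big-endian packing; java.nio.ByteBuffer stands in for H2O's UDP.get8 helper, and the class name C8Decode is illustrative only.

import java.nio.ByteBuffer;

// Sketch of C8Chunk-style decoding: 8 bytes per value, Long.MIN_VALUE
// reserved as the missing-value (NA) sentinel. ByteBuffer (big-endian by
// default) is assumed to match UDP.get8's byte order.
public class C8Decode {
  static final long NA = Long.MIN_VALUE;

  static long at8(byte[] mem, int i) {
    long v = ByteBuffer.wrap(mem).getLong(i << 3);   // i-th 8-byte slot
    if (v == NA) throw new IllegalArgumentException("at8 but value is missing");
    return v;
  }

  static double atd(byte[] mem, int i) {
    long v = ByteBuffer.wrap(mem).getLong(i << 3);
    return v == NA ? Double.NaN : v;                 // NA maps to NaN
  }

  public static void main(String[] args) {
    byte[] mem = ByteBuffer.allocate(16).putLong(42L).putLong(NA).array();
    System.out.println(at8(mem, 0));   // 42
    System.out.println(atd(mem, 1));   // NaN
  }
}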
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/C8DChunk.java
package water.fvec; import water.AutoBuffer; import water.MemoryManager; import water.UDP; /** * The empty-compression function, where data is in 'double's. */ public class C8DChunk extends Chunk { C8DChunk( byte[] bs ) { _mem=bs; _start = -1; _len = _mem.length>>3; } @Override protected final long at8_impl( int i ) { double res = UDP.get8d(_mem,i<<3); if( Double.isNaN(res) ) throw new IllegalArgumentException("at8 but value is missing"); return (long)res; } @Override protected final double atd_impl( int i ) { return UDP.get8d(_mem,i<<3) ; } @Override protected final boolean isNA_impl( int i ) { return Double.isNaN(UDP.get8d(_mem,i<<3)); } @Override boolean set_impl(int idx, long l) { return false; } @Override boolean set_impl(int i, double d) { UDP.set8d(_mem,i<<3,d); return true; } @Override boolean set_impl(int i, float f ) { UDP.set8d(_mem,i<<3,f); return true; } @Override boolean setNA_impl(int idx) { UDP.set8d(_mem,(idx<<3),Double.NaN); return true; } @Override boolean hasFloat() { return true; } @Override public AutoBuffer write(AutoBuffer bb) {return bb.putA1(_mem,_mem.length);} @Override public C8DChunk read(AutoBuffer bb) { _mem = bb.bufClose(); _start = -1; _len = _mem.length>>3; assert _mem.length == _len<<3; return this; } @Override NewChunk inflate_impl(NewChunk nc) { nc.alloc_doubles(len()); for( int i=0; i< len(); i++ ) nc.doubles()[i] = UDP.get8d(_mem,(i<<3)); nc.set_len(nc.set_sparseLen(len())); return nc; } // 3.3333333e33 public int pformat_len0() { return 22; } public String pformat0() { return "% 21.15e"; } }
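The pformat0()/pformat_len0() pair above promises that the format "% 21.15e" always fills a 22-character field. A quick standalone check of that width, using the example value from the code comment (the class name is illustrative only):

// Verifies the fixed-width float format used by C8DChunk: one sign blank,
// a 17-character mantissa with 15 fractional digits, and a 4-character
// exponent, for 22 characters total -- the value pformat_len0() returns.
public class PFormatDemo {
  public static void main(String[] args) {
    String s = String.format("% 21.15e", 3.3333333e33);
    System.out.println("[" + s + "] length=" + s.length());  // length=22
  }
}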
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/CBSChunk.java
package water.fvec; import water.AutoBuffer; import water.MemoryManager; import water.H2O; /** A simple chunk for boolean values. In fact a simple bit vector. * Each boolean is represented by 2 bits since we need to represent * NA. */ public class CBSChunk extends Chunk { static protected final byte _NA = 0x02; // Internal representation of NA static final int OFF = 2; protected byte _bpv; protected byte _gap;// number of trailing unused bits at the end (== _len % 8; we allocate bytes, but our length is generally not a multiple of 8) public CBSChunk(byte[] bs, byte gap, byte bpv) { assert gap < 8; assert bpv == 1 || bpv == 2; _mem = bs; _start = -1; _gap = gap; _bpv = bpv; _len = ((_mem.length - OFF)*8 - _gap) / _bpv; // number of boolean items } @Override protected long at8_impl(int idx) { byte b = atb(idx); if( b == _NA ) throw new IllegalArgumentException("at8 but value is missing"); return b; } @Override protected double atd_impl(int idx) { byte b = atb(idx); return b == _NA ? Double.NaN : b; } @Override protected final boolean isNA_impl( int i ) { return atb(i)==_NA; } protected byte atb(int idx) { int vpb = 8 / _bpv; // values per byte int bix = OFF + idx / vpb; // byte index int off = _bpv * (idx % vpb); byte b = _mem[bix]; switch( _bpv ) { case 1: return read1b(b, off); case 2: return read2b(b, off); default: H2O.fail(); } return -1; } @Override boolean set_impl(int idx, long l) { return false; } @Override boolean set_impl(int idx, double d) { return false; } @Override boolean set_impl(int idx, float f ) { return false; } @Override boolean setNA_impl(int idx) { return false; } @Override boolean hasFloat () { return false; } @Override public AutoBuffer write(AutoBuffer bb) { return bb.putA1(_mem, _mem.length); } @Override public Chunk read(AutoBuffer bb) { _mem = bb.bufClose(); _start = -1; _gap = _mem[0]; _bpv = _mem[1]; _len = ((_mem.length - OFF)*8 - _gap) / _bpv; return this; } @Override NewChunk inflate_impl(NewChunk nc) { nc.set_len(nc.set_sparseLen(0)); for (int i=0; i< len(); i++) { int res = atb(i); if (res == _NA) nc.addNA(); else nc.addNum(res,0); } return nc; } /** Writes 1 bit from value into b at the given offset and returns b */ public static byte write1b(byte b, byte val, int off) { val = (byte) ((val & 0x1) << (7-off)); return (byte) (b | val); } /** Writes 2 bits from value into b at the given offset and returns b */ public static byte write2b(byte b, byte val, int off) { val = (byte) ((val & 0x3) << (6-off)); // 0000 00xx << (6-off) return (byte) (b | val); } /** Reads 1 bit from given b at the given offset. */ public static byte read1b(byte b, int off) { return (byte) ((b >> (7-off)) & 0x1); } /** Reads 2 bits from given b at the given offset. */ public static byte read2b(byte b, int off) { return (byte) ((b >> (6-off)) & 0x3); } /** Returns the compressed length in bytes for the given number of values if each value is represented by bpv bits. */ public static int clen(int values, int bpv) { int len = (values*bpv) >> 3; return values*bpv % 8 == 0 ? len : len + 1; } }
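The atb/read2b/write2b arithmetic above is easier to see on a bare byte. Here is a standalone sketch of the 2-bits-per-value packing; it omits CBSChunk's 2-byte OFF header, and TwoBitPack is an illustrative name, not part of H2O.

// Sketch of CBSChunk-style 2-bit packing: 4 values per byte, value 0x02
// reserved for NA. Mirrors the read2b/write2b bit arithmetic above.
public class TwoBitPack {
  static final byte NA = 0x02;

  static byte write2b(byte b, byte val, int off) {   // off in {0,2,4,6}
    return (byte) (b | ((val & 0x3) << (6 - off)));
  }
  static byte read2b(byte b, int off) {
    return (byte) ((b >> (6 - off)) & 0x3);
  }

  static byte get(byte[] mem, int idx) {             // idx-th 2-bit value
    int bix = idx / 4;                               // byte index
    int off = 2 * (idx % 4);                         // bit offset within the byte
    return read2b(mem[bix], off);
  }

  public static void main(String[] args) {
    byte[] mem = new byte[1];
    mem[0] = write2b(mem[0], (byte) 1, 0);           // row 0 -> 1
    mem[0] = write2b(mem[0], NA, 2);                 // row 1 -> NA
    System.out.println(get(mem, 0));                 // 1
    System.out.println(get(mem, 1) == NA);           // true
  }
}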
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/CX0Chunk.java
package water.fvec; import water.*; import java.util.Arrays; import java.util.Iterator; /** Specialized subtype of SPARSE chunk for booleans (bit vector); no NAs. Contains just a list of the rows that are non-zero. */ public final class CX0Chunk extends CXIChunk { // Sparse constructor protected CX0Chunk(int len, int nzs, byte [] buf){super(len,nzs,0,buf);} @Override protected final long at8_impl(int idx) {return getId(findOffset(idx)) == idx?1:0;} @Override protected final double atd_impl(int idx) { return at8_impl(idx); } @Override protected final boolean isNA_impl( int i ) { return false; } @Override boolean hasFloat () { return false; } @Override NewChunk inflate_impl(NewChunk nc) { final int slen = sparseLen(); nc.set_sparseLen(slen); nc.set_len(len()); nc.alloc_mantissa(slen); Arrays.fill(nc.mantissa(),1); nc.alloc_exponent(slen); nc.alloc_indices(slen); nonzeros(nc.indices()); return nc; } public Iterator<Value> values(){ return new SparseIterator(new Value(){ @Override public final long asLong(){return 1;} @Override public final double asDouble() { return 1;} @Override public final boolean isNA(){ return false;} }); } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/CXDChunk.java
package water.fvec; import water.H2O; import water.MemoryManager; import water.UDP; import java.util.Iterator; /** * Created by tomasnykodym on 3/26/14. */ public class CXDChunk extends CXIChunk { protected CXDChunk(int len, int nzs, int valsz, byte [] buf){super(len,nzs,valsz,buf);} // extract fp value from an (byte)offset protected final double getFValue(int off){ if(_valsz == 8) return UDP.get8d(_mem, off + _ridsz); throw H2O.unimpl(); } @Override protected long at8_impl(int idx) { int off = findOffset(idx); if(getId(off) != idx)return 0; double d = getFValue(off); if(Double.isNaN(d)) throw new IllegalArgumentException("at8 but value is missing"); return (long)d; } @Override protected double atd_impl(int idx) { int off = findOffset(idx); if(getId(off) != idx)return 0; return getFValue(off); } @Override protected boolean isNA_impl( int i ) { int off = findOffset(i); if(getId(off) != i)return false; return Double.isNaN(getFValue(off)); } @Override final boolean hasFloat () { return true; } @Override NewChunk inflate_impl(NewChunk nc) { final int slen = sparseLen(); nc.set_len(len()); nc.set_sparseLen(slen); nc.alloc_doubles(slen); nc.alloc_indices(slen); int off = OFF; for( int i = 0; i < slen; ++i, off += _ridsz + _valsz) { nc.indices()[i] = getId(off); nc.doubles()[i] = getFValue(off); } return nc; } public Iterator<Value> values(){ return new SparseIterator(new Value(){ @Override public final long asLong(){ double d = getFValue(_off); if(Double.isNaN(d)) throw new IllegalArgumentException("at8 but value is missing"); return (long)d; } @Override public final double asDouble() {return getFValue(_off);} @Override public final boolean isNA(){ double d = getFValue(_off); return Double.isNaN(d); } }); } public int pformat_len0() { return 22; } public String pformat0() { return "% 21.15e"; } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/CXIChunk.java
package water.fvec; import water.AutoBuffer; import water.H2O; import water.MemoryManager; import water.UDP; import java.util.Iterator; /** * Created by tomasnykodym on 3/18/14. * Sparse chunk. */ public class CXIChunk extends Chunk { protected transient int _sparseLen; // Number of elements in this chunk protected transient int _valsz; // byte size of stored value protected transient int _valsz_log; // protected transient int _ridsz; // byte size of stored (chunk-relative) row nums protected static final int OFF = 6; protected transient int _lastOff = OFF; private static final long [] NAS = {C1Chunk._NA,C2Chunk._NA,C4Chunk._NA,C8Chunk._NA}; protected CXIChunk(int len, int nzs, int valsz, byte [] buf){ assert (valsz == 0 || valsz == 1 || valsz == 2 || valsz == 4 || valsz == 8); _len = len; int log = 0; while((1 << log) < valsz)++log; assert valsz == 0 || (1 << log) == valsz; _valsz = valsz; _valsz_log = log; _ridsz = (len >= 65535)?4:2; UDP.set4(buf,0,len); byte b = (byte) _ridsz; buf[4] = b; buf[5] = (byte) _valsz; _mem = buf; _sparseLen = (_mem.length - OFF) / (_valsz + _ridsz); assert (_mem.length - OFF) % (_valsz + _ridsz) == 0:"unexpected mem.length in sparse chunk: mem.length = " + (_mem.length - OFF) + "val_sz = " + _valsz + ", rowId_sz = " + _ridsz; } @Override public final boolean isSparse() {return true;} @Override public final int sparseLen(){return _sparseLen;} @Override public final int nonzeros(int [] arr){ int len = sparseLen(); int off = OFF; final int inc = _valsz + _ridsz; for(int i = 0; i < len; ++i, off += inc) arr[i] = _ridsz == 2 ? UDP.get2(_mem, off)&0xFFFF : UDP.get4(_mem, off) ; return len; } @Override boolean set_impl(int idx, long l) { return false; } @Override boolean set_impl(int idx, double d) { return false; } @Override boolean set_impl(int idx, float f ) { return false; } @Override boolean setNA_impl(int idx) { return false; } @Override protected long at8_impl(int idx) { int off = findOffset(idx); if(getId(off) != idx)return 0; long v = getIValue(off); if( v== NAS[_valsz_log]) throw new IllegalArgumentException("at8 but value is missing"); return v; } @Override protected double atd_impl(int idx) { int off = findOffset(idx); if(getId(off) != idx)return 0; long v = getIValue(off); return (v == NAS[_valsz_log])?Double.NaN:v; } @Override protected boolean isNA_impl( int i ) { int off = findOffset(i); if(getId(off) != i)return false; return getIValue(off) == NAS[_valsz_log]; } @Override boolean hasFloat () { return false; } @Override public String toString(){ return getClass().getSimpleName() + "( start = " + _start + ", len = " + _len + " sparseLen = " + _sparseLen + " valSz = " + _valsz + " rIdSz = " + _ridsz + ")"; } @Override NewChunk inflate_impl(NewChunk nc) { final int slen = sparseLen(); nc.set_sparseLen(slen); nc.set_len(len()); nc.alloc_mantissa(slen); nc.alloc_exponent(slen); nc.alloc_indices(slen); int off = OFF; for( int i = 0; i < slen; ++i, off += _ridsz + _valsz) { nc.indices()[i] = getId(off); long v = getIValue(off); if(v == NAS[_valsz_log]) nc.setNA_impl2(i); else nc.mantissa()[i] = v; } return nc; } // get id of nth (chunk-relative) stored element protected final int getId(int off){ return _ridsz == 2 ?UDP.get2(_mem,off)&0xFFFF :UDP.get4(_mem,off); } // get offset of nth (chunk-relative) stored element private final int getOff(int n){return OFF + (_ridsz + _valsz)*n;} // extract integer value from an (byte)offset protected final long getIValue(int off){ switch(_valsz){ case 1: return _mem[off+ _ridsz]&0xFF; case 2: return UDP.get2(_mem, off + 
_ridsz); case 4: return UDP.get4(_mem, off + _ridsz); case 8: return UDP.get8(_mem, off + _ridsz); default: throw H2O.unimpl(); } } // find offset of the chunk-relative row id, or -1 if not stored (i.e. sparse zero) protected final int findOffset(int idx) { if(idx >= _len)throw new IndexOutOfBoundsException(); final byte [] mem = _mem; int sparseLen = sparseLen(); if(sparseLen == 0)return 0; final int off = _lastOff; int lastIdx = getId(off); // check the last accessed elem if( idx == lastIdx ) return off; if(idx > lastIdx){ // check the next one final int nextOff = off + _ridsz + _valsz; if(nextOff < mem.length){ int nextId = getId(nextOff); if(idx < nextId)return off; if(idx == nextId){ _lastOff = nextOff; return nextOff; } } } // no match so far, do binary search int lo=0, hi = sparseLen; while( lo+1 != hi ) { int mid = (hi+lo)>>>1; if( idx < getId(getOff(mid))) hi = mid; else lo = mid; } int y = getOff(lo); _lastOff = y; return y; } @Override public AutoBuffer write(AutoBuffer bb) { return bb.putA1(_mem, _mem.length); } @Override public Chunk read(AutoBuffer bb) { _mem = bb.bufClose(); _start = -1; _len = UDP.get4(_mem,0); _ridsz = _mem[4]; _valsz = _mem[5]; _sparseLen = (_mem.length - OFF) / (_valsz + _ridsz); assert (_mem.length - OFF) % (_valsz + _ridsz) == 0:"unexpected mem.length in sparse chunk: mem.length = " + (_mem.length - OFF) + "val_sz = " + _valsz + ", rowId_sz = " + _ridsz; int x = _valsz; int log = 0; while(x > 1){ x = x >>> 1; ++log; } _valsz_log = log; return this; } @Override public final int nextNZ(int rid){ final int off = rid == -1?OFF:findOffset(rid); int x = getId(off); if(x > rid)return x; if(off < _mem.length - _ridsz - _valsz) return getId(off + _ridsz + _valsz); return _len; } public abstract class Value { protected int _off = 0; public int rowInChunk(){return getId(_off);} public abstract long asLong(); public abstract double asDouble(); public abstract boolean isNA(); } public final class SparseIterator implements Iterator<Value> { final Value _val; public SparseIterator(Value v){_val = v;} @Override public final boolean hasNext(){return _val._off < _mem.length - (_ridsz + _valsz);} @Override public final Value next(){ if(_val._off == 0)_val._off = OFF; else _val._off += (_ridsz + _valsz); return _val; } @Override public final void remove(){throw new UnsupportedOperationException();} } public Iterator<Value> values(){ return new SparseIterator(new Value(){ @Override public final long asLong(){ long v = getIValue(_off); if(v == NAS[(_valsz >>> 1) - 1]) throw new IllegalArgumentException("at8 but value is missing"); return v; } @Override public final double asDouble() { long v = getIValue(_off); return (v == NAS[_valsz_log -1])?Double.NaN:v; } @Override public final boolean isNA(){ long v = getIValue(_off); return (v == NAS[_valsz_log]); } }); } }
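findOffset above caches the last hit and otherwise binary-searches the sorted (rowId, value) pairs; any row that is not stored decodes as zero. Below is a simplified sketch of that lookup using plain arrays instead of the packed byte[] layout, so it illustrates only the search idea, not the actual encoding (SparseLookup is an illustrative name).

import java.util.Arrays;

// Sketch of a CXIChunk-style sparse lookup: stored entries are (rowId, value)
// pairs sorted by rowId; a row that is not present decodes as zero.
public class SparseLookup {
  final int[] rowIds;   // sorted chunk-relative row numbers of the nonzeros
  final long[] vals;    // values parallel to rowIds

  SparseLookup(int[] rowIds, long[] vals) { this.rowIds = rowIds; this.vals = vals; }

  long at8(int idx) {
    int pos = Arrays.binarySearch(rowIds, idx);
    return pos >= 0 ? vals[pos] : 0;   // sparse zero when the row is not stored
  }

  public static void main(String[] args) {
    SparseLookup c = new SparseLookup(new int[]{3, 17, 42}, new long[]{7, -1, 99});
    System.out.println(c.at8(17));  // -1
    System.out.println(c.at8(5));   // 0 (not stored)
  }
}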
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/Chunk.java
package water.fvec; import water.*; /** A compression scheme, over a chunk - a single array of bytes. The *actual* * vector header info is in the Vec struct - which contains info to find all * the bytes of the distributed vector. This struct is basically a 1-entry * chunk cache of the total vector. Subclasses of this abstract class * implement (possibly empty) compression schemes. */ public abstract class Chunk extends Iced implements Cloneable { public long _start = -1; // Start element; filled after AutoBuffer.read public int _len; // Number of elements in this chunk public int len() { return _len; } public int set_len(int l) { _len = l; return _len; } protected Chunk _chk2; // Normally==null, changed if chunk is written to public Vec _vec; // Owning Vec; filled after AutoBuffer.read public byte[] _mem; // Short-cut to the embedded memory; WARNING: holds onto a large array public final boolean readable( ) { return _vec.readable(); } public final boolean writable( ) { return _vec.writable(); } public final byte[] getBytes() { return _mem; } /** Load a long value. Floating point values are silently rounded to an * integer. Throws if the value is missing. * <p> * Loads from the 1-entry chunk cache, or misses-out. This version uses * absolute element numbers, but must convert them to chunk-relative indices * - requiring a load from an aliasing local var, leading to lower quality * JIT'd code (similar issue to using iterator objects). * <p> * Slightly slower than 'at0' since it range checks within a chunk. */ public final long at8( long i ) { long x = i - (_start>0 ? _start : 0); if( 0 <= x && x < _len ) return at80((int)x); throw new ArrayIndexOutOfBoundsException(""+_start+" <= "+i+" < "+(_start+_len)); } /** Load a double value. Returns Double.NaN if value is missing. * <p> * Loads from the 1-entry chunk cache, or misses-out. This version uses * absolute element numbers, but must convert them to chunk-relative indices * - requiring a load from an aliasing local var, leading to lower quality * JIT'd code (similar issue to using iterator objects). * <p> * Slightly slower than 'at80' since it range checks within a chunk. */ public final double at( long i ) { long x = i - (_start>0 ? _start : 0); if( 0 <= x && x < _len ) return at0((int)x); throw new ArrayIndexOutOfBoundsException(getClass().getSimpleName() + " " +_start+" <= "+i+" < "+(_start+_len)); } /** Fetch the missing-status the slow way. */ public final boolean isNA(long i) { long x = i - (_start>0 ? _start : 0); if( 0 <= x && x < _len ) return isNA0((int)x); throw new ArrayIndexOutOfBoundsException(getClass().getSimpleName() + " " +_start+" <= "+i+" < "+(_start+_len)); } public final long at16l( long i ) { long x = i - (_start>0 ? _start : 0); if( 0 <= x && x < _len ) return at16l0((int)x); throw new ArrayIndexOutOfBoundsException(getClass().getSimpleName() + " " +_start+" <= "+i+" < "+(_start+_len)); } public final long at16h( long i ) { long x = i - (_start>0 ? _start : 0); if( 0 <= x && x < _len ) return at16h0((int)x); throw new ArrayIndexOutOfBoundsException(getClass().getSimpleName() + " " +_start+" <= "+i+" < "+(_start+_len)); } /** The zero-based API. Somewhere between 10% to 30% faster in a tight-loop * over the data than the generic at() API. Probably no gain on larger * loops. The row reference is zero-based on the chunk, and should * range-check by the JIT as expected. */ public final double at0 ( int i ) { return _chk2 == null ? atd_impl(i) : _chk2. atd_impl(i); } public final long at80 ( int i ) { return _chk2 == null ? 
at8_impl(i) : _chk2. at8_impl(i); } public final boolean isNA0( int i ) { return _chk2 == null ?isNA_impl(i) : _chk2.isNA_impl(i); } public final long at16l0( int i ) { return _chk2 == null ? at16l_impl(i) : _chk2.at16l_impl(i); } public final long at16h0( int i ) { return _chk2 == null ? at16h_impl(i) : _chk2.at16h_impl(i); } /** Slightly slower than 'at0' inside a chunk; goes (very) slow outside the * chunk instead of throwing. First outside-chunk fetches and caches whole * chunk; maybe takes multiple msecs. 2nd and later touches in the same * outside-chunk probably run 100x slower than inside-chunk accesses. */ public final double at_slow( long i ) { long x = i-_start; return (0 <= x && x < _len) ? at0((int)x) : _vec. at(i); } public final long at8_slow( long i ) { long x = i-_start; return (0 <= x && x < _len) ? at80((int)x) : _vec.at8(i); } public final boolean isNA_slow( long i ) { long x = i-_start; return (0 <= x && x < _len) ? isNA0((int)x) : _vec.isNA(i); } /** Write element the slow way, as a long. There is no way to write a * missing value with this call. Under rare circumstances this can throw: * if the long does not fit in a double (value is larger magnitude than * 2^52), AND float values are stored in Vector. In this case, there is no * common compatible data representation. */ public final long set( long i, long l) { long x = i-_start; return (0 <= x && x < _len) ? set0((int)x,l) : _vec.set(i,l); } /** Write element the slow way, as a double. Double.NaN will be treated as * a set of a missing element. */ public final double set( long i, double d) { long x = i-_start; return (0 <= x && x < _len) ? set0((int)x,d) : _vec.set(i,d); } /** Write element the slow way, as a float. Float.NaN will be treated as * a set of a missing element. */ public final float set( long i, float f) { long x = i-_start; return (0 <= x && x < _len) ? set0((int)x,f) : _vec.set(i,f); } /** Set the element as missing the slow way. */ public final boolean setNA( long i ) { long x = i-_start; return (0 <= x && x < _len) ? setNA0((int)x) : _vec.setNA(i); } private void setWrite() { if( _chk2 != null ) return; // Already setWrite assert !(this instanceof NewChunk) : "Cannot direct-write into a NewChunk, only append"; _vec.preWriting(); // One-shot writing-init _chk2 = clone(); // Flag this chunk as having been written into assert _chk2._chk2 == null; // Clone has NOT been written into } /** * Set a long element in a chunk given a 0-based chunk local index. * * Write into a chunk. * May rewrite/replace chunks if the chunk needs to be * "inflated" to hold larger values. Returns the input value. * * Note that the idx is an int (instead of a long), which tells you * that index 0 is the first row in the chunk, not the whole Vec. */ public final long set0(int idx, long l) { setWrite(); if( _chk2.set_impl(idx,l) ) return l; (_chk2 = inflate_impl(new NewChunk(this))).set_impl(idx,l); return l; } /** Set a double element in a chunk given a 0-based chunk local index. */ public final double set0(int idx, double d) { setWrite(); if( _chk2.set_impl(idx,d) ) return d; (_chk2 = inflate_impl(new NewChunk(this))).set_impl(idx,d); return d; } /** Set a floating element in a chunk given a 0-based chunk local index. */ public final float set0(int idx, float f) { setWrite(); if( _chk2.set_impl(idx,f) ) return f; (_chk2 = inflate_impl(new NewChunk(this))).set_impl(idx,f); return f; } /** Set the element in a chunk as missing given a 0-based chunk local index. 
*/ public final boolean setNA0(int idx) { setWrite(); if( _chk2.setNA_impl(idx) ) return true; (_chk2 = inflate_impl(new NewChunk(this))).setNA_impl(idx); return true; } /** After writing we must call close() to register the bulk changes */ public void close( int cidx, Futures fs ) { if( this instanceof NewChunk ) _chk2 = this; if( _chk2 == null ) return; // No change? if( _chk2 instanceof NewChunk ) _chk2 = ((NewChunk)_chk2).new_close(); DKV.put(_vec.chunkKey(cidx),_chk2,fs,true); // Write updated chunk back into K/V if( _vec._cache == this ) _vec._cache = null; } public int cidx() { return _vec.elem2ChunkIdx(_start); } /** Chunk-specific readers. */ abstract protected double atd_impl(int idx); abstract protected long at8_impl(int idx); abstract protected boolean isNA_impl(int idx); protected long at16l_impl(int idx) { throw new IllegalArgumentException("Not a UUID"); } protected long at16h_impl(int idx) { throw new IllegalArgumentException("Not a UUID"); } /** Chunk-specific writer. Returns false if the value does not fit in the * current compression scheme. */ abstract boolean set_impl (int idx, long l ); abstract boolean set_impl (int idx, double d ); abstract boolean set_impl (int idx, float f ); abstract boolean setNA_impl(int idx); public int nextNZ(int rid){return rid+1;} public boolean isSparse() {return false;} public int sparseLen(){return _len;} /** * Get chunk-relative indices of values (nonzeros for sparse, all for dense) stored in this chunk. * For dense chunks, this will contain the indices of all the rows in this chunk. * * @return array of chunk-relative indices of values stored in this chunk. */ public int nonzeros(int [] res){ for( int i = 0; i < _len; ++i) res[i] = i; return _len; } /** * Get chunk-relative indices of values (nonzeros for sparse, all for dense) stored in this chunk. * For dense chunks, this will contain the indices of all the rows in this chunk. * * @return array of chunk-relative indices of values stored in this chunk. */ public final int [] nonzeros () { int [] res = MemoryManager.malloc4(sparseLen()); nonzeros(res); return res; } /** Chunk-specific bulk inflator back to NewChunk. Used when writing into a * chunk and the written value is out-of-range for an update-in-place operation. * Bulk copy from the compressed form into the nc._ls array. 
*/ abstract NewChunk inflate_impl(NewChunk nc); abstract boolean hasFloat(); /** Chunk-specific implementations of read and write */ public abstract AutoBuffer write(AutoBuffer bb); public abstract Chunk read (AutoBuffer bb); // Support for fixed-width format printing public String pformat () { return pformat0(); } public int pformat_len() { return pformat_len0(); } protected String pformat0() { assert !hasFloat() : "need impl:"+getClass(); // Floats handled in subclasses long min = (long)_vec.min(); if( min < 0 ) return "% "+pformat_len0()+"d"; return "%"+pformat_len0()+"d"; } protected int pformat_len0() { assert !hasFloat(); // Floats handled in subclasses int len=0; long min = (long)_vec.min(); if( min < 0 ) len++; long max = Math.max(Math.abs(min),Math.abs((long)_vec.max())); for( int i=1; i<PrettyPrint.powers10i.length; i++ ) if( max < PrettyPrint.powers10i[i] ) return i+len; return 20; } protected int pformat_len0( double scale, int lg ) { double dx = Math.log10(scale); int x = (int)dx; if( x >= 0 && PrettyPrint.pow10i(x) != scale ) throw H2O.unimpl(); int w=1/*blank/sign*/+lg/*compression limits digits*/+1/*dot*/+1/*e*/+1/*neg exp*/+2/*digits of exp*/; return w; } @Override public Chunk clone() { return (Chunk)super.clone(); } @Override public String toString() { return getClass().getSimpleName() + "(start = " + _start + ", len = " + _len + ")"; } public long byteSize() { long s= _mem == null ? 0 : _mem.length; s += (2+5)*8 + 12; // 2 hdr words, 5 other words, @8bytes each, plus mem array hdr if( _chk2 != null ) s += _chk2.byteSize(); return s; } }
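The at()/at8()/isNA() family above all perform the same translation: subtract the chunk's starting row to get a chunk-relative index, range-check, and call the zero-based fast path. A simplified standalone sketch of that pattern follows; DemoChunk is hypothetical and skips the _start>0 clamp and the _chk2 copy-on-write indirection.

// Sketch of the absolute-to-chunk-relative index translation done by Chunk.at().
public class DemoChunk {
  final long start;      // first absolute row held by this chunk
  final double[] data;   // stand-in for the "decompressed" storage

  DemoChunk(long start, double[] data) { this.start = start; this.data = data; }

  double at(long i) {                       // absolute row number
    long x = i - start;                     // chunk-relative index
    if (0 <= x && x < data.length) return at0((int) x);
    throw new ArrayIndexOutOfBoundsException(start + " <= " + i + " < " + (start + data.length));
  }

  double at0(int i) { return data[i]; }     // zero-based, chunk-local fast path

  public static void main(String[] args) {
    DemoChunk c = new DemoChunk(1000, new double[]{1.5, 2.5, 3.5});
    System.out.println(c.at(1001));         // 2.5
  }
}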
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/ChunkSplitter.java
package water.fvec; import java.util.Iterator; import water.Futures; import water.util.Log; /** Helper to provide access to package * hidden methods and attributes. */ public class ChunkSplitter { /** Reset the len fields of the given chunk */ public static NewChunk resetLen(NewChunk nc) { nc._len = nc._sparseLen = 0; return nc; } /** Extract a portion of the given chunk into the given output chunk. */ public static void extractChunkPart(Chunk ic, Chunk oc, int startRow, int nrows, Futures fs) { try { NewChunk dst = new NewChunk(oc); dst._len = dst._sparseLen = 0; NewChunk src = new NewChunk(ic); src = ic.inflate_impl(src); assert src._len == ic._len; // Iterate over values, skipping all zeros int remain = nrows; Iterator<NewChunk.Value> it = src.values(startRow, startRow + nrows); int off = startRow - 1; while (it.hasNext()) { NewChunk.Value v = it.next(); final int rid = v.rowId0(); assert rid < startRow + nrows; int add = rid - off; // number of values to add off = rid; dst.addZeros(add - 1); // append (add-1) zeros v.add2Chunk(dst); // followed by a value remain -= add; assert remain >= 0; } // Handle the case when the last added value is followed by zeros up to startRow+nrows dst.addZeros(remain); assert dst._len == oc._len : "NewChunk.dst.len = " + dst._len + ", oc._len = " + oc._len; dst.close(dst.cidx(), fs); } catch(RuntimeException t){ Log.err("got exception in ChunkSplitter, ic = " + ic + ", oc = " + oc + " startRow = " + startRow + " nrows = " + nrows); throw t; } } }
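extractChunkPart walks the source chunk's stored values inside [startRow, startRow+nrows) and pads with zeros between them, so the output covers exactly nrows rows. A small sketch of that zero-filling idea, using a TreeMap of nonzeros instead of NewChunk iterators (names are illustrative, not the H2O API):

import java.util.Arrays;
import java.util.Map;
import java.util.TreeMap;

// Sketch of the zero-filling extraction: copy the nonzeros that fall in the
// requested row range and leave every other slot at its default of zero.
public class ExtractPart {
  static double[] extract(TreeMap<Integer, Double> nonzeros, int startRow, int nrows) {
    double[] out = new double[nrows];                       // zeros by default
    for (Map.Entry<Integer, Double> e : nonzeros.subMap(startRow, startRow + nrows).entrySet())
      out[e.getKey() - startRow] = e.getValue();
    return out;
  }

  public static void main(String[] args) {
    TreeMap<Integer, Double> nz = new TreeMap<>();
    nz.put(2, 5.0); nz.put(7, -1.0);
    System.out.println(Arrays.toString(extract(nz, 1, 5)));  // [0.0, 5.0, 0.0, 0.0, 0.0]
  }
}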
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/FileVec.java
package water.fvec; import water.*; public abstract class FileVec extends ByteVec { long _len; // File length final byte _be; public static final int CHUNK_SZ = 1 << LOG_CHK; protected FileVec(Key key, long len, byte be) { super(key,null); _len = len; _be = be; } @Override public long length() { return _len; } @Override public int nChunks() { return (int)Math.max(1,_len>>LOG_CHK); } @Override public boolean writable() { return false; } //NOTE: override ALL rollups-related methods or ALL files will be loaded after import. @Override public double min() { return Double.NaN; } /** Return column max - lazily computed as needed. */ @Override public double max() { return Double.NaN; } /** Return column mean - lazily computed as needed. */ @Override public double mean() { return Double.NaN; } /** Return column standard deviation - lazily computed as needed. */ @Override public double sigma(){ return Double.NaN; } /** Return column missing-element-count - lazily computed as needed. */ @Override public long naCnt() { return 0; } /** Is all integers? */ @Override public boolean isInt(){return false; } /** Size of compressed vector data. */ @Override public long byteSize(){return length(); } // Convert a row# to a chunk#. For constant-sized chunks this is a little // shift-and-add math. For variable-sized chunks this is a binary search, // with a sane API (JDK has an insane API). @Override int elem2ChunkIdx( long i ) { assert 0 <= i && i <= _len : " "+i+" < "+_len; int cidx = (int)(i>>LOG_CHK); int nc = nChunks(); if( i >= _len ) return nc; if( cidx >= nc ) cidx=nc-1; // Last chunk is larger assert 0 <= cidx && cidx < nc; return cidx; } // Convert a chunk-index into a starting row #. Constant sized chunks // (except for the last, which might be a little larger), and size-1 rows so // this is a little shift-n-add math. @Override public long chunk2StartElem( int cidx ) { return (long)cidx <<LOG_CHK; } // Convert a chunk-key to a file offset. Size 1 rows, so this is a direct conversion. static public long chunkOffset ( Key ckey ) { return (long)chunkIdx(ckey)<<LOG_CHK; } // Reverse: convert a chunk-key into a cidx static public int chunkIdx(Key ckey) { assert ckey._kb[0]==Key.DVEC; return UDP.get4(ckey._kb,1+1+4); } // Convert a chunk# into a chunk - does lazy-chunk creation. As chunks are // asked-for the first time, we make the Key and an empty backing DVec. // Touching the DVec will force the file load. @Override public Value chunkIdx( int cidx ) { final long nchk = nChunks(); assert 0 <= cidx && cidx < nchk; Key dkey = chunkKey(cidx); Value val1 = DKV.get(dkey);// Check for an existing one... will fetch data as needed if( val1 != null ) return val1; // Found an existing one? // Lazily create a DVec for this chunk int len = (int)(cidx < nchk-1 ? CHUNK_SZ : (_len-chunk2StartElem(cidx))); // DVec is just the raw file data with a null-compression scheme Value val2 = new Value(dkey,len,null,TypeMap.C1NCHUNK,_be); val2.setdsk(); // It is already on disk. // If not-home, then block till the Key is everywhere. Most calls here are // from the parser loading a text file, and the parser splits the work such // that most puts here are on home - so this is a simple speed optimization: // do not make a Futures nor block on it on home. Futures fs = dkey.home() ? null : new Futures(); // Atomically insert: fails on a race, but then return the old version Value val3 = DKV.DputIfMatch(dkey,val2,null,fs); if( !dkey.home() && fs != null ) fs.blockForPending(); return val3 == null ? val2 : val3; } }
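For constant-size chunks, FileVec's row-to-chunk mapping is pure shift math on LOG_CHK, which is defined in the parent Vec class and not shown above. The sketch below uses an assumed LOG_CHK of 20 (1 MB chunks) purely as a placeholder, and ignores the clamping that nChunks()/elem2ChunkIdx() do so the last chunk can absorb the remainder.

// Sketch of the constant-size chunk math: offset -> chunk index is a right
// shift, chunk index -> starting offset is the inverse left shift.
public class ChunkMath {
  static final int LOG_CHK = 20;                 // assumed value, for illustration only
  static final int CHUNK_SZ = 1 << LOG_CHK;

  static int elem2ChunkIdx(long i) { return (int) (i >> LOG_CHK); }
  static long chunk2StartElem(int cidx) { return (long) cidx << LOG_CHK; }

  public static void main(String[] args) {
    long i = 5L * CHUNK_SZ + 123;                // some byte offset into the file
    int cidx = elem2ChunkIdx(i);
    System.out.println(cidx);                    // 5
    System.out.println(chunk2StartElem(cidx));   // 5 * CHUNK_SZ
  }
}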
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/Frame.java
package water.fvec; import jsr166y.CountedCompleter; import water.*; import water.H2O.H2OCountedCompleter; import water.exec.Flow; import water.util.Log; import java.io.IOException; import java.io.InputStream; import java.util.Arrays; import java.util.HashMap; import java.util.IllegalFormatException; /** * A collection of named Vecs. Essentially an R-like data-frame. Multiple * Frames can reference the same Vecs. A Frame is a lightweight object, it is * meant to be cheaply created and discarded for data munging purposes. * E.g. to exclude a Vec from a computation on a Frame, create a new Frame that * references all the Vecs but this one. */ public class Frame extends Lockable<Frame> { public String[] _names; Key[] _keys; // Keys for the vectors private transient Vec[] _vecs;// The Vectors (transient to avoid network traffic) private transient Vec _col0; // First readable vec; fast access to the VectorGroup's Chunk layout private final UniqueId uniqueId; public Frame(Key k){ super(k); uniqueId = new UniqueFrameId(k, this); } public Frame( Frame fr ) { this(fr._key,fr._names.clone(), fr.vecs().clone()); _col0 = null; } public Frame( Vec... vecs ){ this(null,vecs);} public Frame( String[] names, Vec[] vecs ) { this(null,names,vecs); } public Frame( Key key, String[] names, Vec[] vecs ) { super(key); this.uniqueId = new UniqueFrameId(_key, this); if( names==null ) { names = new String[vecs.length]; for( int i=0; i<vecs.length; i++ ) names[i] = "C"+(i+1); } assert names.length == vecs.length : "Number of columns does not match to number of cols' names."; _names=names; _vecs=vecs; _keys = new Key[vecs.length]; Futures fs = new Futures(); for( int i=0; i<vecs.length; i++ ) { Key k = _keys[i] = vecs[i]._key; if( DKV.get(k)==null ) // If not already in KV, put it there DKV.put(k,vecs[i], fs); } fs.blockForPending(); assert checkCompatible(); } /** * Task to compare the two frames, returns true if they are identical. * We can't in general expect frames to be bit-compatible so we compare the numbers, * integers are compared exaclty, doubles only with given precision (1e-8 is default). * (compression scheme may be altered by the way they were parsed and by rebalancing) * The frames are expected to be compatible. * @param f * @return */ public final boolean isIdentical(Frame f){ FrameIdenticalTask fbt = new FrameIdenticalTask(this,f); H2O.submitTask(fbt); fbt.join(); return fbt._res; } public static class FrameIdenticalTask extends H2OCountedCompleter { final Frame _f1; final Frame _f2; public FrameIdenticalTask(Frame f1, Frame f2){_f1 = f1; _f2 = f2;} boolean _res; double _fpointPrecision = 1e-8; private Vec.VecIdenticalTask[] _vts; @Override public void compute2() { if(_f1 == _f2){ _res = true; } else if(Arrays.deepEquals(_f1.names(), _f2.names())){ _vts = new Vec.VecIdenticalTask[_f1.numCols()]; addToPendingCount(_vts.length); for(int i = 0; i < _vts.length; ++i) { _vts[i] = new Vec.VecIdenticalTask(this,_fpointPrecision); _vts[i].asyncExec(_f1.vec(i),_f2.vec(i)); } } tryComplete(); } @Override public void onCompletion(CountedCompleter cc){ if(_vts != null){ _res = _vts[0]._res; for(int i = 1; i < _vts.length; ++i) _res = _res && _vts[i]._res; } } } public UniqueId getUniqueId() { return this.uniqueId; } /** 64-bit checksum of the checksums of the vecs. SHA-265 checksums of the chunks are XORed * together. Since parse always parses the same pieces of files into the same offsets * in some chunk this checksum will be consistent across reparses. 
*/ public long checksum() { Vec [] vecs = vecs(); long _checksum = 0; for(int i = 0; i < _names.length; ++i) { long vec_checksum = vecs[i].checksum(); _checksum ^= vec_checksum; _checksum ^= (2147483647 * i); } return _checksum; } public Vec vec(String name){ Vec [] vecs = vecs(); for(int i = 0; i < _names.length; ++i) if(_names[i].equals(name))return vecs[i]; return null; } /** Returns the vector by given index. * <p>The call is direct equivalent to call <code>vecs()[i]</code> and * it does not do any array bounds checking.</p> * @param idx idx of column * @return this frame idx-th vector, never returns <code>null</code> */ public Vec vec(int idx) { Vec[] vecs = vecs(); return vecs[idx]; } /** Returns a subframe of this frame containing only vectors with desired names. * * @param names list of vector names * @return a new frame which collects vectors from this frame with desired names. * @throws IllegalArgumentException if there is no vector with desired name in this frame. */ public Frame subframe(String[] names) { return subframe(names, false, 0)[0]; } /** Returns a new frame composed of vectors of this frame selected by given names. * The method replaces missing vectors by a constant column filled by given value. * @param names names of vector to compose a subframe * @param c value to fill missing columns. * @return two frames, the first contains subframe, the second contains newly created constant vectors or null */ public Frame[] subframe(String[] names, double c) { return subframe(names, true, c); } /** Create a subframe from this frame based on desired names. * Throws an exception if desired column is not in this frame and <code>replaceBy</code> is <code>false</code>. * Else replace a missing column by a constant column with given value. * * @param names list of column names to extract * @param replaceBy should be missing column replaced by a constant column * @param c value for constant column * @return array of 2 frames, the first is containing a desired subframe, the second one contains newly created columns or null * @throws IllegalArgumentException if <code>replaceBy</code> is false and there is a missing column in this frame */ private Frame[] subframe(String[] names, boolean replaceBy, double c){ Vec [] vecs = new Vec[names.length]; Vec [] cvecs = replaceBy ? new Vec [names.length] : null; String[] cnames = replaceBy ? new String[names.length] : null; int ccv = 0; // counter of constant columns vecs(); // Preload the vecs HashMap<String, Integer> map = new HashMap<String, Integer>((int) ((names.length/0.75f)+1)); // avoid rehashing by set up initial capacity for(int i = 0; i < _names.length; ++i) map.put(_names[i], i); for(int i = 0; i < names.length; ++i) if(map.containsKey(names[i])) vecs[i] = _vecs[map.get(names[i])]; else if (replaceBy) { Log.warn("Column " + names[i] + " is missing, filling it in with " + c); cnames[ccv] = names[i]; vecs[i] = cvecs[ccv++] = anyVec().makeCon(c); } return new Frame[] { new Frame(names,vecs), ccv>0 ? new Frame(Arrays.copyOf(cnames, ccv), Arrays.copyOf(cvecs,ccv)) : null }; } public final Vec[] vecs(int [] idxs) { Vec [] all = vecs(); Vec [] res = new Vec[idxs.length]; for(int i = 0; i < idxs.length; ++i) res[i] = all[idxs[i]]; return res; } // Return (and cache) vectors public final Vec[] vecs() { Vec[] tvecs = _vecs; // read the content return tvecs == null ? (_vecs=vecs_impl()) : tvecs; } // Compute vectors for caching private Vec[] vecs_impl() { // Load all Vec headers; load them all in parallel by spawning F/J tasks. 
final Vec [] vecs = new Vec[_keys.length]; Futures fs = new Futures(); for( int i=0; i<_keys.length; i++ ) { final int ii = i; final Key k = _keys[i]; H2OCountedCompleter t = new H2OCountedCompleter() { // We need higher priority here as there is a danger of deadlock in // case of many calls from MRTask2 at once (e.g. frame with many // vectors invokes rollup tasks for all vectors in parallel). Should // probably be done in CPS style in the future @Override public byte priority(){return H2O.MIN_HI_PRIORITY;} @Override public void compute2() { Value v = DKV.get(k); if( v==null ) Log.err("Missing vector #" + ii + " (" + _names[ii] + ") during Frame fetch: "+k); vecs[ii] = v.get(); tryComplete(); } }; H2O.submitTask(t); fs.add(t); } fs.blockForPending(); return vecs; } // Force a cache-flush & reload, assuming vec mappings were altered remotely public final Vec[] reloadVecs() { _vecs=null; return vecs(); } /** Finds the first column with a matching name. */ public int find( String name ) { if (_names!=null) for( int i=0; i<_names.length; i++ ) if( name.equals(_names[i]) ) return i; return -1; } public int find( Vec vec ) { for( int i=0; i<_vecs.length; i++ ) if( vec.equals(_vecs[i]) ) return i; return -1; } // Return Frame 'f' if 'f' is compatible with 'this'. // Return a new Frame compatible with 'this' and a copy of 'f's data otherwise. public Frame makeCompatible( Frame f) { // Small data frames are always "compatible" if( anyVec()==null) // Or it is small return f; // Then must be compatible // Same VectorGroup is also compatible if( f.anyVec() == null || f.anyVec().group().equals(anyVec().group()) && Arrays.equals(f.anyVec()._espc,anyVec()._espc)) return f; // Ok, here make some new Vecs with compatible layout Key k = Key.make(); H2O.submitTask(new RebalanceDataSet(this, f, k)).join(); Frame f2 = DKV.get(k).get(); DKV.remove(k); return f2; } /** Appends a named column, keeping the last Vec as the response */ public Frame add( String name, Vec vec ) { if( find(name) != -1 ) throw new IllegalArgumentException("Duplicate name '"+name+"' in Frame"); if( _vecs.length != 0 ) { if( !anyVec().group().equals(vec.group()) && !Arrays.equals(anyVec()._espc,vec._espc) ) throw new IllegalArgumentException("Vector groups differs - adding vec '"+name+"' into the frame " + Arrays.toString(_names)); if( numRows() != vec.length() ) throw new IllegalArgumentException("Vector lengths differ - adding vec '"+name+"' into the frame " + Arrays.toString(_names)); } final int len = _names != null ? _names.length : 0; _names = _names != null ? Arrays.copyOf(_names,len+1) : new String[len+1]; _vecs = _names != null ? Arrays.copyOf(_vecs ,len+1) : new Vec [len+1]; _keys = _names != null ? Arrays.copyOf(_keys ,len+1) : new Key [len+1]; _names[len] = name; _vecs [len] = vec ; _keys [len] = vec._key; return this; } /** Insert a named column as the first column */ public Frame prepend( String name, Vec vec ) { if( find(name) != -1 ) throw new IllegalArgumentException("Duplicate name '"+name+"' in Frame"); if( _vecs.length != 0 ) { if( !anyVec().group().equals(vec.group()) && !Arrays.equals(anyVec()._espc,vec._espc) ) throw new IllegalArgumentException("Vector groups differs - adding vec '"+name+"' into the frame " + Arrays.toString(_names)); if( numRows() != vec.length() ) throw new IllegalArgumentException("Vector lengths differ - adding vec '"+name+"' into the frame " + Arrays.toString(_names)); } final int len = _names != null ? 
_names.length : 0; String[] _names2 = new String[len+1]; Vec[] _vecs2 = new Vec [len+1]; Key[] _keys2 = new Key [len+1]; _names2[0] = name; _vecs2 [0] = vec ; _keys2 [0] = vec._key; System.arraycopy(_names, 0, _names2, 1, len); System.arraycopy(_vecs, 0, _vecs2, 1, len); System.arraycopy(_keys, 0, _keys2, 1, len); _names = _names2; _vecs = _vecs2; _keys = _keys2; return this; } /** Appends an entire Frame */ public Frame add( Frame fr, String names[] ) { assert _vecs.length==0 || (anyVec().group().equals(fr.anyVec().group()) || Arrays.equals(anyVec()._espc,fr.anyVec()._espc)): "Adding a vector from different vector group. Current frame contains "+Arrays.toString(_names)+ " vectors. New frame contains "+Arrays.toString(fr.names()) + " vectors."; if( _names != null && fr._names != null ) for( String name : names ) if( find(name) != -1 ) throw new IllegalArgumentException("Duplicate name '"+name+"' in Frame"); final int len0= _names!=null ? _names.length : 0; final int len1= names!=null ? names.length : 0; final int len = len0+len1; // Note: _names==null <=> _vecs==null <=> _keys==null _names = _names != null ? Arrays.copyOf(_names,len) : new String[len]; _vecs = _vecs != null ? Arrays.copyOf(_vecs ,len) : new Vec [len]; _keys = _keys != null ? Arrays.copyOf(_keys ,len) : new Key [len]; System.arraycopy( names,0,_names,len0,len1); System.arraycopy(fr._vecs ,0,_vecs ,len0,len1); System.arraycopy(fr._keys ,0,_keys ,len0,len1); return this; } public Frame add( Frame fr, boolean rename ) { if( !rename ) return add(fr,fr._names); String names[] = new String[fr._names.length]; for( int i=0; i<names.length; i++ ) { String name = fr._names[i]; int cnt=0; while( find(name) != -1 ) name = fr._names[i]+"_"+(cnt++); names[i] = name; } return add(fr,names); } /** Removes the first column with a matching name. */ public Vec remove( String name ) { return remove(find(name)); } /** Removes a numbered column. */ public Vec [] remove( int [] idxs ) { for(int i :idxs)if(i < 0 || i > _vecs.length) throw new ArrayIndexOutOfBoundsException(); Arrays.sort(idxs); Vec [] res = new Vec[idxs.length]; Vec [] rem = new Vec[_vecs.length-idxs.length]; String [] names = new String[rem.length]; Key [] keys = new Key [rem.length]; int j = 0; int k = 0; int l = 0; for(int i = 0; i < _vecs.length; ++i) { if(j < idxs.length && i == idxs[j]) { ++j; res[k++] = _vecs[i]; } else { rem [l] = _vecs [i]; names[l] = _names[i]; keys [l] = _keys [i]; ++l; } } _vecs = rem; _names = names; _keys = keys; assert l == rem.length && k == idxs.length; return res; } /** Removes a numbered column. */ public Vec remove( int idx ) { int len = _names.length; if( idx < 0 || idx >= len ) return null; Vec v = vecs()[idx]; System.arraycopy(_names,idx+1,_names,idx,len-idx-1); System.arraycopy(_vecs ,idx+1,_vecs ,idx,len-idx-1); System.arraycopy(_keys ,idx+1,_keys ,idx,len-idx-1); _names = Arrays.copyOf(_names,len-1); _vecs = Arrays.copyOf(_vecs ,len-1); _keys = Arrays.copyOf(_keys ,len-1); if( v == _col0 ) _col0 = null; return v; } /** * Remove given interval of columns from frame. Motivated by R intervals. 
* @param startIdx - start index of column (inclusive) * @param endIdx - end index of column (exclusive) * @return an array of remove columns */ public Vec[] remove(int startIdx, int endIdx) { int len = _names.length; int nlen = len - (endIdx-startIdx); String[] names = new String[nlen]; Key[] keys = new Key[nlen]; Vec[] vecs = new Vec[nlen]; reloadVecs(); // force vecs reload if (startIdx > 0) { System.arraycopy(_names, 0, names, 0, startIdx); System.arraycopy(_vecs, 0, vecs, 0, startIdx); System.arraycopy(_keys, 0, keys, 0, startIdx); } nlen -= startIdx; if (endIdx < _names.length+1) { System.arraycopy(_names, endIdx, names, startIdx, nlen); System.arraycopy(_vecs, endIdx, vecs, startIdx, nlen); System.arraycopy(_keys, endIdx, keys, startIdx, nlen); } Vec[] vec = Arrays.copyOfRange(vecs(),startIdx,endIdx); _names = names; _vecs = vecs; _keys = keys; _col0 = null; return vec; } public Vec replace(int col, Vec nv) { if (col >= numCols()) throw new IllegalArgumentException("Trying to select column "+(col+1)+" but only "+numCols()+" present."); Vec rv = vecs()[col]; assert rv.group().equals(nv.group()); _vecs[col] = nv; _keys[col] = nv._key; if( DKV.get(nv._key)==null ) // If not already in KV, put it there DKV.put(nv._key, nv); return rv; } public Vec factor(int col) { Vec nv = vecs()[col].toEnum(); return replace(col, nv); } public Frame extractFrame(int startIdx, int endIdx) { Frame f = subframe(startIdx, endIdx); remove(startIdx, endIdx); return f; } /** Create a subframe from given interval of columns. * * @param startIdx index of first column (inclusive) * @param endIdx index of the last column (exclusive) * @return a new frame containing specified interval of columns */ public Frame subframe(int startIdx, int endIdx) { Frame result = new Frame(Arrays.copyOfRange(_names,startIdx,endIdx),Arrays.copyOfRange(vecs(),startIdx,endIdx)); return result; } public final String[] names() { return _names; } public int numCols() { return vecs().length; } public long numRows() { return anyVec()==null ? 0 : anyVec().length(); } public boolean isRawData() { // Right now there is only one Vec for raw data, but imagine a Parse after a JDBC import or such. for (Vec v : vecs()) { if (v.isByteVec()) return true; } return false; } // Number of columns when categoricals expanded. // Note: One level is dropped in each categorical col. public int numExpCols() { int ncols = 0; for(int i = 0; i < vecs().length; i++) ncols += vecs()[i].domain() == null ? 1 : (vecs()[i].domain().length - 1); return ncols; } /** All the domains for enum columns; null for non-enum columns. 
*/ public String[][] domains() { String ds[][] = new String[vecs().length][]; for( int i=0; i<vecs().length; i++ ) ds[i] = vecs()[i].domain(); return ds; } /** true/false every Vec is a UUID */ public boolean[] uuids() { boolean bs[] = new boolean[vecs().length]; for( int i=0; i<vecs().length; i++ ) bs[i] = vecs()[i].isUUID(); return bs; } /** Time status for every Vec */ public byte[] times() { byte bs[] = new byte[vecs().length]; for( int i=0; i<vecs().length; i++ ) bs[i] = vecs()[i]._time; return bs; } private String[][] domains(int [] cols){ Vec [] vecs = vecs(); String [][] res = new String[cols.length][]; for(int i = 0; i < cols.length; ++i) res[i] = vecs[cols[i]]._domain; return res; } private String [] names(int [] cols){ if(_names == null)return null; String [] res = new String[cols.length]; for(int i = 0; i < cols.length; ++i) res[i] = _names[cols[i]]; return res; } public Vec lastVec() { final Vec [] vecs = vecs(); return vecs[vecs.length-1]; } /** Returns the first readable vector. */ public Vec anyVec() { Vec c0 = _col0; // single read if( c0 != null ) return c0; for( Vec v : vecs() ) if( v.readable() ) return (_col0 = v); return null; } /* Returns the only Vector, or tosses IAE */ public final Vec theVec(String err) { if( _keys.length != 1 ) throw new IllegalArgumentException(err); if( _vecs == null ) _vecs = new Vec[]{_col0 = DKV.get(_keys[0]).get() }; return _vecs[0]; } /** Check that the vectors are all compatible. All Vecs have their content * sharded using same number of rows per chunk. */ public boolean checkCompatible( ) { Vec v0 = anyVec(); if( v0 == null ) return true; int nchunks = v0.nChunks(); for( Vec vec : vecs() ) { if( vec instanceof AppendableVec ) continue; // New Vectors are endlessly compatible if( vec.nChunks() != nchunks ) throw new IllegalArgumentException("Vectors different numbers of chunks, "+nchunks+" and "+vec.nChunks()); } // Also check each chunk has same rows for( int i=0; i<nchunks; i++ ) { long es = v0.chunk2StartElem(i); for( Vec vec : vecs() ) if( !(vec instanceof AppendableVec) && vec.chunk2StartElem(i) != es ) throw new IllegalArgumentException("Vector chunks different numbers of rows, "+es+" and "+vec.chunk2StartElem(i)); } // For larger Frames, verify that the layout is compatible - else we'll be // endlessly cache-missing the data around the cluster, pulling copies // local everywhere. if( v0.length() > 1e4 ) { Key gk = v0.groupKey(); for( Vec vec : vecs() ) assert gk.equals(vec.groupKey()) : "Vector " + vec + " has different vector group!"; } return true; } public void closeAppendables() {closeAppendables(new Futures()).blockForPending(); } // Close all AppendableVec public Futures closeAppendables(Futures fs) { _col0 = null; // Reset cache int len = vecs().length; for( int i=0; i<len; i++ ) { Vec v = _vecs[i]; if( v instanceof AppendableVec ) DKV.put(_keys[i],_vecs[i] = ((AppendableVec)v).close(fs),fs); } return fs; } /** Actually remove/delete all Vecs from memory, not just from the Frame. 
*/ @Override public Futures delete_impl(Futures fs) { for( Key k : _keys ) UKV.remove(k,fs); _names = new String[0]; _vecs = new Vec[0]; _keys = new Key[0]; return fs; } @Override public String errStr() { return "Dataset"; } public long byteSize() { long sum=0; for( int i=0; i<vecs().length; i++ ) sum += _vecs[i].byteSize(); return sum; } // Allow sorting of columns based on some function public void swap( int lo, int hi ) { assert 0 <= lo && lo < _keys.length; assert 0 <= hi && hi < _keys.length; if( lo==hi ) return; Vec vecs[] = vecs(); Vec v = vecs [lo]; vecs [lo] = vecs [hi]; vecs [hi] = v; Key k = _keys[lo]; _keys [lo] = _keys [hi]; _keys [hi] = k; String n=_names[lo]; _names[lo] = _names[hi]; _names[hi] = n; } @Override public String toString() { // Across Vec vecs[] = _vecs; // Do Not Cache _vecs in toString lest IdeaJ variable display cause side-effects if( vecs == null ) vecs = vecs_impl(); if( vecs.length==0 ) return "{}"; String s="{"+(_names==null?"C0":_names[0]); long bs=vecs[0].byteSize(); for( int i=1; i<vecs.length; i++ ) { s += ","+(_names==null?"C"+i:_names[i]); bs+= vecs[i].byteSize(); } s += "}, "+PrettyPrint.bytes(bs)+"\n"; // Down Vec v0 = vecs[0]; // Do Not Cache, no side-effects if( v0 == null ) return s; int nc = v0.nChunks(); s += "Chunk starts: {"; for( int c=0; c<nc; c++ ) s += v0.chunk2StartElem(c)+","; s += "}"; return s; } public String toStringNames() { return Arrays.toString(_names); } // Print a row with headers inlined private String toStr( long idx, int col ) { return _names[col]+"="+(_vecs[col].isNA(idx) ? "NA" : _vecs[col].at(idx)); } public String toString( long idx ) { String s="{"+toStr(idx,0); for( int i=1; i<_names.length; i++ ) s += ","+toStr(idx,i); return s+"}"; } // Print fixed-width row & fixed-width headers (more compressed print // format). Returns the column formats. public String[] toStringHdr( StringBuilder sb ) { String[] fs = new String[numCols()]; for( int c=0; c<fs.length; c++ ) { String n = (_names != null && c < _names.length) ? _names[c] : ("C"+c); int nlen = n.length(); if( numRows()==0 ) { sb.append(n).append(' '); continue; } int w=0; if( _vecs[c].isEnum() ) { String ss[] = _vecs[c]._domain; for( int i=0; i<ss.length; i++ ) w = Math.max(w,ss[i].length()); w = Math.min(w,10); fs[c] = "%"+w+"."+w+"s"; } else { Chunk C = _vecs[c].chunkForChunkIdx(0); // 1st Chunk // Possible situation: 1) vec is INT - C is has no floats => OK // 2) vec is INT - C has floats => IMPOSSIBLE, // 3) vec is FLOAT - C has floats => OK, // 4) vec is FLOAT - C has no floats => find the first chunk with floats if (!_vecs[c].isInt() && !C.hasFloat()) { for (int i=1; i<_vecs[c].nChunks(); i++) { C=_vecs[c].chunkForChunkIdx(i); if (C.hasFloat()) break; } } String f = fs[c] = C.pformat(); // Printable width for( int x=0; x<f.length(); x++ )// Get printable width from format if( Character.isDigit(f.charAt(x)) ) w = w*10+(f.charAt(x)-'0'); else if( w>0 ) break; if( f.charAt(1)==' ' ) w++; // Leading blank is not in print-width } int len = sb.length(); if( nlen>1 && w==1 ) { fs[c]=" "+fs[c]; w=2; } if( nlen <= w ) { // Short name, big digits sb.append(n); for( int i=nlen; i<w; i++ ) sb.append(' '); } else if( w==1 ) { // First char only sb.append(n.charAt(0)); } else if( w==2 ) { // First 2 chars only sb.append(n.charAt(0)).append(n.charAt(1)); } else { // First char dot lastchars; e.g. 
Compress "Interval" to "I.val" sb.append(n.charAt(0)).append('.'); for( int i=nlen-(w-2); i<nlen; i++ ) sb.append(n.charAt(i)); } assert len+w==sb.length(); sb.append(' '); // Column seperator } sb.append('\n'); return fs; } public StringBuilder toString( StringBuilder sb, String[] fs, long idx ) { Vec vecs[] = vecs(); for( int c=0; c<fs.length; c++ ) { Vec vec = vecs[c]; if( vec.isEnum() ) { String s = "----------"; if( !vec.isNA(idx) ) { int x = (int)vec.at8(idx); if( x >= 0 && x < vec._domain.length ) s = vec._domain[x]; } sb.append(String.format(fs[c],s)); } else if( vec.isInt() ) { if( vec.isNA(idx) ) { Chunk C = vec.chunkForChunkIdx(0); // 1st Chunk int len = C.pformat_len0(); // Printable width for( int i=0; i<len; i++ ) sb.append('-'); } else { try { if( vec.isUUID() ) sb.append(PrettyPrint.UUID(vec.at16l(idx),vec.at16h(idx))); else sb.append(String.format(fs[c],vec.at8(idx))); } catch( IllegalFormatException ife ) { System.out.println("Format: "+fs[c]+" col="+c+" not for ints"); ife.printStackTrace(); } } } else { sb.append(String.format(fs[c],vec.at (idx))); if( vec.isNA(idx) ) sb.append(' '); } sb.append(' '); // Column seperator } sb.append('\n'); return sb; } public String toStringAll() { StringBuilder sb = new StringBuilder(); String[] fs = toStringHdr(sb); for( int i=0; i<numRows(); i++ ) toString(sb,fs,i); return sb.toString(); } // Return the entire Frame as a CSV stream public InputStream toCSV(boolean headers) { return new CSVStream(headers, false); } public InputStream toCSV(boolean headers, boolean hex_string) { return new CSVStream(headers, hex_string); } private class CSVStream extends InputStream { private final boolean _hex_string; byte[] _line; int _position; long _row; CSVStream(boolean headers, boolean hex_string) { _hex_string = hex_string; StringBuilder sb = new StringBuilder(); Vec vs[] = vecs(); if( headers ) { sb.append('"' + _names[0] + '"'); for(int i = 1; i < vs.length; i++) sb.append(',').append('"' + _names[i] + '"'); sb.append('\n'); } _line = sb.toString().getBytes(); } @Override public int available() throws IOException { if(_position == _line.length) { if(_row == numRows()) return 0; StringBuilder sb = new StringBuilder(); Vec vs[] = vecs(); for( int i = 0; i < vs.length; i++ ) { if(i > 0) sb.append(','); if(!vs[i].isNA(_row)) { if( vs[i].isEnum() ) sb.append('"' + vs[i]._domain[(int) vs[i].at8(_row)] + '"'); else if( vs[i].isUUID() ) sb.append(PrettyPrint.UUID(vs[i].at16l(_row),vs[i].at16h(_row))); else if( vs[i].isInt() ) sb.append(vs[i].at8(_row)); else { // R 3.1 unfortunately changed the behavior of read.csv(). // (Really type.convert()). // // Numeric values with too much precision now trigger a type conversion in R 3.1 into a factor. // // See these discussions: // https://bugs.r-project.org/bugzilla/show_bug.cgi?id=15751 // https://stat.ethz.ch/pipermail/r-devel/2014-April/068778.html // http://stackoverflow.com/questions/23072988/preserve-old-pre-3-1-0-type-convert-behavior double d = vs[i].at(_row); String s; if (_hex_string) { // Used by R's as.data.frame(). s = Double.toHexString(d); } else { // To emit CSV files that can be read by R 3.1, limit the number of significant digits. // s = String.format("%.15g", d); s = Double.toString(d); } sb.append(s); } } } sb.append('\n'); _line = sb.toString().getBytes(); _position = 0; _row++; } return _line.length - _position; } @Override public void close() throws IOException { super.close(); _line = null; } @Override public int read() throws IOException { return available() == 0 ? 
-1 : _line[_position++]; } @Override public int read(byte[] b, int off, int len) throws IOException { int n = available(); if(n > 0) { n = Math.min(n, len); System.arraycopy(_line, _position, b, off, n); _position += n; } return n; } } // -------------------------------------------------------------------------- // In support of R, a generic Deep Copy & Slice. // Semantics are a little odd, to match R's. // Each dimension spec can be: // null - all of them // a sorted list of negative numbers (no dups) - all BUT these // an unordered list of positive - just these, allowing dups // The numbering is 1-based; zero's are not allowed in the lists, nor are out-of-range. final int MAX_EQ2_COLS = 100000; // FIXME. Put this in a better spot. public Frame deepSlice( Object orows, Object ocols ) { // ocols is either a long[] or a Frame-of-1-Vec long[] cols = null; if( ocols == null ) cols = null; else if (ocols instanceof long[]) cols = (long[])ocols; else if (ocols instanceof Frame) { Frame fr = (Frame) ocols; if (fr.numCols() != 1) throw new IllegalArgumentException("Columns Frame must have only one column (actually has " + fr.numCols() + " columns)"); long n = fr.anyVec().length(); if (n > MAX_EQ2_COLS) throw new IllegalArgumentException("Too many requested columns (requested " + n +", max " + MAX_EQ2_COLS + ")"); cols = new long[(int)n]; Vec v = fr.anyVec(); for (long i = 0; i < v.length(); i++) cols[(int)i] = v.at8(i); } else throw new IllegalArgumentException("Columns is specified by an unsupported data type (" + ocols.getClass().getName() + ")"); // Since cols is probably short convert to a positive list. int c2[] = null; if( cols==null ) { c2 = new int[numCols()]; for( int i=0; i<c2.length; i++ ) c2[i]=i; } else if( cols.length==0 ) { c2 = new int[0]; } else if( cols[0] > 0 ) { c2 = new int[cols.length]; for( int i=0; i<cols.length; i++ ) c2[i] = (int)cols[i]-1; // Convert 1-based cols to zero-based } else { c2 = new int[numCols()-cols.length]; int j=0; for( int i=0; i<numCols(); i++ ) { if( j >= cols.length || i < (-cols[j]-1) ) c2[i-j] = i; else j++; } } for( int i=0; i<c2.length; i++ ) if( c2[i] >= numCols() ) throw new IllegalArgumentException("Trying to select column "+(c2[i]+1)+" but only "+numCols()+" present."); if( c2.length==0 ) throw new IllegalArgumentException("No columns selected (did you try to select column 0 instead of column 1?)"); // Do Da Slice // orows is either a long[] or a Vec if (orows == null) return copyRollups(new DeepSlice(null,c2,vecs()).doAll(c2.length,this).outputFrame(names(c2),domains(c2)),true); else if (orows instanceof long[]) { final long CHK_ROWS=1000000; final long[] rows = (long[])orows; if (this.numRows() == 0) { return this; } if( rows.length==0 || rows[0] < 0 ) { if (rows.length != 0 && rows[0] < 0) { Vec v = new MRTask2() { @Override public void map(Chunk cs) { for (long er : rows) { if (er >= 0) continue; er = Math.abs(er) - 1; // 1-based -> 0-based if (er < cs._start || er > (cs._len + cs._start - 1)) continue; cs.set0((int) (er - cs._start), 1); } } }.doAll(this.anyVec().makeZero()).getResult()._fr.anyVec(); Frame slicedFrame = new DeepSlice(rows, c2, vecs()).doAll(c2.length, this.add("select_vec", v)).outputFrame(names(c2), domains(c2)); UKV.remove(v._key); UKV.remove(this.remove(this.numCols()-1)._key); return copyRollups(slicedFrame, false); } else { return copyRollups(new DeepSlice(rows.length == 0 ? 
null : rows, c2, vecs()).doAll(c2.length, this).outputFrame(names(c2), domains(c2)), rows.length == 0); } } // Vec'ize the index array Futures fs = new Futures(); AppendableVec av = new AppendableVec(Vec.newKey(Key.make("rownames"))); int r = 0; int c = 0; while (r < rows.length) { NewChunk nc = new NewChunk(av, c); long end = Math.min(r+CHK_ROWS, rows.length); for (; r < end; r++) { nc.addNum(rows[r]); } nc.close(c++, fs); } Vec c0 = av.close(fs); // c0 is the row index vec fs.blockForPending(); Frame fr2 = new Slice(c2, this).doAll(c2.length,new Frame(new String[]{"rownames"}, new Vec[]{c0})) .outputFrame(names(c2), domains(c2)); UKV.remove(c0._key); // Remove hidden vector return fr2; } Frame frows = (Frame)orows; Vec vrows = frows.anyVec(); // It's a compatible Vec; use it as boolean selector. // Build column names for the result. Vec [] vecs = new Vec[c2.length+1]; String [] names = new String[c2.length+1]; for(int i = 0; i < c2.length; ++i){ vecs[i] = _vecs[c2[i]]; names[i] = _names[c2[i]]; } vecs[c2.length] = vrows; names[c2.length] = "predicate"; return new DeepSelect().doAll(c2.length,new Frame(names,vecs)).outputFrame(names(c2),domains(c2)); } // Slice and return in the form of new chunks. private static class Slice extends MRTask2<Slice> { final Frame _base; // the base frame to slice from final int[] _cols; Slice(int[] cols, Frame base) { _cols = cols; _base = base; } @Override public void map(Chunk[] ix, NewChunk[] ncs) { final Vec[] vecs = new Vec[_cols.length]; final Vec anyv = _base.anyVec(); final long nrow = anyv.length(); long r = ix[0].at80(0); int last_ci = anyv.elem2ChunkIdx(r<nrow?r:0); // memoize the last chunk index long last_c0 = anyv._espc[last_ci]; // ... last chunk start long last_c1 = anyv._espc[last_ci + 1]; // ... last chunk end Chunk[] last_cs = new Chunk[vecs.length]; // ... last chunks for (int c = 0; c < _cols.length; c++) { vecs[c] = _base.vecs()[_cols[c]]; last_cs[c] = vecs[c].chunkForChunkIdx(last_ci); } for (int i = 0; i < ix[0]._len; i++) { // select one row r = ix[0].at80(i) - 1; // next row to select if (r < 0) continue; if (r >= nrow) { for (int c = 0; c < vecs.length; c++) ncs[c].addNum(Double.NaN); } else { if (r < last_c0 || r >= last_c1) { last_ci = anyv.elem2ChunkIdx(r); last_c0 = anyv._espc[last_ci]; last_c1 = anyv._espc[last_ci + 1]; for (int c = 0; c < vecs.length; c++) last_cs[c] = vecs[c].chunkForChunkIdx(last_ci); } for (int c = 0; c < vecs.length; c++) if( vecs[c].isUUID() ) ncs[c].addUUID(last_cs[c],r); else ncs[c].addNum (last_cs[c].at(r)); } } } } // Bulk (expensive) copy from 2nd cols into 1st cols. // Sliced by the given cols & rows private static class DeepSlice extends MRTask2<DeepSlice> { final int _cols[]; final long _rows[]; final byte _isInt[]; boolean _ex = true; DeepSlice( long rows[], int cols[], Vec vecs[] ) { _cols=cols; _rows=rows; _isInt = new byte[cols.length]; for( int i=0; i<cols.length; i++ ) _isInt[i] = (byte)(vecs[cols[i]].isInt() ? 1 : 0); } @Override public boolean logVerbose() { return false; } @Override public void map( Chunk chks[], NewChunk nchks[] ) { long rstart = chks[0]._start; int rlen = chks[0]._len; // Total row count int rx = 0; // Which row to in/ex-clude int rlo = 0; // Lo/Hi for this block of rows int rhi = rlen; if (_rows != null && _rows[0] < 0) { // Skip any rows that have 1 in the last column! 
Chunk select_vec = chks[chks.length-1]; for (int i = 0; i < _cols.length; i++) { Chunk oc = chks[_cols[i]]; NewChunk nc = nchks[i]; if (_isInt[i] == 1) { // Slice on integer columns for (int j = 0; j < oc._len; j++) { if (select_vec.at80(j) == 1) continue; if (oc._vec.isUUID()) nc.addUUID(oc, j); else if (oc.isNA0(j)) nc.addNA(); else nc.addNum(oc.at80(j), 0); } } else { // Slice on double columns for (int j = 0; j < oc._len; j++) { if (select_vec.at80(j) == 1) continue; nc.addNum(oc.at0(j)); } } } } else { while (true) { // Still got rows to include? if (_rows != null) { // Got a row selector? if (rx >= _rows.length) break; // All done with row selections long r = _rows[rx++] - 1;// Next row selector if (r < rstart) continue; rlo = (int) (r - rstart); rhi = rlo + 1; // Stop at the next row while (rx < _rows.length && (_rows[rx] - 1 - rstart) == rhi && rhi < rlen) { rx++; rhi++; // Grab sequential rows } } // Process this next set of rows // For all cols in the new set for (int i = 0; i < _cols.length; i++) { Chunk oc = chks[_cols[i]]; NewChunk nc = nchks[i]; if (_isInt[i] == 1) { // Slice on integer columns for (int j = rlo; j < rhi; j++) if (oc._vec.isUUID()) nc.addUUID(oc, j); else if (oc.isNA0(j)) nc.addNA(); else nc.addNum(oc.at80(j), 0); } else { // Slice on double columns for (int j = rlo; j < rhi; j++) nc.addNum(oc.at0(j)); } } rlo = rhi; if (_rows == null) break; } } } } private static class DeepSelect extends MRTask2<DeepSelect> { @Override public void map( Chunk chks[], NewChunk nchks[] ) { Chunk pred = chks[chks.length-1]; for(int i = 0; i < pred._len; ++i) { if(pred.at0(i) != 0) { for( int j = 0; j < chks.length - 1; j++ ) { Chunk chk = chks[j]; if( chk._vec.isUUID() ) nchks[j].addUUID(chk,i); else nchks[j].addNum(chk.at0(i)); } } } } } private Frame copyRollups( Frame fr, boolean isACopy ) { if( !isACopy ) return fr; // Not a clean copy, do not copy rollups (will do rollups "the hard way" on first ask) Vec vecs0[] = vecs(); Vec vecs1[] = fr.vecs(); for( int i=0; i<fr._names.length; i++ ) { assert vecs1[i]._naCnt== -1; // not computed yet, right after slice Vec v0 = vecs0[find(fr._names[i])]; Vec v1 = vecs1[i]; v1.setRollupStats(v0); } return fr; } // ------------------------------------------------------------------------------ public <Y extends Flow.PerRow<Y>> // Type parameter Flow.FlowPerRow<Y> // Return type of with() with // The method name ( Flow.PerRow<Y> pr ) // Arguments for with() { return new Flow.FlowPerRow<Y>(pr,new Flow.FlowFrame(this)); } public Flow.FlowFilter with( Flow.Filter fr ) { return new Flow.FlowFilter(fr,new Flow.FlowFrame(this)); } public Flow.FlowGroupBy with( Flow.GroupBy fr ) { return new Flow.FlowGroupBy(fr,new Flow.FlowFrame(this)); } }
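The comment block inside Frame.deepSlice above spells out R's indexing convention: a null spec selects everything, positive 1-based indices select columns (and may repeat them), and a sorted list of negative 1-based indices excludes columns. Below is a minimal standalone sketch of that conversion in plain Java; the class and method names are invented for illustration and are not part of H2O.

import java.util.Arrays;

// Standalone sketch of the R-style column-spec conversion performed in Frame.deepSlice.
public class RIndexSketch {
  static int[] toZeroBased(long[] spec, int ncols) {
    if (spec == null) {                        // null -> all columns
      int[] all = new int[ncols];
      for (int i = 0; i < ncols; i++) all[i] = i;
      return all;
    }
    if (spec.length == 0) return new int[0];
    if (spec[0] > 0) {                         // positive list: just these, 1-based, dups allowed
      int[] sel = new int[spec.length];
      for (int i = 0; i < spec.length; i++) sel[i] = (int) spec[i] - 1;
      return sel;
    }
    // sorted negative list: all BUT these (same loop shape as deepSlice)
    int[] keep = new int[ncols - spec.length];
    int j = 0;
    for (int i = 0; i < ncols; i++) {
      if (j >= spec.length || i < (-spec[j] - 1)) keep[i - j] = i;
      else j++;
    }
    return keep;
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(toZeroBased(null, 4)));              // [0, 1, 2, 3]
    System.out.println(Arrays.toString(toZeroBased(new long[]{2, 2}, 4)));  // [1, 1]
    System.out.println(Arrays.toString(toZeroBased(new long[]{-1, -3}, 4)));// [1, 3]
  }
}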
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/FrameCreator.java
package water.fvec; import hex.CreateFrame; import jsr166y.CountedCompleter; import water.H2O; import water.Key; import water.MRTask2; import water.util.Utils; import java.util.Arrays; import java.util.Random; import java.util.UUID; /** * Helper to make up a Frame from scratch, with random content */ public class FrameCreator extends H2O.H2OCountedCompleter { public FrameCreator(CreateFrame createFrame) { this(createFrame, null); } public FrameCreator(CreateFrame createFrame, Key job) { super(null); _job=job; _createFrame = createFrame; int[] idx = Utils.seq(1, _createFrame.cols+1); int[] shuffled_idx = new int[idx.length]; Utils.shuffleArray(idx, idx.length, shuffled_idx, _createFrame.seed, 0); int catcols = (int)(_createFrame.categorical_fraction * _createFrame.cols); int intcols = (int)(_createFrame.integer_fraction * _createFrame.cols); int realcols = _createFrame.cols - catcols - intcols; assert(catcols >= 0); assert(intcols >= 0); assert(realcols >= 0); _cat_cols = Arrays.copyOfRange(shuffled_idx, 0, catcols); _int_cols = Arrays.copyOfRange(shuffled_idx, catcols, catcols+intcols); _real_cols = Arrays.copyOfRange(shuffled_idx, catcols+intcols, catcols+intcols+realcols); // create domains for categorical variables if (_createFrame.randomize) { assert(_createFrame.response_factors >= 1); _domain = new String[_createFrame.cols+1][]; _domain[0] = _createFrame.response_factors == 1 ? null : new String[_createFrame.response_factors]; if (_domain[0] != null) { for (int i=0; i <_domain[0].length; ++i) { _domain[0][i] = "resp." + i; } } for (int c : _cat_cols) { _domain[c] = new String[_createFrame.factors]; for (int i = 0; i < _createFrame.factors; ++i) { _domain[c][i] = UUID.randomUUID().toString().subSequence(0,5).toString(); } } } } final private CreateFrame _createFrame; private int[] _cat_cols; private int[] _int_cols; private int[] _real_cols; private String[][] _domain; private Frame _out; final private Key _job; @Override public void compute2() { Vec[] vecs = Vec.makeNewCons(_createFrame.rows, _createFrame.cols+1, _createFrame.value, _domain); String[] names = new String[vecs.length]; names[0] = "response"; for( int i=1; i<vecs.length; i++ ) names[i] = "C"+i; _out = new Frame(Key.make(_createFrame.key), names, vecs); assert _out.numRows() == _createFrame.rows; assert _out.numCols() == _createFrame.cols+1; _out.delete_and_lock(_job); // fill with random values new FrameRandomizer(_createFrame, _cat_cols, _int_cols, _real_cols).doAll(_out); //overwrite a fraction with N/A new MissingInserter(this, _createFrame.seed, _createFrame.missing_fraction).asyncExec(_out); } @Override public void onCompletion(CountedCompleter caller){ _out.update(_job); _out.unlock(_job); } private static class FrameRandomizer extends MRTask2<FrameRandomizer> { final private CreateFrame _createFrame; final private int[] _cat_cols; final private int[] _int_cols; final private int[] _real_cols; public FrameRandomizer(CreateFrame createFrame, int[] cat_cols, int[] int_cols, int[] real_cols){ _createFrame = createFrame; _cat_cols = cat_cols; _int_cols = int_cols; _real_cols = real_cols; } //row+col-dependent RNG for reproducibility with different number of VMs, chunks, etc. 
void setSeed(Random rng, int col, long row) { rng.setSeed(_createFrame.seed + _createFrame.cols * row + col); rng.setSeed(rng.nextLong()); } @Override public void map (Chunk[]cs){ if (!_createFrame.randomize) return; final Random rng = new Random(); // response for (int r = 0; r < cs[0]._len; r++) { setSeed(rng, 0, cs[0]._start + r); if (_createFrame.response_factors >1) cs[0].set0(r, (int)(rng.nextDouble() * _createFrame.response_factors)); //classification else if (_createFrame.positive_response) cs[0].set0(r, _createFrame.real_range * rng.nextDouble()); //regression with positive response else cs[0].set0(r, _createFrame.real_range * (1 - 2 * rng.nextDouble())); //regression } for (int c : _cat_cols) { for (int r = 0; r < cs[c]._len; r++) { setSeed(rng, c, cs[c]._start + r); cs[c].set0(r, (int)(rng.nextDouble() * _createFrame.factors)); } } for (int c : _int_cols) { for (int r = 0; r < cs[c]._len; r++) { setSeed(rng, c, cs[c]._start + r); cs[c].set0(r, (long) ((_createFrame.integer_range+1) * (1 - 2 * rng.nextDouble()))); } } for (int c : _real_cols) { for (int r = 0; r < cs[c]._len; r++) { setSeed(rng, c, cs[c]._start + r); cs[c].set0(r, _createFrame.real_range * (1 - 2 * rng.nextDouble())); } } } } public static class MissingInserter extends MRTask2<MissingInserter> { final long _seed; final double _frac; public MissingInserter(long seed, double frac){ super(null); _seed = seed; _frac = frac; } public MissingInserter(H2O.H2OCountedCompleter cmp, long seed, double frac){ super(cmp); _seed = seed; _frac = frac; } @Override public void map (Chunk[]cs){ if (_frac == 0) return; final Random rng = new Random(); for (int c = 0; c < cs.length; c++) { for (int r = 0; r < cs[c]._len; r++) { rng.setSeed(_seed + 1234 * c ^ 1723 * (cs[c]._start + r)); //row+col-dependent RNG for reproducibility if (rng.nextDouble() < _frac) cs[c].setNA0(r); } } } } }
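FrameCreator's FrameRandomizer reseeds its Random per cell from (seed, row, col), so the generated value for a cell does not depend on how rows are split into chunks or across nodes. Below is a minimal standalone sketch of that seeding scheme; the class and method names are invented for illustration.

import java.util.Random;

// Standalone sketch of row+col-dependent seeding for reproducible random frames.
public class CellSeedSketch {
  static double cellValue(long seed, long cols, long row, int col) {
    Random rng = new Random();
    rng.setSeed(seed + cols * row + col); // deterministic per (row, col)
    rng.setSeed(rng.nextLong());          // decorrelate nearby seeds
    return rng.nextDouble();
  }

  public static void main(String[] args) {
    long seed = 42, cols = 10;
    // The value depends only on (seed, row, col), so generating row 7 as part of a
    // chunk [0..9] or a chunk [5..14] produces exactly the same number.
    double fromChunk0 = cellValue(seed, cols, 7, 3);
    double fromChunk1 = cellValue(seed, cols, 7, 3);
    System.out.println(fromChunk0 == fromChunk1); // true
  }
}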
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/HdfsFileVec.java
package water.fvec;

import org.apache.hadoop.fs.FileStatus;
import water.*;

// A distributed file-backed Vector
//
public class HdfsFileVec extends FileVec {
  // Make a new HdfsFileVec key which holds the filename implicitly.
  // This name is used by the DVecs to load data on-demand.
  public static Key make(FileStatus f) {
    Futures fs = new Futures();
    Key key = make(f, fs);
    fs.blockForPending();
    return key;
  }
  public static Key make(FileStatus f, Futures fs) {
    long size = f.getLen();
    String fname = f.getPath().toString();
    Key k = Key.make(fname);
    Key k2 = Vec.newKey(k);
    new Frame(k).delete_and_lock(null);
    // Insert the top-level FileVec key into the store
    Vec v = new HdfsFileVec(k2,size);
    DKV.put(k2, v, fs);
    Frame fr = new Frame(k,new String[]{fname},new Vec[]{v});
    fr.update(null);
    fr.unlock(null);
    return k;
  }
  private HdfsFileVec(Key key, long len) { super(key,len,Value.HDFS); }
}
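A hypothetical usage sketch for HdfsFileVec: the HDFS path, configuration, and class name below are placeholders, and the sketch assumes an H2O cloud is already running. FileSystem/FileStatus/Path are the standard Hadoop API; the UKV lookup mirrors how ParseDataset2.getVec later resolves such keys to the one-column Frame that make() installs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import water.Key;
import water.UKV;
import water.fvec.Frame;
import water.fvec.HdfsFileVec;

public class HdfsFileVecUsageSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus status = fs.getFileStatus(new Path("hdfs:///datasets/airlines.csv")); // placeholder path
    Key k = HdfsFileVec.make(status);       // blocks until the key is installed
    Frame fr = (Frame) UKV.get(k);          // one-column Frame wrapping the FileVec (see make above)
    System.out.println("registered " + k + " with " + fr.numCols() + " vec(s)");
  }
}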
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/NFSFileVec.java
package water.fvec;

import java.io.File;
import water.*;
import water.persist.PersistNFS;

// A distributed file-backed Vector
//
public class NFSFileVec extends FileVec {
  // Make a new NFSFileVec key which holds the filename implicitly.
  // This name is used by the DVecs to load data on-demand.
  public static Key make(File f) {
    Futures fs = new Futures();
    Key key = make(f, fs);
    fs.blockForPending();
    return key;
  }
  public static Key make(File f, Futures fs) {
    long size = f.length();
    Key k = Vec.newKey(PersistNFS.decodeFile(f));
    // Insert the top-level FileVec key into the store
    DKV.put(k, new NFSFileVec(k,size), fs);
    return k;
  }
  private NFSFileVec(Key key, long len) { super(key,len,Value.NFS); }
}
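A hypothetical end-to-end sketch for NFSFileVec: register a local file as an NFS-backed Vec and hand its key to ParseDataset2.parse (shown further below). The file path, destination key name, and class name are placeholders, and the sketch assumes an H2O cloud is already up.

import java.io.File;
import water.Key;
import water.fvec.Frame;
import water.fvec.NFSFileVec;
import water.fvec.ParseDataset2;

public class NFSParseSketch {
  public static void main(String[] args) {
    Key src  = NFSFileVec.make(new File("/data/iris.csv")); // blocking variant; key of the file-backed Vec
    Key dest = Key.make("iris.hex");                        // placeholder destination key
    Frame fr = ParseDataset2.parse(dest, new Key[]{ src }); // distributed CSV parse into a Frame
    System.out.println(fr.numRows() + " rows, " + fr.numCols() + " cols");
  }
}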
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/NewChunk.java
package water.fvec; import java.util.*; import water.*; import water.util.Log; // An uncompressed chunk of data, supporting an append operation public class NewChunk extends Chunk { final int _cidx; // We can record the following (mixed) data types: // 1- doubles, in _ds including NaN for NA & 0; _ls==_xs==null // 2- scaled decimals from parsing, in _ls & _xs; _ds==null // 3- zero: requires _ls==0 && _xs==0 // 4- NA: either _ls==0 && _xs==Integer.MIN_VALUE, OR _ds=NaN // 5- Enum: _xs==(Integer.MIN_VALUE+1) && _ds==null // Chunk._len is the count of elements appended // Sparse: if _sparseLen != _len, then _ls/_ds are compressed to non-zero's only, // and _xs is the row number. Still _len is count of elements including // zeros, and _sparseLen is count of non-zeros. transient long _ls[]; // Mantissa transient int _xs[]; // Exponent, or if _ls==0, NA or Enum or Rows transient int _id[]; // Indices (row numbers) of stored values, used for sparse transient double _ds[]; // Doubles, for inflating via doubles long [] alloc_mantissa(int l) { return _ls = MemoryManager.malloc8(l); } int [] alloc_exponent(int l) { return _xs = MemoryManager.malloc4(l); } int [] alloc_indices(int l) { return _id = MemoryManager.malloc4(l); } double[] alloc_doubles(int l) { return _ds = MemoryManager.malloc8d(l); } final protected long [] mantissa() { return _ls; } final protected int [] exponent() { return _xs; } final protected int [] indices() { return _id; } final protected double[] doubles() { return _ds; } @Override public boolean isSparse() { return sparse(); } int _sparseLen; int set_sparseLen(int l) { _sparseLen = l; return l;} @Override public int sparseLen() { return _sparseLen; } int _naCnt=-1; // Count of NA's appended int _strCnt; // Count of Enum's appended int _nzCnt; // Count of non-zero's appended int _uuidCnt; // Count of UUIDs final int _timCnt[] = new int[ParseTime.TIME_PARSE.length]; // Count of successful time parses public static final int MIN_SPARSE_RATIO = 32; public NewChunk( Vec vec, int cidx ) { _vec = vec; _cidx = cidx; } // Constructor used when inflating a Chunk. public NewChunk( Chunk C ) { this(C._vec, C._vec.elem2ChunkIdx(C._start)); _start = C._start; } // Pre-sized newchunks. public NewChunk( Vec vec, int cidx, int len ) { this(vec,cidx); _ds = new double[len]; Arrays.fill(_ds,Double.NaN); _sparseLen = _len = len; } public final class Value { int _gId; // row number in dense (ie counting zeros) int _lId; // local array index of this value, equal to _gId if dense public Value(int lid, int gid){_lId = lid; _gId = gid;} public final int rowId0(){return _gId;} public void add2Chunk(NewChunk c){ if(_ds == null) c.addNum(_ls[_lId],_xs[_lId]); else { if( _ls != null ) c.addUUID(_ls[_lId], Double.doubleToRawLongBits(_ds[_lId])); else c.addNum(_ds[_lId]); } } } public Iterator<Value> values(int fromIdx, int toIdx){ final int lId, gId; final int to = Math.min(toIdx, _len); if (sparse()) { int x = Arrays.binarySearch(_id, 0, _sparseLen, fromIdx); if (x < 0) x = -x - 1; lId = x; gId = x == _sparseLen ? _len : _id[x]; } else lId = gId = fromIdx; final Value v = new Value(lId, gId); final Value next = new Value(lId, gId); return new Iterator<Value>() { @Override public final boolean hasNext() { return next._gId < to; } @Override public final Value next() { if (!hasNext()) throw new NoSuchElementException(); v._gId = next._gId; v._lId = next._lId; next._lId++; if (sparse()) next._gId = next._lId < _sparseLen ? 
_id[next._lId] : _len; else next._gId++; return v; } @Override public void remove() { throw new UnsupportedOperationException(); } }; } // Heuristic to decide the basic type of a column public byte type() { if( _naCnt == -1 ) { // No rollups yet? int nas=0, ss=0, nzs=0; if( _ds != null && _ls != null ) { // UUID? for( int i=0; i<_sparseLen; i++ ) if( _xs != null && _xs[i]==Integer.MIN_VALUE ) nas++; else if( _ds[i] !=0 || _ls[i] != 0 ) nzs++; _uuidCnt = _len-nas; } else if( _ds != null ) { // Doubles? assert _xs==null; for( int i = 0; i < _sparseLen; ++i) if( Double.isNaN(_ds[i]) ) nas++; else if( _ds[i]!=0 ) nzs++; } else { // Longs and enums? if( _ls != null ) for( int i=0; i<_sparseLen; i++ ) if( isNA2(i) ) nas++; else { if( isEnum2(i) ) ss++; if( _ls[i] != 0 ) nzs++; } } _nzCnt=nzs; _strCnt=ss; _naCnt=nas; } // Now run heuristic for type if(_naCnt == _len) // All NAs ==> NA Chunk return AppendableVec.NA; if(_strCnt > 0 && _strCnt + _naCnt == _len) return AppendableVec.ENUM; // All are Strings+NAs ==> Enum Chunk // UUIDs? if( _uuidCnt > 0 ) return AppendableVec.UUID; // Larger of time & numbers int timCnt=0; for( int t : _timCnt ) timCnt+=t; int nums = _len-_naCnt-timCnt; return timCnt >= nums ? AppendableVec.TIME : AppendableVec.NUMBER; } protected final boolean isNA2(int idx) { if (isUUID()) return _ls[idx]==C16Chunk._LO_NA && Double.doubleToRawLongBits(_ds[idx])==C16Chunk._HI_NA; return (_ds == null) ? (_ls[idx] == Long.MAX_VALUE && _xs[idx] == Integer.MIN_VALUE) : Double.isNaN(_ds[idx]); } protected final boolean isEnum2(int idx) { return _xs!=null && _xs[idx]==Integer.MIN_VALUE+1; } protected final boolean isEnum(int idx) { if(_id == null)return isEnum2(idx); int j = Arrays.binarySearch(_id,0,_sparseLen,idx); return j>=0 && isEnum2(j); } public void addEnum(int e) {append2(e,Integer.MIN_VALUE+1);} public void addNA() { if( isUUID() ) addUUID(C16Chunk._LO_NA,C16Chunk._HI_NA); else if (_ds != null) addNum(Double.NaN); else append2(Long.MAX_VALUE,Integer.MIN_VALUE); } public void addNum (long val, int exp) { if( isUUID() ) addNA(); else if(_ds != null) { assert _ls == null; addNum(val*PrettyPrint.pow10(exp)); } else { if( val == 0 ) exp = 0;// Canonicalize zero long t; // Remove extra scaling while( exp < 0 && exp > -9999999 && (t=val/10)*10==val ) { val=t; exp++; } append2(val,exp); } } // Fast-path append double data public void addNum(double d) { if( isUUID() ) { addNA(); return; } if(_id == null || d != 0) { if(_ls != null)switch_to_doubles(); if( _ds == null || _sparseLen >= _ds.length ) { append2slowd(); // call addNum again since append2slow might have flipped to sparse addNum(d); assert _sparseLen <= _len; return; } if(_id != null)_id[_sparseLen] = _len; _ds[_sparseLen++] = d; } _len++; assert _sparseLen <= _len; } // Append a UUID, stored in _ls & _ds public void addUUID( long lo, long hi ) { if( _ls==null || _ds== null || _sparseLen >= _ls.length ) append2slowUUID(); _ls[_sparseLen] = lo; _ds[_sparseLen++] = Double.longBitsToDouble(hi); _len++; assert _sparseLen <= _len; } public void addUUID( Chunk c, long row ) { if( c.isNA(row) ) addUUID(C16Chunk._LO_NA,C16Chunk._HI_NA); else addUUID(c.at16l(row),c.at16h(row)); } public void addUUID( Chunk c, int row ) { if( c.isNA0(row) ) addUUID(C16Chunk._LO_NA,C16Chunk._HI_NA); else addUUID(c.at16l0(row),c.at16h0(row)); } public final boolean isUUID(){return _ls != null && _ds != null; } public final boolean sparse(){return _id != null;} public void addZeros(int n){ if(!sparse()) for(int i = 0; i < n; ++i)addNum(0,0); else _len += n; } 
// Append all of 'nc' onto the current NewChunk. Kill nc. public void add( NewChunk nc ) { assert _cidx >= 0; assert _sparseLen <= _len; assert nc._sparseLen <= nc._len:"_sparseLen = " + nc._sparseLen + ", _len = " + nc._len; if( nc._len == 0 ) return; if(_len == 0){ _ls = nc._ls; nc._ls = null; _xs = nc._xs; nc._xs = null; _id = nc._id; nc._id = null; _ds = nc._ds; nc._ds = null; _sparseLen = nc._sparseLen; _len = nc._len; return; } if(nc.sparse() != sparse()){ // for now, just make it dense cancel_sparse(); nc.cancel_sparse(); } if( _ds != null ) throw H2O.unimpl(); while( _sparseLen+nc._sparseLen >= _xs.length ) _xs = MemoryManager.arrayCopyOf(_xs,_xs.length<<1); _ls = MemoryManager.arrayCopyOf(_ls,_xs.length); System.arraycopy(nc._ls,0,_ls,_sparseLen,nc._sparseLen); System.arraycopy(nc._xs,0,_xs,_sparseLen,nc._sparseLen); if(_id != null) { assert nc._id != null; _id = MemoryManager.arrayCopyOf(_id,_xs.length); System.arraycopy(nc._id,0,_id,_sparseLen,nc._sparseLen); for(int i = _sparseLen; i < _sparseLen + nc._sparseLen; ++i) _id[i] += _len; } else assert nc._id == null; _sparseLen += nc._sparseLen; _len += nc._len; nc._ls = null; nc._xs = null; nc._id = null; nc._sparseLen = nc._len = 0; assert _sparseLen <= _len; } // PREpend all of 'nc' onto the current NewChunk. Kill nc. public void addr( NewChunk nc ) { long [] tmpl = _ls; _ls = nc._ls; nc._ls = tmpl; int [] tmpi = _xs; _xs = nc._xs; nc._xs = tmpi; tmpi = _id; _id = nc._id; nc._id = tmpi; double[] tmpd = _ds; _ds = nc._ds; nc._ds = tmpd; int tmp = _sparseLen; _sparseLen=nc._sparseLen; nc._sparseLen=tmp; tmp = _len; _len = nc._len; nc._len = tmp; add(nc); } // Fast-path append long data void append2( long l, int x ) { if(_id == null || l != 0){ if(_ls == null || _sparseLen == _ls.length) { append2slow(); // again call append2 since calling append2slow might have changed things (eg might have switched to sparse and l could be 0) append2(l,x); return; } _ls[_sparseLen] = l; _xs[_sparseLen] = x; if(_id != null)_id[_sparseLen] = _len; _sparseLen++; } _len++; assert _sparseLen <= _len; } // Slow-path append data private void append2slowd() { if( _sparseLen > Vec.CHUNK_SZ ) throw new ArrayIndexOutOfBoundsException(_sparseLen); assert _ls==null; if(_ds != null && _ds.length > 0){ if(_id == null){ // check for sparseness int nzs = 0; // assume one non-zero for the element currently being stored for(double d:_ds)if(d != 0)++nzs; if((nzs+1)*MIN_SPARSE_RATIO < _len) set_sparse(nzs); } else _id = MemoryManager.arrayCopyOf(_id, _sparseLen << 1); _ds = MemoryManager.arrayCopyOf(_ds,_sparseLen<<1); } else { alloc_doubles(4); if (sparse()) alloc_indices(4); } assert _sparseLen == 0 || _ds.length > _sparseLen :"_ds.length = " + _ds.length + ", _sparseLen = " + _sparseLen; } // Slow-path append data private void append2slowUUID() { if( _sparseLen > Vec.CHUNK_SZ ) throw new ArrayIndexOutOfBoundsException(_sparseLen); if( _ds==null && _ls!=null ) { // This can happen for columns with all NAs and then a UUID _xs=null; alloc_doubles(_sparseLen); Arrays.fill(_ls,C16Chunk._LO_NA); Arrays.fill(_ds,Double.longBitsToDouble(C16Chunk._HI_NA)); } if( _ls != null && _ls.length > 0 ) { _ls = MemoryManager.arrayCopyOf(_ls,_sparseLen<<1); _ds = MemoryManager.arrayCopyOf(_ds,_sparseLen<<1); } else { alloc_mantissa(4); alloc_doubles(4); } assert _sparseLen == 0 || _ls.length > _sparseLen:"_ls.length = " + _ls.length + ", _sparseLen = " + _sparseLen; } // Slow-path append data private void append2slow( ) { if( _sparseLen > Vec.CHUNK_SZ ) throw new 
ArrayIndexOutOfBoundsException(_sparseLen); assert _ds==null; if(_ls != null && _ls.length > 0){ if(_id == null){ // check for sparseness int nzs = 0; for(int i = 0; i < _ls.length; ++i) if(_ls[i] != 0 || _xs[i] != 0)++nzs; if((nzs+1)*MIN_SPARSE_RATIO < _len){ set_sparse(nzs); assert _sparseLen == 0 || _sparseLen <= _ls.length:"_sparseLen = " + _sparseLen + ", _ls.length = " + _ls.length + ", nzs = " + nzs + ", len2 = " + _len; assert _id.length == _ls.length; assert _sparseLen <= _len; return; } } else { // verify we're still sufficiently sparse if((MIN_SPARSE_RATIO*(_sparseLen) >> 1) > _len) cancel_sparse(); else _id = MemoryManager.arrayCopyOf(_id,_sparseLen<<1); } _ls = MemoryManager.arrayCopyOf(_ls,_sparseLen<<1); _xs = MemoryManager.arrayCopyOf(_xs,_sparseLen<<1); } else { alloc_mantissa(4); alloc_exponent(4); if (_id != null) alloc_indices(4); } assert _sparseLen == 0 || _sparseLen < _ls.length:"_sparseLen = " + _sparseLen + ", _ls.length = " + _ls.length; assert _id == null || _id.length == _ls.length; assert _sparseLen <= _len; } // Do any final actions on a completed NewVector. Mostly: compress it, and // do a DKV put on an appropriate Key. The original NewVector goes dead // (does not live on inside the K/V store). public Chunk new_close() { Chunk chk = compress(); if(_vec instanceof AppendableVec) ((AppendableVec)_vec).closeChunk(this); return chk; } public void close(Futures fs) { close(_cidx,fs); } protected void switch_to_doubles(){ assert _ds == null; double [] ds = MemoryManager.malloc8d(_sparseLen); for(int i = 0; i < _sparseLen; ++i) if(isNA2(i) || isEnum2(i))ds[i] = Double.NaN; else ds[i] = _ls[i]*PrettyPrint.pow10(_xs[i]); _ls = null; _xs = null; _ds = ds; } protected void set_sparse(int nzeros){ if(_sparseLen == nzeros && _len != 0)return; if(_id != null){ // we have sparse represenation but some 0s in it! int [] id = MemoryManager.malloc4(nzeros); int j = 0; if(_ds != null){ double [] ds = MemoryManager.malloc8d(nzeros); for(int i = 0; i < _sparseLen; ++i){ if(_ds[i] != 0){ ds[j] = _ds[i]; id[j] = _id[i]; ++j; } } _ds = ds; } else { long [] ls = MemoryManager.malloc8(nzeros); int [] xs = MemoryManager.malloc4(nzeros); for(int i = 0; i < _sparseLen; ++i){ if(_ls[i] != 0){ ls[j] = _ls[i]; xs[j] = _xs[i]; id[j] = _id[i]; ++j; } } _ls = ls; _xs = xs; } _id = id; assert j == nzeros; _sparseLen = nzeros; return; } assert _sparseLen == _len:"_sparseLen = " + _sparseLen + ", _len = " + _len + ", nzeros = " + nzeros; int zs = 0; if(_ds == null){ if (_len == 0) { _ls = new long[0]; _xs = new int[0]; _id = new int[0]; _sparseLen = 0; return; } else { assert nzeros < _ls.length; _id = MemoryManager.malloc4(_ls.length); for (int i = 0; i < _sparseLen; ++i) { if (_ls[i] == 0 && _xs[i] == 0) ++zs; else { _ls[i - zs] = _ls[i]; _xs[i - zs] = _xs[i]; _id[i - zs] = i; } } } } else { assert nzeros < _ds.length; _id = alloc_indices(_ds.length); for(int i = 0; i < _sparseLen; ++i){ if(_ds[i] == 0)++zs; else { _ds[i-zs] = _ds[i]; _id[i-zs] = i; } } } assert zs == (_sparseLen - nzeros); _sparseLen = nzeros; } protected void cancel_sparse(){ if(_sparseLen != _len){ if(_ds == null){ int [] xs = MemoryManager.malloc4(_len); long [] ls = MemoryManager.malloc8(_len); for(int i = 0; i < _sparseLen; ++i){ xs[_id[i]] = _xs[i]; ls[_id[i]] = _ls[i]; } _xs = xs; _ls = ls; } else { double [] ds = MemoryManager.malloc8d(_len); for(int i = 0; i < _sparseLen; ++i) ds[_id[i]] = _ds[i]; _ds = ds; } _sparseLen = _len; } _id = null; } // Study this NewVector and determine an appropriate compression scheme. 
// Return the data so compressed. static final int MAX_FLOAT_MANTISSA = 0x7FFFFF; Chunk compress() { Chunk res = compress2(); assert _len == res.len(); assert !sparse() || !res.isSparse() || sparseLen() == res.sparseLen(); // force everything to null after compress to free up the memory _id = null; _xs = null; _ds = null; _ls = null; return res; } private static long leRange(long lemin, long lemax){ if(lemin < 0 && lemax >= (Long.MAX_VALUE + lemin)) return Long.MAX_VALUE; // if overflow return 64 as the max possible value long res = lemax - lemin; assert res >= 0; return res; } private Chunk compress2() { // Check for basic mode info: all missing or all strings or mixed stuff byte mode = type(); if( mode==AppendableVec.NA ) // ALL NAs, nothing to do return new C0DChunk(Double.NaN,_sparseLen); boolean rerun=false; if(mode == AppendableVec.ENUM){ for( int i=0; i<_sparseLen; i++ ) if(isEnum2(i)) _xs[i] = 0; else if(!isNA2(i)){ setNA_impl2(i); ++_naCnt; } // Smack any mismatched string/numbers } else if(mode == AppendableVec.NUMBER){ for( int i=0; i<_sparseLen; i++ ) if(isEnum2(i)) { setNA_impl2(i); rerun = true; } } if( rerun ) { _naCnt = -1; type(); } // Re-run rollups after dropping all numbers/enums boolean sparse = false; // sparse? treat as sparse iff we have at least MIN_SPARSE_RATIOx more zeros than nonzeros if(MIN_SPARSE_RATIO*(_naCnt + _nzCnt) < _len) { set_sparse(_naCnt + _nzCnt); sparse = true; } else if(_sparseLen != _len) cancel_sparse(); // If the data is UUIDs there's not much compression going on if( _ds != null && _ls != null ) return chunkUUID(); // If the data was set8 as doubles, we do a quick check to see if it's // plain longs. If not, we give up and use doubles. if( _ds != null ) { int i=0; boolean isConstant = true; boolean isInteger = true; if ( sparse ) { isConstant = _sparseLen == 0; for( ; i< _sparseLen; i++ ) { if (!Double.isNaN(_ds[i])) isInteger &= (double) (long) _ds[i] == _ds[i]; } } else { assert(_ds.length >= _len); for( ; i< _len; i++ ) { if (!Double.isNaN(_ds[i])) isInteger &= (double) (long) _ds[i] == _ds[i]; isConstant &= _ds[i] == _ds[0]; } assert(_sparseLen == _len); } if (!isInteger) { if (isConstant) return new C0DChunk(_ds[0], _len); if (sparse) return new CXDChunk(len(), sparseLen(), 8, bufD(8)); else return chunkD(); } _ls = new long[_ds.length]; // Else flip to longs _xs = new int [_ds.length]; double [] ds = _ds; _ds = null; final int naCnt = _naCnt; for( i=0; i< _sparseLen; i++ ) // Inject all doubles into longs if( Double.isNaN(ds[i]) )setNA_impl2(i); else _ls[i] = (long)ds[i]; // setNA_impl2 will set _naCnt to -1! // we already know what the naCnt is (it did not change!) so set it back to correct value _naCnt = naCnt; } // IF (_len > _sparseLen) THEN Sparse // Check for compressed *during appends*. Here we know: // - No specials; _xs[]==0. // - No floats; _ds==null // - NZ length in _sparseLen, actual length in _len. // - Huge ratio between _len and _sparseLen, and we do NOT want to inflate to // the larger size; we need to keep it all small all the time. // - Rows in _xs // Data in some fixed-point format, not doubles // See if we can sanely normalize all the data to the same fixed-point. 
int xmin = Integer.MAX_VALUE; // min exponent found boolean floatOverflow = false; double min = Double.POSITIVE_INFINITY; double max = Double.NEGATIVE_INFINITY; int p10iLength = PrettyPrint.powers10i.length; long llo=Long .MAX_VALUE, lhi=Long .MIN_VALUE; int xlo=Integer.MAX_VALUE, xhi=Integer.MIN_VALUE; for( int i=0; i<_sparseLen; i++ ) { if( isNA2(i) ) continue; long l = _ls[i]; int x = _xs[i]; assert x != Integer.MIN_VALUE:"l = " + l + ", x = " + x; if( x==Integer.MIN_VALUE+1) x=0; // Replace enum flag with no scaling assert l!=0 || x==0:"l == 0 while x = " + x + " ls = " + Arrays.toString(_ls); // Exponent of zero is always zero long t; // Remove extra scaling while( l!=0 && (t=l/10)*10==l ) { l=t; x++; } // Compute per-chunk min/max double d = l*PrettyPrint.pow10(x); if( d < min ) { min = d; llo=l; xlo=x; } if( d > max ) { max = d; lhi=l; xhi=x; } floatOverflow = l < Integer.MIN_VALUE+1 || l > Integer.MAX_VALUE; xmin = Math.min(xmin,x); } if(_len != _sparseLen){ // sparse? then compare vs implied 0s if( min > 0 ) { min = 0; llo=0; xlo=0; } if( max < 0 ) { max = 0; lhi=0; xhi=0; } xmin = Math.min(xmin,0); } // Constant column? if( _naCnt==0 && (min==max)) { if (llo == lhi && xlo == 0 && xhi == 0) return new C0LChunk(llo, len()); else if ((long)min == min) return new C0LChunk((long)min, len()); else return new C0DChunk(min, len()); } // Compute min & max, as scaled integers in the xmin scale. // Check for overflow along the way boolean overflow = ((xhi-xmin) >= p10iLength) || ((xlo-xmin) >= p10iLength); long lemax=0, lemin=0; if( !overflow ) { // Can at least get the power-of-10 without overflow long pow10 = PrettyPrint.pow10i(xhi-xmin); lemax = lhi*pow10; // Hacker's Delight, Section 2-13, checking overflow. // Note that the power-10 is always positive, so the test devolves this: if( (lemax/pow10) != lhi ) overflow = true; // Note that xlo might be > xmin; e.g. { 101e-49 , 1e-48}. long pow10lo = PrettyPrint.pow10i(xlo-xmin); lemin = llo*pow10lo; if( (lemin/pow10lo) != llo ) overflow = true; } // Boolean column? if (max == 1 && min == 0 && xmin == 0 && !overflow) { if(sparse) { // Very sparse? return _naCnt==0 ? new CX0Chunk(_len,_sparseLen,bufS(0))// No NAs, can store as sparse bitvector : new CXIChunk(_len,_sparseLen,1,bufS(1)); // have NAs, store as sparse 1byte values } int bpv = _strCnt+_naCnt > 0 ? 2 : 1; // Bit-vector byte[] cbuf = bufB(bpv); return new CBSChunk(cbuf, cbuf[0], cbuf[1]); } final boolean fpoint = xmin < 0 || min < Long.MIN_VALUE || max > Long.MAX_VALUE; if( sparse ) { if(fpoint) return new CXDChunk(_len,_sparseLen,8,bufD(8)); int sz = 8; if( Short.MIN_VALUE <= min && max <= Short.MAX_VALUE ) sz = 2; else if( Integer.MIN_VALUE <= min && max <= Integer.MAX_VALUE ) sz = 4; return new CXIChunk(_len,_sparseLen,sz,bufS(sz)); } // Exponent scaling: replacing numbers like 1.3 with 13e-1. '13' fits in a // byte and we scale the column by 0.1. A set of numbers like // {1.2,23,0.34} then is normalized to always be represented with 2 digits // to the right: {1.20,23.00,0.34} and we scale by 100: {120,2300,34}. // This set fits in a 2-byte short. // We use exponent-scaling for bytes & shorts only; it's uncommon (and not // worth it) for larger numbers. We need to get the exponents to be // uniform, so we scale up the largest lmax by the largest scale we need // and if that fits in a byte/short - then it's worth compressing. Other // wise we just flip to a float or double representation. 
if( overflow || (fpoint && floatOverflow) || -35 > xmin || xmin > 35 ) return chunkD(); final long leRange = leRange(lemin,lemax); if( fpoint ) { if( (int)lemin == lemin && (int)lemax == lemax ) { if(leRange < 255) // Fits in scaled biased byte? return new C1SChunk( bufX(lemin,xmin,C1SChunk.OFF,0),lemin,PrettyPrint.pow10(xmin)); if(leRange < 65535) { // we use signed 2B short, add -32k to the bias! long bias = 32767 + lemin; return new C2SChunk( bufX(bias,xmin,C2SChunk.OFF,1),bias,PrettyPrint.pow10(xmin)); } } if(leRange < 4294967295l) { long bias = 2147483647l + lemin; return new C4SChunk( bufX(bias,xmin,C4SChunk.OFF,2),bias,PrettyPrint.pow10(xmin)); } return chunkD(); } // else an integer column // Compress column into a byte if(xmin == 0 && 0<=lemin && lemax <= 255 && ((_naCnt + _strCnt)==0) ) return new C1NChunk( bufX(0,0,C1NChunk.OFF,0)); if( lemin < Integer.MIN_VALUE ) return new C8Chunk( bufX(0,0,0,3)); if( leRange < 255 ) { // Span fits in a byte? if(0 <= min && max < 255 ) // Span fits in an unbiased byte? return new C1Chunk( bufX(0,0,C1Chunk.OFF,0)); return new C1SChunk( bufX(lemin,xmin,C1SChunk.OFF,0),lemin,PrettyPrint.pow10i(xmin)); } // Compress column into a short if( leRange < 65535 ) { // Span fits in a biased short? if( xmin == 0 && Short.MIN_VALUE < lemin && lemax <= Short.MAX_VALUE ) // Span fits in an unbiased short? return new C2Chunk( bufX(0,0,C2Chunk.OFF,1)); int bias = (int)(lemin-(Short.MIN_VALUE+1)); return new C2SChunk( bufX(bias,xmin,C2SChunk.OFF,1),bias,PrettyPrint.pow10i(xmin)); } // Compress column into ints if( Integer.MIN_VALUE < min && max <= Integer.MAX_VALUE ) return new C4Chunk( bufX(0,0,0,2)); return new C8Chunk( bufX(0,0,0,3)); } private static long [] NAS = {C1Chunk._NA,C2Chunk._NA,C4Chunk._NA,C8Chunk._NA}; // Compute a sparse integer buffer private byte[] bufS(final int valsz){ int log = 0; while((1 << log) < valsz)++log; assert valsz == 0 || (1 << log) == valsz; final int ridsz = _len >= 65535?4:2; final int elmsz = ridsz + valsz; int off = CXIChunk.OFF; byte [] buf = MemoryManager.malloc1(off + _sparseLen*elmsz,true); for( int i=0; i<_sparseLen; i++, off += elmsz ) { if(ridsz == 2) UDP.set2(buf,off,(short)_id[i]); else UDP.set4(buf,off,_id[i]); if(valsz == 0){ assert _xs[i] == 0 && _ls[i] == 1; continue; } assert _xs[i] == Integer.MIN_VALUE || _xs[i] >= 0:"unexpected exponent " + _xs[i]; // assert we have int or NA final long lval = _xs[i] == Integer.MIN_VALUE?NAS[log]:_ls[i]*PrettyPrint.pow10i(_xs[i]); switch(valsz){ case 1: buf[off+ridsz] = (byte)lval; break; case 2: short sval = (short)lval; UDP.set2(buf,off+ridsz,sval); break; case 4: int ival = (int)lval; UDP.set4(buf, off+ridsz, ival); break; case 8: UDP.set8(buf, off+ridsz, lval); break; default: throw H2O.unimpl(); } } assert off==buf.length; return buf; } // Compute a sparse float buffer private byte[] bufD(final int valsz){ int log = 0; while((1 << log) < valsz)++log; assert (1 << log) == valsz; final int ridsz = _len >= 65535?4:2; final int elmsz = ridsz + valsz; int off = CXDChunk.OFF; byte [] buf = MemoryManager.malloc1(off + _sparseLen*elmsz,true); for( int i=0; i<_sparseLen; i++, off += elmsz ) { if(ridsz == 2) UDP.set2(buf,off,(short)_id[i]); else UDP.set4(buf,off,_id[i]); final double dval = _ds == null?isNA2(i)?Double.NaN:_ls[i]*PrettyPrint.pow10(_xs[i]):_ds[i]; switch(valsz){ case 4: UDP.set4f(buf, off + ridsz, (float) dval); break; case 8: UDP.set8d(buf, off + ridsz, dval); break; default: throw H2O.unimpl(); } } assert off==buf.length; return buf; } // Compute a compressed 
integer buffer private byte[] bufX( long bias, int scale, int off, int log ) { byte[] bs = new byte[(_len<<log)+off]; int j = 0; for( int i=0; i<_len; i++ ) { long le = -bias; if(_id == null || _id.length == 0 || (j < _id.length && _id[j] == i)){ if( isNA2(j) ) { le = NAS[log]; } else { int x = (_xs[j]==Integer.MIN_VALUE+1 ? 0 : _xs[j])-scale; le += x >= 0 ? _ls[j]*PrettyPrint.pow10i( x) : _ls[j]/PrettyPrint.pow10i(-x); } ++j; } switch( log ) { case 0: bs [i +off] = (byte)le ; break; case 1: UDP.set2(bs,(i<<1)+off, (short)le); break; case 2: UDP.set4(bs,(i<<2)+off, (int)le); break; case 3: UDP.set8(bs,(i<<3)+off, le); break; default: throw H2O.fail(); } } assert j == _sparseLen:"j = " + j + ", len = " + _sparseLen + ", len2 = " + _len + ", id[j] = " + _id[j]; return bs; } // Compute a compressed double buffer private Chunk chunkD() { if (H2O.SINGLE_PRECISION) { final byte[] bs = MemoryManager.malloc1(_len * 4, true); int j = 0; for (int i = 0; i < _len; ++i) { float f = 0; if (_id == null || _id.length == 0 || (j < _id.length && _id[j] == i)) { f = _ds != null ? (float)_ds[j] : (isNA2(j) || isEnum(j)) ? Float.NaN : (float)(_ls[j] * PrettyPrint.pow10(_xs[j])); ++j; } UDP.set4f(bs, 4 * i, f); } assert j == _sparseLen : "j = " + j + ", _sparseLen = " + _sparseLen; return new C4FChunk(bs); } else { final byte[] bs = MemoryManager.malloc1(_len * 8, true); int j = 0; for (int i = 0; i < _len; ++i) { double d = 0; if (_id == null || _id.length == 0 || (j < _id.length && _id[j] == i)) { d = _ds != null ? _ds[j] : (isNA2(j) || isEnum(j)) ? Double.NaN : _ls[j] * PrettyPrint.pow10(_xs[j]); ++j; } UDP.set8d(bs, 8 * i, d); } assert j == _sparseLen : "j = " + j + ", _sparseLen = " + _sparseLen; return new C8DChunk(bs); } } // Compute a compressed UUID buffer private Chunk chunkUUID() { final byte [] bs = MemoryManager.malloc1(_len*16,true); int j = 0; for( int i = 0; i < _len; ++i ) { long lo = 0, hi=0; if(_id == null || _id.length == 0 || (j < _id.length && _id[j] == i ) ) { lo = _ls[j]; hi = Double.doubleToRawLongBits(_ds[j++]); if( _xs != null && _xs[j] == Integer.MAX_VALUE){// NA? lo = Long.MIN_VALUE; hi = 0; // Canonical NA value } } UDP.set8(bs, 16*i , lo); UDP.set8(bs, 16*i+8, hi); } assert j == _sparseLen:"j = " + j + ", _sparseLen = " + _sparseLen; return new C16Chunk(bs); } // Compute compressed boolean buffer private byte[] bufB(int bpv) { assert bpv == 1 || bpv == 2 : "Only bit vectors with/without NA are supported"; final int off = CBSChunk.OFF; int clen = off + CBSChunk.clen(_len, bpv); byte bs[] = new byte[clen]; // Save the gap = number of unfilled bits and bpv value bs[0] = (byte) (((_len*bpv)&7)==0 ? 0 : (8-((_len*bpv)&7))); bs[1] = (byte) bpv; // Dense bitvector int boff = 0; byte b = 0; int idx = CBSChunk.OFF; int j = 0; for (int i=0; i<_len; i++) { byte val = 0; if(_id == null || (j < _id.length && _id[j] == i)) { assert bpv == 2 || !isNA2(j); val = (byte)(isNA2(j)?CBSChunk._NA:_ls[j]); ++j; } if( bpv==1 ) b = CBSChunk.write1b(b, val, boff); else b = CBSChunk.write2b(b, val, boff); boff += bpv; if (boff>8-bpv) { assert boff == 8; bs[idx] = b; boff = 0; b = 0; idx++; } } assert j == _sparseLen; assert bs[0] == (byte) (boff == 0 ? 0 : 8-boff):"b[0] = " + bs[0] + ", boff = " + boff + ", bpv = " + bpv; // Flush last byte if (boff>0) bs[idx] = b; return bs; } // Set & At on NewChunks are weird: only used after inflating some other // chunk. At this point the NewChunk is full size, no more appends allowed, // and the xs exponent array should be only full of zeros. 
Accesses must be // in-range and refer to the inflated values of the original Chunk. @Override boolean set_impl(int i, long l) { if( _ds != null ) return set_impl(i,(double)l); if(_sparseLen != _len){ // sparse? int idx = Arrays.binarySearch(_id,0,_sparseLen,i); if(idx >= 0)i = idx; else cancel_sparse(); // for now don't bother setting the sparse value } _ls[i]=l; _xs[i]=0; _naCnt = -1; return true; } @Override public boolean set_impl(int i, double d) { if(_ds == null){ assert _sparseLen == 0 || _ls != null; switch_to_doubles(); } if(_sparseLen != _len){ // sparse? int idx = Arrays.binarySearch(_id,0,_sparseLen,i); if(idx >= 0)i = idx; else cancel_sparse(); // for now don't bother setting the sparse value } while(i >= _len) append2slowd(); _ds[i] = d; _naCnt = -1; return true; } @Override boolean set_impl(int i, float f) { return set_impl(i,(double)f); } protected final boolean setNA_impl2(int i) { if( isNA2(i) ) return true; if( _ls != null ) { _ls[i] = Long.MAX_VALUE; _xs[i] = Integer.MIN_VALUE; } if( _ds != null ) { _ds[i] = Double.NaN; } _naCnt = -1; return true; } @Override boolean setNA_impl(int i) { if( isNA_impl(i) ) return true; if(_sparseLen != _len){ int idx = Arrays.binarySearch(_id,0,_sparseLen,i); if(idx >= 0) i = idx; else cancel_sparse(); // todo - do not necessarily cancel sparse here } return setNA_impl2(i); } @Override public long at8_impl( int i ) { if( _len != _sparseLen ) { int idx = Arrays.binarySearch(_id,0,_sparseLen,i); if(idx >= 0) i = idx; else return 0; } if(isNA2(i))throw new RuntimeException("Attempting to access NA as integer value."); if( _ls == null ) return (long)_ds[i]; return _ls[i]*PrettyPrint.pow10i(_xs[i]); } @Override public double atd_impl( int i ) { if( _len != _sparseLen ) { int idx = Arrays.binarySearch(_id,0,_sparseLen,i); if(idx >= 0) i = idx; else return 0; } // if exponent is Integer.MIN_VALUE (for missing value) or >=0, then go the integer path (at8_impl) // negative exponents need to be handled right here if( _ds == null ) return isNA2(i) || _xs[i] >= 0 ? at8_impl(i) : _ls[i]*Math.pow(10,_xs[i]); assert _xs==null; return _ds[i]; } @Override protected long at16l_impl(int idx) { if(_ls[idx] == C16Chunk._LO_NA) throw new RuntimeException("Attempting to access NA as integer value."); return _ls[idx]; } @Override protected long at16h_impl(int idx) { long hi = Double.doubleToRawLongBits(_ds[idx]); if(hi == C16Chunk._HI_NA) throw new RuntimeException("Attempting to access NA as integer value."); return hi; } @Override public boolean isNA_impl( int i ) { if( _len != _sparseLen ) { int idx = Arrays.binarySearch(_id,0,_sparseLen,i); if(idx >= 0) i = idx; else return false; } return isNA2(i); } @Override public AutoBuffer write(AutoBuffer bb) { throw H2O.fail(); } @Override public NewChunk read(AutoBuffer bb) { throw H2O.fail(); } @Override NewChunk inflate_impl(NewChunk nc) { throw H2O.fail(); } @Override boolean hasFloat() { throw H2O.fail(); } @Override public String toString() { return "NewChunk._sparseLen="+_sparseLen; } }
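NewChunk.compress2 above normalizes fixed-point data by rebasing every (mantissa, exponent) pair to the smallest exponent in the column, e.g. {1.2, 23, 0.34} becomes {120, 2300, 34} at scale 1e-2, which can then be stored in a biased short chunk. The following standalone sketch reproduces just that rebasing step in plain Java; the names are illustrative and not H2O API.

// Standalone sketch of the exponent-scaling step described in NewChunk.compress2.
public class ExponentScaleSketch {
  static long pow10i(int x) { long p = 1; while (x-- > 0) p *= 10; return p; }

  public static void main(String[] args) {
    // {1.2, 23, 0.34} parsed as mantissa/exponent pairs
    long[] mantissa = { 12,  23, 34 };
    int [] exponent = { -1,   0, -2 };

    int xmin = Integer.MAX_VALUE;                            // smallest exponent = common scale
    for (int x : exponent) xmin = Math.min(xmin, x);         // here: -2, i.e. scale 1e-2

    long[] scaled = new long[mantissa.length];
    for (int i = 0; i < mantissa.length; i++)
      scaled[i] = mantissa[i] * pow10i(exponent[i] - xmin);  // {120, 2300, 34}

    long lo = Long.MAX_VALUE, hi = Long.MIN_VALUE;
    for (long v : scaled) { lo = Math.min(lo, v); hi = Math.max(hi, v); }
    // The range 34..2300 fits a biased 2-byte value, so such a column could be held
    // in a C2SChunk-style chunk with bias=lo and scale=1e-2, storing (v - lo) per row.
    System.out.println("scale=1e" + xmin + " biased range=" + (hi - lo));
  }
}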
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/ParseDataset2.java
package water.fvec; import java.io.IOException; import java.io.InputStream; import java.util.Arrays; import java.util.zip.*; import jsr166y.CountedCompleter; import water.*; import water.H2O.H2OCountedCompleter; import water.fvec.Vec.VectorGroup; import water.nbhm.NonBlockingHashMap; import water.nbhm.NonBlockingSetInt; import water.parser.*; import water.parser.CustomParser.ParserSetup; import water.parser.CustomParser.ParserType; import water.parser.CustomParser.StreamDataOut; import water.parser.Enum; import water.util.FrameUtils; import water.util.Log; import water.util.Utils.IcedHashMap; import water.util.Utils.IcedInt; import water.util.Utils; public final class ParseDataset2 extends Job { public final Key _progress; // Job progress Key private MultiFileParseTask _mfpt; // Access to partially built vectors for cleanup after parser crash public static enum Compression { NONE, ZIP, GZIP } public static Key [] filterEmptyFiles(Key [] keys){ Arrays.sort(keys); // first check if there are any empty files and if so remove them Vec [] vecs = new Vec [keys.length]; int c = 0; for(int i = 0; i < vecs.length; ++i) { vecs[i] = getVec(keys[i]); if(vecs[i].length() == 0) c++; } if(c > 0){ // filter out empty files Key[] ks = new Key[keys.length-c]; Vec[] vs = new Vec[vecs.length-c]; int j = 0; for(int i = 0; i < keys.length; ++i) if(vecs[i].length() != 0){ ks[j] = keys[i]; vs[j] = vecs[i]; ++j; } keys = ks; } return keys; } // -------------------------------------------------------------------------- // Parse an array of csv input/file keys into an array of distributed output Vecs public static Frame parse(Key okey, Key [] keys) { return parse(okey,keys,new GuessSetup.GuessSetupTsk(new ParserSetup(),true).invoke(keys)._gSetup._setup,true); } public static Frame parse(Key okey, Key[] keys, CustomParser.ParserSetup globalSetup, boolean delete_on_done) { if( globalSetup._ncols == 0 ) throw new java.lang.IllegalArgumentException(globalSetup.toString()); return forkParseDataset(okey, keys, globalSetup, delete_on_done).get(); } // Same parse, as a backgroundable Job public static ParseDataset2 forkParseDataset(final Key dest, Key[] keys, final CustomParser.ParserSetup setup, boolean delete_on_done) { keys = filterEmptyFiles(keys); setup.checkDupColumnNames(); // Some quick sanity checks: no overwriting your input key, and a resource check. long sum=0; for( Key k : keys ) { if( dest.equals(k) ) throw new IllegalArgumentException("Destination key "+dest+" must be different from all sources"); sum += DKV.get(k).length(); // Sum of all input filesizes } long memsz=0; // Cluster memory for( H2ONode h2o : H2O.CLOUD._memary ) memsz += h2o.get_max_mem(); if( sum > memsz*4 ) throw new IllegalArgumentException("Total input file size of "+PrettyPrint.bytes(sum)+" is much larger than total cluster memory of "+PrettyPrint.bytes(memsz)+", please use either a larger cluster or smaller data."); ParseDataset2 job = new ParseDataset2(dest, keys); new Frame(job.dest(),new String[0],new Vec[0]).delete_and_lock(job.self()); // Lock BEFORE returning for( Key k : keys ) Lockable.read_lock(k,job.self()); // Lock BEFORE returning ParserFJTask fjt = new ParserFJTask(job, keys, setup, delete_on_done); // Fire off background parse // Make a wrapper class that only *starts* when the ParserFJTask fjt // completes - especially it only starts even when fjt completes // exceptionally... thus the fjt onExceptionalCompletion code runs // completely before this empty task starts - providing a simple barrier. 
// Threads blocking on the job will block on the "cleanup" task, which will // block until the fjt runs the onCompletion or onExceptionCompletion code. H2OCountedCompleter cleanup = new H2OCountedCompleter() { @Override public void compute2() { } @Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) { return true; } }; fjt.setCompleter(cleanup); job.start(cleanup); H2O.submitTask(fjt); return job; } // Setup a private background parse job private ParseDataset2(Key dest, Key[] fkeys) { destination_key = dest; // Job progress Key _progress = Key.make((byte) 0, Key.JOB); UKV.put(_progress, ParseProgress.make(fkeys)); } // Simple internal class doing background parsing, with trackable Job status public static class ParserFJTask extends H2OCountedCompleter { final ParseDataset2 _job; Key[] _keys; CustomParser.ParserSetup _setup; boolean _delete_on_done; public ParserFJTask( ParseDataset2 job, Key[] keys, CustomParser.ParserSetup setup, boolean delete_on_done) { _job = job; _keys = keys; _setup = setup; _delete_on_done = delete_on_done; } @Override public void compute2() { parse_impl(_job, _keys, _setup, _delete_on_done); tryComplete(); } // Took a crash/NPE somewhere in the parser. Attempt cleanup. @Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller){ Futures fs = new Futures(); if( _job != null ) { UKV.remove(_job.destination_key,fs); UKV.remove(_job._progress,fs); // Find & remove all partially-built output vecs & chunks if( _job._mfpt != null ) _job._mfpt.onExceptionCleanup(fs); } // Assume the input is corrupt - or already partially deleted after // parsing. Nuke it all - no partial Vecs lying around. for( Key k : _keys ) UKV.remove(k,fs); fs.blockForPending(); // As soon as the job is canceled, threads blocking on the job will // wake up. Better have all cleanup done first! if( _job != null ) _job.cancel(ex); return true; } } // -------------------------------------------------------------------------- // Parser progress static class ParseProgress extends Iced { final long _total; long _value; DException _ex; ParseProgress(long val, long total){_value = val; _total = total;} // Total number of steps is equal to total bytecount across files static ParseProgress make( Key[] fkeys ) { long total = 0; for( Key fkey : fkeys ) total += getVec(fkey).length(); return new ParseProgress(0,total); } public void setException(DException ex){_ex = ex;} public DException getException(){return _ex;} } static void onProgress(final long len, final Key progress) { new TAtomic<ParseProgress>() { @Override public ParseProgress atomic(ParseProgress old) { if (old == null) return null; old._value += len; return old; } }.fork(progress); } @Override public float progress() { ParseProgress progress = UKV.get(_progress); if( progress == null || progress._total == 0 ) return 0; return progress._value / (float) progress._total; } @Override public void remove() { DKV.remove(_progress); super.remove(); } /** Task to update enum values to match the global numbering scheme. * Performs update in place so that values originally numbered using * node-local unordered numbering will be numbered using global numbering. 
* @author tomasnykodym */ private static class EnumUpdateTask extends MRTask2<EnumUpdateTask> { private transient int[][][] _emap; final Key _eKey; private final ValueString [][] _gDomain; private final Enum [][] _lEnums; private final int [] _chunk2Enum; private final int [] _colIds; private EnumUpdateTask(ValueString [][] gDomain, Enum [][] lEnums, int [] chunk2Enum, Key lDomKey, int [] colIds){ _gDomain = gDomain; _lEnums = lEnums; _chunk2Enum = chunk2Enum; _eKey = lDomKey; _colIds = colIds; } private int[][] emap(int nodeId) { if( _emap == null ) _emap = new int[_lEnums.length][][]; if( _emap[nodeId] == null ) { int[][] emap = new int[_gDomain.length][]; for( int i = 0; i < _gDomain.length; ++i ) { if( _gDomain[i] != null ) { assert _lEnums[nodeId] != null : "missing lEnum of node " + nodeId + ", enums = " + Arrays.toString(_lEnums); final Enum e = _lEnums[nodeId][_colIds[i]]; emap[i] = new int[e.maxId()+1]; Arrays.fill(emap[i], -1); for(int j = 0; j < _gDomain[i].length; ++j) { ValueString vs = _gDomain[i][j]; if( e.containsKey(vs) ) { assert e.getTokenId(vs) <= e.maxId():"maxIdx = " + e.maxId() + ", got " + e.getTokenId(vs); emap[i][e.getTokenId(vs)] = j; } } } } _emap[nodeId] = emap; } return _emap[nodeId]; } @Override public void map(Chunk [] chks){ int[][] emap = emap(_chunk2Enum[chks[0].cidx()]); final int cidx = chks[0].cidx(); for(int i = 0; i < chks.length; ++i) { Chunk chk = chks[i]; if(_gDomain[i] == null) // killed, replace with all NAs DKV.put(chk._vec.chunkKey(chk.cidx()),new C0DChunk(Double.NaN,chk._len)); else for( int j = 0; j < chk._len; ++j){ if( chk.isNA0(j) )continue; long l = chk.at80(j); if (l < 0 || l >= emap[i].length) reportBrokenEnum(chk, i, j, l, emap); if(emap[i][(int)l] < 0) throw new RuntimeException(H2O.SELF.toString() + ": missing enum at col:" + i + ", line: " + j + ", val = " + l + ", chunk=" + chk.getClass().getSimpleName()); chk.set0(j, emap[i][(int)l]); } chk.close(cidx, _fs); } } private void reportBrokenEnum( Chunk chk, int i, int j, long l, int[][] emap ) { Chunk chk2 = chk._chk2; chk._chk2 = null; StringBuilder sb = new StringBuilder("Enum renumber task, column # " + i + ": Found OOB index " + l + " (expected 0 - " + emap[i].length + ", global domain has " + _gDomain[i].length + " levels) pulled from " + chk.getClass().getSimpleName() + "\n"); int k = 0; for(; k < Math.min(5,chk._len); ++k) sb.append("at8[" + (k+chk._start) + "] = " + chk.at80(k) + ", chk2 = " + (chk2 != null?chk2.at80(k):"") + "\n"); k = Math.max(k,j-2); sb.append("...\n"); for(; k < Math.min(chk._len,j+2); ++k) sb.append("at8[" + (k+chk._start) + "] = " + chk.at80(k) + ", chk2 = " + (chk2 != null?chk2.at80(k):"") + "\n"); sb.append("...\n"); k = Math.max(k,chk._len-5); for(; k < chk._len; ++k) sb.append("at8[" + (k+chk._start) + "] = " + chk.at80(k) + ", chk2 = " + (chk2 != null?chk2.at80(k):"") + "\n"); throw new RuntimeException(sb.toString()); } } // -------------------------------------------------------------------------- private static class EnumFetchTask extends MRTask<EnumFetchTask> { private final Key _k; private final int[] _ecols; private final int _homeNode; // node where the computation started, enum from this node MUST be cloned! 
private Enum[] _gEnums; // global enums per column private Enum[][] _lEnums; // local enums per node per column private EnumFetchTask(int homeNode, Key k, int[] ecols){_homeNode = homeNode; _k = k;_ecols = ecols;} @Override public void map(Key key) { _lEnums = new Enum[H2O.CLOUD.size()][]; if(MultiFileParseTask._enums.containsKey(_k)){ _lEnums[H2O.SELF.index()] = _gEnums = MultiFileParseTask._enums.get(_k); // if we are the original node (i.e. there will be no sending over // wire), we have to clone the enums not to share the same object // (causes problems when computing column domain and renumbering maps). if( H2O.SELF.index() == _homeNode ) { _gEnums = _gEnums.clone(); for(int i = 0; i < _gEnums.length; ++i) _gEnums[i] = _gEnums[i].clone(); } MultiFileParseTask._enums.remove(_k); } } @Override public void reduce(EnumFetchTask etk) { if(_gEnums == null) { _gEnums = etk._gEnums; _lEnums = etk._lEnums; } else if (etk._gEnums != null) { for( int i : _ecols ) _gEnums[i].merge(etk._gEnums[i]); for( int i = 0; i < _lEnums.length; ++i ) if( _lEnums[i] == null ) _lEnums[i] = etk._lEnums[i]; else assert etk._lEnums[i] == null; } } } // -------------------------------------------------------------------------- // Run once on all nodes; fill in missing zero chunks private static class SVFTask extends MRTask<SVFTask> { private final Frame _f; private SVFTask( Frame f ) { _f = f; } @Override public void map(Key key) { Vec v0 = _f.anyVec(); for( int i = 0; i < v0.nChunks(); ++i ) { if( !v0.chunkKey(i).home() ) continue; // First find the nrows as the # rows of non-missing chunks; done on // locally-homed chunks only - to keep the data distribution. int nlines = 0; for( Vec vec : _f.vecs() ) { Value val = H2O.get(vec.chunkKey(i)); // Local-get only if( val != null ) { nlines = ((Chunk)val.get())._len; break; } } // Now fill in appropriate-sized zero chunks for( Vec vec : _f.vecs() ) { Key k = vec.chunkKey(i); if( !k.home() ) continue; // Local keys only Value val = H2O.get(k); // Local-get only if( val == null ) // Missing? Fill in w/zero chunk H2O.putIfMatch(k, new Value(k,new C0DChunk(0, nlines)), null); } } } @Override public void reduce( SVFTask drt ) {} } private static Vec getVec(Key key) { Object o = UKV.get(key); return o instanceof Vec ? (ByteVec) o : ((Frame) o).vecs()[0]; } private static String [] genericColumnNames(int ncols){ String [] res = new String[ncols]; for(int i = 0; i < res.length; ++i) res[i] = "C" + String.valueOf(i+1); return res; } // Log information about the dataset we just parsed. private static void logParseResults(ParseDataset2 job, Frame fr) { try { long numRows = fr.anyVec().length(); Log.info("Parse result for " + job.dest() + " (" + Long.toString(numRows) + " rows):"); Vec[] vecArr = fr.vecs(); for( int i = 0; i < vecArr.length; i++ ) { Vec v = vecArr[i]; boolean isCategorical = v.isEnum(); boolean isConstant = (v.min() == v.max()); String CStr = String.format("C%d:", i+1); String typeStr = String.format("%s", (v._isUUID ? "UUID" : (isCategorical ? "categorical" : "numeric"))); String minStr = String.format("min(%f)", v.min()); String maxStr = String.format("max(%f)", v.max()); long numNAs = v.naCnt(); String naStr = (numNAs > 0) ? String.format("na(%d)", numNAs) : ""; String isConstantStr = isConstant ? "constant" : ""; String numLevelsStr = isCategorical ? String.format("numLevels(%d)", v.domain().length) : ""; boolean printLogSeparatorToStdout = false; boolean printColumnToStdout; { // Print information to stdout for this many leading columns. 
final int MAX_HEAD_TO_PRINT_ON_STDOUT = 10; // Print information to stdout for this many trailing columns. final int MAX_TAIL_TO_PRINT_ON_STDOUT = 10; if (vecArr.length <= (MAX_HEAD_TO_PRINT_ON_STDOUT + MAX_TAIL_TO_PRINT_ON_STDOUT)) { // For small numbers of columns, print them all. printColumnToStdout = true; } else if (i < MAX_HEAD_TO_PRINT_ON_STDOUT) { printColumnToStdout = true; } else if (i == MAX_HEAD_TO_PRINT_ON_STDOUT) { printLogSeparatorToStdout = true; printColumnToStdout = false; } else if ((i + MAX_TAIL_TO_PRINT_ON_STDOUT) < vecArr.length) { printColumnToStdout = false; } else { printColumnToStdout = true; } } if (printLogSeparatorToStdout) { System.out.println("Additional column information only sent to log file..."); } if (printColumnToStdout) { // Log to both stdout and log file. Log.info(String.format(" %-8s %15s %20s %20s %15s %11s %16s", CStr, typeStr, minStr, maxStr, naStr, isConstantStr, numLevelsStr)); } else { // Log only to log file. Log.info_no_stdout(String.format(" %-8s %15s %20s %20s %15s %11s %16s", CStr, typeStr, minStr, maxStr, naStr, isConstantStr, numLevelsStr)); } } Log.info(FrameUtils.chunkSummary(fr).toString()); } catch (Exception ignore) {} // Don't fail due to logging issues. Just ignore them. } // -------------------------------------------------------------------------- // Top-level parser driver private static void parse_impl(ParseDataset2 job, Key[] fkeys, CustomParser.ParserSetup setup, boolean delete_on_done) { assert setup._ncols > 0; if( fkeys.length == 0) { job.cancel(); return; } // Remove any previous instance and insert a sentinel (to ensure no one has // been writing to the same keys during our parse)! Vec v = getVec(fkeys[0]); MultiFileParseTask mfpt = job._mfpt = new MultiFileParseTask(v.group(),setup,job._progress); mfpt.invoke(fkeys); EnumUpdateTask eut = null; // Calculate enum domain int n = 0; int [] ecols = new int[mfpt._dout._nCols]; for( int i = 0; i < ecols.length; ++i ) if(mfpt._dout._vecs[i].shouldBeEnum()) ecols[n++] = i; ecols = Arrays.copyOf(ecols, n); if( ecols.length > 0 ) { EnumFetchTask eft = new EnumFetchTask(H2O.SELF.index(), mfpt._eKey, ecols).invokeOnAllNodes(); Enum[] enums = eft._gEnums; ValueString[][] ds = new ValueString[ecols.length][]; int j = 0; for( int i : ecols ) mfpt._dout._vecs[i]._domain = ValueString.toString(ds[j++] = enums[i].computeColumnDomain()); eut = new EnumUpdateTask(ds, eft._lEnums, mfpt._chunk2Enum, mfpt._eKey, ecols); } Frame fr = new Frame(job.dest(),setup._columnNames != null?setup._columnNames:genericColumnNames(mfpt._dout._nCols),mfpt._dout.closeVecs()); // SVMLight is sparse format, there may be missing chunks with all 0s, fill them in new SVFTask(fr).invokeOnAllNodes(); // Update enums to the globally agreed numbering if( eut != null ) { Vec[] evecs = new Vec[ecols.length]; for( int i = 0; i < evecs.length; ++i ) evecs[i] = fr.vecs()[ecols[i]]; eut.doAll(evecs); } logParseResults(job, fr); // Release the frame for overwriting fr.unlock(job.self()); // Remove CSV files from H2O memory if( delete_on_done ) for( Key k : fkeys ) Lockable.delete(k,job.self()); else for( Key k : fkeys ) { Lockable l = UKV.get(k); l.unlock(job.self()); } job.remove(); } public static class ParseProgressMonitor extends Iced implements Job.ProgressMonitor { final Key _progressKey; private long _progress; public ParseProgressMonitor(Key pKey){_progressKey = pKey;} @Override public void update(long n) { ParseDataset2.onProgress(n, _progressKey); _progress += n; } public long progress() { return _progress; } } 
// -------------------------------------------------------------------------- // We want to do a standard MRTask with a collection of file-keys (so the // files are parsed in parallel across the cluster), but we want to throttle // the parallelism on each node. private static class MultiFileParseTask extends MRTask<MultiFileParseTask> { private final CustomParser.ParserSetup _setup; // The expected column layout private final VectorGroup _vg; // vector group of the target dataset private final int _vecIdStart; // Start of available vector keys // Shared against all concurrent unrelated parses, a map to the node-local // Enum lists for each concurrent parse. private static NonBlockingHashMap<Key, Enum[]> _enums = new NonBlockingHashMap<Key, Enum[]>(); // The Key used to sort out *this* parse's Enum[] private final Key _eKey = Key.make(); private final Key _progress; // Mapping from Chunk# to cluster-node-number holding the enum mapping. // It is either self for all the non-parallel parses, or the Chunk-home for parallel parses. private int[] _chunk2Enum; // All column data for this one file // Vec _vecs[]; // A mapping of Key+ByteVec to rolling total Chunk counts. private IcedHashMap<Key,IcedInt> _fileChunkOffsets; // OUTPUT fields: FVecDataOut _dout; public String _parserr; // NULL if parse is OK, else an error string MultiFileParseTask(VectorGroup vg, CustomParser.ParserSetup setup, Key progress ) { _vg = vg; _setup = setup; _progress = progress; _vecIdStart = _vg.reserveKeys(setup._pType == ParserType.SVMLight ? 100000000 : setup._ncols); _runSingleThreaded = true; } @Override public MultiFileParseTask dfork(Key... keys){ _fileChunkOffsets = new IcedHashMap<Key, IcedInt>(); int len = 0; for( Key k:keys) { _fileChunkOffsets.put(k,new IcedInt(len)); len += getVec(k).nChunks(); } // Mapping from Chunk# to cluster-node-number _chunk2Enum = MemoryManager.malloc4(len); Arrays.fill(_chunk2Enum, -1); return super.dfork(keys); } // Called once per file @Override public void map( Key key ) { // Get parser setup info for this chunk ByteVec vec = (ByteVec) getVec(key); byte [] bits = vec.chunkForChunkIdx(0)._mem; if(bits == null || bits.length == 0){ assert false:"encountered empty file during multifile parse? should've been filtered already"; return; // should not really get here } final int chunkStartIdx = _fileChunkOffsets.get(key)._val; Compression cpr = Utils.guessCompressionMethod(bits); CustomParser.ParserSetup localSetup = GuessSetup.guessSetup(Utils.unzipBytes(bits,cpr), _setup,false)._setup; // Local setup: nearly the same as the global all-files setup, but maybe // has the header-flag changed. if(!localSetup.isCompatible(_setup)) { _parserr = "Conflicting file layouts, expecting: " + _setup + " but found "+localSetup; return; } // Allow dup headers, if they are equals-ignoring-case boolean has_hdr = _setup._header && localSetup._header; if( has_hdr ) { // Both have headers? for( int i = 0; i < localSetup._columnNames.length; ++i ) has_hdr = localSetup._columnNames[i].equalsIgnoreCase(_setup._columnNames[i]); if( !has_hdr ) // Headers not compatible? 
// Then treat as no-headers, i.e., parse it as a normal row localSetup = new CustomParser.ParserSetup(ParserType.CSV,localSetup._separator, false); } // Parse the file try { switch( cpr ) { case NONE: if( localSetup._pType.parallelParseSupported ) { DParse dp = new DParse(_vg, localSetup, _vecIdStart, chunkStartIdx,this, vec.nChunks()); addToPendingCount(1); dp._removeKey = vec._key; dp.exec(new Frame(vec)); for( int i = 0; i < vec.nChunks(); ++i ) _chunk2Enum[chunkStartIdx + i] = vec.chunkKey(i).home_node().index(); } else { ParseProgressMonitor pmon = new ParseProgressMonitor(_progress); _dout = streamParse(vec.openStream(pmon), localSetup, _vecIdStart, chunkStartIdx, pmon); for(int i = 0; i < vec.nChunks(); ++i) _chunk2Enum[chunkStartIdx + i] = H2O.SELF.index(); } break; case ZIP: { // Zipped file; no parallel decompression; ParseProgressMonitor pmon = new ParseProgressMonitor(_progress); ZipInputStream zis = new ZipInputStream(vec.openStream(pmon)); ZipEntry ze = zis.getNextEntry(); // Get the *FIRST* entry // There is at least one entry in zip file and it is not a directory. if( ze != null && !ze.isDirectory() ) _dout = streamParse(zis,localSetup, _vecIdStart, chunkStartIdx, pmon); else zis.close(); // Confused: which zipped file to decompress // set this node as the one which rpocessed all the chunks for(int i = 0; i < vec.nChunks(); ++i) _chunk2Enum[chunkStartIdx + i] = H2O.SELF.index(); break; } case GZIP: // Zipped file; no parallel decompression; ParseProgressMonitor pmon = new ParseProgressMonitor(_progress); _dout = streamParse(new GZIPInputStream(vec.openStream(pmon)),localSetup,_vecIdStart, chunkStartIdx, pmon); // set this node as the one which processed all the chunks for(int i = 0; i < vec.nChunks(); ++i) _chunk2Enum[chunkStartIdx + i] = H2O.SELF.index(); break; } } catch( IOException ioe ) { throw new RuntimeException(ioe); } } // Reduce: combine errors from across files. // Roll-up other meta data @Override public void reduce( MultiFileParseTask mfpt ) { assert this != mfpt; // Combine parse errors from across files if( _parserr == null ) _parserr = mfpt._parserr; else if( mfpt._parserr != null ) _parserr += mfpt._parserr; // Collect & combine columns across files if( _dout == null ) _dout = mfpt._dout; else _dout.reduce(mfpt._dout); if( _chunk2Enum == null ) _chunk2Enum = mfpt._chunk2Enum; else if(_chunk2Enum != mfpt._chunk2Enum) { // we're sharing global array! for( int i = 0; i < _chunk2Enum.length; ++i ) { if( _chunk2Enum[i] == -1 ) _chunk2Enum[i] = mfpt._chunk2Enum[i]; else assert mfpt._chunk2Enum[i] == -1 : Arrays.toString(_chunk2Enum) + " :: " + Arrays.toString(mfpt._chunk2Enum); } } } private static Enum [] enums(Key eKey, int ncols){ if(!_enums.containsKey(eKey)){ Enum [] enums = new Enum[ncols]; for(int i = 0; i < enums.length; ++i)enums[i] = new Enum(); _enums.putIfAbsent(eKey, enums); } return _enums.get(eKey); } // ------------------------------------------------------------------------ // Zipped file; no parallel decompression; decompress into local chunks, // parse local chunks; distribute chunks later. 
private FVecDataOut streamParse( final InputStream is, final CustomParser.ParserSetup localSetup, int vecIdStart, int chunkStartIdx, ParseProgressMonitor pmon) throws IOException { // All output into a fresh pile of NewChunks, one per column FVecDataOut dout = new FVecDataOut(_vg, chunkStartIdx, localSetup._ncols, vecIdStart, enums(_eKey,localSetup._ncols)); CustomParser p = localSetup.parser(); // assume 2x inflation rate //if( localSetup._pType.parallelParseSupported ) if( localSetup._pType.parallelParseSupported ) try{p.streamParse(is, dout, pmon);}catch(IOException e){throw new RuntimeException(e);} else try{p.streamParse(is, dout);}catch(Exception e){throw new RuntimeException(e);} // Parse all internal "chunks", until we drain the zip-stream dry. Not // real chunks, just flipping between 32K buffers. Fills up the single // very large NewChunk. dout.close(_fs); return dout; } // ------------------------------------------------------------------------ private static class DParse extends MRTask2<DParse> { private final CustomParser.ParserSetup _setup; private final int _vecIdStart; private final int _startChunkIdx; // for multifile parse, offset of the first chunk in the final dataset private final VectorGroup _vg; private transient AppendableVec [] _appendables; private FVecDataOut _dout; private final Key _eKey; final Key _progress; Key _removeKey; private transient final MultiFileParseTask _outerMFPT; final int _nchunks; private transient NonBlockingSetInt _visited; DParse(VectorGroup vg, CustomParser.ParserSetup setup, int vecIdstart, int startChunkIdx, MultiFileParseTask mfpt, int nchunks) { super(mfpt); _vg = vg; _setup = setup; _vecIdStart = vecIdstart; _startChunkIdx = startChunkIdx; _outerMFPT = mfpt; _eKey = mfpt._eKey; _progress = mfpt._progress; _nchunks = nchunks; } @Override public void setupLocal(){ super.setupLocal(); if(_setup._pType == ParserType.CSV){ _appendables = new AppendableVec[_setup._ncols]; for(int i = 0; i < _setup._ncols; ++i) _appendables[i] = new AppendableVec(_vg.vecKey(_vecIdStart + i)); } _visited = new NonBlockingSetInt(); } @Override public void map( Chunk in) { Enum [] enums = enums(_eKey,_setup._ncols); // Break out the input & output vectors before the parse loop // The Parser FVecDataIn din = new FVecDataIn(in); FVecDataOut dout; CustomParser p; switch(_setup._pType) { case CSV: p = new CsvParser(_setup); dout = new FVecDataOut(_vg,_startChunkIdx + in.cidx(),_setup._ncols,_vecIdStart,enums,_appendables); break; case SVMLight: p = new SVMLightParser(_setup); dout = new SVMLightFVecDataOut(_vg, _startChunkIdx + in.cidx(), _setup._ncols, _vecIdStart, enums); break; default: throw H2O.unimpl(); } p.parallelParse(in.cidx(),din,dout); (_dout = dout).close(_fs); onProgress(in._len, _progress); // Record bytes parsed // remove parsed data right away (each chunk is used by 2) final int cidx = in.cidx(); if(!_visited.add(cidx)) { Value v = H2O.get(in._vec.chunkKey(cidx)); if(v != null && v.isPersisted()) { v.freePOJO(); v.freeMem(); } } if(!_visited.add(cidx+1)) { Value v = H2O.get(in._vec.chunkKey(cidx+1)); if(v != null && v.isPersisted()) { v.freePOJO(); v.freeMem(); } } } @Override public void reduce(DParse dp) { if(_dout == null)_dout = dp._dout; else _dout.reduce(dp._dout); } @Override public void postGlobal() { super.postGlobal(); _outerMFPT._dout = _dout; _dout = null; // Reclaim GC eagerly if(_removeKey != null) UKV.remove(_removeKey); } } // Find & remove all partially built output chunks & vecs private Futures onExceptionCleanup(Futures fs) { int 
nchunks = _chunk2Enum.length; int ncols = _setup._ncols; for( int i = 0; i < ncols; ++i ) { Key vkey = _vg.vecKey(_vecIdStart + i); DKV.remove(vkey,fs); for( int c = 0; c < nchunks; ++c ) DKV.remove(Vec.chunkKey(vkey,c),fs); } cancel(true); return fs; } } // ------------------------------------------------------------------------ /** Parsed data output specialized for fluid vecs. * @author tomasnykodym */ static class FVecDataOut extends Iced implements CustomParser.StreamDataOut { protected transient NewChunk [] _nvs; protected AppendableVec []_vecs; private transient final Enum [] _enums; protected byte [] _ctypes; long _nLines; int _nCols; int _col = -1; final int _cidx; final int _vecIdStart; boolean _closedVecs = false; private final VectorGroup _vg; static final private byte UCOL = 0; // unknown col type static final private byte NCOL = 1; // numeric col type static final private byte ECOL = 2; // enum col type static final private byte TCOL = 3; // time col typ static final private byte ICOL = 4; // UUID col typ private static AppendableVec[] newAppendables(int n, VectorGroup vg, int vecIdStart){ AppendableVec [] apps = new AppendableVec[n]; for(int i = 0; i < n; ++i) apps[i] = new AppendableVec(vg.vecKey(vecIdStart + i)); return apps; } public FVecDataOut(VectorGroup vg, int cidx, int ncols, int vecIdStart, Enum[] enums){ this(vg,cidx,ncols,vecIdStart,enums,newAppendables(ncols,vg,vecIdStart)); } public FVecDataOut(VectorGroup vg, int cidx, int ncols, int vecIdStart, Enum [] enums, AppendableVec [] appendables){ _vecs = appendables; _nvs = new NewChunk[ncols]; _enums = enums; _nCols = ncols; _cidx = cidx; _vg = vg; _vecIdStart = vecIdStart; _ctypes = MemoryManager.malloc1(ncols); for(int i = 0; i < ncols; ++i) _nvs[i] = (NewChunk)_vecs[i].chunkForChunkIdx(_cidx); } @Override public FVecDataOut reduce(StreamDataOut sdout){ FVecDataOut dout = (FVecDataOut)sdout; if( dout!=null && _vecs != dout._vecs){ _nCols = Math.max(_nCols,dout._nCols); if(dout._vecs.length > _vecs.length){ AppendableVec [] v = _vecs; _vecs = dout._vecs; dout._vecs = v; } for(int i = 0; i < dout._vecs.length; ++i) _vecs[i].reduce(dout._vecs[i]); } return this; } @Override public FVecDataOut close(){ Futures fs = new Futures(); close(fs); fs.blockForPending(); return this; } public void check(){ if(_nvs != null) for(NewChunk nv:_nvs) assert (nv._len == _nLines):"unexpected number of lines in NewChunk, got " + nv._len + ", but expected " + _nLines; } @Override public FVecDataOut close(Futures fs){ if( _nvs == null ) return this; // Might call close twice for(NewChunk nv:_nvs) nv.close(_cidx, fs); _nvs = null; // Free for GC return this; } @Override public FVecDataOut nextChunk(){ return new FVecDataOut(_vg, _cidx+1, _nCols, _vecIdStart, _enums); } private Vec [] closeVecs(){ Futures fs = new Futures(); _closedVecs = true; Vec [] res = new Vec[_vecs.length]; for(int i = 0; i < _vecs[0]._espc.length; ++i){ int j = 0; while(j < _vecs.length && _vecs[j]._espc[i] == 0)++j; if(j == _vecs.length)break; final long clines = _vecs[j]._espc[i]; for(AppendableVec v:_vecs) { if(v._espc[i] == 0)v._espc[i] = clines; else assert v._espc[i] == clines:"incompatible number of lines: " + v._espc[i] + " != " + clines; } } for(int i = 0; i < _vecs.length; ++i) res[i] = _vecs[i].close(fs); _vecs = null; // Free for GC fs.blockForPending(); return res; } @Override public void newLine() { if(_col >= 0){ ++_nLines; for(int i = _col+1; i < _nCols; ++i) addInvalidCol(i); } _col = -1; } @Override public void addNumCol(int colIdx, long number, 
int exp) { if( colIdx < _nCols ) { _nvs[_col = colIdx].addNum(number, exp); if(_ctypes[colIdx] == UCOL ) _ctypes[colIdx] = NCOL; } } @Override public final void addInvalidCol(int colIdx) { if(colIdx < _nCols) _nvs[_col = colIdx].addNA(); } @Override public final boolean isString(int colIdx) { return false; } @Override public final void addStrCol(int colIdx, ValueString str) { if(colIdx < _nvs.length){ if(_ctypes[colIdx] == NCOL){ // support enforced types addInvalidCol(colIdx); return; } if(_ctypes[colIdx] == UCOL && ParseTime.attemptTimeParse(str) > 0) _ctypes[colIdx] = TCOL; if( _ctypes[colIdx] == UCOL ) { // Attempt UUID parse int old = str.get_off(); ParseTime.attemptUUIDParse0(str); ParseTime.attemptUUIDParse1(str); if( str.get_off() != -1 ) _ctypes[colIdx] = ICOL; str.setOff(old); } if( _ctypes[colIdx] == TCOL ) { long l = ParseTime.attemptTimeParse(str); if( l == Long.MIN_VALUE ) addInvalidCol(colIdx); else { int time_pat = ParseTime.decodePat(l); // Get time pattern l = ParseTime.decodeTime(l); // Get time addNumCol(colIdx, l, 0); // Record time in msec _nvs[_col]._timCnt[time_pat]++; // Count histo of time parse patterns } } else if( _ctypes[colIdx] == ICOL ) { // UUID column? Only allow UUID parses long lo = ParseTime.attemptUUIDParse0(str); long hi = ParseTime.attemptUUIDParse1(str); if( str.get_off() == -1 ) { lo = C16Chunk._LO_NA; hi = C16Chunk._HI_NA; } if( colIdx < _nCols ) _nvs[_col = colIdx].addUUID(lo, hi); } else if(!_enums[_col = colIdx].isKilled()) { // store enum id into exponent, so that it will be interpreted as NA if compressing as numcol. int id = _enums[colIdx].addKey(str); if(_ctypes[colIdx] == UCOL && id > 1) _ctypes[colIdx] = ECOL; _nvs[colIdx].addEnum(id); } else // turn the column into NAs by adding value overflowing Enum.MAX_SIZE _nvs[colIdx].addEnum(Integer.MAX_VALUE); } //else System.err.println("additional column (" + colIdx + ":" + str + ") on line " + linenum()); } /** Adds double value to the column. */ @Override public void addNumCol(int colIdx, double value) { if (Double.isNaN(value)) { addInvalidCol(colIdx); } else { double d= value; int exp = 0; long number = (long)d; while (number != d) { d = d * 10; --exp; number = (long)d; } addNumCol(colIdx, number, exp); } } @Override public void setColumnNames(String [] names){} @Override public final void rollbackLine() {} @Override public void invalidLine(String err) { newLine(); } @Override public void invalidValue(int line, int col) {} } // ------------------------------------------------------------------------ /** Parser data in taking data from fluid vec chunk. * @author tomasnykodym */ private static class FVecDataIn implements CustomParser.DataIn { final Vec _vec; Chunk _chk; int _idx; final long _firstLine; public FVecDataIn(Chunk chk){ _chk = chk; _idx = _chk.cidx(); _firstLine = _chk._start; _vec = chk._vec; } @Override public byte[] getChunkData(int cidx) { if(cidx != _idx) _chk = cidx < _vec.nChunks()?_vec.chunkForChunkIdx(_idx = cidx):null; return (_chk == null)?null:_chk._mem; } @Override public int getChunkDataStart(int cidx) { return -1; } @Override public void setChunkDataStart(int cidx, int offset) { } } }
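A note on the enum renumbering above: EnumUpdateTask builds, per node, a local-id -> global-id map and rewrites each chunk in place. The following is a minimal standalone sketch of that mapping idea only (plain Java with illustrative domains; it does not use the H2O Enum/ValueString API):

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

class EnumRemapSketch {
  // Build emap so that emap[localId] == globalId, mirroring EnumUpdateTask.emap();
  // ids with no match stay -1 and would be reported as missing.
  static int[] buildEmap(String[] localDomain, String[] globalDomain) {
    Map<String,Integer> globalIdx = new HashMap<String,Integer>();
    for (int j = 0; j < globalDomain.length; ++j) globalIdx.put(globalDomain[j], j);
    int[] emap = new int[localDomain.length];
    Arrays.fill(emap, -1);
    for (int i = 0; i < localDomain.length; ++i) {
      Integer g = globalIdx.get(localDomain[i]);
      if (g != null) emap[i] = g;
    }
    return emap;
  }

  public static void main(String[] args) {
    String[] local  = {"dog", "cat", "mouse"};   // order one node happened to see the levels
    String[] global = {"cat", "dog", "mouse"};   // globally agreed column domain
    System.out.println(Arrays.toString(buildEmap(local, global)));  // [1, 0, 2]
  }
}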
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/ParseTime.java
package water.fvec; import org.joda.time.DateTime; import org.joda.time.chrono.ISOChronology; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; import org.joda.time.format.DateTimeFormatterBuilder; import water.parser.ValueString; import water.util.Log; public abstract class ParseTime { // Deduce if we are looking at a Date/Time value, or not. // If so, return time as msec since Jan 1, 1970 or Long.MIN_VALUE. // I tried java.util.SimpleDateFormat, but it just throws too many // exceptions, including ParseException, NumberFormatException, and // ArrayIndexOutOfBoundsException... and the Piece de resistance: a // ClassCastException deep in the SimpleDateFormat code: // "sun.util.calendar.Gregorian$Date cannot be cast to sun.util.calendar.JulianCalendar$Date" public static int digit( int x, int c ) { if( x < 0 || c < '0' || c > '9' ) return -1; return x*10+(c-'0'); } // So I just brutally parse "dd-MMM-yy". public static final byte MMS[][][] = new byte[][][] { {"jan".getBytes(),"january" .getBytes()}, {"feb".getBytes(),"february" .getBytes()}, {"mar".getBytes(),"march" .getBytes()}, {"apr".getBytes(),"april" .getBytes()}, {"may".getBytes(),"may" .getBytes()}, {"jun".getBytes(),"june" .getBytes()}, {"jul".getBytes(),"july" .getBytes()}, {"aug".getBytes(),"august" .getBytes()}, {"sep".getBytes(),"september".getBytes()}, {"oct".getBytes(),"october" .getBytes()}, {"nov".getBytes(),"november" .getBytes()}, {"dec".getBytes(),"december" .getBytes()} }; // Time parse patterns public static final String TIME_PARSE[] = { "yyyy-MM-dd", "yyyy-MM-dd HH:mm:ss.SSS", "dd-MMM-yy" }; // Returns: // - not a time parse: Long.MIN_VALUE // - time parse via pattern X: time in msecs since Jan 1, 1970, shifted left by 1 byte, OR'd with X public static long encodeTimePat(long tcode, int tpat ) { return (tcode<<8)|tpat; } public static long decodeTime(long tcode ) { return tcode>>8; } public static int decodePat (long tcode ) { return ((int)tcode&0xFF); } public static long attemptTimeParse( ValueString str ) { try { long t0 = attemptTimeParse_01(str); // "yyyy-MM-dd" and that plus " HH:mm:ss.SSS" if( t0 != Long.MIN_VALUE ) return t0; long t2 = attemptTimeParse_2 (str); // "dd-MMM-yy" if( t2 != Long.MIN_VALUE ) return t2; } catch( org.joda.time.IllegalFieldValueException ignore ) { } return Long.MIN_VALUE; } // So I just brutally parse "yyyy-MM-dd HH:mm:ss.SSS" private static long attemptTimeParse_01( ValueString str ) { final byte[] buf = str.get_buf(); int i=str.get_off(); final int end = i+str.get_length(); while( i < end && buf[i] == ' ' ) i++; if ( i < end && buf[i] == '"' ) i++; if( (end-i) != 10 && (end-i) < 19 ) return Long.MIN_VALUE; int yy=0, MM=0, dd=0, HH=0, mm=0, ss=0, SS=0; yy = digit(yy,buf[i++]); yy = digit(yy,buf[i++]); yy = digit(yy,buf[i++]); yy = digit(yy,buf[i++]); if( yy < 1970 ) return Long.MIN_VALUE; if( buf[i++] != '-' ) return Long.MIN_VALUE; MM = digit(MM,buf[i++]); MM = digit(MM,buf[i++]); if( MM < 1 || MM > 12 ) return Long.MIN_VALUE; if( buf[i++] != '-' ) return Long.MIN_VALUE; dd = digit(dd,buf[i++]); dd = digit(dd,buf[i++]); if( dd < 1 || dd > 31 ) return Long.MIN_VALUE; if( i==end ) return encodeTimePat(new DateTime(yy,MM,dd,0,0,0).getMillis(),0); if( buf[i++] != ' ' ) return Long.MIN_VALUE; HH = digit(HH,buf[i++]); HH = digit(HH,buf[i++]); if( HH < 0 || HH > 23 ) return Long.MIN_VALUE; if( buf[i++] != ':' ) return Long.MIN_VALUE; mm = digit(mm,buf[i++]); mm = digit(mm,buf[i++]); if( mm < 0 || mm > 59 ) return Long.MIN_VALUE; if( buf[i++] != ':' 
) return Long.MIN_VALUE; ss = digit(ss,buf[i++]); ss = digit(ss,buf[i++]); if( ss < 0 || ss > 59 ) return Long.MIN_VALUE; if( i<end && buf[i] == '.' ) { i++; if( i<end ) SS = digit(SS,buf[i++]); if( i<end ) SS = digit(SS,buf[i++]); if( i<end ) SS = digit(SS,buf[i++]); if( SS < 0 || SS > 999 ) return Long.MIN_VALUE; } if( i<end && buf[i] == '"' ) i++; if( i<end ) return Long.MIN_VALUE; return encodeTimePat(new DateTime(yy,MM,dd,HH,mm,ss).getMillis()+SS,1); } // DD-MMM-YY private static long attemptTimeParse_2( ValueString str ) { final byte[] buf = str.get_buf(); int i=str.get_off(); final int end = i+str.get_length(); while( i < end && buf[i] == ' ' ) i++; if ( i < end && buf[i] == '"' ) i++; if( (end-i) < 7 ) return Long.MIN_VALUE; // Shortest date: d-mm-yy, only 7 chars int yy=0, MM=0, dd=0; dd = digit(dd,buf[i++]); if( buf[i] != '-' ) dd = digit(dd,buf[i++]); if( dd < 1 || dd > 31 ) return Long.MIN_VALUE; if( buf[i++] != '-' ) return Long.MIN_VALUE; byte[]mm=null; OUTER: for( ; MM<MMS.length; MM++ ) { byte[][] mms = MMS[MM]; INNER: for( int k=0; k<mms.length; k++ ) { mm = mms[k]; if( mm == null ) continue; if( i+mm.length >= end ) continue INNER; for( int j=0; j<mm.length; j++ ) if( mm[j] != Character.toLowerCase(buf[i+j]) ) continue INNER; if( buf[i+mm.length] == '-' ) break OUTER; } } if( MM == MMS.length ) return Long.MIN_VALUE; // No matching month i += mm.length; // Skip month bytes MM++; // 1-based month if( buf[i++] != '-' ) return Long.MIN_VALUE; yy = digit(yy,buf[i++]); // 2-digit year if( i >= buf.length ) return Long.MIN_VALUE; yy = digit(yy,buf[i++]); if( end-i>=2 && buf[i] != '"' ) { if( i >= buf.length+1 ) return Long.MIN_VALUE; yy = digit(yy,buf[i++]); // 4-digit year yy = digit(yy,buf[i++]); } else { yy += 2000; // Y2K bug } if( i<end && buf[i] == '"' ) i++; if( i<end ) return Long.MIN_VALUE; return encodeTimePat(new DateTime(yy,MM,dd,0,0,0).getMillis(),2); } // Parse XXXXXXXX-XXXX-XXXX and return an arbitrary long, or set str.off==-1 // (and return Long.MIN_VALUE but this is a valid long return value). public static long attemptUUIDParse0( ValueString str ) { final byte[] buf = str.get_buf(); int i=str.get_off(); if( i+36>buf.length ) return badUUID(str); long lo=0; lo = get2(lo,buf,(i+=2)-2); lo = get2(lo,buf,(i+=2)-2); lo = get2(lo,buf,(i+=2)-2); lo = get2(lo,buf,(i+=2)-2); if( buf[i++]!='-' ) return badUUID(str); lo = get2(lo,buf,(i+=2)-2); lo = get2(lo,buf,(i+=2)-2); if( buf[i++]!='-' ) return badUUID(str); lo = get2(lo,buf,(i+=2)-2); return attemptUUIDParseLast(str,lo,buf,i); } // Parse -XXXX-XXXXXXXXXXXX and return an arbitrary long, or set str.off==-1 // (and return Long.MIN_VALUE but this is a valid long return value). public static long attemptUUIDParse1( ValueString str ) { final byte[] buf = str.get_buf(); int i=str.get_off(); if( i== -1 ) return badUUID(str); long hi=0; if( buf[i++]!='-' ) return badUUID(str); hi = get2(hi,buf,(i+=2)-2); hi = get2(hi,buf,(i+=2)-2); if( buf[i++]!='-' ) return badUUID(str); hi = get2(hi,buf,(i+=2)-2); hi = get2(hi,buf,(i+=2)-2); hi = get2(hi,buf,(i+=2)-2); hi = get2(hi,buf,(i+=2)-2); hi = get2(hi,buf,(i+=2)-2); return attemptUUIDParseLast(str,hi,buf,i); } private static long attemptUUIDParseLast( ValueString str, long lo, byte[] buf, int i ) { // Can never equal MIN_VALUE since only parsed 14 of 16 digits, unless // failed parse already. if( lo == Long.MIN_VALUE ) return badUUID(str); // If the last 2 digits are 0x8000 and the first 14 are all 0's then might // legitimately parse MIN_VALUE, need to check for it special. 
str.setOff(i+2); // Mark as parsed if( lo == 0x80000000000000L && buf[i]=='0' && buf[i+1]=='0' ) return Long.MIN_VALUE; // Valid MIN_VALUE parse // First 14 digits are a random scramble; will never equal MIN_VALUE result // unless we have a failed parse in the last 2 digits lo = get2(lo,buf,i); return (lo == Long.MIN_VALUE || // broken UUID already, OR // too many valid UUID digits (i+2< buf.length && hdigit(0,buf[i+2]) != Long.MIN_VALUE)) ? badUUID(str) : lo; } private static long get2( long x, byte[] buf, int i ) { if( x == Long.MIN_VALUE ) return x; x = hdigit(x,buf[i++]); x = hdigit(x,buf[i++]); return x; } private static long hdigit( long x, byte b ) { if( x == Long.MIN_VALUE ) return Long.MIN_VALUE; else if( b >= '0' && b <= '9' ) return (x<<4)+b-'0'; else if( b >= 'A' && b <= 'F' ) return (x<<4)+b-'A'+10; else if( b >= 'a' && b <= 'f' ) return (x<<4)+b-'a'+10; else return Long.MIN_VALUE; } public static long badUUID( ValueString str ) { str.setOff(-1); return Long.MIN_VALUE; } /** * Factory to create a formatter from a strptime pattern string. * This models the commonly supported features of strftime from POSIX * (where it can). * <p> * The format may contain locale specific output, and this will change as * you change the locale of the formatter. * Call DateTimeFormatter.withLocale(Locale) to switch the locale. * For example: * <pre> * DateTimeFormat.forPattern(pattern).withLocale(Locale.FRANCE).print(dt); * </pre> * * @param pattern pattern specification * @return the formatter * @throws IllegalArgumentException if the pattern is invalid */ public static DateTimeFormatter forStrptimePattern(String pattern) { if (pattern == null || pattern.length() == 0) throw new IllegalArgumentException("Empty date time pattern specification"); DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); parseToBuilder(builder, pattern); DateTimeFormatter formatter = builder.toFormatter(); return formatter; } //----------------------------------------------------------------------- /** * Parses the given pattern and appends the rules to the given * DateTimeFormatterBuilder. See strptime man page for valid patterns. 
* * @param pattern pattern specification * @throws IllegalArgumentException if the pattern is invalid */ private static void parseToBuilder(DateTimeFormatterBuilder builder, String pattern) { int length = pattern.length(); int[] indexRef = new int[1]; for (int i=0; i<length; i++) { indexRef[0] = i; String token = parseToken(pattern, indexRef); i = indexRef[0]; int tokenLen = token.length(); if (tokenLen == 0) { break; } char c = token.charAt(0); if (c == '%' && token.charAt(1) != '%') { c = token.charAt(1); switch(c) { case 'a': builder.appendDayOfWeekShortText(); break; case 'A': builder.appendDayOfWeekText(); break; case 'b': case 'h': builder.appendMonthOfYearShortText(); break; case 'B': builder.appendMonthOfYearText(); break; case 'c': builder.appendDayOfWeekShortText(); builder.appendLiteral(' '); builder.appendMonthOfYearShortText(); builder.appendLiteral(' '); builder.appendDayOfMonth(2); builder.appendLiteral(' '); builder.appendHourOfDay(2); builder.appendLiteral(':'); builder.appendMinuteOfHour(2); builder.appendLiteral(':'); builder.appendSecondOfMinute(2); builder.appendLiteral(' '); builder.appendYear(4,4); break; case 'C': builder.appendCenturyOfEra(1,2); break; case 'd': builder.appendDayOfMonth(2); break; case 'D': builder.appendMonthOfYear(2); builder.appendLiteral('/'); builder.appendDayOfMonth(2); builder.appendLiteral('/'); builder.appendTwoDigitYear(2019); break; case 'e': builder.appendOptional(DateTimeFormat.forPattern("' '").getParser()); builder.appendDayOfMonth(2); break; case 'F': builder.appendYear(4,4); builder.appendLiteral('-'); builder.appendMonthOfYear(2); builder.appendLiteral('-'); builder.appendDayOfMonth(2); break; case 'g': case 'G': break; //for output only, accepted and ignored for input case 'H': builder.appendHourOfDay(2); break; case 'I': builder.appendClockhourOfHalfday(2); break; case 'j': builder.appendDayOfYear(3); break; case 'k': builder.appendOptional(DateTimeFormat.forPattern("' '").getParser()); builder.appendHourOfDay(2); break; case 'l': builder.appendOptional(DateTimeFormat.forPattern("' '").getParser()); builder.appendClockhourOfHalfday(2); break; case 'm': builder.appendMonthOfYear(2); break; case 'M': builder.appendMinuteOfHour(2); break; case 'n': break; case 'p': builder.appendHalfdayOfDayText(); break; case 'r': builder.appendClockhourOfHalfday(2); builder.appendLiteral(':'); builder.appendMinuteOfHour(2); builder.appendLiteral(':'); builder.appendSecondOfMinute(2); builder.appendLiteral(' '); builder.appendHalfdayOfDayText(); break; case 'R': builder.appendHourOfDay(2); builder.appendLiteral(':'); builder.appendMinuteOfHour(2); break; case 'S': builder.appendSecondOfMinute(2); break; case 't': break; case 'T': builder.appendHourOfDay(2); builder.appendLiteral(':'); builder.appendMinuteOfHour(2); builder.appendLiteral(':'); builder.appendSecondOfMinute(2); break; /* case 'U': //FIXME Joda does not support US week start (Sun), this will be wrong builder.appendWeekOfYear(2); break; case 'u': builder.appendDayOfWeek(1); break;*/ case 'V': break; //accepted and ignored /* case 'w': //FIXME Joda does not support US week start (Sun), this will be wrong builder.appendDayOfWeek(1); break; case 'W': builder.appendWeekOfYear(2); break;*/ case 'x': builder.appendTwoDigitYear(2019); builder.appendLiteral('/'); builder.appendMonthOfYear(2); builder.appendLiteral('/'); builder.appendDayOfMonth(2); break; /* case 'X': //Results differ between OSX and Linux builder.appendHourOfDay(2); builder.appendLiteral(':'); builder.appendMinuteOfHour(2); 
builder.appendLiteral(':'); builder.appendSecondOfMinute(2); break;*/ case 'y': //POSIX 2004 & 2008 says 69-99 -> 1900s, 00-68 -> 2000s builder.appendTwoDigitYear(2019); break; case 'Y': builder.appendYear(4,4); break; case 'z': builder.appendTimeZoneOffset(null, "z", false, 2, 2); break; case 'Z': break; //for output only, accepted and ignored for input default: // No match, ignore builder.appendLiteral('\''); builder.appendLiteral(token); Log.warn(token + "is not acceptted as a parse token, treating as a literal"); } } else { if (c == '\'') { String sub = token.substring(1); if (sub.length() > 0) { // Create copy of sub since otherwise the temporary quoted // string would still be referenced internally. builder.appendLiteral(new String(sub)); } } else throw new IllegalArgumentException("Unexpected token encountered parsing format string:" + c); } } } /** * Parses an individual token. * * @param pattern the pattern string * @param indexRef a single element array, where the input is the start * location and the output is the location after parsing the token * @return the parsed token */ private static String parseToken(String pattern, int[] indexRef) { StringBuilder buf = new StringBuilder(); int i = indexRef[0]; int length = pattern.length(); char c = pattern.charAt(i); if (c == '%' && i + 1 < length && pattern.charAt(i+1) != '%') { //Grab pattern tokens c = pattern.charAt(++i); //0 is ignored for input, and this ignores alternative religious eras if ((c == '0' || c == 'E') && i + 1 >= length) c = pattern.charAt(++i); buf.append('%'); buf.append(c); } else { // Grab all else as text buf.append('\''); // mark literals with ' in first place buf.append(c); for (i++; i < length;i++) { c = pattern.charAt(i); if (c == '%' ) { // consume literal % otherwise break if (i + 1 < length && pattern.charAt(i + 1) == '%') i++; else { i--; break; } } buf.append(c); } } indexRef[0] = i; return buf.toString(); } }
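A hedged usage sketch for forStrptimePattern above; it assumes joda-time is on the classpath (it is a ParseTime dependency) and uses an illustrative pattern and date string:

import org.joda.time.format.DateTimeFormatter;
import water.fvec.ParseTime;

class StrptimeDemo {
  public static void main(String[] args) {
    // %Y -> 4-digit year, %m -> month, %d -> day of month, per parseToBuilder() above
    DateTimeFormatter fmt = ParseTime.forStrptimePattern("%Y-%m-%d");
    long millis = fmt.parseMillis("2013-07-04");  // msec since Jan 1, 1970, default time zone
    System.out.println(millis);
  }
}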
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/RebalanceDataSet.java
package water.fvec; import jsr166y.CountedCompleter; import water.H2O; import water.Key; import water.MRTask2; import water.util.Log; import java.util.Arrays; import java.util.Iterator; /** * Created by tomasnykodym on 3/28/14. * * Utility to rebalance dataset so that it has requested number of chunks and each chunk has the same number of rows +/-1. * * It *does not* guarantee even chunk-node placement. * (This can not currently be done in H2O, since the placement of chunks is governed only by key-hash /vector group/ for Vecs) */ public class RebalanceDataSet extends H2O.H2OCountedCompleter { final Frame _in; Key _okey; Frame _out; final Key _jobKey; final transient Vec.VectorGroup _vg; final transient long [] _espc; /** * Constructor for make-compatible task. * * To be used to make frame compatible with other frame (i.e. make all vecs compatible with other vector group and rows-per-chunk). */ public RebalanceDataSet(Frame modelFrame, Frame srcFrame, Key dstKey) {this(modelFrame,srcFrame,dstKey,null,null);} public RebalanceDataSet(Frame modelFrame, Frame srcFrame, Key dstKey, H2O.H2OCountedCompleter cmp, Key jobKey) { super(cmp); _in = srcFrame; _jobKey = jobKey; _okey = dstKey; _espc = modelFrame.anyVec()._espc; _vg = modelFrame.anyVec().group(); } /** * Constructor for re-balancing the dataset (e.g. for performance reasons). * Resulting dataset will have requested number of chunks and rows will be unfirmly distributed with the * same rows-per chunk count in all chunk (+/- 1). */ public RebalanceDataSet(Frame srcFrame, Key dstKey, int nchunks) { this(srcFrame,dstKey,nchunks,null,null);} public RebalanceDataSet(Frame srcFrame, Key dstKey, int nchunks, H2O.H2OCountedCompleter cmp, Key jobKey) { super(cmp); // simply create a bogus new vector (don't even put it into KV) with appropriate number of lines per chunk and then use it as a source to do multiple makeZero calls // to create empty vecs and than call RebalanceTask on each one of them. // RebalanceTask will fetch the appropriate src chunks and fetch the data from them. int rpc = (int)(srcFrame.numRows() / nchunks); int rem = (int)(srcFrame.numRows() % nchunks); final long [] espc; _espc = new long[nchunks + 1]; Arrays.fill(_espc, rpc); for (int i = 0; i < rem; ++i) ++_espc[i]; long sum = 0; for (int i = 0; i < _espc.length; ++i) { long s = _espc[i]; _espc[i] = sum; sum += s; } assert _espc[_espc.length - 1] == srcFrame.numRows() : "unexpected number of rows, expected " + srcFrame.numRows() + ", got " + _espc[_espc.length - 1]; _in = srcFrame; _jobKey = jobKey; _okey = dstKey; _vg = Vec.VectorGroup.newVectorGroup(); } public Frame getResult(){join(); return _out;} boolean unlock; @Override public void compute2() { final Vec [] srcVecs = _in.vecs(); _out = new Frame(_okey,_in.names(), new Vec(_vg.addVec(),_espc).makeZeros(srcVecs.length,_in.domains(),_in.uuids(),_in.times())); _out.delete_and_lock(_jobKey); new RebalanceTask(this,srcVecs).asyncExec(_out); } @Override public void onCompletion(CountedCompleter caller){ assert _out.numRows() == _in.numRows(); _out.update(_jobKey); _out.unlock(_jobKey); } @Override public boolean onExceptionalCompletion(Throwable t, CountedCompleter caller){ if(_out != null)_out.delete(_jobKey,0.0f); return true; } public static class RebalanceTask extends MRTask2<RebalanceTask> { final Vec [] _srcVecs; public RebalanceTask(H2O.H2OCountedCompleter cmp, Vec... 
srcVecs){super(cmp);_srcVecs = srcVecs;} @Override public boolean logVerbose() { return false; } private void rebalanceChunk(Vec srcVec, Chunk chk){ Chunk srcRaw = null; try { NewChunk dst = new NewChunk(chk); dst._len = dst._sparseLen = 0; int rem = chk._len; while (rem > 0 && dst._len < chk._len) { srcRaw = srcVec.chunkForRow(chk._start + dst._len); NewChunk src = new NewChunk((srcRaw)); src = srcRaw.inflate_impl(src); assert src._len == srcRaw._len; int srcFrom = (int) (chk._start + dst._len - src._start); // check if the result is sparse (not exact since we only take subset of src in general) if ((src.sparse() && dst.sparse()) || ((src.sparseLen() + dst.sparseLen()) * NewChunk.MIN_SPARSE_RATIO < (src.len() + dst.len()))) { src.set_sparse(src.sparseLen()); dst.set_sparse(dst.sparseLen()); } final int srcTo = srcFrom + rem; int off = srcFrom - 1; Iterator<NewChunk.Value> it = src.values(Math.max(0, srcFrom), srcTo); while (it.hasNext()) { NewChunk.Value v = it.next(); final int rid = v.rowId0(); assert rid < srcTo; int add = rid - off; off = rid; dst.addZeros(add - 1); v.add2Chunk(dst); rem -= add; assert rem >= 0; } int trailingZeros = Math.min(rem, src._len - off - 1); dst.addZeros(trailingZeros); rem -= trailingZeros; } assert rem == 0 : "rem = " + rem; assert dst._len == chk._len : "len2 = " + dst._len + ", _len = " + chk._len; dst.close(dst.cidx(), _fs); } catch(RuntimeException t){ Log.err("got exception while rebalancing chunk " + (srcRaw == null ? "null" : srcRaw.getClass().getSimpleName())); throw t; } } @Override public void map(Chunk [] chks){ for(int i = 0; i < chks.length; ++i) rebalanceChunk(_srcVecs[i],chks[i]); } } }
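The rebalancing constructor above derives the per-chunk row layout purely from numRows and nchunks; a standalone sketch of that espc (element-start-per-chunk) arithmetic, with an illustrative row count:

import java.util.Arrays;

class EspcSketch {
  // Mirrors RebalanceDataSet(srcFrame, dstKey, nchunks): rows/nchunks rows per chunk,
  // the first rows%nchunks chunks get one extra, then counts become cumulative offsets.
  static long[] espc(long rows, int nchunks) {
    long[] espc = new long[nchunks + 1];
    long rpc = rows / nchunks, rem = rows % nchunks;
    Arrays.fill(espc, rpc);
    for (int i = 0; i < rem; ++i) ++espc[i];
    long sum = 0;
    for (int i = 0; i < espc.length; ++i) { long s = espc[i]; espc[i] = sum; sum += s; }
    return espc;  // espc[nchunks] == rows
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(espc(10, 3)));  // [0, 4, 7, 10] -> chunk sizes 4, 3, 3
  }
}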
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/S3FileVec.java
package water.fvec; import water.*; import com.amazonaws.services.s3.model.S3ObjectSummary; // A distributed file-backed Vector // public class S3FileVec extends FileVec { // Make a new S3FileVec key which holds the filename implicitly. // This name is used by the DVecs to load data on-demand. public static Key make(S3ObjectSummary obj) { Futures fs = new Futures(); Key key = make(obj, fs); fs.blockForPending(); return key; } public static Key make(S3ObjectSummary obj, Futures fs) { String fname = obj.getKey(); Key k = Key.make("s3://" + obj.getBucketName() + "/" + fname); long size = obj.getSize(); Key k2 = Vec.newKey(k); new Frame(k).delete_and_lock(null); // Insert the top-level FileVec key into the store Vec v = new S3FileVec(k2,size); DKV.put(k2, v, fs); Frame fr = new Frame(k,new String[]{fname},new Vec[]{v}); fr.update(null); fr.unlock(null); return k; } private S3FileVec(Key key, long len) {super(key,len,Value.S3);} }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/SVMLightFVecDataOut.java
package water.fvec; import java.util.Arrays; import water.fvec.ParseDataset2.FVecDataOut; import water.fvec.Vec.VectorGroup; import water.parser.Enum; public class SVMLightFVecDataOut extends FVecDataOut { protected final VectorGroup _vg; public SVMLightFVecDataOut(VectorGroup vg, int cidx, int ncols, int vecIdStart, Enum [] enums){ super(vg,cidx,0,vg.reserveKeys(10000000),enums); _nvs = new NewChunk[0]; _vg = vg; _col = 0; } private void addColumns(int ncols){ if(ncols > _nCols){ _nvs = Arrays.copyOf(_nvs , ncols); _vecs = Arrays.copyOf(_vecs , ncols); _ctypes= Arrays.copyOf(_ctypes, ncols); for(int i = _nCols; i < ncols; ++i) { _vecs[i] = new AppendableVec(_vg.vecKey(i+1)); _nvs[i] = new NewChunk(_vecs[i], _cidx); for(int j = 0; j < _nLines; ++j) _nvs[i].addNum(0, 0); } _nCols = ncols; } } @Override public void addNumCol(int colIdx, long number, int exp) { assert colIdx >= _col; addColumns(colIdx+1); for(int i = _col; i < colIdx; ++i) super.addNumCol(i, 0, 0); super.addNumCol(colIdx, number, exp); _col = colIdx+1; } @Override public void newLine() { if(_col < _nCols)addNumCol(_nCols-1, 0,0); super.newLine(); _col = 0; } }
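SVMLightFVecDataOut above widens the column set lazily and back-fills zeros for columns a sparse row skips; a dense-array sketch of that fill pattern (column indices and values are illustrative):

import java.util.Arrays;

class SparseRowFillSketch {
  public static void main(String[] args) {
    int ncols = 6;
    double[] row = new double[ncols];          // unmentioned columns stay 0, like the back-fill
    int[]    col = {0, 3, 5};                  // SVMLight-style sparse column indices
    double[] val = {1.0, 2.5, -1.0};
    for (int i = 0; i < col.length; ++i) row[col[i]] = val[i];
    System.out.println(Arrays.toString(row));  // [1.0, 0.0, 0.0, 2.5, 0.0, -1.0]
  }
}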
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/SubsetVec.java
package water.fvec; import water.*; /** * A simple wrapper for looking at only a subset of rows */ public class SubsetVec extends WrappedVec { final Key _subsetRowsKey; transient Vec _rows; // Cached copy of the rows-Vec public SubsetVec(Key subsetRowsKey, Key masterVecKey, Key key, long[] espc) { super(masterVecKey,key, espc); _subsetRowsKey = subsetRowsKey; } public Vec rows() { if( _rows==null ) _rows = DKV.get(_subsetRowsKey).get(); return _rows; } // A subset chunk @Override public Chunk chunkForChunkIdx(int cidx) { Chunk crows = rows().chunkForChunkIdx(cidx); return new SubsetChunk(crows,this,masterVec()); } @Override public Futures remove(Futures fs) { super.remove(fs); UKV.remove(_subsetRowsKey,fs); return fs; } // static class SubsetChunk extends Chunk { final Chunk _crows; final Vec _masterVec; protected SubsetChunk(Chunk crows, SubsetVec vec, Vec masterVec) { _vec = vec; _masterVec = masterVec; _len = crows._len; _start = crows._start; _crows = crows; } @Override protected double atd_impl(int idx) { long rownum = _crows.at8_impl(idx); return _masterVec.at(rownum); } @Override protected long at8_impl(int idx) { long rownum = _crows.at8_impl(idx); return _masterVec.at8(rownum); } @Override protected boolean isNA_impl(int idx) { long rownum = _crows.at8_impl(idx); return _masterVec.isNA(rownum); } @Override boolean set_impl(int idx, long l) { return false; } @Override boolean set_impl(int idx, double d) { return false; } @Override boolean set_impl(int idx, float f) { return false; } @Override boolean setNA_impl(int idx) { return false; } @Override boolean hasFloat() { return false; } @Override NewChunk inflate_impl(NewChunk nc) { throw H2O.fail(); } @Override public AutoBuffer write(AutoBuffer bb) { throw H2O.fail(); } @Override public Chunk read(AutoBuffer bb) { throw H2O.fail(); } } }
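SubsetChunk above answers at(idx) with a double lookup: the rows-vec maps the subset index to a master row number, then the master vec supplies the value; a plain-array sketch of that indirection (illustrative data, not the H2O API):

class SubsetLookupSketch {
  public static void main(String[] args) {
    double[] master = {10.0, 11.0, 12.0, 13.0, 14.0};  // stands in for the master Vec
    long[]   rows   = {0, 2, 4};                       // stands in for the rows Vec
    for (int i = 0; i < rows.length; ++i)
      System.out.println(master[(int) rows[i]]);       // 10.0, 12.0, 14.0
  }
}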
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/TachyonFileVec.java
package water.fvec; import tachyon.thrift.ClientFileInfo; import water.*; import water.persist.PersistTachyon; public class TachyonFileVec extends FileVec { public static Key make(String serverUri, ClientFileInfo tf) { Futures fs = new Futures(); Key key = make(serverUri, tf, fs); fs.blockForPending(); return key; } public static Key make(String serverUri, ClientFileInfo tf, Futures fs) { String fname = tf.getPath(); // Always return absolute path /dir/filename long size = tf.getLength(); Key k = Key.make(PersistTachyon.PREFIX + serverUri + fname); Key k2 = Vec.newKey(k); new Frame(k).delete_and_lock(null); // Insert the top-level FileVec key into the store Vec v = new TachyonFileVec(k2,size); DKV.put(k2, v, fs); Frame fr = new Frame(k,new String[] {fname}, new Vec[] {v}); fr.update(null); fr.unlock(null); return k; } private TachyonFileVec(Key key, long len) {super(key,len,Value.TACHYON);} }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/TransfVec.java
package water.fvec; import java.util.Arrays; import water.*; import water.util.Utils; /** * Dummy vector transforming values of given vector according to given domain mapping. * * <p>The mapping is defined by a simple hash map composed of two arrays. * The first array contains values. Index of values is index into the second array {@link #_indexes} * which contains final value (i.e., index to domain array).</p> * * <p>If {@link #_indexes} array is null, then index of found value is used directly.</p> * * <p>To avoid virtual calls or additional null check for {@link #_indexes} the vector * returns two implementation of underlying chunk ({@link TransfChunk} when {@link #_indexes} is not <code>null</code>, * and {@link FlatTransfChunk} when {@link #_indexes} is <code>null</code>.</p> */ public class TransfVec extends WrappedVec { /** List of values from underlying vector which this vector map to a new value. If * a value is not included in this array the implementation returns NA. */ final int[] _values; /** The transformed value - i.e. transformed value is: <code>int idx = find(value, _values); return _indexes[idx]; </code> */ final int[] _indexes; public TransfVec(int[][] mapping, Key masterVecKey, Key key, long[] espc) { this(mapping, null, masterVecKey, key, espc); } public TransfVec(int[][] mapping, String[] domain, Key masterVecKey, Key key, long[] espc) { this(mapping[0], mapping[1], domain, masterVecKey, key, espc); } public TransfVec(int[] values, int[] indexes, String[] domain, Key masterVecKey, Key key, long[] espc) { super(masterVecKey, key, espc); _values = values; _indexes = indexes; _domain = domain; } @Override public Chunk chunkForChunkIdx(int cidx) { Chunk c = masterVec().chunkForChunkIdx(cidx); if (_indexes!=null) // two way mapping return new TransfChunk(c, this); else // single way mapping return new FlatTransfChunk(c, this); } static abstract class AbstractTransfChunk extends Chunk { protected static final long MISSING_VALUE = -1L; final Chunk _c; protected AbstractTransfChunk(Chunk c, TransfVec vec) { _c = c; _len = _c._len; _start = _c._start; _vec = vec; } @Override protected double atd_impl(int idx) { double d = 0; return _c.isNA0(idx) ? Double.NaN : ( (d=at8_impl(idx)) == MISSING_VALUE ? 
Double.NaN : d ) ; } @Override protected boolean isNA_impl(int idx) { if (_c.isNA_impl(idx)) return true; return at8_impl(idx) == MISSING_VALUE; // this case covers situation when there is no mapping } @Override boolean set_impl(int idx, long l) { return false; } @Override boolean set_impl(int idx, double d) { return false; } @Override boolean set_impl(int idx, float f) { return false; } @Override boolean setNA_impl(int idx) { return false; } @Override boolean hasFloat() { return _c.hasFloat(); } @Override NewChunk inflate_impl(NewChunk nc) { nc.set_len(nc.set_sparseLen(0)); for( int i=0; i< len(); i++ ) if(isNA0(i))nc.addNA(); else nc.addNum(at80(i),0); return nc; } @Override public AutoBuffer write(AutoBuffer bb) { throw new UnsupportedOperationException(); } @Override public Chunk read(AutoBuffer bb) { throw new UnsupportedOperationException(); } } static class TransfChunk extends AbstractTransfChunk { /** @see TransfVec#_values */ final int[] _values; /** @see TransfVec#_indexes */ final int[] _indexes; public TransfChunk(Chunk c, TransfVec vec) { super(c,vec); assert vec._indexes != null : "TransfChunk needs not-null indexing array."; _values = vec._values; _indexes = vec._indexes; } @Override protected long at8_impl(int idx) { return get(_c.at8_impl(idx)); } private long get(long val) { int indx = -1; return (indx = Arrays.binarySearch(_values, (int)val)) < 0 ? MISSING_VALUE : _indexes[indx]; } } static class FlatTransfChunk extends AbstractTransfChunk { /** @see TransfVec#_values */ final int[] _values; public FlatTransfChunk(Chunk c, TransfVec vec) { super(c,vec); assert vec._indexes == null : "TransfChunk requires NULL indexing array."; _values = vec._values; } @Override protected long at8_impl(int idx) { return get(_c.at8_impl(idx)); } private long get(long val) { int indx = -1; return (indx = Arrays.binarySearch(_values, (int)val)) < 0 ? MISSING_VALUE : indx ; } } /** Compose this vector with given transformation. Always return a new vector */ public Vec compose(int[][] transfMap, String[] domain) { return compose(this, transfMap, domain, true); } /** * Compose given origVector with given transformation. Always returns a new vector. * Original vector is kept if keepOrig is true. * @param origVec * @param transfMap * @param keepOrig * @return a new instance of {@link TransfVec} composing transformation of origVector and tranfsMap */ public static Vec compose(TransfVec origVec, int[][] transfMap, String[] domain, boolean keepOrig) { // Do a mapping from INT -> ENUM -> this vector ENUM int[][] domMap = Utils.compose(new int[][] {origVec._values, origVec._indexes }, transfMap); Vec result = origVec.masterVec().makeTransf(domMap[0], domMap[1], domain);; if (!keepOrig) DKV.remove(origVec._key); return result; } }
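TransfChunk.get above resolves a raw value with a binary search over the sorted _values array and returns the paired _indexes entry, or a missing marker when there is no mapping; a standalone sketch with hypothetical arrays:

import java.util.Arrays;

class TransfLookupSketch {
  static final long MISSING_VALUE = -1L;

  // Mirrors TransfChunk.get(); values must be sorted for Arrays.binarySearch.
  static long map(int[] values, int[] indexes, long val) {
    int idx = Arrays.binarySearch(values, (int) val);
    return idx < 0 ? MISSING_VALUE : indexes[idx];
  }

  public static void main(String[] args) {
    int[] values  = {2, 5, 9};     // raw values present in the source vec
    int[] indexes = {0, 2, 1};     // their positions in the new domain
    System.out.println(map(values, indexes, 5));  // 2
    System.out.println(map(values, indexes, 7));  // -1 -> treated as NA
  }
}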
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/UploadFileVec.java
package water.fvec; import java.util.Arrays; import java.io.InputStream; import water.*; import water.util.Log; /** Build a Vec by reading from an InputStream */ public class UploadFileVec extends FileVec { int _nchunks; protected UploadFileVec(Key key) { super(key,-1,Value.ICE); } @Override public boolean writable() { return _len==-1; } public void addAndCloseChunk(Chunk c, Futures fs) { assert _len==-1; // Not closed assert (c._vec == null); // Don't try to re-purpose a chunk. c._vec = this; // Attach chunk to this vec. DKV.put(chunkKey(_nchunks++),c,fs); // Write updated chunk back into K/V } // Close, and possible replace the prior chunk with a new, larger Chunk public void close(C1NChunk c, int cidx, Futures fs) { assert _len==-1; // Not closed c._vec = this; // Attach chunk to this vec. DKV.put(chunkKey(cidx),c,fs); // Write updated chunk back into K/V _len = ((_nchunks-1L)<<LOG_CHK)+c._len; } @Override public Value chunkIdx( int cidx ) { Value val = DKV.get(chunkKey(cidx)); assert checkMissing(cidx,val); return val; } // --------------------------------------------------------------------------- // Store a file (byte by byte) into a frame. // This file will generally come from a POST through the REST interface. // --------------------------------------------------------------------------- static public Key readPut(String keyname, InputStream is) throws Exception { return readPut(Key.make(keyname), is); } static public Key readPut(Key k, InputStream is) throws Exception { readPut(k,is,new Futures()).blockForPending(); return k; } static private Futures readPut(Key key, InputStream is, final Futures fs) throws Exception { Log.info("Reading byte InputStream into Frame:"); Log.info(" frameKey: " + key.toString()); Key newVecKey = Vec.newKey(); try { new Frame(key,new String[0],new Vec[0]).delete_and_lock(null); UploadFileVec uv = new UploadFileVec(newVecKey); assert uv.writable(); byte prev[] = null; byte bytebuf[] = new byte[FileVec.CHUNK_SZ]; int bytesInChunkSoFar = 0; while (true) { int rv = is.read(bytebuf, bytesInChunkSoFar, FileVec.CHUNK_SZ - bytesInChunkSoFar); if (rv < 0) break; bytesInChunkSoFar += rv; if( bytesInChunkSoFar == FileVec.CHUNK_SZ ) { // Write full chunk of size FileVec.CHUNK_SZ. C1NChunk c = new C1NChunk(bytebuf); uv.addAndCloseChunk(c, fs); prev = bytebuf; bytebuf = new byte[FileVec.CHUNK_SZ]; bytesInChunkSoFar = 0; } } // Add last bytes onto last chunk, which may be bigger than CHUNK_SZ. if( prev==null ) { // No chunks at all uv._nchunks++; // Put a 1st chunk uv.close(new C1NChunk(Arrays.copyOf(bytebuf,bytesInChunkSoFar)),0,fs); } else if (bytesInChunkSoFar != 0 ) { byte buf2[] = Arrays.copyOf(prev,bytesInChunkSoFar+prev.length); System.arraycopy(bytebuf,0,buf2,prev.length,bytesInChunkSoFar); uv.close(new C1NChunk(buf2),uv._nchunks-1,fs); } Log.info(" totalFrames: " + 1); Log.info(" totalVecs: " + 1); Log.info(" totalChunks: " + uv.nChunks()); Log.info(" totalBytes: " + uv.length()); DKV.put(newVecKey, uv, fs); String[] sarr = {"bytes"}; Vec[] varr = {uv}; Frame f = new Frame(key,sarr, varr); f.unlock(null); Log.info(" Success."); } catch (Exception e) { // Clean up and do not leak keys. Log.err("Exception caught in Frame::readPut; attempting to clean up the new frame and vector"); Log.err(e); Lockable.delete(key); DKV.remove(newVecKey); Log.err("Frame::readPut cleaned up new frame and vector successfully"); throw e; } return fs; } }
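readPut above slices the InputStream into CHUNK_SZ-byte chunks and folds any trailing bytes into one final, possibly larger, chunk. The sketch below keeps only the slicing loop and, for simplicity, emits the remainder as its own smaller chunk instead of appending it to the previous one (an in-memory list stands in for the K/V store):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class ChunkedReadSketch {
  static List<byte[]> readChunks(InputStream is, int chunkSz) throws IOException {
    List<byte[]> chunks = new ArrayList<byte[]>();
    byte[] buf = new byte[chunkSz];
    int filled = 0;
    while (true) {
      int rv = is.read(buf, filled, chunkSz - filled);
      if (rv < 0) break;                    // end of stream
      filled += rv;
      if (filled == chunkSz) {              // full chunk: emit it and start a new buffer
        chunks.add(buf);
        buf = new byte[chunkSz];
        filled = 0;
      }
    }
    if (filled > 0) chunks.add(Arrays.copyOf(buf, filled));  // trailing partial chunk
    return chunks;
  }

  public static void main(String[] args) throws IOException {
    List<byte[]> chunks = readChunks(new ByteArrayInputStream(new byte[10]), 4);
    System.out.println(chunks.size());      // 3 (sizes 4, 4, 2)
  }
}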
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/Vec.java
package water.fvec; import jsr166y.CountedCompleter; import water.*; import water.nbhm.NonBlockingHashMapLong; import water.util.Utils; import java.util.Arrays; import java.util.UUID; import static water.util.Utils.seq; /** * A single distributed vector column. * <p> * A distributed vector has a count of elements, an element-to-chunk mapping, a * Java type (mostly determines rounding on store and display), and functions * to directly load elements without further indirections. The data is * compressed, or backed by disk or both. *Writing* to elements may throw if the * backing data is read-only (file backed). * <p> * <pre> * Vec Key format is: Key. VEC - byte, 0 - byte, 0 - int, normal Key bytes. * DVec Key format is: Key.DVEC - byte, 0 - byte, chunk# - int, normal Key bytes. * </pre> * * The main API is at, set, and isNA:<br> *<pre> * double at ( long row ); // Returns the value expressed as a double. NaN if missing. * long at8 ( long row ); // Returns the value expressed as a long. Throws if missing. * boolean isNA( long row ); // True if the value is missing. * set( long row, double d ); // Stores a double; NaN will be treated as missing. * set( long row, long l ); // Stores a long; throws if l exceeds what fits in a double and any floats are ever set. * setNA( long row ); // Sets the value as missing. * </pre> * * Note this dangerous scenario: loading a missing value as a double, and * setting it as a long: <pre> * set(row,(long)at(row)); // Danger! *</pre> * The cast from a Double.NaN to a long produces a zero! This code will * replace a missing value with a zero. * * @author Cliff Click */ public class Vec extends Iced { /** Log-2 of Chunk size. */ public static final int LOG_CHK = 22; // Chunks are 1<<22, or 4Meg /** Chunk size. Bigger increases batch sizes, lowers overhead costs, lower * increases fine-grained parallelism. */ public static final int CHUNK_SZ = 1 << LOG_CHK; /** Key mapping a Value which holds this Vec. */ final public Key _key; // Top-level key /** Element-start per chunk. Always zero for chunk 0. One more entry than * chunks, so the last entry is the total number of rows. This field is * dead/ignored in subclasses that are guaranteed to have fixed-sized chunks * such as file-backed Vecs. */ final public long _espc[]; /** Enum/factor/categorical names. */ public String [] _domain; /** Time parse, index into Utils.TIME_PARSE, or -1 for not-a-time */ public byte _time; /** RollupStats: min/max/mean of this Vec lazily computed. */ private double _min, _max, _mean, _sigma; long _size; boolean _isInt; // All ints boolean _isUUID; // All UUIDs (or zero or missing) /** The count of missing elements.... or -2 if we have active writers and no * rollup info can be computed (because the vector is being rapidly * modified!), or -1 if rollups have not been computed since the last * modification. */ volatile long _naCnt=-1; private long _last_write_timestamp = System.currentTimeMillis(); private long _checksum_timestamp = -1; private long _checksum = 0; /** Maximal size of enum domain */ public static final int MAX_ENUM_SIZE = 10000; /** Main default constructor; requires the caller understand Chunk layout * already, along with count of missing elements. 
*/ public Vec( Key key, long espc[]) { this(key, espc, null); } public Vec( Key key, long espc[], String[] domain) { this(key,espc,domain,false,(byte)-1); } public Vec( Key key, long espc[], String[] domain, boolean hasUUID, byte time) { assert key._kb[0]==Key.VEC; _key = key; _espc = espc; _time = time; // is-a-time, or not (and what flavor used to parse time) _isUUID = hasUUID; // all-or-nothing UUIDs _domain = domain; } protected Vec( Key key, Vec v ) { this(key, v._espc); assert group()==v.group(); } public Vec [] makeZeros(int n){return makeZeros(n,null,null,null);} public Vec [] makeZeros(int n, String [][] domain, boolean[] uuids, byte[] times){ return makeCons(n, 0, domain, uuids, times);} public Vec [] makeCons(int n, final long l, String [][] domain, boolean[] uuids, byte[] times){ if( _espc == null ) throw H2O.unimpl(); // need to make espc for e.g. NFSFileVecs! final int nchunks = nChunks(); Key [] keys = group().addVecs(n); final Vec [] vs = new Vec[keys.length]; for(int i = 0; i < vs.length; ++i) vs[i] = new Vec(keys[i],_espc, domain == null ? null : domain[i], uuids == null ? false : uuids [i], times == null ? (byte)-1: times [i]); new DRemoteTask(){ @Override public void lcompute(){ addToPendingCount(vs.length); for(int i = 0; i < vs.length; ++i){ final int fi = i; new H2O.H2OCountedCompleter(this){ @Override public void compute2(){ long row=0; // Start row Key k; for( int i=0; i<nchunks; i++ ) { long nrow = chunk2StartElem(i+1); // Next row if((k = vs[fi].chunkKey(i)).home()) DKV.put(k,new C0LChunk(l,(int)(nrow-row)),_fs); row = nrow; } tryComplete(); } }.fork(); } tryComplete(); } @Override public final void lonCompletion( CountedCompleter caller ) { Futures fs = new Futures(); for(Vec v:vs) if(v._key.home()) DKV.put(v._key,v,fs); fs.blockForPending(); } @Override public void reduce(DRemoteTask drt){} }.invokeOnAllNodes(); return vs; } /** * Create an array of Vecs from scratch * @param rows Length of each vec * @param cols Number of vecs * @param val Constant value (long) * @param domain Factor levels (for factor columns) * @return Array of Vecs */ static public Vec [] makeNewCons(final long rows, final int cols, final long val, final String [][] domain){ int chunks = Math.min((int)rows, 4*H2O.NUMCPUS*H2O.CLOUD.size()); long[] espc = new long[chunks+1]; for (int i = 0; i<=chunks; ++i) espc[i] = i * rows / chunks; Vec v = new Vec(Vec.newKey(), espc); return v.makeCons(cols, val, domain,null,null); } /** Make a new vector with the same size and data layout as the old one, and * initialized to zero. */ public Vec makeZero() { return makeCon(0); } public Vec makeZero(String[] domain) { return makeCon(0, domain); } /** Make a new vector with the same size and data layout as the old one, and * initialized to a constant. */ public Vec makeCon( final long l ) { return makeCon(l, null); } public Vec makeCon( final long l, String[] domain ) { Futures fs = new Futures(); if( _espc == null ) throw H2O.unimpl(); // need to make espc for e.g. NFSFileVecs! 
final int nchunks = nChunks(); final Vec v0 = new Vec(group().addVecs(1)[0],_espc, domain); new DRemoteTask(){ @Override public void lcompute(){ long row=0; // Start row Key k; for( int i=0; i<nchunks; i++ ) { long nrow = chunk2StartElem(i+1); // Next row if((k = v0.chunkKey(i)).home()) DKV.put(k,new C0LChunk(l,(int)(nrow-row)),_fs); row = nrow; } tryComplete(); } @Override public void reduce(DRemoteTask drt){} }.invokeOnAllNodes(); DKV.put(v0._key,v0,fs); fs.blockForPending(); return v0; } public Vec makeCon( final double d ) { Futures fs = new Futures(); if( _espc == null ) throw H2O.unimpl(); // need to make espc for e.g. NFSFileVecs! if( (long)d==d ) return makeCon((long)d); final int nchunks = nChunks(); final Vec v0 = new Vec(group().addVecs(1)[0],_espc); new DRemoteTask(){ @Override public void lcompute(){ getFutures(); long row=0; // Start row Key k; for( int i=0; i<nchunks; i++ ) { long nrow = chunk2StartElem(i+1); // Next row if((k = v0.chunkKey(i)).home()) DKV.put(k,new C0DChunk(d,(int)(nrow-row)),_fs); row = nrow; } tryComplete(); } @Override public void reduce(DRemoteTask drt){} }.invokeOnAllNodes(); DKV.put(v0._key,v0,fs); fs.blockForPending(); return v0; } public static Vec makeSeq( long len) { return new MRTask2() { @Override public void map(Chunk[] cs) { for (int i = 0; i < cs.length; i++) { Chunk c = cs[i]; for (int r = 0; r < c._len; r++) c.set0(r, r+1+c._start); } } }.doAll(makeConSeq(0, len)).vecs(0); } public static Vec makeConSeq(double x, long len) { int chunks = (int)Math.ceil((double)len / Vec.CHUNK_SZ); long[] espc = new long[chunks+1]; for (int i = 1; i<=chunks; ++i) espc[i] = Math.min(espc[i-1] + Vec.CHUNK_SZ, len); return new Vec(VectorGroup.VG_LEN1.addVec(), espc).makeCon(x); } /** Create a new 1-element vector in the shared vector group for 1-element vectors. */ public static Vec make1Elem(double d) { return make1Elem(Vec.VectorGroup.VG_LEN1.addVec(), d); } /** Create a new 1-element vector representing a scalar value. */ public static Vec make1Elem(Key key, double d) { assert key.isVec(); Vec v = new Vec(key,new long[]{0,1}); Futures fs = new Futures(); DKV.put(v.chunkKey(0),new C0DChunk(d,1),fs); DKV.put(key,v,fs); fs.blockForPending(); return v; } /** Create a vector transforming values according given domain map. * @see Vec#makeTransf(int[], int[], String[]) */ public Vec makeTransf(final int[][] map, String[] finalDomain) { return makeTransf(map[0], map[1], finalDomain); } /** * Creates a new transformation from given values to given indexes of * given domain. * @param values values being mapped from * @param indexes values being mapped to * @param domain domain of new vector * @return always return a new vector which maps given values into a new domain */ public Vec makeTransf(final int[] values, final int[] indexes, final String[] domain) { if( _espc == null ) throw H2O.unimpl(); Vec v0 = new TransfVec(values, indexes, domain, this._key, group().addVecs(1)[0],_espc); UKV.put(v0._key,v0); return v0; } /** * Makes a new transformation vector with identity mapping. 
* * @return a new transformation vector * @see Vec#makeTransf(int[], int[], String[]) */ Vec makeIdentityTransf() { assert _domain != null : "Cannot make an identity transformation of non-enum vector!"; return makeTransf(seq(0, _domain.length), null, _domain); } /** * Makes a new transformation vector from given values to * values 0..domain size * @param values values which are mapped from * @param domain target domain which is mapped to * @return a new transformation vector providing mapping between given values and target domain. * @see Vec#makeTransf(int[], int[], String[]) */ Vec makeSimpleTransf(long[] values, String[] domain) { int is[] = new int[values.length]; for( int i=0; i<values.length; i++ ) is[i] = (int)values[i]; return makeTransf(is, null, domain); } /** This Vec does not have dependent hidden Vec it uses. * * @return dependent hidden vector or <code>null</code> */ public Vec masterVec() { return null; } /** * Adapt given vector <code>v</code> to this vector. * I.e., unify domains, compute transformation, and call makeTransf(). * * This vector is a leader - it determines a domain (i.e., {@link #domain()}) and mapping between values stored in vector * and domain values. * The vector <code>v</code> can contain different domain (subset, superset), hence the values stored in the vector * has to be transformed to the values determined by this vector. The resulting vector domain is the * same as this vector domain. * * Always returns a new vector and user's responsibility is delete the vector. * * @param v vector which should be adapter in according this vector. * @param exact should vector match exactly (recommended value is true). * @return a new vector which implements transformation of original values. */ /*// Not used any more in code ?? public Vec adaptTo(Vec v, boolean exact) { assert isInt() : "This vector has to be int/enum vector!"; int[] domain = null; // Compute domain of this vector // - if vector is enum, use domain directly // - if vector is int, then vector numeric domain is collected and transformed to string domain // and then adapted String[] sdomain = (_domain == null) ? Utils.toStringMap(domain = new CollectDomain(this).doAll(this).domain()) // it is number-column : domain(); // it is enum // Compute transformation - domain map - each value in an array is one value from vector domain, its index // represents an index into string domain representation. int[] domMap = Model.getDomainMapping(v._domain, sdomain, exact); if (domain!=null) { // do a mapping from INT -> ENUM -> this vector ENUM domMap = Utils.compose(Utils.mapping(domain), domMap); } return this.makeTransf(domMap, sdomain); }*/ /** Number of elements in the vector. Overridden by subclasses that compute * length in an alternative way, such as file-backed Vecs. */ public long length() { return _espc[_espc.length-1]; } /** Number of chunks. Overridden by subclasses that compute chunks in an * alternative way, such as file-backed Vecs. */ public int nChunks() { return _espc.length-1; } /** Whether or not this column parsed as a time, and if so what pattern was used. */ public final boolean isTime(){ return _time>=0; } public final int timeMode(){ return _time; } public final String timeParse(){ return ParseTime.TIME_PARSE[_time]; } /** Map the integer value for a enum/factor/categorical to it's String. * Error if it is not an ENUM. */ public String domain(long i) { return _domain[(int)i]; } /** Return an array of domains. This is eagerly manifested for enum or * categorical columns. 
Returns null for non-Enum/factor columns. */ public String[] domain() { return _domain; } /** Returns cardinality for enum domain or -1 for other types. */ public int cardinality() { return isEnum() ? _domain.length : -1; } /** Transform this vector to enum. * If the vector is integer vector then its domain is collected and transformed to * corresponding strings. * If the vector is enum an identity transformation vector is returned. * Transformation is done by a {@link TransfVec} which provides a mapping between values. * * @return always returns a new vector and the caller is responsible for vector deletion! */ public Vec toEnum() { if( isEnum() ) return this.makeIdentityTransf(); // Make an identity transformation of this vector if( !isInt() ) throw new IllegalArgumentException("Enum conversion only works on integer columns"); long[] domain; String[] sdomain = Utils.toString(domain = new CollectDomain(this).doAll(this).domain()); if( domain.length > MAX_ENUM_SIZE ) throw new IllegalArgumentException("Column domain is too large to be represented as an enum: " + domain.length + " > " + MAX_ENUM_SIZE); return this.makeSimpleTransf(domain, sdomain); } /** Default read/write behavior for Vecs. File-backed Vecs are read-only. */ protected boolean readable() { return true ; } /** Default read/write behavior for Vecs. AppendableVecs are write-only. */ protected boolean writable() { return true; } /** Return column min - lazily computed as needed. */ public double min() { return rollupStats()._min; } /** Return column max - lazily computed as needed. */ public double max() { return rollupStats()._max; } /** Return column mean - lazily computed as needed. */ public double mean() { return rollupStats()._mean; } /** Return column standard deviation - lazily computed as needed. */ public double sigma(){ return rollupStats()._sigma; } /** Return column missing-element-count - lazily computed as needed. */ public long naCnt() { return rollupStats()._naCnt; } /** Is all integers? */ public boolean isInt(){return rollupStats()._isInt; } /** Size of compressed vector data. */ public long byteSize(){return rollupStats()._size; } public long checksum() { final long now = _last_write_timestamp; // TODO: someone can be writing while we're checksuming. . . if (-1 != now && now == _checksum_timestamp) { return _checksum; } final long checksum = new ChecksummerTask().doAll(this).getChecksum(); new TAtomic<Vec>() { @Override public Vec atomic(Vec v) { if (v != null) { v._checksum = checksum; v._checksum_timestamp = now; } return v; } }.invoke(_key); this._checksum = checksum; this._checksum_timestamp = now; return checksum; } /** Is the column a factor/categorical/enum? Note: all "isEnum()" columns * are are also "isInt()" but not vice-versa. */ public final boolean isEnum(){return _domain != null;} public final boolean isUUID(){return _isUUID;} /** Is the column constant. * <p>Returns true if the column contains only constant values and it is not full of NAs.</p> */ public final boolean isConst() { return min() == max(); } /** Is the column bad. 
* <p>Returns true if the column is full of NAs.</p> */ public final boolean isBad() { return naCnt() == length(); } public static class VecIdenticalTask extends MRTask2<VecIdenticalTask> { final double fpointPrecision; VecIdenticalTask(H2O.H2OCountedCompleter cc, double precision){super(cc); fpointPrecision = precision;} boolean _res; @Override public void map(Chunk c1, Chunk c2){ if(!(c1 instanceof C8DChunk) && c1.getClass().equals(c2.getClass())) _res = Arrays.equals(c1._mem,c2._mem); else { if(c1._len != c2._len)return; if(c1.hasFloat()){ if(!c2.hasFloat())return; for(int i = 0; i < c1._len; ++i) { double diff = c1.at0(i) - c2.at0(i); if(diff > fpointPrecision || -diff > fpointPrecision)return; } } else { if(c2.hasFloat())return; for(int i = 0; i < c1._len; ++i) if(c1.at80(i) != c2.at80(i))return; } _res = true; } } @Override public void reduce(VecIdenticalTask bt){_res = _res && bt._res;} } /** Is the column contains float values. */ public final boolean isFloat() { return !isEnum() && !isInt(); } public final boolean isByteVec() { return (this instanceof ByteVec); } Vec setRollupStats( RollupStats rs ) { _min = rs._min; _max = rs._max; _mean = rs._mean; _sigma = Math.sqrt(rs._sigma / (rs._rows - 1)); _size =rs._size; _isInt= rs._isInt; if( rs._rows == 0 ) // All rows missing? Then no rollups _min = _max = _mean = _sigma = Double.NaN; _naCnt= rs._naCnt; // Volatile write last to announce all stats ready return this; } Vec setRollupStats( Vec v ) { _min = v._min; _max = v._max; _mean = v._mean; _sigma = v._sigma; _size = v._size; _isInt = v._isInt; _naCnt= v._naCnt; // Volatile write last to announce all stats ready return this; } /** Compute the roll-up stats as-needed, and copy into the Vec object */ public Vec rollupStats() { return rollupStats(null); } // Allow a bunch of rollups to run in parallel. If Futures is passed in, run // the rollup in the background. *Always* returns "this". public Vec rollupStats(Futures fs) { Vec vthis = DKV.get(_key).get(); if( vthis._naCnt==-2 ) throw new IllegalArgumentException("Cannot ask for roll-up stats while the vector is being actively written."); if( vthis._naCnt>= 0 ) // KV store has a better answer return vthis == this ? 
this : setRollupStats(vthis); // KV store reports we need to recompute RollupStats rs = new RollupStats().dfork(this); if(fs != null) fs.add(rs); else setRollupStats(rs.getResult()); return this; } /** A private class to compute the rollup stats */ private static class RollupStats extends MRTask2<RollupStats> { double _min=Double.MAX_VALUE, _max=-Double.MAX_VALUE, _mean, _sigma; long _rows, _naCnt, _size; boolean _isInt=true; @Override public void postGlobal(){ final RollupStats rs = this; _fr.vecs()[0].setRollupStats(rs); // Now do this remotely also new TAtomic<Vec>() { @Override public Vec atomic(Vec v) { if( v!=null && v._naCnt == -1 ) v.setRollupStats(rs); return v; } }.fork(_fr._keys[0]); } @Override public void map( Chunk c ) { _size = c.byteSize(); // UUID columns do not compute min/max/mean/sigma if( c._vec._isUUID ) { _min = _max = _mean = _sigma = Double.NaN; for( int i=0; i<c._len; i++ ) { if( c.isNA0(i) ) _naCnt++; else _rows++; } return; } // All other columns have useful rollups for( int i=0; i<c._len; i++ ) { double d = c.at0(i); if( Double.isNaN(d) ) _naCnt++; else { if( d < _min ) _min = d; if( d > _max ) _max = d; _mean += d; _rows++; if( _isInt && ((long)d) != d ) _isInt = false; } } _mean = _mean / _rows; for( int i=0; i<c._len; i++ ) { if( !c.isNA0(i) ) { double d = c.at0(i); _sigma += (d - _mean) * (d - _mean); } } } @Override public void reduce( RollupStats rs ) { _min = Math.min(_min,rs._min); _max = Math.max(_max,rs._max); _naCnt += rs._naCnt; double delta = _mean - rs._mean; if (_rows == 0) { _mean = rs._mean; _sigma = rs._sigma; } else if (rs._rows > 0) { _mean = (_mean*_rows + rs._mean*rs._rows)/(_rows + rs._rows); _sigma = _sigma + rs._sigma + delta*delta * _rows*rs._rows / (_rows+rs._rows); } _rows += rs._rows; _size += rs._size; _isInt &= rs._isInt; } // Just toooo common to report always. Drowning in multi-megabyte log file writes. @Override public boolean logVerbose() { return false; } } // class RollupStats /** A private class to compute the rollup stats */ private static class ChecksummerTask extends MRTask2<ChecksummerTask> { public long checksum = 0; public long getChecksum() { return checksum; } @Override public void map( Chunk c ) { long _start = c._start; for( int i=0; i<c._len; i++ ) { long l = 81985529216486895L; // 0x0123456789ABCDEF if (! c.isNA0(i)) { if (c instanceof C16Chunk) { l = c.at16l0(i); l ^= (37 * c.at16h0(i)); } else { l = c.at80(i); } } long global_row = _start + i; checksum ^= (17 * global_row); checksum ^= (23 * l); } } // map() @Override public void reduce( ChecksummerTask that ) { this.checksum ^= that.checksum; } } // class ChecksummerTask /** Writing into this Vector from *some* chunk. Immediately clear all caches * (_min, _max, _mean, etc). Can be called repeatedly from one or all * chunks. Per-chunk row-counts will not be changing, just row contents and * caches of row contents. */ void preWriting( ) { if( _naCnt == -2 ) return; // Already set _naCnt = -2; if( !writable() ) throw new IllegalArgumentException("Vector not writable"); // Set remotely lazily. This will trigger a cloud-wide invalidate of the // existing Vec, and eventually we'll have to load a fresh copy of the Vec // with active writing turned on, and caching disabled. new TAtomic<Vec>() { @Override public Vec atomic(Vec v) { if( v!=null ) v._naCnt=-2; return v; } }.invoke(_key); } /** Stop writing into this Vec. Rollup stats will again (lazily) be computed. 
*/ public void postWrite() { Vec vthis = DKV.get(_key).get(); if( vthis._naCnt==-2 ) { _naCnt = vthis._naCnt=-1; new TAtomic<Vec>() { @Override public Vec atomic(Vec v) { if( v != null ) { v._last_write_timestamp = System.currentTimeMillis(); if (v._naCnt==-2 ) { v._naCnt=-1; } // _naCnt != -2 } // ! null return v; } }.invoke(_key); } } /** Convert a row# to a chunk#. For constant-sized chunks this is a little * shift-and-add math. For variable-sized chunks this is a binary search, * with a sane API (JDK has an insane API). Overridden by subclasses that * compute chunks in an alternative way, such as file-backed Vecs. */ int elem2ChunkIdx( long i ) { assert 0 <= i && i < length() : "0 <= "+i+" < "+length(); int lo=0, hi = nChunks(); while( lo < hi-1 ) { int mid = (hi+lo)>>>1; if( i < _espc[mid] ) hi = mid; else lo = mid; } while( _espc[lo+1] == i ) lo++; return lo; } /** Convert a chunk-index into a starting row #. For constant-sized chunks * this is a little shift-and-add math. For variable-sized chunks this is a * table lookup. */ public long chunk2StartElem( int cidx ) { return _espc[cidx]; } /** Number of rows in chunk. Does not fetch chunk content. */ public int chunkLen( int cidx ) { return (int) (_espc[cidx + 1] - _espc[cidx]); } /** Get a Vec Key from Chunk Key, without loading the Chunk */ static public Key getVecKey( Key key ) { assert key._kb[0]==Key.DVEC; byte [] bits = key._kb.clone(); bits[0] = Key.VEC; UDP.set4(bits,6,-1); // chunk# return Key.make(bits); } /** Get a Chunk Key from a chunk-index. Basically the index-to-key map. */ public Key chunkKey(int cidx ) { return chunkKey(_key,cidx); } static public Key chunkKey(Key veckey, int cidx ) { byte [] bits = veckey._kb.clone(); bits[0] = Key.DVEC; UDP.set4(bits,6,cidx); // chunk# return Key.make(bits); } /** Get a Chunk's Value by index. Basically the index-to-key map, * plus the {@code DKV.get()}. Warning: this pulls the data locally; * using this call on every Chunk index on the same node will * probably trigger an OOM! */ public Value chunkIdx( int cidx ) { Value val = DKV.get(chunkKey(cidx)); assert checkMissing(cidx,val); return val; } protected boolean checkMissing(int cidx, Value val) { if( val != null ) return true; System.out.println("Error: Missing chunk "+cidx+" for "+_key); return false; } /** Make a new random Key that fits the requirements for a Vec key. */ static public Key newKey(){return newKey(Key.make());} public static final int KEY_PREFIX_LEN = 4+4+1+1; /** Make a new Key that fits the requirements for a Vec key, based on the * passed-in key. Used to make Vecs that back over e.g. disk files. */ static Key newKey(Key k) { byte [] kb = k._kb; byte [] bits = MemoryManager.malloc1(kb.length+KEY_PREFIX_LEN); bits[0] = Key.VEC; bits[1] = -1; // Not homed UDP.set4(bits,2,0); // new group, so we're the first vector UDP.set4(bits,6,-1); // 0xFFFFFFFF in the chunk# area System.arraycopy(kb, 0, bits, 4+4+1+1, kb.length); return Key.make(bits); } /** Make a Vector-group key. */ public Key groupKey(){ byte [] bits = _key._kb.clone(); bits[0] = Key.VGROUP; UDP.set4(bits, 2, -1); UDP.set4(bits, 6, -1); return Key.make(bits); } /** * Get the group this vector belongs to. * In case of a group with only one vector, the object actually does not exist in KV store. * * @return VectorGroup this vector belongs to. 
*/ public final VectorGroup group() { Key gKey = groupKey(); Value v = DKV.get(gKey); if(v != null)return v.get(VectorGroup.class); // no group exists so we have to create one return new VectorGroup(gKey,1); } /** The Chunk for a chunk#. Warning: this loads the data locally! */ public Chunk chunkForChunkIdx(int cidx) { long start = chunk2StartElem(cidx); // Chunk# to chunk starting element# Value dvec = chunkIdx(cidx); // Chunk# to chunk data Chunk c = dvec.get(); // Chunk data to compression wrapper long cstart = c._start; // Read once, since racily filled in Vec v = c._vec; if( cstart == start && v != null) return c; // Already filled-in assert cstart == -1 || v == null; // Was not filled in (everybody racily writes the same start value) c._vec = this; // Fields not filled in by unpacking from Value c._start = start; // Fields not filled in by unpacking from Value return c; } /** The Chunk for a row#. Warning: this loads the data locally! */ private Chunk chunkForRow_impl(long i) { return chunkForChunkIdx(elem2ChunkIdx(i)); } // Cache of last Chunk accessed via at/set api transient Chunk _cache; /** The Chunk for a row#. Warning: this loads the data locally! */ public final Chunk chunkForRow(long i) { Chunk c = _cache; return (c != null && c._chk2==null && c._start <= i && i < c._start+c._len) ? c : (_cache = chunkForRow_impl(i)); } /** Fetch element the slow way, as a long. Floating point values are * silently rounded to an integer. Throws if the value is missing. */ public final long at8( long i ) { return chunkForRow(i).at8(i); } /** Fetch element the slow way, as a double. Missing values are * returned as Double.NaN instead of throwing. */ public final double at( long i ) { return chunkForRow(i).at(i); } /** Fetch the missing-status the slow way. */ public final boolean isNA(long row){ return chunkForRow(row).isNA(row); } /** Fetch element the slow way, as a long. Throws if the value is missing or not a UUID. */ public final long at16l( long i ) { return chunkForRow(i).at16l(i); } public final long at16h( long i ) { return chunkForRow(i).at16h(i); } /** Write element the VERY slow way, as a long. There is no way to write a * missing value with this call. Under rare circumstances this can throw: * if the long does not fit in a double (value is larger magnitude than * 2^52), AND float values are stored in Vector. In this case, there is no * common compatible data representation. * * NOTE: For a faster way, but still slow, use the Vec.Writer below. * */ public final long set( long i, long l) { Chunk ck = chunkForRow(i); long ret = ck.set(i,l); Futures fs = new Futures(); ck.close(ck.cidx(), fs); //slow to do this for every set -> use Writer if writing many values fs.blockForPending(); postWrite(); return ret; } /** Write element the VERY slow way, as a double. Double.NaN will be treated as * a set of a missing element. * */ public final double set( long i, double d) { Chunk ck = chunkForRow(i); double ret = ck.set(i,d); Futures fs = new Futures(); ck.close(ck.cidx(), fs); //slow to do this for every set -> use Writer if writing many values fs.blockForPending(); postWrite(); return ret; } /** Write element the VERY slow way, as a float. Float.NaN will be treated as * a set of a missing element. 
* */ public final float set( long i, float f) { Chunk ck = chunkForRow(i); float ret = ck.set(i, f); Futures fs = new Futures(); ck.close(ck.cidx(), fs); //slow to do this for every set -> use Writer if writing many values fs.blockForPending(); postWrite(); return ret; } /** Set the element as missing the VERY slow way. */ public final boolean setNA( long i ) { Chunk ck = chunkForRow(i); boolean ret = ck.setNA(i); Futures fs = new Futures(); ck.close(ck.cidx(), fs); //slow to do this for every set -> use Writer if writing many values fs.blockForPending(); postWrite(); return ret; } /** * More efficient way to write randomly to a Vec - still slow, but much faster than Vec.set() * * Usage: * Vec.Writer vw = vec.open(); * vw.set(0, 3.32); * vw.set(1, 4.32); * vw.set(2, 5.32); * vw.close(); */ public final static class Writer { Vec _vec; private Writer(Vec v){ _vec=v; _vec.preWriting(); } public final long set( long i, long l) { return _vec.chunkForRow(i).set(i,l); } public final double set( long i, double d) { return _vec.chunkForRow(i).set(i,d); } public final float set( long i, float f) { return _vec.chunkForRow(i).set(i,f); } public final boolean setNA( long i ) { return _vec.chunkForRow(i).setNA(i); } public void close() { Futures fs = new Futures(); _vec.close(fs); fs.blockForPending(); _vec.postWrite(); } } public final Writer open() { return new Writer(this); } /** Close all chunks that are local (not just the ones that are homed) * This should only be called from a Writer object * */ private void close(Futures fs) { int nc = nChunks(); for( int i=0; i<nc; i++ ) { if (H2O.get(chunkKey(i)) != null) { chunkForChunkIdx(i).close(i, fs); } } } /** Pretty print the Vec: [#elems, min/mean/max]{chunks,...} */ @Override public String toString() { String s = "["+length()+(_naCnt<0 ? ", {" : ","+_min+"/"+_mean+"/"+_max+", "+PrettyPrint.bytes(_size)+", {"); int nc = nChunks(); for( int i=0; i<nc; i++ ) { s += chunkKey(i).home_node()+":"+chunk2StartElem(i)+":"; // CNC: Bad plan to load remote data during a toString... messes up debug printing // Stupidly chunkForChunkIdx loads all data locally // s += chunkForChunkIdx(i).getClass().getSimpleName().replaceAll("Chunk","")+", "; } return s+"}]"; } public Futures remove( Futures fs ) { for( int i=0; i<nChunks(); i++ ) UKV.remove(chunkKey(i),fs); DKV.remove(_key,fs); return fs; } @Override public boolean equals( Object o ) { return o instanceof Vec && ((Vec)o)._key.equals(_key); } @Override public int hashCode() { return _key.hashCode(); } /** Always makes a copy of the given vector which shares the same * group. * * The user is responsible for deleting the returned vector. * * This can be expensive operation since it can force copy of data * among nodes. * * @param vec vector which is intended to be copied * @return a copy of vec which shared the same {@link VectorGroup} with this vector */ public Vec align(final Vec vec) { assert ! this.group().equals(vec.group()) : "Vector align expects a vector from different vector group"; assert this.length()== vec.length() : "Trying to align vectors with different length!"; Vec avec = makeZero(); // aligned vector new MRTask2() { @Override public void map(Chunk c0) { long srow = c0._start; for (int r = 0; r < c0._len; r++) c0.set0(r, vec.at(srow + r)); } }.doAll(avec); avec._domain = _domain; return avec; } /** * Class representing the group of vectors. * * Vectors from the same group have same distribution of chunks among nodes. * Each vector is member of exactly one group. 
Default group of one vector * is created for each vector. Group of each vector can be retrieved by * calling group() method; * * The expected mode of operation is that user wants to add new vectors * matching the source. E.g. parse creates several vectors (one for each * column) which are all colocated and are colocated with the original * bytevector. * * To do this, user should first ask for the set of keys for the new vectors * by calling addVecs method on the target group. * * Vectors in the group will have the same keys except for the prefix which * specifies index of the vector inside the group. The only information the * group object carries is it's own key and the number of vectors it * contains(deleted vectors still count). * * Because vectors(and chunks) share the same key-pattern with the group, * default group with only one vector does not have to be actually created, * it is implicit. * * @author tomasnykodym * */ public static class VectorGroup extends Iced { public static VectorGroup newVectorGroup(){ return new Vec(Vec.newKey(),(long[])null).group(); } // The common shared vector group for length==1 vectors public static VectorGroup VG_LEN1 = new VectorGroup(); final int _len; final Key _key; private VectorGroup(Key key, int len){_key = key;_len = len;} public VectorGroup() { byte[] bits = new byte[26]; bits[0] = Key.VGROUP; bits[1] = -1; UDP.set4(bits, 2, -1); UDP.set4(bits, 6, -1); UUID uu = UUID.randomUUID(); UDP.set8(bits,10,uu.getLeastSignificantBits()); UDP.set8(bits,18,uu. getMostSignificantBits()); _key = Key.make(bits); _len = 0; } public Key vecKey(int vecId){ byte [] bits = _key._kb.clone(); bits[0] = Key.VEC; UDP.set4(bits,2,vecId);// return Key.make(bits); } /** * Task to atomically add vectors into existing group. * @author tomasnykodym */ private static class AddVecs2GroupTsk extends TAtomic<VectorGroup>{ final Key _key; int _n; // INPUT: Keys to allocate; OUTPUT: start of run of keys private AddVecs2GroupTsk(Key key, int n){_key = key; _n = n;} @Override public VectorGroup atomic(VectorGroup old) { int n = _n; // how many // If the old group is missing, assume it is the default group-of-self // (having 1 ID already allocated for self), not a new group with // zero prior vectors. _n = old==null ? 1 : old._len; // start of allocated key run return new VectorGroup(_key, n+_n); } } // reserve range of keys and return index of first new available key public int reserveKeys(final int n){ AddVecs2GroupTsk tsk = new AddVecs2GroupTsk(_key, n); tsk.invoke(_key); return tsk._n; } /** * Gets the next n keys of this group. * Performs atomic update of the group object to assure we get unique keys. * The group size will be updated by adding n. * * @param n number of keys to make * @return arrays of unique keys belonging to this group. */ public Key [] addVecs(final int n){ AddVecs2GroupTsk tsk = new AddVecs2GroupTsk(_key, n); tsk.invoke(_key); Key [] res = new Key[n]; for(int i = 0; i < n; ++i) res[i] = vecKey(i + tsk._n); return res; } /** * Shortcut for addVecs(1). * @see #addVecs(int) */ public Key addVec() { return addVecs(1)[0]; } @Override public String toString() { return "VecGrp "+_key.toString()+", next free="+_len; } @Override public boolean equals( Object o ) { return o instanceof VectorGroup && ((VectorGroup)o)._key.equals(_key); } @Override public int hashCode() { return _key.hashCode(); } } /** * Method to change the domain of the Vec. 
* * Can only be applied to factors (Vec with non-null domain) and * domain can only be set to domain of the same or greater length. * * Updating the domain requires updating the Vec header in the K/V and since chunks cache Vec header references, * need to execute distributed task to flush (null) those references). * * @param newDomain */ public void changeDomain(String [] newDomain){ if(_domain == null)throw new RuntimeException("Setting a domain to a non-factor Vector, call as.Factor() instead."); if(newDomain == null)throw new RuntimeException("Can not set domain to null. You have to convert the vec to numbers explicitly"); if(newDomain.length < _domain.length) throw new RuntimeException("Setting domain to incompatible size. New domain must be at least the same length!"); _domain = newDomain; // update the vec header in the K/V DKV.put(_key,this); // now flush the cached vec header references (still pointing to the old guy) new MRTask2(){ @Override public void map(Chunk c){c._vec = null;} }.doAll(this); } /** Collect numeric domain of given vector */ public static class CollectDomain extends MRTask2<CollectDomain> { transient NonBlockingHashMapLong<Object> _uniques; @Override protected void setupLocal() { _uniques = new NonBlockingHashMapLong(); } public CollectDomain(Vec v) { } @Override public void map(Chunk ys) { for( int row=0; row<ys._len; row++ ) if( !ys.isNA0(row) ) _uniques.put(ys.at80(row),""); } @Override public void reduce(CollectDomain mrt) { if( _uniques == mrt._uniques ) return; _uniques.putAll(mrt._uniques); } @Override public AutoBuffer write( AutoBuffer ab ) { super.write(ab); return ab.putA8(_uniques==null ? null : _uniques.keySetLong()); } @Override public Freezable read( AutoBuffer ab ) { super.read(ab); assert _uniques == null || _uniques.size()==0; long ls[] = ab.getA8(); _uniques = new NonBlockingHashMapLong(); if( ls != null ) for( long l : ls ) _uniques.put(l,""); return this; } @Override public void copyOver(Freezable that) { super.copyOver(that); _uniques = ((CollectDomain)that)._uniques; } /** Returns exact numeric domain of given vector computed by this task. * The domain is always sorted. Hence: * domain()[0] - minimal domain value * domain()[domain().length-1] - maximal domain value */ public long[] domain() { long[] dom = _uniques.keySetLong(); Arrays.sort(dom); return dom; } } }
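// A minimal sketch of the element-access and Vec.Writer APIs documented above, assuming an
// H2O cloud is already up and using the makeConSeq helper to build a small constant vector;
// the sizes and values are illustrative.
import water.Futures;
import water.fvec.Vec;

class VecUsageSketch {
  static void demo() {
    Vec v = Vec.makeConSeq(0, 1000);      // 1000 rows, all zero
    double d  = v.at(42);                 // read as double; NaN if missing
    long   l  = v.at8(42);                // read as long; throws if missing
    boolean m = v.isNA(42);               // missing check
    System.out.println(d + " " + l + " " + m);

    // Bulk writes: Vec.Writer batches the chunk close/postWrite work instead of paying it per set().
    Vec.Writer vw = v.open();
    vw.set(0, 3.32);
    vw.set(1, 4.32);
    vw.setNA(2);
    vw.close();                           // flushes chunks and re-enables rollup stats

    System.out.println("min=" + v.min() + " max=" + v.max() + " NAs=" + v.naCnt());
    v.remove(new Futures()).blockForPending(); // clean up the chunks and the Vec key
  }
}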
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/VecArray.java
package water.fvec;

/**
 * Created by tomasnykodym on 4/4/14.
 */
public class VecArray {
}
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/WrappedVec.java
package water.fvec;

import water.Key;
import water.DKV;

/**
 * A simple wrapper over another Vec. Transforms either data values or rows.
 */
public abstract class WrappedVec extends Vec {
  /** Key of the underlying vector holding the values transformed by this vector. */
  final Key _masterVecKey;
  /** Cached instance of the underlying vector. */
  transient Vec _masterVec;

  public WrappedVec(Key masterVecKey, Key key, long[] espc) {
    super(key, espc);
    _masterVecKey = masterVecKey;
  }

  @Override public Vec masterVec() {
    if( _masterVec==null ) _masterVec = DKV.get(_masterVecKey).get();
    return _masterVec;
  }

  // Map from chunk-index to Chunk. These wrappers make custom Chunks.
  abstract public Chunk chunkForChunkIdx(int cidx);
}
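// A minimal sketch of a WrappedVec subclass that only delegates chunk lookups to the master
// vector, i.e. a pass-through wrapper. The class name is illustrative; real wrappers such as
// TransfVec additionally transform the values served by the returned Chunk.
import water.Key;
import water.fvec.Chunk;
import water.fvec.WrappedVec;

class PassThroughVecSketch extends WrappedVec {
  public PassThroughVecSketch(Key masterVecKey, Key key, long[] espc) {
    super(masterVecKey, key, espc);
  }
  @Override public Chunk chunkForChunkIdx(int cidx) {
    // Serve the underlying chunk unchanged; a real wrapper would wrap it in a custom Chunk.
    return masterVec().chunkForChunkIdx(cidx);
  }
}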
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/fvec/createInteractions.java
package water.fvec; import hex.Interaction; import jsr166y.CountedCompleter; import water.*; import water.util.Log; import water.util.Utils; import static water.util.Utils.IcedLong; import java.util.*; /** * Helper to create interaction features between enum columns */ public class createInteractions extends H2O.H2OCountedCompleter { public createInteractions(Interaction ci) { this(ci, null); } public createInteractions(Interaction ci, Key job) { super(null); _job = job; _ci = ci; } final private Interaction _ci; static final private int _missing = Integer.MIN_VALUE; //marker for missing factor level static final private String _other = "other"; // name for lost factor levels private Frame _target; final private Key _job; private Map<Long, Long> _sortedMap = null; private static Map<Long, Long> mySort(Map<IcedLong, IcedLong> unsortMap) { List<Map.Entry<IcedLong, IcedLong>> list = new LinkedList<Map.Entry<IcedLong, IcedLong>>(unsortMap.entrySet()); // Sorting the list based on values Collections.sort(list, new Comparator<Map.Entry<IcedLong, IcedLong>>() { public int compare(Map.Entry<IcedLong, IcedLong> o1, Map.Entry<IcedLong, IcedLong> o2) { return ((Long)o2.getValue()._val).compareTo(o1.getValue()._val); } }); // Maintaining insertion order with the help of LinkedList Map sortedMap = new LinkedHashMap<Long, Long>(); for (Map.Entry<IcedLong, IcedLong> entry : list) { sortedMap.put(entry.getKey()._val, entry.getValue()._val); } return sortedMap; } // Create a combined domain from the enum values that map to domain A and domain B // Both enum integers are combined into a long = (int,int), and the unsortedMap keeps the occurrence count for each pair-wise interaction protected String[] makeDomain(Map<IcedLong, IcedLong> unsortedMap, String[] dA, String[] dB) { String[] _domain; // Log.info("Collected hash table"); // Log.info(java.util.Arrays.deepToString(unsortedMap.entrySet().toArray())); // Log.info("Interaction between " + dA.length + " and " + dB.length + " factor levels => " + // ((long)dA.length * dB.length) + " possible factors."); _sortedMap = mySort(unsortedMap); // create domain of the most frequent unique factors long factorCount = 0; // Log.info("Found " + _sortedMap.size() + " unique interaction factors (out of " + ((long)dA.length * (long)dB.length) + ")."); _domain = new String[_sortedMap.size()]; //TODO: use ArrayList here, then convert to array Iterator it2 = _sortedMap.entrySet().iterator(); int d = 0; while (it2.hasNext()) { Map.Entry kv = (Map.Entry)it2.next(); final long ab = (Long)kv.getKey(); final long count = (Long)kv.getValue(); if (factorCount < _ci.max_factors && count >= _ci.min_occurrence) { factorCount++; // extract the two original factor enums String feature = ""; if (dA != dB) { int a = (int)(ab >> 32); final String fA = a != _missing ? dA[a] : "NA"; feature = fA + "_"; } int b = (int) ab; String fB = b != _missing ? 
dB[b] : "NA"; feature += fB; // Log.info("Adding interaction feature " + feature + ", occurrence count: " + count); // Log.info("Total number of interaction factors so far: " + factorCount); _domain[d++] = feature; } else break; } if (d < _sortedMap.size()) { // Log.info("Truncated map to " + _sortedMap.size() + " elements."); String[] copy = new String[d+1]; System.arraycopy(_domain, 0, copy, 0, d); copy[d] = _other; _domain = copy; Map tm = new LinkedHashMap<Long, Long>(); it2 = _sortedMap.entrySet().iterator(); while (--d >= 0) { Map.Entry kv = (Map.Entry) it2.next(); tm.put(kv.getKey(), kv.getValue()); } _sortedMap = tm; } // Log.info("Created domain: " + Arrays.deepToString(_domain)); return _domain; } @Override public void compute2() { DKV.remove(_ci.dest()); ArrayList<int[]> al = new ArrayList<int[]>(); if (!_ci.pairwise || _ci.factors.length < 3) { al.add(_ci.factors); } else { // pair-wise for (int i=0; i<_ci.factors.length; ++i) { for (int j=i+1; j<_ci.factors.length; ++j) { al.add(new int[]{_ci.factors[i], _ci.factors[j]}); } } } for (int l=0; l<al.size(); ++l) { int[] factors = al.get(l); int idx1 = factors[0]; Vec tmp = null; int start = factors.length == 1 ? 0 : 1; Frame _out = null; for (int i = start; i < factors.length; ++i) { String name; int idx2 = factors[i]; if (i > 1) { idx1 = _out.find(tmp); assert idx1 >= 0; name = _out._names[idx1] + "_" + _ci.source._names[idx2]; } else { name = _ci.source._names[idx1] + "_" + _ci.source._names[idx2]; } // Log.info("Combining columns " + idx1 + " and " + idx2); final Vec A = i > 1 ? _out.vecs()[idx1] : _ci.source.vecs()[idx1]; final Vec B = _ci.source.vecs()[idx2]; // Pass 1: compute unique domains of all interaction features createInteractionDomain pass1 = new createInteractionDomain(idx1 == idx2).doAll(A, B); // Create a new Vec based on the domain final Vec vec = _ci.source.anyVec().makeZero(makeDomain(pass1._unsortedMap, A.domain(), B.domain())); if (i > 1) { _out.add(name, vec); } else { assert(_out == null); _out = new Frame(new String[]{name}, new Vec[]{vec}); } final Vec C = _out.lastVec(); // Create array of enum pairs, in the same (sorted) order as in the _domain map -> for linear lookup // Note: "other" is not mapped in keys, so keys.length can be 1 less than domain.length long[] keys = new long[_sortedMap.size()]; int pos = 0; for (long k : _sortedMap.keySet()) { keys[pos++] = k; } assert (C.domain().length == keys.length || C.domain().length == keys.length + 1); // domain might contain _other // Pass 2: fill Vec values new fillInteractionEnums(idx1 == idx2, keys).doAll(A, B, C); tmp = C; // remove temporary vec if (i > 1) { final int idx = _out.vecs().length - 2; //second-last vec // Log.info("Removing column " + _out._names[idx]); _out.remove(idx); } } if (_target == null) { _target = new Frame(_ci.dest(), _out.names(), _out.vecs()); _target.delete_and_lock(_job); } else { _target.add(_out, true); } } tryComplete(); } @Override public void onCompletion(CountedCompleter caller) { _target.update(_job); _target.unlock(_job); } // Create interaction domain private static class createInteractionDomain extends MRTask2<createInteractionDomain> { // INPUT final private boolean _same; // OUTPUT private Utils.IcedHashMap<IcedLong, IcedLong> _unsortedMap = null; public createInteractionDomain(boolean same) { _same = same; } @Override public void map(Chunk A, Chunk B) { _unsortedMap = new Utils.IcedHashMap<IcedLong, IcedLong>(); // find unique interaction domain for (int r = 0; r < A._len; r++) { int a = A.isNA0(r) ? 
_missing : (int)A.at80(r); long ab; if (!_same) { int b = B.isNA0(r) ? _missing : (int)B.at80(r); // key: combine both ints into a long ab = ((long) a << 32) | (b & 0xFFFFFFFFL); assert a == (int) (ab >> 32); assert b == (int) ab; } else { if (a == _missing) continue; ab = (long)a; } // add key to hash map, and count occurrences (for pruning) IcedLong AB = new IcedLong(ab); if (_unsortedMap.containsKey(AB)) { _unsortedMap.get(AB)._val++; } else { _unsortedMap.put(AB, new IcedLong(1)); } } } @Override public void reduce(createInteractionDomain mrt) { assert(mrt._unsortedMap != null); assert(_unsortedMap != null); for (Map.Entry<IcedLong,IcedLong> e : mrt._unsortedMap.entrySet()) { IcedLong x = _unsortedMap.get(e.getKey()); if (x != null) { x._val+=e.getValue()._val; } else { _unsortedMap.put(e.getKey(), e.getValue()); } } mrt._unsortedMap = null; // Log.info("Merged hash tables"); // Log.info(java.util.Arrays.deepToString(_unsortedMap.entrySet().toArray())); } } // Fill interaction enums in last Vec in Frame private static class fillInteractionEnums extends MRTask2<fillInteractionEnums> { // INPUT boolean _same; final long[] _keys; //minimum information to be sent over the wire transient private java.util.List<java.util.Map.Entry<Long,Integer>> _valToIndex; //node-local shared helper for binary search public fillInteractionEnums(boolean same, long[] keys) { _same = same; _keys = keys; } @Override protected void setupLocal() { // turn _keys into a sorted array of pairs _valToIndex = new java.util.ArrayList<Map.Entry<Long,Integer>>(); // map factor level (int,int) to domain index (long) for (int i=0;i<_keys.length;++i) { _valToIndex.add(new AbstractMap.SimpleEntry<Long, Integer>(_keys[i], i)); } // sort by key (the factor level) Collections.sort(_valToIndex, new Comparator<Map.Entry<Long, Integer>>() { @Override public int compare(Map.Entry<Long, Integer> o1, Map.Entry<Long, Integer> o2) { return o1.getKey().compareTo(o2.getKey()); } }); } @Override public void map(Chunk A, Chunk B, Chunk C) { // find unique interaction domain for (int r = 0; r < A._len; r++) { final int a = A.isNA0(r) ? _missing : (int)A.at80(r); long ab; if (!_same) { final int b = B.isNA0(r) ? _missing : (int) B.at80(r); ab = ((long) a << 32) | (b & 0xFFFFFFFFL); // key: combine both ints into a long } else { ab = (long)a; } if (_same && A.isNA0(r)) { C.setNA0(r); } else { // find _domain index for given factor level ab int level = -1; int pos = Collections.binarySearch(_valToIndex, new AbstractMap.SimpleEntry<Long,Integer>(ab,0), new Comparator<Map.Entry<Long, Integer>>() { @Override public int compare(Map.Entry<Long, Integer> o1, Map.Entry<Long, Integer> o2) { return o1.getKey().compareTo(o2.getKey()); } }); if (pos >= 0) { level = _valToIndex.get(pos).getValue(); assert _keys[level] == ab; //confirm that binary search in _valToIndex worked } if (level < 0) { for (int i=0; i<_keys.length; ++i) { assert (_keys[i] != ab); } level = _fr.lastVec().domain().length-1; assert _fr.lastVec().domain()[level].equals(_other); } C.set0(r, level); } } } } }
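// A small self-contained sketch of the pair-packing trick used above: two 32-bit factor
// levels are combined into one long key ((a,b) -> a<<32 | b) so pairs can be counted and
// sorted in a single hash map, then unpacked again when the interaction domain is built.
class PairPackingSketch {
  static long pack(int a, int b) { return ((long) a << 32) | (b & 0xFFFFFFFFL); }
  static int  first(long ab)     { return (int) (ab >> 32); }
  static int  second(long ab)    { return (int) ab; }

  public static void main(String[] args) {
    long ab = pack(3, -7);
    System.out.println(first(ab) + "," + second(ab)); // prints 3,-7
  }
}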
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/genmodel/GenUtils.java
package water.genmodel;

import java.util.Arrays;

public class GenUtils {
  /**
   * Concatenate a given list of arrays into one long array.
   *
   * <p>Expects non-null arrays.</p>
   *
   * @param aa list of string arrays
   * @return a long array created by concatenation of the given arrays.
   */
  public static String[] concat(String[] ...aa) {
    int l = 0;
    for (String[] a : aa) l += a.length;
    String[] r = new String[l];
    l = 0;
    for (String[] a : aa) {
      System.arraycopy(a, 0, r, l, a.length);
      l += a.length;
    }
    return r;
  }

  public static String[][] array(String[] ...aa) {
    return aa;
  }

  public static int find(String name, String[] ...aa) {
    int l = 0;
    for (String[] a : aa) {
      int ii = Arrays.binarySearch(a, name);
      if (ii>=0) return l + ii;
      l += a.length;
    }
    return -1;
  }

  public static int maxIndex(float[] from, int start) {
    int result = start;
    for (int i = start; i<from.length; ++i)
      if (from[i]>from[result]) result = i;
    return result;
  }
}
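// A short sketch of the helpers above. Note that find() uses Arrays.binarySearch on each
// sub-array, so it only gives correct answers when every sub-array is sorted; the sample
// arrays here are illustrative.
import water.genmodel.GenUtils;

class GenUtilsUsageSketch {
  public static void main(String[] args) {
    String[] a = {"alpha", "beta"};
    String[] b = {"gamma"};
    String[] all = GenUtils.concat(a, b);          // ["alpha","beta","gamma"]
    int idx = GenUtils.find("gamma", a, b);        // 2 (index offset by the lengths of earlier arrays)
    float[] scores = {0.1f, 0.7f, 0.2f};
    int best = GenUtils.maxIndex(scores, 0);       // 1
    System.out.println(all.length + " " + idx + " " + best);
  }
}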
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/genmodel/GeneratedModel.java
package water.genmodel;

import java.util.Map;

/** This is a helper class to support Java generated models. */
public abstract class GeneratedModel implements IGeneratedModel {
  @Override public int getNumCols() { return getNames().length - 1; }
  @Override public int getResponseIdx () { return getNames().length - 1; }
  @Override public String getResponseName() { return getNames()[getResponseIdx()]; }
  @Override public int getNumResponseClasses() { return getNumClasses(getResponseIdx()); }
  @Override public boolean isClassifier() { return getNumResponseClasses()!=-1; }

  /**
   * Return <code>true</code> if the given index is set in the given bit array, else false.
   *
   * <p>The method returns <code>false</code> if <code>idx</code> is less than
   * <code>offset</code>. It also returns <code>false</code> if <code>idx</code>
   * is greater than the length of the given bit set.
   * </p>
   *
   * @param gcmp bit set array
   * @param offset number of leading bits omitted from the bit set (they are implicitly zero)
   * @param idx index of the bit to be checked
   * @return true if the bit at the given index is set
   */
  public static boolean grpContains(byte[] gcmp, int offset, int idx) {
    if(offset < 0) throw new IndexOutOfBoundsException("offset < 0: " + offset);
    if(idx < offset) return false;
    idx = idx - offset;
    int max_idx = (gcmp.length << 3) - 1;
    if(idx > max_idx) return false;
    return (gcmp[idx >> 3] & ((byte)1 << (idx % 8))) != 0;
  }

  @Override public int getColIdx(String name) {
    String[] names = getNames();
    for (int i=0; i<names.length; i++) if (names[i].equals(name)) return i;
    return -1;
  }
  @Override public int getNumClasses(int i) {
    String[] domval = getDomainValues(i);
    return domval!=null?domval.length:-1;
  }
  @Override public String[] getDomainValues(String name) {
    int colIdx = getColIdx(name);
    return colIdx != -1 ? getDomainValues(colIdx) : null;
  }
  @Override public String[] getDomainValues(int i) { return getDomainValues()[i]; }
  @Override public int mapEnum(int colIdx, String enumValue) {
    String[] domain = getDomainValues(colIdx);
    if (domain==null || domain.length==0) return -1;
    for (int i=0; i<domain.length;i++) if (enumValue.equals(domain[i])) return i;
    return -1;
  }
  @Override public int getPredsSize() {
    return isClassifier() ? 1+getNumResponseClasses() : 2;
  }

  /**
   * Takes a HashMap mapping column names to doubles.
   * <p>
   * Looks up the column names needed by the model, and places the doubles into the data array in
   * the order needed by the model. Missing columns use NaN.
   * </p>
   */
  public double[] map( Map<String, Double> row, double data[] ) {
    String[] colNames = getNames();
    for( int i=0; i<colNames.length-1; i++ ) {
      Double d = row.get(colNames[i]);
      data[i] = d==null ? Double.NaN : d;
    }
    return data;
  }

  // Does the mapping lookup for every row, no allocation
  public float[] predict( Map<String, Double> row, double data[], float preds[] ) {
    return predict(map(row,data),preds);
  }
  // Allocates a double[] for every row
  public float[] predict( Map<String, Double> row, float preds[] ) {
    return predict(map(row,new double[getNames().length]),preds);
  }
  // Allocates a double[] and a float[] for every row
  public float[] predict( Map<String, Double> row ) {
    return predict(map(row,new double[getNames().length]),new float[getNumResponseClasses()+1]);
  }
}
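// A minimal sketch of scoring one row through the Map-based predict API above. The concrete
// model instance would be a POJO generated by H2O (some subclass of GeneratedModel); the
// column names and values here are illustrative.
import java.util.HashMap;
import java.util.Map;

class GeneratedModelUsageSketch {
  static float[] scoreOneRow(water.genmodel.GeneratedModel model) {
    Map<String, Double> row = new HashMap<String, Double>();
    row.put("sepal_len", 5.1);             // columns missing from the map are scored as NaN
    row.put("sepal_wid", 3.5);
    float[] preds = model.predict(row);    // allocates data[] and preds[] internally
    // For a classifier: preds[0] is the predicted class, preds[1..] are per-class probabilities.
    return preds;
  }
}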
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/genmodel/IGeneratedModel.java
package water.genmodel;

import java.util.Map;

/**
 * A generic interface to access generated models.
 */
public interface IGeneratedModel {
  /** Returns the number of columns used as input for training (i.e., excludes the response column). */
  public int getNumCols();

  /** The names of columns used in the model. It contains the names of the input columns and the name of the response column. */
  public String[] getNames();

  /** The name of the response column. */
  public String getResponseName();

  /** Returns the index of the response column. */
  public int getResponseIdx();

  /** Get the number of classes in the given column.
   *  Returns a number greater than zero if the column is categorical,
   *  or -1 if the column is numeric. */
  public int getNumClasses(int i);

  /** Returns the number of classes in the response column. */
  public int getNumResponseClasses();

  /** Returns true if this model represents a classifier, else it is used for regression. */
  public boolean isClassifier();

  /** Predict the given row and return the prediction.
   *
   * @param data row holding the data. Ordering should follow the ordering of columns returned by getNames()
   * @param preds allocated array to hold the prediction
   * @return the preds parameter filled with the prediction
   */
  public float[] predict(double[] data, float[] preds);

  /** Predict the given row and return the prediction using a given number of iterations (e.g., number of trees from a forest).
   *
   * @param data row holding the data. Ordering should follow the ordering of columns returned by getNames()
   * @param preds allocated array to hold the prediction
   * @param maxIters maximum number of iterations to use during the prediction process
   * @return the preds parameter filled with the prediction
   */
  public float[] predict(double[] data, float[] preds, int maxIters);

  /** Gets the domain of the given column.
   * @param name column name
   * @return the domain for the given column, or null if the column is numeric.
   */
  public String[] getDomainValues(String name);

  /**
   * Returns domain values for the i-th column.
   * @param i index of column
   * @return the domain for the given enum column, or null if the column contains numeric values
   */
  public String[] getDomainValues(int i);

  /** Returns domain values for all columns. */
  public String[][] getDomainValues();

  /** Returns the index of the column with the given name, or -1 if the column is not found. */
  public int getColIdx(String name);

  /** Maps the given column's enum value to the integer used by this model.
   *  Returns -1 if the mapping is not found. */
  public int mapEnum(int colIdx, String enumValue);

  /**
   * Returns the expected size of the preds array which is passed to the {@link #predict(double[], float[])} function.
   * @return expected size of the preds array
   */
  public int getPredsSize();
}
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/license/LicenseManager.java
package water.license;

import water.util.Log;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Scanner;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class LicenseManager {
  public static final String FEATURE_GLM_SCORING = "glm_scoring";
  public static final String FEATURE_GBM_SCORING = "gbm_scoring";
  public static final String FEATURE_RF_SCORING = "rf_scoring";
  public static final String FEATURE_DEEPLEARNING_SCORING = "deeplearning_scoring";

  public enum Result {
    OK,
    FILE_ERROR,
    SIGNATURE_ERROR,
    EXPIRED_ERROR
  }

  private String _license;

  public LicenseManager() {}

  private String readFile(String fileName) throws IOException {
    BufferedReader br = new BufferedReader(new FileReader(fileName));
    try {
      StringBuilder sb = new StringBuilder();
      String line = br.readLine();
      while (line != null) {
        sb.append(line);
        sb.append("\n");
        line = br.readLine();
      }
      return sb.toString();
    } finally {
      br.close();
    }
  }

  public Result readLicenseFile(String fileName) {
    // Check if there is a file issue.
    String s;
    try {
      s = readFile(fileName);
    } catch (Exception e) {
      Log.err("readFile failed", e);
      return Result.FILE_ERROR;
    }

    // Check if license is not correctly signed.

    // Check if license is expired.

    _license = s;
    return Result.OK;
  }

  public void logLicensedFeatures() {
    ArrayList<String> featureNameList = new ArrayList<String>();
    featureNameList.add(FEATURE_DEEPLEARNING_SCORING);
    featureNameList.add(FEATURE_GBM_SCORING);
    featureNameList.add(FEATURE_GLM_SCORING);
    featureNameList.add(FEATURE_RF_SCORING);

    boolean silent = true;
    for (String featureName : featureNameList) {
      boolean b = isFeatureAllowed(featureName, silent);
      Log.info("isFeatureAllowed(" + featureName + "): " + b);
    }
  }

  public boolean isFeatureAllowed(String featureName) {
    boolean silent = false;
    return isFeatureAllowed(featureName, silent);
  }

  public boolean isFeatureAllowed(String featureName, boolean silent) {
    if (_license == null) {
      if (! silent) {
        Log.info("isFeatureAllowed(" + featureName + ") is false (no valid license found)");
      }
      return false;
    }

    Pattern p = Pattern.compile("Feature:\\s*(\\S*)\\s*");
    Scanner scanner = new Scanner(_license);
    while (scanner.hasNextLine()) {
      String line = scanner.nextLine();
      Matcher m = p.matcher(line);
      boolean b = m.matches();
      if (! b) {
        continue;
      }
      String lineFeatureName = m.group(1);
      if (featureName.equals(lineFeatureName)) {
        return true;
      }
    }
    scanner.close();

    if (! silent) {
      Log.info("isFeatureAllowed(" + featureName + ") is false (feature not licensed)");
    }
    return false;
  }
}
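// A short sketch of loading a license file and querying features. The file path is
// illustrative; judging from the regex above, a licensed feature appears in the file as a
// line of the form "Feature: glm_scoring".
import water.license.LicenseManager;

class LicenseManagerUsageSketch {
  public static void main(String[] args) {
    LicenseManager lm = new LicenseManager();
    LicenseManager.Result r = lm.readLicenseFile("/path/to/h2o.license"); // illustrative path
    if (r == LicenseManager.Result.OK) {
      lm.logLicensedFeatures();
      boolean glmOk = lm.isFeatureAllowed(LicenseManager.FEATURE_GLM_SCORING);
      System.out.println("GLM scoring licensed: " + glmOk);
    } else {
      System.out.println("License not loaded: " + r);
    }
  }
}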
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/nbhm/AbstractEntry.java
package water.nbhm; import java.util.Map; /* * Written by Cliff Click and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain */ /** * A simple implementation of {@link java.util.Map.Entry}. * Does not implement {@link java.util.Map.Entry.setValue}, that is done by users of the class. * * @since 1.5 * @author Cliff Click * @param <TypeK> the type of keys maintained by this map * @param <TypeV> the type of mapped values */ abstract class AbstractEntry<TypeK,TypeV> implements Map.Entry<TypeK,TypeV> { /** Strongly typed key */ protected final TypeK _key; /** Strongly typed value */ protected TypeV _val; public AbstractEntry(final TypeK key, final TypeV val) { _key = key; _val = val; } public AbstractEntry(final Map.Entry<TypeK,TypeV> e ) { _key = e.getKey(); _val = e.getValue(); } /** Return "key=val" string */ public String toString() { return _key + "=" + _val; } /** Return key */ public TypeK getKey () { return _key; } /** Return val */ public TypeV getValue() { return _val; } /** Equal if the underlying key & value are equal */ public boolean equals(final Object o) { if (!(o instanceof Map.Entry)) return false; final Map.Entry e = (Map.Entry)o; return eq(_key, e.getKey()) && eq(_val, e.getValue()); } /** Compute <code>"key.hashCode() ^ val.hashCode()"</code> */ public int hashCode() { return ((_key == null) ? 0 : _key.hashCode()) ^ ((_val == null) ? 0 : _val.hashCode()); } private static boolean eq(final Object o1, final Object o2) { return (o1 == null ? o2 == null : o1.equals(o2)); } }
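AbstractEntry deliberately leaves Map.Entry.setValue to its users. Since the class is package-private to water.nbhm, the sketch below does not extend it directly; it is an independent entry that keeps the same equals/hashCode contract and gives setValue the simplest possible meaning, updating only the local copy rather than writing through to any map.

package water.nbhm.examples;

import java.util.Map;

/** Sketch of a concrete Map.Entry following the AbstractEntry pattern above. */
public class LocalEntry<K, V> implements Map.Entry<K, V> {
  private final K _key;
  private V _val;

  public LocalEntry(K key, V val) { _key = key; _val = val; }

  public K getKey()   { return _key; }
  public V getValue() { return _val; }

  /** Updates only this entry object, not any backing map. */
  public V setValue(V val) { V old = _val; _val = val; return old; }

  @Override public boolean equals(Object o) {
    if (!(o instanceof Map.Entry)) return false;
    Map.Entry<?, ?> e = (Map.Entry<?, ?>) o;
    return eq(_key, e.getKey()) && eq(_val, e.getValue());
  }
  @Override public int hashCode() {
    return (_key == null ? 0 : _key.hashCode()) ^ (_val == null ? 0 : _val.hashCode());
  }
  @Override public String toString() { return _key + "=" + _val; }

  private static boolean eq(Object a, Object b) { return a == null ? b == null : a.equals(b); }
}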
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/nbhm/ConcurrentAutoTable.java
package water.nbhm; import java.io.Serializable; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import sun.misc.Unsafe; /* * Written by Cliff Click and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain */ /** * An auto-resizing table of {@code longs}, supporting low-contention CAS * operations. Updates are done with CAS's to no particular table element. * The intent is to support highly scalable counters, r/w locks, and other * structures where the updates are associative, loss-free (no-brainer), and * otherwise happen at such a high volume that the cache contention for * CAS'ing a single word is unacceptable. * * <p>This API is overkill for simple counters (e.g. no need for the 'mask') * and is untested as an API for making a scalable r/w lock and so is likely * to change! * * @since 1.5 * @author Cliff Click */ public class ConcurrentAutoTable implements Serializable { // --- public interface --- /** * Add the given value to current counter value. Concurrent updates will * not be lost, but addAndGet or getAndAdd are not implemented because the * total counter value (i.e., {@link #get}) is not atomically updated. * Updates are striped across an array of counters to avoid cache contention * and has been tested with performance scaling linearly up to 768 CPUs. */ public void add( long x ) { add_if_mask( x,0); } /** {@link #add} with -1 */ public void decrement() { add_if_mask(-1L,0); } /** {@link #add} with +1 */ public void increment() { add_if_mask( 1L,0); } /** Atomically set the sum of the striped counters to specified value. * Rather more expensive than a simple store, in order to remain atomic. */ public void set( long x ) { CAT newcat = new CAT(null,4,x); // Spin until CAS works while( !CAS_cat(_cat,newcat) ); } /** * Current value of the counter. Since other threads are updating furiously * the value is only approximate, but it includes all counts made by the * current thread. Requires a pass over the internally striped counters. */ public long get() { return _cat.sum(0); } /** Same as {@link #get}, included for completeness. */ public int intValue() { return (int)_cat.sum(0); } /** Same as {@link #get}, included for completeness. */ public long longValue() { return _cat.sum(0); } /** * A cheaper {@link #get}. Updated only once/millisecond, but as fast as a * simple load instruction when not updating. */ public long estimate_get( ) { return _cat.estimate_sum(0); } /** * Return the counter's {@code long} value converted to a string. */ public String toString() { return _cat.toString(0); } /** * A more verbose print than {@link #toString}, showing internal structure. * Useful for debugging. */ public void print() { _cat.print(); } /** * Return the internal counter striping factor. Useful for diagnosing * performance problems. */ public int internal_size() { return _cat._t.length; } // Only add 'x' to some slot in table, hinted at by 'hash', if bits under // the mask are all zero. The sum can overflow or 'x' can contain bits in // the mask. Value is CAS'd so no counts are lost. The CAS is retried until // it succeeds or bits are found under the mask. Returned value is the old // value - which WILL have zero under the mask on success and WILL NOT have // zero under the mask for failure. 
private long add_if_mask( long x, long mask ) { return _cat.add_if_mask(x,mask,hash(),this); } // The underlying array of concurrently updated long counters private volatile CAT _cat = new CAT(null,4/*Start Small, Think Big!*/,0L); private static final AtomicReferenceFieldUpdater<ConcurrentAutoTable,CAT> _catUpdater = AtomicReferenceFieldUpdater.newUpdater(ConcurrentAutoTable.class,CAT.class, "_cat"); private boolean CAS_cat( CAT oldcat, CAT newcat ) { return _catUpdater.compareAndSet(this,oldcat,newcat); } // Hash spreader private static final int hash() { int h = System.identityHashCode(Thread.currentThread()); // You would think that System.identityHashCode on the current thread // would be a good hash fcn, but actually on SunOS 5.8 it is pretty lousy // in the low bits. h ^= (h>>>20) ^ (h>>>12); // Bit spreader, borrowed from Doug Lea h ^= (h>>> 7) ^ (h>>> 4); return h<<2; // Pad out cache lines. The goal is to avoid cache-line contention } // --- CAT ----------------------------------------------------------------- private static class CAT implements Serializable { // Unsafe crud: get a function which will CAS arrays private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); private static final int _Lbase = _unsafe.arrayBaseOffset(long[].class); private static final int _Lscale = _unsafe.arrayIndexScale(long[].class); private static long rawIndex(long[] ary, int i) { assert i >= 0 && i < ary.length; return _Lbase + i * _Lscale; } private final static boolean CAS( long[] A, int idx, long old, long nnn ) { return _unsafe.compareAndSwapLong( A, rawIndex(A,idx), old, nnn ); } volatile long _resizers; // count of threads attempting a resize static private final AtomicLongFieldUpdater<CAT> _resizerUpdater = AtomicLongFieldUpdater.newUpdater(CAT.class, "_resizers"); private final CAT _next; private volatile long _fuzzy_sum_cache; private volatile long _fuzzy_time; private static final int MAX_SPIN=2; private final long[] _t; // Power-of-2 array of longs CAT( CAT next, int sz, long init ) { _next = next; _t = new long[sz]; _t[0] = init; } // Only add 'x' to some slot in table, hinted at by 'hash', if bits under // the mask are all zero. The sum can overflow or 'x' can contain bits in // the mask. Value is CAS'd so no counts are lost. The CAS is attempted // ONCE. public long add_if_mask( long x, long mask, int hash, ConcurrentAutoTable master ) { long[] t = _t; int idx = hash & (t.length-1); // Peel loop; try once fast long old = t[idx]; if( (old&mask) != 0 ) return old; // Failed for bit-set under mask boolean ok = CAS( t, idx, old&~mask, old+x ); if( ok ) return old; // Got it // Try harder int cnt=0; while( true ) { old = t[idx]; if( (old&mask) != 0 ) return old; // Failed for bit-set under mask if( CAS( t, idx, old, old+x ) ) break; // Got it! cnt++; } if( cnt < MAX_SPIN ) return old; // Allowable spin loop count if( t.length >= 1024*1024 ) return old; // too big already // Too much contention; double array size in an effort to reduce contention long r = _resizers; int newbytes = (t.length<<1)<<3/*word to bytes*/; while( !_resizerUpdater.compareAndSet(this,r,r+newbytes) ) r = _resizers; r += newbytes; if( master._cat != this ) return old; // Already doubled, don't bother if( (r>>17) != 0 ) { // Already too much allocation attempts? // TODO - use a wait with timeout, so we'll wakeup as soon as the new // table is ready, or after the timeout in any case. Annoyingly, this // breaks the non-blocking property - so for now we just briefly sleep. 
//synchronized( this ) { wait(8*megs); } // Timeout - we always wakeup try { Thread.sleep(r>>17); } catch( InterruptedException e ) { } if( master._cat != this ) return old; } CAT newcat = new CAT(this,t.length*2,0); // Take 1 stab at updating the CAT with the new larger size. If this // fails, we assume some other thread already expanded the CAT - so we // do not need to retry until it succeeds. master.CAS_cat(this,newcat); return old; } // Return the current sum of all things in the table, stripping off mask // before the add. Writers can be updating the table furiously, so the // sum is only locally accurate. public long sum( long mask ) { long sum = _next == null ? 0 : _next.sum(mask); // Recursively get cached sum final long[] t = _t; for( int i=0; i<t.length; i++ ) sum += t[i]&(~mask); return sum; } // Fast fuzzy version. Used a cached value until it gets old, then re-up // the cache. public long estimate_sum( long mask ) { // For short tables, just do the work if( _t.length <= 64 ) return sum(mask); // For bigger tables, periodically freshen a cached value long millis = System.currentTimeMillis(); if( _fuzzy_time != millis ) { // Time marches on? _fuzzy_sum_cache = sum(mask); // Get sum the hard way _fuzzy_time = millis; // Indicate freshness of cached value } return _fuzzy_sum_cache; // Return cached sum } // Update all table slots with CAS. public void all_or ( long mask ) { long[] t = _t; for( int i=0; i<t.length; i++ ) { boolean done = false; while( !done ) { long old = t[i]; done = CAS(t,i, old, old|mask ); } } if( _next != null ) _next.all_or(mask); } public void all_and( long mask ) { long[] t = _t; for( int i=0; i<t.length; i++ ) { boolean done = false; while( !done ) { long old = t[i]; done = CAS(t,i, old, old&mask ); } } if( _next != null ) _next.all_and(mask); } // Set/stomp all table slots. No CAS. public void all_set( long val ) { long[] t = _t; for( int i=0; i<t.length; i++ ) t[i] = val; if( _next != null ) _next.all_set(val); } String toString( long mask ) { return Long.toString(sum(mask)); } public void print() { long[] t = _t; System.out.print("["+t[0]); for( int i=1; i<t.length; i++ ) System.out.print(","+t[i]); System.out.print("]"); if( _next != null ) _next.print(); } } }
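A sketch of the counter above in use: several threads hammer increment(), each CAS landing on one of the striped slots, and get() sums the stripes at the end. Thread and iteration counts are arbitrary illustrative values.

package water.nbhm.examples;

import water.nbhm.ConcurrentAutoTable;

/** Sketch of ConcurrentAutoTable used as a low-contention shared counter. */
public class StripedCounterDemo {
  public static void main(String[] args) throws InterruptedException {
    final ConcurrentAutoTable counter = new ConcurrentAutoTable();
    final int threads = 8;
    final int perThread = 1000000;

    Thread[] ts = new Thread[threads];
    for (int t = 0; t < threads; t++) {
      ts[t] = new Thread(new Runnable() {
        public void run() {
          for (int i = 0; i < perThread; i++)
            counter.increment();            // CAS onto one of several striped slots
        }
      });
      ts[t].start();
    }
    for (Thread t : ts) t.join();

    // get() sums all stripes; estimate_get() is refreshed at most once per millisecond.
    System.out.println("exact   = " + counter.get());          // threads * perThread
    System.out.println("approx  = " + counter.estimate_get());
    System.out.println("stripes = " + counter.internal_size());
  }
}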
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/nbhm/NonBlockingHashMap.java
package water.nbhm; import java.io.IOException; import java.io.Serializable; import java.lang.reflect.Field; import java.util.AbstractCollection; import java.util.AbstractMap; import java.util.AbstractSet; import java.util.Collection; import java.util.ConcurrentModificationException; import java.util.Enumeration; import java.util.HashMap; import java.util.Hashtable; import java.util.Iterator; import java.util.Map; import java.util.NoSuchElementException; import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import sun.misc.Unsafe; /* * Written by Cliff Click and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain */ /** * A lock-free alternate implementation of {@link java.util.concurrent.ConcurrentHashMap} * with better scaling properties and generally lower costs to mutate the Map. * It provides identical correctness properties as ConcurrentHashMap. All * operations are non-blocking and multi-thread safe, including all update * operations. {@link NonBlockingHashMap} scales substatially better than * {@link java.util.concurrent.ConcurrentHashMap} for high update rates, even with a * large concurrency factor. Scaling is linear up to 768 CPUs on a 768-CPU * Azul box, even with 100% updates or 100% reads or any fraction in-between. * Linear scaling up to all cpus has been observed on a 32-way Sun US2 box, * 32-way Sun Niagra box, 8-way Intel box and a 4-way Power box. * * This class obeys the same functional specification as {@link * java.util.Hashtable}, and includes versions of methods corresponding to * each method of <tt>Hashtable</tt>. However, even though all operations are * thread-safe, operations do <em>not</em> entail locking and there is * <em>not</em> any support for locking the entire table in a way that * prevents all access. This class is fully interoperable with * <tt>Hashtable</tt> in programs that rely on its thread safety but not on * its synchronization details. * * <p> Operations (including <tt>put</tt>) generally do not block, so may * overlap with other update operations (including other <tt>puts</tt> and * <tt>removes</tt>). Retrievals reflect the results of the most recently * <em>completed</em> update operations holding upon their onset. For * aggregate operations such as <tt>putAll</tt>, concurrent retrievals may * reflect insertion or removal of only some entries. Similarly, Iterators * and Enumerations return elements reflecting the state of the hash table at * some point at or since the creation of the iterator/enumeration. They do * <em>not</em> throw {@link ConcurrentModificationException}. However, * iterators are designed to be used by only one thread at a time. * * <p> Very full tables, or tables with high reprobe rates may trigger an * internal resize operation to move into a larger table. Resizing is not * terribly expensive, but it is not free either; during resize operations * table throughput may drop somewhat. All threads that visit the table * during a resize will 'help' the resizing but will still be allowed to * complete their operation before the resize is finished (i.e., a simple * 'get' operation on a million-entry table undergoing resizing will not need * to block until the entire million entries are copied). * * <p>This class and its views and iterators implement all of the * <em>optional</em> methods of the {@link Map} and {@link Iterator} * interfaces. 
* * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class * does <em>not</em> allow <tt>null</tt> to be used as a key or value. * * * @since 1.5 * @author Cliff Click * @param <TypeK> the type of keys maintained by this map * @param <TypeV> the type of mapped values */ public class NonBlockingHashMap<TypeK, TypeV> extends AbstractMap<TypeK, TypeV> implements ConcurrentMap<TypeK, TypeV>, Cloneable, Serializable { private static final long serialVersionUID = 1234123412341234123L; private static final int REPROBE_LIMIT=10; // Too many reprobes then force a table-resize // --- Bits to allow Unsafe access to arrays private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); private static final int _Obase = _unsafe.arrayBaseOffset(Object[].class); private static final int _Oscale = _unsafe.arrayIndexScale(Object[].class); private static long rawIndex(final Object[] ary, final int idx) { assert idx >= 0 && idx < ary.length; return _Obase + idx * _Oscale; } // --- Setup to use Unsafe private static final long _kvs_offset; static { // <clinit> Field f = null; try { f = NonBlockingHashMap.class.getDeclaredField("_kvs"); } catch( java.lang.NoSuchFieldException e ) { throw new RuntimeException(e); } _kvs_offset = _unsafe.objectFieldOffset(f); } private final boolean CAS_kvs( final Object[] oldkvs, final Object[] newkvs ) { return _unsafe.compareAndSwapObject(this, _kvs_offset, oldkvs, newkvs ); } // --- Adding a 'prime' bit onto Values via wrapping with a junk wrapper class private static final class Prime { final Object _V; Prime( Object V ) { _V = V; } static Object unbox( Object V ) { return V instanceof Prime ? ((Prime)V)._V : V; } } // --- hash ---------------------------------------------------------------- // Helper function to spread lousy hashCodes private static final int hash(final Object key) { int h = key.hashCode(); // The real hashCode call h ^= (h>>>20) ^ (h>>>12); h ^= (h>>> 7) ^ (h>>> 4); return h; } // --- The Hash Table -------------------- // Slot 0 is always used for a 'CHM' entry below to hold the interesting // bits of the hash table. Slot 1 holds full hashes as an array of ints. // Slots {2,3}, {4,5}, etc hold {Key,Value} pairs. The entire hash table // can be atomically replaced by CASing the _kvs field. // // Why is CHM buried inside the _kvs Object array, instead of the other way // around? The CHM info is used during resize events and updates, but not // during standard 'get' operations. I assume 'get' is much more frequent // than 'put'. 'get' can skip the extra indirection of skipping through the // CHM to reach the _kvs array. private transient Object[] _kvs; public Object[] kvs() {return _kvs;} private static final CHM chm (Object[] kvs) { return (CHM )kvs[0]; } private static final int[] hashes(Object[] kvs) { return (int[])kvs[1]; } // Number of K,V pairs in the table private static final int len(Object[] kvs) { return (kvs.length-2)>>1; } // Time since last resize private transient long _last_resize_milli; // --- Minimum table size ---------------- // Pick size 8 K/V pairs, which turns into (8*2+2)*4+12 = 84 bytes on a // standard 32-bit HotSpot, and (8*2+2)*8+12 = 156 bytes on 64-bit Azul. private static final int MIN_SIZE_LOG=3; // private static final int MIN_SIZE=(1<<MIN_SIZE_LOG); // Must be power of 2 // --- Sentinels ------------------------- // No-Match-Old - putIfMatch does updates only if it matches the old value, // and NO_MATCH_OLD basically counts as a wildcard match. 
private static final Object NO_MATCH_OLD = new Object(); // Sentinel // Match-Any-not-null - putIfMatch does updates only if it find a real old // value. private static final Object MATCH_ANY = new Object(); // Sentinel // This K/V pair has been deleted (but the Key slot is forever claimed). // The same Key can be reinserted with a new value later. public static final Object TOMBSTONE = new Object(); // Prime'd or box'd version of TOMBSTONE. This K/V pair was deleted, then a // table resize started. The K/V pair has been marked so that no new // updates can happen to the old table (and since the K/V pair was deleted // nothing was copied to the new table). private static final Prime TOMBPRIME = new Prime(TOMBSTONE); // A sentinel to indicate that this table is locked as readOnly. It is // slipped over the _newkvs field, and prevents any more keys from being // inserted, and eventually prevents any changes of any kind. public static final Object[] READONLY = new Object[0]; // --- key,val ------------------------------------------------------------- // Access K,V for a given idx // // Note that these are static, so that the caller is forced to read the _kvs // field only once, and share that read across all key/val calls - lest the // _kvs field move out from under us and back-to-back key & val calls refer // to different _kvs arrays. private static final Object key(Object[] kvs,int idx) { return kvs[(idx<<1)+2]; } private static final Object val(Object[] kvs,int idx) { return kvs[(idx<<1)+3]; } private static final boolean CAS_key( Object[] kvs, int idx, Object old, Object key ) { return _unsafe.compareAndSwapObject( kvs, rawIndex(kvs,(idx<<1)+2), old, key ); } private static final boolean CAS_val( Object[] kvs, int idx, Object old, Object val ) { return _unsafe.compareAndSwapObject( kvs, rawIndex(kvs,(idx<<1)+3), old, val ); } // --- dump ---------------------------------------------------------------- /** Verbose printout of table internals, useful for debugging. */ public final void print() { System.out.println("========="); print2(_kvs); System.out.println("========="); } // print the entire state of the table private final void print( Object[] kvs ) { for( int i=0; i<len(kvs); i++ ) { Object K = key(kvs,i); if( K != null ) { String KS = (K == TOMBSTONE) ? "XXX" : K.toString(); Object V = val(kvs,i); Object U = Prime.unbox(V); String p = (V==U) ? "" : "prime_"; String US = (U == TOMBSTONE) ? "tombstone" : U.toString(); System.out.println(""+i+" ("+KS+","+p+US+")"); } } Object[] newkvs = chm(kvs)._newkvs; // New table, if any if( newkvs != null ) { System.out.println("----"); print(newkvs); } } // print only the live values, broken down by the table they are in private final void print2( Object[] kvs) { for( int i=0; i<len(kvs); i++ ) { Object key = key(kvs,i); Object val = val(kvs,i); Object U = Prime.unbox(val); if( key != null && key != TOMBSTONE && // key is sane val != null && U != TOMBSTONE ) { // val is sane String p = (val==U) ? "" : "prime_"; System.out.println(""+i+" ("+key+","+p+val+")"); } } Object[] newkvs = chm(kvs)._newkvs; // New table, if any if( newkvs != null ) { System.out.println("----"); print2(newkvs); } } // Count of reprobes private transient ConcurrentAutoTable _reprobes = new ConcurrentAutoTable(); /** Get and clear the current count of reprobes. Reprobes happen on key * collisions, and a high reprobe rate may indicate a poor hash function or * weaknesses in the table resizing function. 
* @return the count of reprobes since the last call to {@link #reprobes} * or since the table was created. */ public long reprobes() { long r = _reprobes.get(); _reprobes = new ConcurrentAutoTable(); return r; } // --- reprobe_limit ----------------------------------------------------- // Heuristic to decide if we have reprobed toooo many times. Running over // the reprobe limit on a 'get' call acts as a 'miss'; on a 'put' call it // can trigger a table resize. Several places must have exact agreement on // what the reprobe_limit is, so we share it here. private static final int reprobe_limit( int len ) { return REPROBE_LIMIT + (len>>2); } // --- NonBlockingHashMap -------------------------------------------------- // Constructors /** Create a new NonBlockingHashMap with default minimum size (currently set * to 8 K/V pairs or roughly 84 bytes on a standard 32-bit JVM). */ public NonBlockingHashMap( ) { this(MIN_SIZE); } /** Create a new NonBlockingHashMap with initial room for the given number of * elements, thus avoiding internal resizing operations to reach an * appropriate size. Large numbers here when used with a small count of * elements will sacrifice space for a small amount of time gained. The * initial size will be rounded up internally to the next larger power of 2. */ public NonBlockingHashMap( final int initial_sz ) { initialize(initial_sz); } private final void initialize( int initial_sz ) { if( initial_sz < 0 ) throw new IllegalArgumentException(); int i; // Convert to next largest power-of-2 if( initial_sz > 1024*1024 ) initial_sz = 1024*1024; for( i=MIN_SIZE_LOG; (1<<i) < (initial_sz<<2); i++ ) ; // Double size for K,V pairs, add 1 for CHM and 1 for hashes _kvs = new Object[((1<<i)<<1)+2]; _kvs[0] = new CHM(new ConcurrentAutoTable()); // CHM in slot 0 _kvs[1] = new int[1<<i]; // Matching hash entries _last_resize_milli = System.currentTimeMillis(); } // Version for subclassed readObject calls, to be called after the defaultReadObject protected final void initialize() { initialize(MIN_SIZE); } // --- wrappers ------------------------------------------------------------ /** Returns the number of key-value mappings in this map. * @return the number of key-value mappings in this map */ @Override public int size ( ) { return chm(_kvs).size(); } /** Returns <tt>size() == 0</tt>. * @return <tt>size() == 0</tt> */ @Override public boolean isEmpty ( ) { return size() == 0; } /** Tests if the key in the table using the <tt>equals</tt> method. * @return <tt>true</tt> if the key is in the table using the <tt>equals</tt> method * @throws NullPointerException if the specified key is null */ @Override public boolean containsKey( Object key ) { return get(key) != null; } /** Legacy method testing if some key maps into the specified value in this * table. This method is identical in functionality to {@link * #containsValue}, and exists solely to ensure full compatibility with * class {@link java.util.Hashtable}, which supported this method prior to * introduction of the Java Collections framework. * @param val a value to search for * @return <tt>true</tt> if this map maps one or more keys to the specified value * @throws NullPointerException if the specified value is null */ public boolean contains ( Object val ) { return containsValue(val); } /** Maps the specified key to the specified value in the table. Neither key * nor value can be null. * <p> The value can be retrieved by calling {@link #get} with a key that is * equal to the original key. 
* @param key key with which the specified value is to be associated * @param val value to be associated with the specified key * @return the previous value associated with <tt>key</tt>, or * <tt>null</tt> if there was no mapping for <tt>key</tt> * @throws NullPointerException if the specified key or value is null */ @Override public TypeV put ( TypeK key, TypeV val ) { return putIfMatch( key, val, NO_MATCH_OLD); } /** Atomically, do a {@link #put} if-and-only-if the key is not mapped. * Useful to ensure that only a single mapping for the key exists, even if * many threads are trying to create the mapping in parallel. * @return the previous value associated with the specified key, * or <tt>null</tt> if there was no mapping for the key * @throws NullPointerException if the specified key or value is null */ public TypeV putIfAbsent( TypeK key, TypeV val ) { return putIfMatch( key, val, TOMBSTONE ); } /** Removes the key (and its corresponding value) from this map. * This method does nothing if the key is not in the map. * @return the previous value associated with <tt>key</tt>, or * <tt>null</tt> if there was no mapping for <tt>key</tt> * @throws NullPointerException if the specified key is null */ @Override public TypeV remove ( Object key ) { return putIfMatch( key,TOMBSTONE, NO_MATCH_OLD); } /** Atomically do a {@link #remove(Object)} if-and-only-if the key is mapped * to a value which is <code>equals</code> to the given value. * @throws NullPointerException if the specified key or value is null */ public boolean remove ( Object key,Object val ) { return putIfMatch( key,TOMBSTONE, val ) == val; } /** Atomically do a <code>put(key,val)</code> if-and-only-if the key is * mapped to some value already. * @throws NullPointerException if the specified key or value is null */ public TypeV replace ( TypeK key, TypeV val ) { return putIfMatch( key, val,MATCH_ANY ); } /** Atomically do a <code>put(key,newValue)</code> if-and-only-if the key is * mapped a value which is <code>equals</code> to <code>oldValue</code>. * @throws NullPointerException if the specified key or value is null */ public boolean replace ( TypeK key, TypeV oldValue, TypeV newValue ) { return putIfMatch( key, newValue, oldValue ) == oldValue; } // Atomically replace newVal for oldVal, returning the value that existed // there before, or READONLY. If the oldVal matches the returned value, // then the put inserted newVal, otherwise it failed. public final TypeV putIfMatchUnlocked( Object key, Object newVal, Object oldVal ) { if( oldVal == null ) oldVal = TOMBSTONE; if( newVal == null ) newVal = TOMBSTONE; final TypeV res = (TypeV)putIfMatch( this, _kvs, key, newVal, oldVal ); assert !(res instanceof Prime); //assert res != null; return res == TOMBSTONE ? null : res; } public final TypeV putIfMatch( Object key, Object newVal, Object oldVal ) { if (oldVal == null || newVal == null) throw new NullPointerException(); final Object res = putIfMatch( this, _kvs, key, newVal, oldVal ); assert !(res instanceof Prime); assert res != null; return res == TOMBSTONE ? null : (TypeV)res; } /** Copies all of the mappings from the specified map to this one, replacing * any existing mappings. * @param m mappings to be stored in this map */ @Override public void putAll(Map<? extends TypeK, ? extends TypeV> m) { for (Map.Entry<? extends TypeK, ? extends TypeV> e : m.entrySet()) put(e.getKey(), e.getValue()); } /** Removes all of the mappings from this map. 
*/ @Override public void clear() { // Smack a new empty table down Object[] newkvs = new NonBlockingHashMap(MIN_SIZE)._kvs; while( !CAS_kvs(_kvs,newkvs) ) // Spin until the clear works ; } /** Returns <tt>true</tt> if this Map maps one or more keys to the specified * value. <em>Note</em>: This method requires a full internal traversal of the * hash table and is much slower than {@link #containsKey}. * @param val value whose presence in this map is to be tested * @return <tt>true</tt> if this map maps one or more keys to the specified value * @throws NullPointerException if the specified value is null */ @Override public boolean containsValue( final Object val ) { if( val == null ) throw new NullPointerException(); for( TypeV V : values() ) if( V == val || V.equals(val) ) return true; return false; } // This function is supposed to do something for Hashtable, and the JCK // tests hang until it gets called... by somebody ... for some reason, // any reason.... protected void rehash() { } /** * Creates a shallow copy of this hashtable. All the structure of the * hashtable itself is copied, but the keys and values are not cloned. * This is a relatively expensive operation. * * @return a clone of the hashtable. */ @Override public Object clone() { try { // Must clone, to get the class right; NBHM might have been // extended so it would be wrong to just make a new NBHM. NonBlockingHashMap<TypeK,TypeV> t = (NonBlockingHashMap<TypeK,TypeV>) super.clone(); // But I don't have an atomic clone operation - the underlying _kvs // structure is undergoing rapid change. If I just clone the _kvs // field, the CHM in _kvs[0] won't be in sync. // // Wipe out the cloned array (it was shallow anyways). t.clear(); // Now copy sanely for( TypeK K : keySet() ) { final TypeV V = get(K); // Do an official 'get' t.put(K,V); } return t; } catch (CloneNotSupportedException e) { // this shouldn't happen, since we are Cloneable throw new InternalError(); } } /** * Returns a string representation of this map. The string representation * consists of a list of key-value mappings in the order returned by the * map's <tt>entrySet</tt> view's iterator, enclosed in braces * (<tt>"{}"</tt>). Adjacent mappings are separated by the characters * <tt>", "</tt> (comma and space). Each key-value mapping is rendered as * the key followed by an equals sign (<tt>"="</tt>) followed by the * associated value. Keys and values are converted to strings as by * {@link String#valueOf(Object)}. * * @return a string representation of this map */ @Override public String toString() { Iterator<Entry<TypeK,TypeV>> i = entrySet().iterator(); if( !i.hasNext()) return "{}"; StringBuilder sb = new StringBuilder(); sb.append('{'); for (;;) { Entry<TypeK,TypeV> e = i.next(); TypeK key = e.getKey(); TypeV value = e.getValue(); sb.append(key == this ? "(this Map)" : key); sb.append('='); sb.append(value == this ? "(this Map)" : value); if( !i.hasNext()) return sb.append('}').toString(); sb.append(", "); } } // --- keyeq --------------------------------------------------------------- // Check for key equality. Try direct pointer compare first, then see if // the hashes are unequal (fast negative test) and finally do the full-on // 'equals' v-call. private static boolean keyeq( Object K, Object key, int[] hashes, int hash, int fullhash ) { return K==key || // Either keys match exactly OR // hash exists and matches? hash can be zero during the install of a // new key/value pair. 
((hashes[hash] == 0 || hashes[hash] == fullhash) && // Do not call the users' "equals()" call with a Tombstone, as this can // surprise poorly written "equals()" calls that throw exceptions // instead of simply returning false. K != TOMBSTONE && // Do not call users' equals call with a Tombstone // Do the match the hard way - with the users' key being the loop- // invariant "this" pointer. I could have flipped the order of // operands (since equals is commutative), but I'm making mega-morphic // v-calls in a reprobing loop and nailing down the 'this' argument // gives both the JIT and the hardware a chance to prefetch the call target. key.equals(K)); // Finally do the hard match } // --- get ----------------------------------------------------------------- /** Returns the value to which the specified key is mapped, or {@code null} * if this map contains no mapping for the key. * <p>More formally, if this map contains a mapping from a key {@code k} to * a value {@code v} such that {@code key.equals(k)}, then this method * returns {@code v}; otherwise it returns {@code null}. (There can be at * most one such mapping.) * @throws NullPointerException if the specified key is null */ // Never returns a Prime nor a Tombstone. @Override public TypeV get( Object key ) { final Object V = get_impl(this,_kvs,key); assert !(V instanceof Prime); // Never return a Prime assert V != TOMBSTONE; assert V != READONLY; return (TypeV)V; } private static final Object get_impl( final NonBlockingHashMap topmap, final Object[] kvs, final Object key ) { final int fullhash= hash (key); // throws NullPointerException if key is null final int len = len (kvs); // Count of key/value pairs, reads kvs.length final CHM chm = chm (kvs); // The CHM, for a volatile read below; reads slot 0 of kvs final int[] hashes=hashes(kvs); // The memoized hashes; reads slot 1 of kvs int idx = fullhash & (len-1); // First key hash // Main spin/reprobe loop, looking for a Key hit int reprobe_cnt=0; while( true ) { // Probe table. Each read of 'val' probably misses in cache in a big // table; hopefully the read of 'key' then hits in cache. final Object K = key(kvs,idx); // Get key before volatile read, could be null final Object V = val(kvs,idx); // Get value before volatile read, could be null or Tombstone or Prime if( K == null ) return null; // A clear miss // We need a volatile-read here to preserve happens-before semantics on // newly inserted Keys. If the Key body was written just before inserting // into the table a Key-compare here might read the uninitalized Key body. // Annoyingly this means we have to volatile-read before EACH key compare. // . // We also need a volatile-read between reading a newly inserted Value // and returning the Value (so the user might end up reading the stale // Value contents). Same problem as with keys - and the one volatile // read covers both. final Object[] newkvs = chm._newkvs; // VOLATILE READ before key compare // Key-compare if( keyeq(K,key,hashes,idx,fullhash) ) { // Key hit! Check for no table-copy-in-progress if( !(V instanceof Prime) ) // No copy? return (V == TOMBSTONE) ? null : V; // Return the value // Key hit in locked table? Just unbox. if( newkvs == READONLY ) return Prime.unbox(V); // Key hit - but slot is (possibly partially) copied to the new table. // Finish the copy & retry in the new table. return get_impl(topmap,chm.copy_slot_and_check(topmap,kvs,idx,key),key); // Retry in the new table } // get and put must have the same key lookup logic! 
But only 'put' // needs to force a table-resize for a too-long key-reprobe sequence. // Check for too-many-reprobes on get - and flip to the new table. if( ++reprobe_cnt >= reprobe_limit(len) || // too many probes K == TOMBSTONE ) { // found a TOMBSTONE key, means no more keys in this table if( newkvs == READONLY ) return null; // Missed in a locked table return newkvs == null ? null : get_impl(topmap,topmap.help_copy(newkvs),key); // Retry in the new table } idx = (idx+1)&(len-1); // Reprobe by 1! (could now prefetch) } } // --- getk ----------------------------------------------------------------- /** Returns the Key to which the specified key is mapped, or {@code null} * if this map contains no mapping for the key. * @throws NullPointerException if the specified key is null */ // Never returns a Prime nor a Tombstone. public TypeK getk( TypeK key ) { return (TypeK)getk_impl(this,_kvs,key); } private static final Object getk_impl( final NonBlockingHashMap topmap, final Object[] kvs, final Object key ) { final int fullhash= hash (key); // throws NullPointerException if key is null final int len = len (kvs); // Count of key/value pairs, reads kvs.length final CHM chm = chm (kvs); // The CHM, for a volatile read below; reads slot 0 of kvs final int[] hashes=hashes(kvs); // The memoized hashes; reads slot 1 of kvs int idx = fullhash & (len-1); // First key hash // Main spin/reprobe loop, looking for a Key hit int reprobe_cnt=0; while( true ) { // Probe table. final Object K = key(kvs,idx); // Get key before volatile read, could be null if( K == null ) return null; // A clear miss // We need a volatile-read here to preserve happens-before semantics on // newly inserted Keys. If the Key body was written just before inserting // into the table a Key-compare here might read the uninitalized Key body. // Annoyingly this means we have to volatile-read before EACH key compare. // . // We also need a volatile-read between reading a newly inserted Value // and returning the Value (so the user might end up reading the stale // Value contents). Same problem as with keys - and the one volatile // read covers both. final Object[] newkvs = chm._newkvs; // VOLATILE READ before key compare // Key-compare if( keyeq(K,key,hashes,idx,fullhash) ) return K; // Return existing Key! // get and put must have the same key lookup logic! But only 'put' // needs to force a table-resize for a too-long key-reprobe sequence. // Check for too-many-reprobes on get - and flip to the new table. if( ++reprobe_cnt >= reprobe_limit(len) || // too many probes key == TOMBSTONE ) { // found a TOMBSTONE key, means no more keys in this table if( newkvs == READONLY ) return null; // Missed in a locked table return newkvs == null ? null : getk_impl(topmap,topmap.help_copy(newkvs),key); // Retry in the new table } idx = (idx+1)&(len-1); // Reprobe by 1! (could now prefetch) } } // --- putIfMatch --------------------------------------------------------- // Put, Remove, PutIfAbsent, etc. Return the old value. If the returned // value is equal to expVal (or expVal is NO_MATCH_OLD) then the put can be // assumed to work (although might have been immediately overwritten). Only // the path through copy_slot passes in an expected value of null, and // putIfMatch only returns a null if passed in an expected null. 
private static final Object putIfMatch( final NonBlockingHashMap topmap, final Object[] kvs, final Object key, final Object putval, final Object expVal ) { assert putval != null; assert !(putval instanceof Prime); assert !(expVal instanceof Prime); if( kvs == READONLY ) { // Update attempt in a locked table? if( expVal == NO_MATCH_OLD || expVal == MATCH_ANY ) throw new IllegalStateException("attempting to modify a locked table"); System.out.println("put denied for readonly"); return READONLY; // putIfMatch forced-miss for locked table } final int fullhash = hash (key); // throws NullPointerException if key null final int len = len (kvs); // Count of key/value pairs, reads kvs.length final CHM chm = chm (kvs); // Reads kvs[0] final int[] hashes = hashes(kvs); // Reads kvs[1], read before kvs[0] int idx = fullhash & (len-1); // --- // Key-Claim stanza: spin till we can claim a Key (or force a resizing). int reprobe_cnt=0; Object K=null, V=null; Object[] newkvs=null; while( true ) { // Spin till we get a Key slot V = val(kvs,idx); // Get old value (before volatile read below!) K = key(kvs,idx); // Get current key if( K == null ) { // Slot is free? // Found an empty Key slot - which means this Key has never been in // this table. No need to put a Tombstone - the Key is not here! if( putval == TOMBSTONE ) return putval; // Not-now & never-been in this table if( expVal == MATCH_ANY ) return null; // Will not match, even after K inserts // Claim the null key-slot if( CAS_key(kvs,idx, null, key ) ) { // Claim slot for Key chm._slots.add(1); // Raise key-slots-used count hashes[idx] = fullhash; // Memoize fullhash break; // Got it! } // CAS to claim the key-slot failed. // // This re-read of the Key points out an annoying short-coming of Java // CAS. Most hardware CAS's report back the existing value - so that // if you fail you have a *witness* - the value which caused the CAS // to fail. The Java API turns this into a boolean destroying the // witness. Re-reading does not recover the witness because another // thread can write over the memory after the CAS. Hence we can be in // the unfortunate situation of having a CAS fail *for cause* but // having that cause removed by a later store. This turns a // non-spurious-failure CAS (such as Azul has) into one that can // apparently spuriously fail - and we avoid apparent spurious failure // by not allowing Keys to ever change. K = key(kvs,idx); // CAS failed, get updated value if( K == null ) { System.out.println("Spurious CAS failure? Retrying!"); continue; } assert K != null; // If keys[idx] is null, CAS shoulda worked } // Key slot was not null, there exists a Key here // We need a volatile-read here to preserve happens-before semantics on // newly inserted Keys. If the Key body was written just before inserting // into the table a Key-compare here might read the uninitalized Key body. // Annoyingly this means we have to volatile-read before EACH key compare. newkvs = chm._newkvs; // VOLATILE READ before key compare if( keyeq(K,key,hashes,idx,fullhash) ) break; // Got it! // get and put must have the same key lookup logic! Lest 'get' give // up looking too soon. //topmap._reprobes.add(1); if( ++reprobe_cnt >= reprobe_limit(len) || // too many probes or K == TOMBSTONE ) { // found a TOMBSTONE key, means no more keys // We simply must have a new table to do a 'put'. At this point a // 'get' will also go to the new table (if any). We do not need // to claim a key slot (indeed, we cannot find a free one to claim!). 
newkvs = chm.resize(topmap,kvs); if( expVal != null ) topmap.help_copy(newkvs); // help along an existing copy return putIfMatch(topmap,newkvs,key,putval,expVal); } idx = (idx+1)&(len-1); // Reprobe! } // End of spinning till we get a Key slot // --- // Found the proper Key slot, now update the matching Value slot. We // never put a null, so Value slots monotonically move from null to // not-null (deleted Values use Tombstone). Thus if 'V' is null we // fail this fast cutout and fall into the check for table-full. if( putval == V ) return V; // Fast cutout for no-change // See if we want to move to a new table (to avoid high average re-probe // counts). We only check on the initial set of a Value from null to // not-null (i.e., once per key-insert). Of course we got a 'free' check // of newkvs once per key-compare (not really free, but paid-for by the // time we get here). if( newkvs == null && // New table-copy already spotted? // Once per fresh key-insert check the hard way ((V == null && chm.tableFull(reprobe_cnt,len)) || // Or we found a Prime, but the JMM allowed reordering such that we // did not spot the new table (very rare race here: the writing // thread did a CAS of _newkvs then a store of a Prime. This thread // reads the Prime, then reads _newkvs - but the read of Prime was so // delayed (or the read of _newkvs was so accelerated) that they // swapped and we still read a null _newkvs. The resize call below // will do a CAS on _newkvs forcing the read. V instanceof Prime) ) newkvs = chm.resize(topmap,kvs); // Force the new table copy to start // See if we are moving to a new table. // If so, copy our slot and retry in the new table. if( newkvs != null ) return putIfMatch(topmap,chm.copy_slot_and_check(topmap,kvs,idx,expVal),key,putval,expVal); // --- // We are finally prepared to update the existing table assert !(V instanceof Prime); // Must match old, and we do not? Then bail out now. Note that either V // or expVal might be TOMBSTONE. Also V can be null, if we've never // inserted a value before. expVal can be null if we are called from // copy_slot. if( expVal != NO_MATCH_OLD && // Do we care about expected-Value at all? V != expVal && // No instant match already? (expVal != MATCH_ANY || V == TOMBSTONE || V == null) && !(V==null && expVal == TOMBSTONE) && // Match on null/TOMBSTONE combo (expVal == null || !expVal.equals(V)) ) // Expensive equals check at the last return V; // Do not update! // Actually change the Value in the Key,Value pair if( CAS_val(kvs, idx, V, putval ) ) { // CAS succeeded - we did the update! // Both normal put's and table-copy calls putIfMatch, but table-copy // does not (effectively) increase the number of live k/v pairs. if( expVal != null ) { // Adjust sizes - a striped counter if( (V == null || V == TOMBSTONE) && putval != TOMBSTONE ) chm._size.add( 1); if( !(V == null || V == TOMBSTONE) && putval == TOMBSTONE ) chm._size.add(-1); } } else { // Else CAS failed V = val(kvs,idx); // Get new value // If a Prime'd value got installed, we need to re-run the put on the // new table. Otherwise we lost the CAS to another racing put. // Simply retry from the start. if( V instanceof Prime ) return putIfMatch(topmap,chm.copy_slot_and_check(topmap,kvs,idx,expVal),key,putval,expVal); } // Win or lose the CAS, we are done. If we won then we know the update // happened as expected. If we lost, it means "we won but another thread // immediately stomped our update with no chance of a reader reading". return (V==null && expVal!=null) ? 
TOMBSTONE : V; } // --- help_copy --------------------------------------------------------- // Help along an existing resize operation. This is just a fast cut-out // wrapper, to encourage inlining for the fast no-copy-in-progress case. We // always help the top-most table copy, even if there are nested table // copies in progress. private final Object[] help_copy( Object[] helper ) { // Read the top-level KVS only once. We'll try to help this copy along, // even if it gets promoted out from under us (i.e., the copy completes // and another KVS becomes the top-level copy). Object[] topkvs = _kvs; CHM topchm = chm(topkvs); if( topchm._newkvs == null ) return helper; // No copy in-progress topchm.help_copy_impl(this,topkvs,false); return helper; } // --- CHM ----------------------------------------------------------------- // The control structure for the NonBlockingHashMap private static final class CHM<TypeK,TypeV> { // Size in active K,V pairs private final ConcurrentAutoTable _size; public int size () { return (int)_size.get(); } // --- // These next 2 fields are used in the resizing heuristics, to judge when // it is time to resize or copy the table. Slots is a count of used-up // key slots, and when it nears a large fraction of the table we probably // end up reprobing too much. Last-resize-milli is the time since the // last resize; if we are running back-to-back resizes without growing // (because there are only a few live keys but many slots full of dead // keys) then we need a larger table to cut down on the churn. // Count of used slots, to tell when table is full of dead unusable slots private final ConcurrentAutoTable _slots; public int slots() { return (int)_slots.get(); } // --- // New mappings, used during resizing. // The 'new KVs' array - created during a resize operation. This // represents the new table being copied from the old one. It's the // volatile variable that is read as we cross from one table to the next, // to get the required memory orderings. It monotonically transits from // null to set (once). volatile Object[] _newkvs; private final AtomicReferenceFieldUpdater<CHM,Object[]> _newkvsUpdater = AtomicReferenceFieldUpdater.newUpdater(CHM.class,Object[].class, "_newkvs"); // Set the _next field if we can. boolean CAS_newkvs( Object[] newkvs ) { while( _newkvs == null ) if( _newkvsUpdater.compareAndSet(this,null,newkvs) ) return true; return false; } // Sometimes many threads race to create a new very large table. Only 1 // wins the race, but the losers all allocate a junk large table with // hefty allocation costs. Attempt to control the overkill here by // throttling attempts to create a new table. I cannot really block here // (lest I lose the non-blocking property) but late-arriving threads can // give the initial resizing thread a little time to allocate the initial // new table. The Right Long Term Fix here is to use array-lets and // incrementally create the new very large array. In C I'd make the array // with malloc (which would mmap under the hood) which would only eat // virtual-address and not real memory - and after Somebody wins then we // could in parallel initialize the array. Java does not allow // un-initialized array creation (especially of ref arrays!). 
volatile long _resizers; // count of threads attempting an initial resize private static final AtomicLongFieldUpdater<CHM> _resizerUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_resizers"); // --- // Simple constructor CHM( ConcurrentAutoTable size ) { _size = size; _slots= new ConcurrentAutoTable(); } // --- tableFull --------------------------------------------------------- // Heuristic to decide if this table is too full, and we should start a // new table. Note that if a 'get' call has reprobed too many times and // decided the table must be full, then always the estimate_sum must be // high and we must report the table is full. If we do not, then we might // end up deciding that the table is not full and inserting into the // current table, while a 'get' has decided the same key cannot be in this // table because of too many reprobes. The invariant is: // slots.estimate_sum >= max_reprobe_cnt >= reprobe_limit(len) private final boolean tableFull( int reprobe_cnt, int len ) { return // Do the cheap check first: we allow some number of reprobes always reprobe_cnt >= REPROBE_LIMIT && // More expensive check: see if the table is > 1/2 full. _slots.estimate_get() >= reprobe_limit(len)*2; } // --- resize ------------------------------------------------------------ // Resizing after too many probes. "How Big???" heuristics are here. // Callers will (not this routine) will 'help_copy' any in-progress copy. // Since this routine has a fast cutout for copy-already-started, callers // MUST 'help_copy' lest we have a path which forever runs through // 'resize' only to discover a copy-in-progress which never progresses. private final Object[] resize( NonBlockingHashMap topmap, Object[] kvs) { assert chm(kvs) == this; // Check for resize already in progress, probably triggered by another thread Object[] newkvs = _newkvs; // VOLATILE READ if( newkvs != null ) // See if resize is already in progress return newkvs; // Use the new table already // No copy in-progress, so start one. First up: compute new table size. int oldlen = len(kvs); // Old count of K,V pairs allowed int sz = size(); // Get current table count of active K,V pairs int newsz = sz; // First size estimate // Heuristic to determine new size. We expect plenty of dead-slots-with-keys // and we need some decent padding to avoid endless reprobing. if( sz >= (oldlen>>2) ) { // If we are >25% full of keys then... newsz = oldlen<<1; // Double size if( sz >= (oldlen>>1) ) // If we are >50% full of keys then... newsz = oldlen<<2; // Double double size } // This heuristic in the next 2 lines leads to a much denser table // with a higher reprobe rate //if( sz >= (oldlen>>1) ) // If we are >50% full of keys then... // newsz = oldlen<<1; // Double size // Last (re)size operation was very recent? Then double again; slows // down resize operations for tables subject to a high key churn rate. long tm = System.currentTimeMillis(); long q=0; if( newsz <= oldlen && // New table would shrink or hold steady? tm <= topmap._last_resize_milli+10000 && // Recent resize (less than 1 sec ago) (q=_slots.estimate_get()) >= (sz<<1) ) // 1/2 of keys are dead? newsz = oldlen<<1; // Double the existing size // Do not shrink, ever if( newsz < oldlen ) newsz = oldlen; // Convert to power-of-2 int log2; for( log2=MIN_SIZE_LOG; (1<<log2) < newsz; log2++ ) ; // Compute log2 of size // Now limit the number of threads actually allocating memory to a // handful - lest we have 750 threads all trying to allocate a giant // resized array. 
long r = _resizers; while( !_resizerUpdater.compareAndSet(this,r,r+1) ) r = _resizers; // Size calculation: 2 words (K+V) per table entry, plus a handful. We // guess at 32-bit pointers; 64-bit pointers screws up the size calc by // 2x but does not screw up the heuristic very much. int megs = ((((1<<log2)<<1)+4)<<3/*word to bytes*/)>>20/*megs*/; if( r >= 2 && megs > 0 ) { // Already 2 guys trying; wait and see newkvs = _newkvs; // Between dorking around, another thread did it if( newkvs != null ) // See if resize is already in progress return newkvs; // Use the new table already // TODO - use a wait with timeout, so we'll wakeup as soon as the new table // is ready, or after the timeout in any case. //synchronized( this ) { wait(8*megs); } // Timeout - we always wakeup // For now, sleep a tad and see if the 2 guys already trying to make // the table actually get around to making it happen. try { Thread.sleep(8*megs); } catch( Exception e ) { } } // Last check, since the 'new' below is expensive and there is a chance // that another thread slipped in a new thread while we ran the heuristic. newkvs = _newkvs; if( newkvs != null ) // See if resize is already in progress return newkvs; // Use the new table already // Double size for K,V pairs, add 1 for CHM newkvs = new Object[((1<<log2)<<1)+2]; // This can get expensive for big arrays newkvs[0] = new CHM(_size); // CHM in slot 0 newkvs[1] = new int[1<<log2]; // hashes in slot 1 // Another check after the slow allocation if( _newkvs != null ) // See if resize is already in progress return _newkvs; // Use the new table already // The new table must be CAS'd in so only 1 winner amongst duplicate // racing resizing threads. Extra CHM's will be GC'd. if( CAS_newkvs( newkvs ) ) { // NOW a resize-is-in-progress! //notifyAll(); // Wake up any sleepers //long nano = System.nanoTime(); //System.out.println(" "+nano+" Resize from "+oldlen+" to "+(1<<log2)+" and had "+(_resizers-1)+" extras" ); //if( System.out != null ) System.out.print("["+log2); topmap.rehash(); // Call for Hashtable's benefit } else // CAS failed? newkvs = _newkvs; // Reread new table return newkvs; } // The next part of the table to copy. It monotonically transits from zero // to _kvs.length. Visitors to the table can claim 'work chunks' by // CAS'ing this field up, then copying the indicated indices from the old // table to the new table. Workers are not required to finish any chunk; // the counter simply wraps and work is copied duplicately until somebody // somewhere completes the count. volatile long _copyIdx = 0; static private final AtomicLongFieldUpdater<CHM> _copyIdxUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyIdx"); // Work-done reporting. Used to efficiently signal when we can move to // the new table. From 0 to len(oldkvs) refers to copying from the old // table to the new. volatile long _copyDone= 0; static private final AtomicLongFieldUpdater<CHM> _copyDoneUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyDone"); // --- help_copy_impl ---------------------------------------------------- // Help along an existing resize operation. We hope its the top-level // copy (it was when we started) but this CHM might have been promoted out // of the top position. 
private final void help_copy_impl( NonBlockingHashMap topmap, Object[] oldkvs, boolean copy_all ) { assert chm(oldkvs) == this; Object[] newkvs = _newkvs; assert newkvs != null; // Already checked by caller int oldlen = len(oldkvs); // Total amount to copy final int MIN_COPY_WORK = Math.min(oldlen,1024); // Limit per-thread work // --- int panic_start = -1; int copyidx=-9999; // Fool javac to think it's initialized while( _copyDone < oldlen ) { // Still needing to copy? // Carve out a chunk of work. The counter wraps around so every // thread eventually tries to copy every slot repeatedly. // We "panic" if we have tried TWICE to copy every slot - and it still // has not happened. i.e., twice some thread somewhere claimed they // would copy 'slot X' (by bumping _copyIdx) but they never claimed to // have finished (by bumping _copyDone). Our choices become limited: // we can wait for the work-claimers to finish (and become a blocking // algorithm) or do the copy work ourselves. Tiny tables with huge // thread counts trying to copy the table often 'panic'. if( panic_start == -1 ) { // No panic? copyidx = (int)_copyIdx; while( !_copyIdxUpdater.compareAndSet(this,copyidx,copyidx+MIN_COPY_WORK) ) copyidx = (int)_copyIdx; // Re-read if( !(copyidx < (oldlen<<1)) ) // Panic! panic_start = copyidx; // Record where we started to panic-copy } // We now know what to copy. Try to copy. int workdone = 0; for( int i=0; i<MIN_COPY_WORK; i++ ) if( copy_slot(topmap,(copyidx+i)&(oldlen-1),oldkvs,newkvs) ) // Made an oldtable slot go dead? workdone++; // Yes! if( workdone > 0 ) // Report work-done occasionally copy_check_and_promote( topmap, oldkvs, workdone );// See if we can promote //for( int i=0; i<MIN_COPY_WORK; i++ ) // if( copy_slot(topmap,(copyidx+i)&(oldlen-1),oldkvs,newkvs) ) // Made an oldtable slot go dead? // copy_check_and_promote( topmap, oldkvs, 1 );// See if we can promote copyidx += MIN_COPY_WORK; // Uncomment these next 2 lines to turn on incremental table-copy. // Otherwise this thread continues to copy until it is all done. if( !copy_all && panic_start == -1 ) // No panic? return; // Then done copying after doing MIN_COPY_WORK } // Extra promotion check, in case another thread finished all copying // then got stalled before promoting. copy_check_and_promote( topmap, oldkvs, 0 );// See if we can promote } // --- copy_slot_and_check ----------------------------------------------- // Copy slot 'idx' from the old table to the new table. If this thread // confirmed the copy, update the counters and check for promotion. // // Returns the result of reading the volatile _newkvs, mostly as a // convenience to callers. We come here with 1-shot copy requests // typically because the caller has found a Prime, and has not yet read // the _newkvs volatile - which must have changed from null-to-not-null // before any Prime appears. So the caller needs to read the _newkvs // field to retry his operation in the new table, but probably has not // read it yet. private final Object[] copy_slot_and_check( NonBlockingHashMap topmap, Object[] oldkvs, int idx, Object should_help ) { assert chm(oldkvs) == this; Object[] newkvs = _newkvs; // VOLATILE READ // We're only here because the caller saw a Prime, which implies a // table-copy is in progress. 
assert newkvs != null; if( copy_slot(topmap,idx,oldkvs,_newkvs) ) // Copy the desired slot copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied // Generically help along any copy (except if called recursively from a helper) return (should_help == null) ? newkvs : topmap.help_copy(newkvs); } // --- copy_check_and_promote -------------------------------------------- private final void copy_check_and_promote( NonBlockingHashMap topmap, Object[] oldkvs, int workdone ) { assert chm(oldkvs) == this; int oldlen = len(oldkvs); // We made a slot unusable and so did some of the needed copy work long copyDone = _copyDone; assert (copyDone+workdone) <= oldlen; if( workdone > 0 ) { while( !_copyDoneUpdater.compareAndSet(this,copyDone,copyDone+workdone) ) { copyDone = _copyDone; // Reload, retry assert (copyDone+workdone) <= oldlen; } //if( (10*copyDone/oldlen) != (10*(copyDone+workdone)/oldlen) ) //System.out.print(" "+(copyDone+workdone)*100/oldlen+"%"+"_"+(_copyIdx*100/oldlen)+"%"); } // Check for copy being ALL done, and promote. Note that we might have // nested in-progress copies and manage to finish a nested copy before // finishing the top-level copy. We only promote top-level copies. if( copyDone+workdone == oldlen && // Ready to promote this table? topmap._kvs == oldkvs && // Looking at the top-level table? _newkvs != READONLY && // Table is locked down? // Attempt to promote topmap.CAS_kvs(oldkvs,_newkvs) ) { topmap._last_resize_milli = System.currentTimeMillis(); // Record resize time for next check } } // --- copy_slot --------------------------------------------------------- // Copy one K/V pair from oldkvs[i] to newkvs. Returns true if we can // confirm that the new table guaranteed has a value for this old-table // slot. We need an accurate confirmed-copy count so that we know when we // can promote (if we promote the new table too soon, other threads may // 'miss' on values not-yet-copied from the old table). We don't allow // any direct updates on the new table, unless they first happened to the // old table - so that any transition in the new table from null to // not-null must have been from a copy_slot (or other old-table overwrite) // and not from a thread directly writing in the new table. Thus we can // count null-to-not-null transitions in the new table. private boolean copy_slot( NonBlockingHashMap topmap, int idx, Object[] oldkvs, Object[] newkvs ) { // Blindly set the key slot from null to TOMBSTONE, to eagerly stop // fresh put's from inserting new values in the old table when the old // table is mid-resize. We don't need to act on the results here, // because our correctness stems from box'ing the Value field. Slamming // the Key field is a minor speed optimization. Object key; while( (key=key(oldkvs,idx)) == null ) CAS_key(oldkvs,idx, null, TOMBSTONE); // --- // Prevent new values from appearing in the old table. // Box what we see in the old table, to prevent further updates. Object oldval = val(oldkvs,idx); // Read OLD table while( !(oldval instanceof Prime) ) { final Prime box = (oldval == null || oldval == TOMBSTONE) ? TOMBPRIME : new Prime(oldval); if( CAS_val(oldkvs,idx,oldval,box) ) { // CAS down a box'd version of oldval // If we made the Value slot hold a TOMBPRIME, then we both // prevented further updates here but also the (absent) // oldval is vaccuously available in the new table. We // return with true here: any thread looking for a value for // this key can correctly go straight to the new table and // skip looking in the old table. 
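          // (Added summary) Value-slot states during a copy:
          //   live V          -> Prime(V) -> (copied to new table) -> TOMBPRIME
          //   null/TOMBSTONE  -> TOMBPRIME               (nothing to copy)
          // TOMBPRIME is terminal: it blocks further old-table writers and
          // tells readers that this slot's data, if any, is already visible
          // in the new table.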
if( box == TOMBPRIME ) return true; // Otherwise we boxed something, but it still needs to be // copied into the new table. oldval = box; // Record updated oldval break; // Break loop; oldval is now boxed by us } oldval = val(oldkvs,idx); // Else try, try again } if( oldval == TOMBPRIME ) return false; // Copy already complete here! // If the new table is really the table-locked flag, then we are done // here: the Value was wrapped in a Prime preventing it from changing // again. if( newkvs == READONLY ) return true; // --- // Copy the value into the new table, but only if we overwrite a null. // If another value is already in the new table, then somebody else // wrote something there and that write is happens-after any value that // appears in the old table. Object old_unboxed = ((Prime)oldval)._V; assert old_unboxed != TOMBSTONE; putIfMatch(topmap, newkvs, key, old_unboxed, null); // --- // Finally, now that any old value is exposed in the new table, we can // forever hide the old-table value by slapping a TOMBPRIME down. This // will stop other threads from uselessly attempting to copy this slot // (i.e., it's a speed optimization not a correctness issue). while( oldval != TOMBPRIME && !CAS_val(oldkvs,idx,oldval,TOMBPRIME) ) oldval = val(oldkvs,idx); return oldval != TOMBPRIME; // True if we slammed the TOMBPRIME down } // end copy_slot } // End of CHM // --- Snapshot ------------------------------------------------------------ // The main class for iterating over the NBHM. It "snapshots" a clean // view of the K/V array. private class SnapshotV implements Iterator<TypeV>, Enumeration<TypeV> { final Object[] _sskvs; public SnapshotV() { while( true ) { // Verify no table-copy-in-progress Object[] topkvs = _kvs; CHM topchm = chm(topkvs); if( topchm._newkvs == null || // No table-copy-in-progress topchm._newkvs == READONLY ) { // The "linearization point" for the iteration. Every key in this // table will be visited, but keys added later might be skipped or // even be added to a following table (also not iterated over). _sskvs = topkvs; break; } // Table copy in-progress - so we cannot get a clean iteration. We // must help finish the table copy before we can start iterating. topchm.help_copy_impl(NonBlockingHashMap.this,topkvs,true); } // Warm-up the iterator next(); } int length() { return len(_sskvs); } Object key(int idx) { return NonBlockingHashMap.key(_sskvs,idx); } private int _idx; // Varies from 0-keys.length private Object _nextK, _prevK; // Last 2 keys found private TypeV _nextV, _prevV; // Last 2 values found public boolean hasNext() { return _nextV != null; } public TypeV next() { // 'next' actually knows what the next value will be - it had to // figure that out last go-around lest 'hasNext' report true and // some other thread deleted the last value. Instead, 'next' // spends all its effort finding the key that comes after the // 'next' key. if( _idx != 0 && _nextV == null ) throw new NoSuchElementException(); _prevK = _nextK; // This will become the previous key _prevV = _nextV; // This will become the previous value _nextV = null; // We have no more next-key // Attempt to set <_nextK,_nextV> to the next K,V pair. // _nextV is the trigger: stop searching when it is != null while( _idx<length() ) { // Scan array _nextK = key(_idx++); // Get a key that definitely is in the set (for the moment!) if( _nextK != null && // Found something? _nextK != TOMBSTONE && (_nextV=get(_nextK)) != null ) break; // Got it! 
_nextK is a valid Key } // Else keep scanning return _prevV; // Return current value. } public void remove() { if( _prevV == null ) throw new IllegalStateException(); putIfMatch( NonBlockingHashMap.this, _sskvs, _prevK, TOMBSTONE, _prevV ); _prevV = null; } public TypeV nextElement() { return next(); } public boolean hasMoreElements() { return hasNext(); } } public Object[] raw_array() { return new SnapshotV()._sskvs; } /** Returns an enumeration of the values in this table. * @return an enumeration of the values in this table * @see #values() */ public Enumeration<TypeV> elements() { return new SnapshotV(); } // --- values -------------------------------------------------------------- /** Returns a {@link Collection} view of the values contained in this map. * The collection is backed by the map, so changes to the map are reflected * in the collection, and vice-versa. The collection supports element * removal, which removes the corresponding mapping from this map, via the * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>, * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations. * It does not support the <tt>add</tt> or <tt>addAll</tt> operations. * * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator that * will never throw {@link ConcurrentModificationException}, and guarantees * to traverse elements as they existed upon construction of the iterator, * and may (but is not guaranteed to) reflect any modifications subsequent * to construction. */ @Override public Collection<TypeV> values() { return new AbstractCollection<TypeV>() { @Override public void clear ( ) { NonBlockingHashMap.this.clear ( ); } @Override public int size ( ) { return NonBlockingHashMap.this.size ( ); } @Override public boolean contains( Object v ) { return NonBlockingHashMap.this.containsValue(v); } @Override public Iterator<TypeV> iterator() { return new SnapshotV(); } }; } // --- keySet -------------------------------------------------------------- private class SnapshotK implements Iterator<TypeK>, Enumeration<TypeK> { final SnapshotV _ss; public SnapshotK() { _ss = new SnapshotV(); } public void remove() { _ss.remove(); } public TypeK next() { _ss.next(); return (TypeK)_ss._prevK; } public boolean hasNext() { return _ss.hasNext(); } public TypeK nextElement() { return next(); } public boolean hasMoreElements() { return hasNext(); } } /** Returns an enumeration of the keys in this table. * @return an enumeration of the keys in this table * @see #keySet() */ public Enumeration<TypeK> keys() { return new SnapshotK(); } /** Returns a {@link Set} view of the keys contained in this map. The set * is backed by the map, so changes to the map are reflected in the set, * and vice-versa. The set supports element removal, which removes the * corresponding mapping from this map, via the <tt>Iterator.remove</tt>, * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and * <tt>clear</tt> operations. It does not support the <tt>add</tt> or * <tt>addAll</tt> operations. * * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator that * will never throw {@link ConcurrentModificationException}, and guarantees * to traverse elements as they existed upon construction of the iterator, * and may (but is not guaranteed to) reflect any modifications subsequent * to construction. 
*/ @Override public Set<TypeK> keySet() { return new AbstractSet<TypeK> () { @Override public void clear ( ) { NonBlockingHashMap.this.clear ( ); } @Override public int size ( ) { return NonBlockingHashMap.this.size ( ); } @Override public boolean contains( Object k ) { return NonBlockingHashMap.this.containsKey(k); } @Override public boolean remove ( Object k ) { return NonBlockingHashMap.this.remove (k) != null; } @Override public Iterator<TypeK> iterator() { return new SnapshotK(); } }; } // --- entrySet ------------------------------------------------------------ // Warning: Each call to 'next' in this iterator constructs a new NBHMEntry. private class NBHMEntry extends AbstractEntry<TypeK,TypeV> { NBHMEntry( final TypeK k, final TypeV v ) { super(k,v); } public TypeV setValue(final TypeV val) { if( val == null ) throw new NullPointerException(); _val = val; return put(_key, val); } } private class SnapshotE implements Iterator<Map.Entry<TypeK,TypeV>> { final SnapshotV _ss; public SnapshotE() { _ss = new SnapshotV(); } public void remove() { _ss.remove(); } public Map.Entry<TypeK,TypeV> next() { _ss.next(); return new NBHMEntry((TypeK)_ss._prevK,_ss._prevV); } public boolean hasNext() { return _ss.hasNext(); } } /** Returns a {@link Set} view of the mappings contained in this map. The * set is backed by the map, so changes to the map are reflected in the * set, and vice-versa. The set supports element removal, which removes * the corresponding mapping from the map, via the * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, <tt>removeAll</tt>, * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not support * the <tt>add</tt> or <tt>addAll</tt> operations. * * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator * that will never throw {@link ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. * * <p><strong>Warning:</strong> the iterator associated with this Set * requires the creation of {@link java.util.Map.Entry} objects with each * iteration. The {@link NonBlockingHashMap} does not normally create or * using {@link java.util.Map.Entry} objects so they will be created soley * to support this iteration. Iterating using {@link #keySet} or {@link * #values} will be more efficient. 
*/ @Override public Set<Map.Entry<TypeK,TypeV>> entrySet() { return new AbstractSet<Map.Entry<TypeK,TypeV>>() { @Override public void clear ( ) { NonBlockingHashMap.this.clear( ); } @Override public int size ( ) { return NonBlockingHashMap.this.size ( ); } @Override public boolean remove( final Object o ) { if( !(o instanceof Map.Entry)) return false; final Map.Entry<?,?> e = (Map.Entry<?,?>)o; return NonBlockingHashMap.this.remove(e.getKey(), e.getValue()); } @Override public boolean contains(final Object o) { if( !(o instanceof Map.Entry)) return false; final Map.Entry<?,?> e = (Map.Entry<?,?>)o; TypeV v = get(e.getKey()); return v.equals(e.getValue()); } @Override public Iterator<Map.Entry<TypeK,TypeV>> iterator() { return new SnapshotE(); } }; } // --- writeObject ------------------------------------------------------- // Write a NBHM to a stream private void writeObject(java.io.ObjectOutputStream s) throws IOException { s.defaultWriteObject(); // Nothing to write for( Object K : keySet() ) { final Object V = get(K); // Do an official 'get' s.writeObject(K); // Write the <TypeK,TypeV> pair s.writeObject(V); } s.writeObject(null); // Sentinel to indicate end-of-data s.writeObject(null); } // --- readObject -------------------------------------------------------- // Read a CHM from a stream private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { s.defaultReadObject(); // Read nothing initialize(MIN_SIZE); for(;;) { final TypeK K = (TypeK) s.readObject(); final TypeV V = (TypeV) s.readObject(); if( K == null ) break; put(K,V); // Insert with an offical put } } /** * Atomically make the set immutable. Future calls to mutate with wildcard * matching will throw an IllegalStateException. This basically outlaws put, * remove and replace, but allows putIfAbsent and putIfMatch. Existing * mutator calls in other threads racing with this thread and will either * throw IllegalStateException or their update will be visible to this * thread. This implies that a simple flag cannot make the Set immutable, * because a late-arriving update in another thread might see immutable flag * not set yet, then mutate the Set after the {@link #readOnly} call returns. * This call can be called concurrently (and indeed until the operation * completes, all calls on the Set from any thread either complete normally * or end up calling {@link #readOnly} internally). * * <p> This call is useful in debugging multi-threaded programs where the * Set is constructed in parallel, but construction completes after some * time; and after construction the Set is only read. Making the Set * read-only will cause updates arriving after construction is supposedly * complete to throw an {@link IllegalStateException}. */ public void readOnly() { // Set the innermost kvs to the READONLY sentinel. This will (gradually) // prevent future updates. Object[] kvs = _kvs; while( true ) { // Spin, until we lock down the innermost table CHM chm = chm(kvs); Object[] newkvs = chm._newkvs; if( newkvs == READONLY ) break; if( chm.CAS_newkvs(READONLY) ) break; kvs = newkvs; assert kvs != null && kvs != READONLY; } CHM chm = chm(kvs); // Do some table-lock, but not it all chm.help_copy_impl(NonBlockingHashMap.this,kvs,false); } } // End NonBlockingHashMap class
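// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source).  It exercises
// only operations defined above: the plain ConcurrentMap calls plus the
// readOnly() lock-down defined just above.  The class name is hypothetical
// and the printed values assume the calls behave as documented.
class NonBlockingHashMapUsageSketch {
  public static void main(String[] args) {
    NonBlockingHashMap<String,Integer> map = new NonBlockingHashMap<String,Integer>();
    map.put("a", 1);                    // wildcard-match put
    map.putIfAbsent("a", 2);            // no-op: "a" is already mapped
    map.replace("a", 1, 3);             // succeeds only because the old value is 1
    map.readOnly();                     // lock the table down after construction
    System.out.println(map.get("a"));   // expected: 3
    System.out.println(map.size());     // expected: 1
  }
}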
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/nbhm/NonBlockingHashMapLong.java
package water.nbhm; import java.io.IOException; import java.io.Serializable; import java.lang.reflect.Field; import java.util.AbstractCollection; import java.util.AbstractMap; import java.util.AbstractSet; import java.util.Collection; import java.util.ConcurrentModificationException; import java.util.Enumeration; import java.util.HashMap; import java.util.Hashtable; import java.util.Iterator; import java.util.Map; import java.util.NoSuchElementException; import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import sun.misc.Unsafe; /* * Written by Cliff Click and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain */ /** * A lock-free alternate implementation of java.util.ConcurrentHashMap * with <strong>primitive long keys</strong>, better scaling properties and * generally lower costs. The use of {@code long} keys allows for faster * compares and lower memory costs. The Map provides identical correctness * properties as ConcurrentHashMap. All operations are non-blocking and * multi-thread safe, including all update operations. * NonBlockingHashMapLong scales substatially better than * java.util.ConcurrentHashMap for high update rates, even with a large * concurrency factor. Scaling is linear up to 768 CPUs on a 768-CPU Azul * box, even with 100% updates or 100% reads or any fraction in-between. * Linear scaling up to all cpus has been observed on a 32-way Sun US2 box, * 32-way Sun Niagra box, 8-way Intel box and a 4-way Power box. * * <p><strong>The main benefit of this class</strong> over using plain * org.cliffc.high_scale_lib.NonBlockingHashMap with {@link Long} keys is * that it avoids the auto-boxing and unboxing costs. Since auto-boxing is * <em>automatic</em>, it is easy to accidentally cause auto-boxing and negate * the space and speed benefits. * * <p>This class obeys the same functional specification as {@link * java.util.Hashtable}, and includes versions of methods corresponding to * each method of <tt>Hashtable</tt>. However, even though all operations are * thread-safe, operations do <em>not</em> entail locking and there is * <em>not</em> any support for locking the entire table in a way that * prevents all access. This class is fully interoperable with * <tt>Hashtable</tt> in programs that rely on its thread safety but not on * its synchronization details. * * <p> Operations (including <tt>put</tt>) generally do not block, so may * overlap with other update operations (including other <tt>puts</tt> and * <tt>removes</tt>). Retrievals reflect the results of the most recently * <em>completed</em> update operations holding upon their onset. For * aggregate operations such as <tt>putAll</tt>, concurrent retrievals may * reflect insertion or removal of only some entries. Similarly, Iterators * and Enumerations return elements reflecting the state of the hash table at * some point at or since the creation of the iterator/enumeration. They do * <em>not</em> throw {@link ConcurrentModificationException}. However, * iterators are designed to be used by only one thread at a time. * * <p> Very full tables, or tables with high reprobe rates may trigger an * internal resize operation to move into a larger table. Resizing is not * terribly expensive, but it is not free either; during resize operations * table throughput may drop somewhat. 
All threads that visit the table * during a resize will 'help' the resizing but will still be allowed to * complete their operation before the resize is finished (i.e., a simple * 'get' operation on a million-entry table undergoing resizing will not need * to block until the entire million entries are copied). * * <p>This class and its views and iterators implement all of the * <em>optional</em> methods of the {@link Map} and {@link Iterator} * interfaces. * * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class * does <em>not</em> allow <tt>null</tt> to be used as a value. * * * @since 1.5 * @author Cliff Click * @param <TypeV> the type of mapped values */ public class NonBlockingHashMapLong<TypeV> extends AbstractMap<Long,TypeV> implements ConcurrentMap<Long,TypeV>, Serializable { private static final long serialVersionUID = 1234123412341234124L; private static final int REPROBE_LIMIT=10; // Too many reprobes then force a table-resize // --- Bits to allow Unsafe access to arrays private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); private static final int _Obase = _unsafe.arrayBaseOffset(Object[].class); private static final int _Oscale = _unsafe.arrayIndexScale(Object[].class); private static long rawIndex(final Object[] ary, final int idx) { assert idx >= 0 && idx < ary.length; return _Obase + idx * _Oscale; } private static final int _Lbase = _unsafe.arrayBaseOffset(long[].class); private static final int _Lscale = _unsafe.arrayIndexScale(long[].class); private static long rawIndex(final long[] ary, final int idx) { assert idx >= 0 && idx < ary.length; return _Lbase + idx * _Lscale; } // --- Bits to allow Unsafe CAS'ing of the CHM field private static final long _chm_offset; private static final long _val_1_offset; static { // <clinit> Field f = null; try { f = NonBlockingHashMapLong.class.getDeclaredField("_chm"); } catch( java.lang.NoSuchFieldException e ) { throw new RuntimeException(e); } _chm_offset = _unsafe.objectFieldOffset(f); try { f = NonBlockingHashMapLong.class.getDeclaredField("_val_1"); } catch( java.lang.NoSuchFieldException e ) { throw new RuntimeException(e); } _val_1_offset = _unsafe.objectFieldOffset(f); } private final boolean CAS( final long offset, final Object old, final Object nnn ) { return _unsafe.compareAndSwapObject(this, offset, old, nnn ); } // --- Adding a 'prime' bit onto Values via wrapping with a junk wrapper class private static final class Prime { final Object _V; Prime( Object V ) { _V = V; } static Object unbox( Object V ) { return V instanceof Prime ? ((Prime)V)._V : V; } } // --- The Hash Table -------------------- private transient CHM _chm; // This next field holds the value for Key 0 - the special key value which // is the initial array value, and also means: no-key-inserted-yet. private transient Object _val_1; // Value for Key: NO_KEY // Time since last resize private transient long _last_resize_milli; // Optimize for space: use a 1/2-sized table and allow more re-probes private final boolean _opt_for_space; // --- Minimum table size ---------------- // Pick size 16 K/V pairs, which turns into (16*2)*4+12 = 140 bytes on a // standard 32-bit HotSpot, and (16*2)*8+12 = 268 bytes on 64-bit Azul. private static final int MIN_SIZE_LOG=4; // private static final int MIN_SIZE=(1<<MIN_SIZE_LOG); // Must be power of 2 // --- Sentinels ------------------------- // No-Match-Old - putIfMatch does updates only if it matches the old value, // and NO_MATCH_OLD basically counts as a wildcard match. 
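  // (Added summary) The public wrappers further down reduce to putIfMatch
  // plus these sentinels:
  //   put(k,v)           == putIfMatch(k, v,         NO_MATCH_OLD)
  //   putIfAbsent(k,v)   == putIfMatch(k, v,         TOMBSTONE)
  //   remove(k)          == putIfMatch(k, TOMBSTONE, NO_MATCH_OLD)
  //   remove(k,v)        == putIfMatch(k, TOMBSTONE, v)
  //   replace(k,v)       == putIfMatch(k, v,         MATCH_ANY)
  //   replace(k,old,new) == putIfMatch(k, new,       old)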
private static final Object NO_MATCH_OLD = new Object(); // Sentinel // Match-Any-not-null - putIfMatch does updates only if it find a real old // value. private static final Object MATCH_ANY = new Object(); // Sentinel // This K/V pair has been deleted (but the Key slot is forever claimed). // The same Key can be reinserted with a new value later. private static final Object TOMBSTONE = new Object(); // Prime'd or box'd version of TOMBSTONE. This K/V pair was deleted, then a // table resize started. The K/V pair has been marked so that no new // updates can happen to the old table (and since the K/V pair was deleted // nothing was copied to the new table). private static final Prime TOMBPRIME = new Prime(TOMBSTONE); // I exclude 1 long from the 2^64 possibilities, and test for it before // entering the main array. The NO_KEY value must be zero, the initial // value set by Java before it hands me the array. private static final long NO_KEY = 0L; // --- dump ---------------------------------------------------------------- /** Verbose printout of table internals, useful for debugging. */ public final void print() { System.out.println("========="); print_impl(-99,NO_KEY,_val_1); _chm.print(); System.out.println("========="); } private static final void print_impl(final int i, final long K, final Object V) { String p = (V instanceof Prime) ? "prime_" : ""; Object V2 = Prime.unbox(V); String VS = (V2 == TOMBSTONE) ? "tombstone" : V2.toString(); System.out.println("["+i+"]=("+K+","+p+VS+")"); } private final void print2() { System.out.println("========="); print2_impl(-99,NO_KEY,_val_1); _chm.print(); System.out.println("========="); } private static final void print2_impl(final int i, final long K, final Object V) { if( V != null && Prime.unbox(V) != TOMBSTONE ) print_impl(i,K,V); } // Count of reprobes private transient ConcurrentAutoTable _reprobes = new ConcurrentAutoTable(); /** Get and clear the current count of reprobes. Reprobes happen on key * collisions, and a high reprobe rate may indicate a poor hash function or * weaknesses in the table resizing function. * @return the count of reprobes since the last call to {@link #reprobes} * or since the table was created. */ public long reprobes() { long r = _reprobes.get(); _reprobes = new ConcurrentAutoTable(); return r; } // --- reprobe_limit ----------------------------------------------------- // Heuristic to decide if we have reprobed toooo many times. Running over // the reprobe limit on a 'get' call acts as a 'miss'; on a 'put' call it // can trigger a table resize. Several places must have exact agreement on // what the reprobe_limit is, so we share it here. private static final int reprobe_limit( int len ) { return REPROBE_LIMIT + (len>>2); } // --- NonBlockingHashMapLong ---------------------------------------------- // Constructors /** Create a new NonBlockingHashMapLong with default minimum size (currently set * to 8 K/V pairs or roughly 84 bytes on a standard 32-bit JVM). */ public NonBlockingHashMapLong( ) { this(MIN_SIZE,true); } /** Create a new NonBlockingHashMapLong with initial room for the given * number of elements, thus avoiding internal resizing operations to reach * an appropriate size. Large numbers here when used with a small count of * elements will sacrifice space for a small amount of time gained. The * initial size will be rounded up internally to the next larger power of 2. 
*/ public NonBlockingHashMapLong( final int initial_sz ) { this(initial_sz,true); } /** Create a new NonBlockingHashMapLong, setting the space-for-speed * tradeoff. {@code true} optimizes for space and is the default. {@code * false} optimizes for speed and doubles space costs for roughly a 10% * speed improvement. */ public NonBlockingHashMapLong( final boolean opt_for_space ) { this(1,opt_for_space); } /** Create a new NonBlockingHashMapLong, setting both the initial size and * the space-for-speed tradeoff. {@code true} optimizes for space and is * the default. {@code false} optimizes for speed and doubles space costs * for roughly a 10% speed improvement. */ public NonBlockingHashMapLong( final int initial_sz, final boolean opt_for_space ) { _opt_for_space = opt_for_space; initialize(initial_sz); } private final void initialize( final int initial_sz ) { if( initial_sz < 0 ) throw new IllegalArgumentException(); int i; // Convert to next largest power-of-2 for( i=MIN_SIZE_LOG; (1<<i) < initial_sz; i++ ) ; _chm = new CHM(this,new ConcurrentAutoTable(),i); _val_1 = TOMBSTONE; // Always as-if deleted _last_resize_milli = System.currentTimeMillis(); } // --- wrappers ------------------------------------------------------------ /** Returns the number of key-value mappings in this map. * @return the number of key-value mappings in this map */ public int size ( ) { return (_val_1==TOMBSTONE?0:1) + _chm.size(); } /** Tests if the key in the table. * @return <tt>true</tt> if the key is in the table */ public boolean containsKey( long key ) { return get(key) != null; } /** Legacy method testing if some key maps into the specified value in this * table. This method is identical in functionality to {@link * #containsValue}, and exists solely to ensure full compatibility with * class {@link java.util.Hashtable}, which supported this method prior to * introduction of the Java Collections framework. * @param val a value to search for * @return <tt>true</tt> if this map maps one or more keys to the specified value * @throws NullPointerException if the specified value is null */ public boolean contains ( Object val ) { return containsValue(val); } /** Maps the specified key to the specified value in the table. The value * cannot be null. <p> The value can be retrieved by calling {@link #get} * with a key that is equal to the original key. * @param key key with which the specified value is to be associated * @param val value to be associated with the specified key * @return the previous value associated with <tt>key</tt>, or * <tt>null</tt> if there was no mapping for <tt>key</tt> * @throws NullPointerException if the specified value is null */ public TypeV put ( long key, TypeV val ) { return putIfMatch( key, val,NO_MATCH_OLD);} /** Atomically, do a {@link #put} if-and-only-if the key is not mapped. * Useful to ensure that only a single mapping for the key exists, even if * many threads are trying to create the mapping in parallel. * @return the previous value associated with the specified key, * or <tt>null</tt> if there was no mapping for the key * @throws NullPointerException if the specified is value is null */ public TypeV putIfAbsent( long key, TypeV val ) { return putIfMatch( key, val,TOMBSTONE );} /** Removes the key (and its corresponding value) from this map. * This method does nothing if the key is not in the map. 
* @return the previous value associated with <tt>key</tt>, or * <tt>null</tt> if there was no mapping for <tt>key</tt>*/ public TypeV remove ( long key ) { return putIfMatch( key,TOMBSTONE,NO_MATCH_OLD);} /** Atomically do a {@link #remove(long)} if-and-only-if the key is mapped * to a value which is <code>equals</code> to the given value. * @throws NullPointerException if the specified value is null */ public boolean remove ( long key,Object val ) { return putIfMatch( key,TOMBSTONE,val ) == val ;} /** Atomically do a <code>put(key,val)</code> if-and-only-if the key is * mapped to some value already. * @throws NullPointerException if the specified value is null */ public TypeV replace ( long key, TypeV val ) { return putIfMatch( key, val,MATCH_ANY );} /** Atomically do a <code>put(key,newValue)</code> if-and-only-if the key is * mapped a value which is <code>equals</code> to <code>oldValue</code>. * @throws NullPointerException if the specified value is null */ public boolean replace ( long key, TypeV oldValue, TypeV newValue ) { return putIfMatch( key, newValue, oldValue ) == oldValue; } private final TypeV putIfMatch( long key, Object newVal, Object oldVal ) { if (oldVal == null || newVal == null) throw new NullPointerException(); if( key == NO_KEY ) { Object curVal = _val_1; if( oldVal == NO_MATCH_OLD || // Do we care about expected-Value at all? curVal == oldVal || // No instant match already? (oldVal == MATCH_ANY && curVal != TOMBSTONE) || oldVal.equals(curVal) ) { // Expensive equals check if( !CAS(_val_1_offset,curVal,newVal) ) // One shot CAS update attempt curVal = _val_1; // Failed; get failing witness } return curVal == TOMBSTONE ? null : (TypeV)curVal; // Return the last value present } final Object res = _chm.putIfMatch( key, newVal, oldVal ); assert !(res instanceof Prime); assert res != null; return res == TOMBSTONE ? null : (TypeV)res; } /** Removes all of the mappings from this map. */ public void clear() { // Smack a new empty table down CHM newchm = new CHM(this,new ConcurrentAutoTable(),MIN_SIZE_LOG); while( !CAS(_chm_offset,_chm,newchm) ) // Spin until the clear works ; CAS(_val_1_offset,_val_1,TOMBSTONE); } /** Returns <tt>true</tt> if this Map maps one or more keys to the specified * value. <em>Note</em>: This method requires a full internal traversal of the * hash table and is much slower than {@link #containsKey}. * @param val value whose presence in this map is to be tested * @return <tt>true</tt> if this Map maps one or more keys to the specified value * @throws NullPointerException if the specified value is null */ public boolean containsValue( Object val ) { if( val == null ) return false; if( val == _val_1 ) return true; // Key 0 for( TypeV V : values() ) if( V == val || V.equals(val) ) return true; return false; } // --- get ----------------------------------------------------------------- /** Returns the value to which the specified key is mapped, or {@code null} * if this map contains no mapping for the key. * <p>More formally, if this map contains a mapping from a key {@code k} to * a value {@code v} such that {@code key==k}, then this method * returns {@code v}; otherwise it returns {@code null}. (There can be at * most one such mapping.) * @throws NullPointerException if the specified key is null */ // Never returns a Prime nor a Tombstone. public final TypeV get( long key ) { if( key == NO_KEY ) { final Object V = _val_1; return V == TOMBSTONE ? 
null : (TypeV)V; } final Object V = _chm.get_impl(key); assert !(V instanceof Prime); // Never return a Prime assert V != TOMBSTONE; return (TypeV)V; } /** Auto-boxing version of {@link #get(long)}. */ public TypeV get ( Object key ) { return (key instanceof Long) ? get (((Long)key).longValue()) : null; } /** Auto-boxing version of {@link #remove(long)}. */ public TypeV remove ( Object key ) { return (key instanceof Long) ? remove (((Long)key).longValue()) : null; } /** Auto-boxing version of {@link #remove(long,Object)}. */ public boolean remove ( Object key, Object Val ) { return (key instanceof Long) ? remove (((Long)key).longValue(), Val) : false; } /** Auto-boxing version of {@link #containsKey(long)}. */ public boolean containsKey( Object key ) { return (key instanceof Long) ? containsKey(((Long)key).longValue()) : false; } /** Auto-boxing version of {@link #putIfAbsent}. */ public TypeV putIfAbsent( Long key, TypeV val ) { return putIfAbsent( key.longValue(), val ); } /** Auto-boxing version of {@link #replace}. */ public TypeV replace( Long key, TypeV Val ) { return replace(key.longValue(), Val); } /** Auto-boxing version of {@link #put}. */ public TypeV put ( Long key, TypeV val ) { return put(key.longValue(),val); } /** Auto-boxing version of {@link #replace}. */ public boolean replace( Long key, TypeV oldValue, TypeV newValue ) { return replace(key.longValue(), oldValue, newValue); } // --- help_copy ----------------------------------------------------------- // Help along an existing resize operation. This is just a fast cut-out // wrapper, to encourage inlining for the fast no-copy-in-progress case. We // always help the top-most table copy, even if there are nested table // copies in progress. private final void help_copy( ) { // Read the top-level CHM only once. We'll try to help this copy along, // even if it gets promoted out from under us (i.e., the copy completes // and another KVS becomes the top-level copy). CHM topchm = _chm; if( topchm._newchm == null ) return; // No copy in-progress topchm.help_copy_impl(false); } // --- CHM ----------------------------------------------------------------- // The control structure for the NonBlockingHashMapLong private static final class CHM<TypeV> implements Serializable { // Back-pointer to top-level structure final NonBlockingHashMapLong _nbhml; // Size in active K,V pairs private final ConcurrentAutoTable _size; public int size () { return (int)_size.get(); } // --- // These next 2 fields are used in the resizing heuristics, to judge when // it is time to resize or copy the table. Slots is a count of used-up // key slots, and when it nears a large fraction of the table we probably // end up reprobing too much. Last-resize-milli is the time since the // last resize; if we are running back-to-back resizes without growing // (because there are only a few live keys but many slots full of dead // keys) then we need a larger table to cut down on the churn. // Count of used slots, to tell when table is full of dead unusable slots private final ConcurrentAutoTable _slots; public int slots() { return (int)_slots.get(); } // --- // New mappings, used during resizing. // The 'next' CHM - created during a resize operation. This represents // the new table being copied from the old one. It's the volatile // variable that is read as we cross from one table to the next, to get // the required memory orderings. It monotonically transits from null to // set (once). 
volatile CHM _newchm; private static final AtomicReferenceFieldUpdater<CHM,CHM> _newchmUpdater = AtomicReferenceFieldUpdater.newUpdater(CHM.class,CHM.class, "_newchm"); // Set the _newchm field if we can. AtomicUpdaters do not fail spuriously. boolean CAS_newchm( CHM newchm ) { return _newchmUpdater.compareAndSet(this,null,newchm); } // Sometimes many threads race to create a new very large table. Only 1 // wins the race, but the losers all allocate a junk large table with // hefty allocation costs. Attempt to control the overkill here by // throttling attempts to create a new table. I cannot really block here // (lest I lose the non-blocking property) but late-arriving threads can // give the initial resizing thread a little time to allocate the initial // new table. The Right Long Term Fix here is to use array-lets and // incrementally create the new very large array. In C I'd make the array // with malloc (which would mmap under the hood) which would only eat // virtual-address and not real memory - and after Somebody wins then we // could in parallel initialize the array. Java does not allow // un-initialized array creation (especially of ref arrays!). volatile long _resizers; // count of threads attempting an initial resize private static final AtomicLongFieldUpdater<CHM> _resizerUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_resizers"); // --- key,val ------------------------------------------------------------- // Access K,V for a given idx private final boolean CAS_key( int idx, long old, long key ) { return _unsafe.compareAndSwapLong ( _keys, rawIndex(_keys, idx), old, key ); } private final boolean CAS_val( int idx, Object old, Object val ) { return _unsafe.compareAndSwapObject( _vals, rawIndex(_vals, idx), old, val ); } final long [] _keys; final Object [] _vals; // Simple constructor CHM( final NonBlockingHashMapLong nbhml, ConcurrentAutoTable size, final int logsize ) { _nbhml = nbhml; _size = size; _slots= new ConcurrentAutoTable(); _keys = new long [1<<logsize]; _vals = new Object[1<<logsize]; } // --- print innards private final void print() { for( int i=0; i<_keys.length; i++ ) { long K = _keys[i]; if( K != NO_KEY ) print_impl(i,K,_vals[i]); } CHM newchm = _newchm; // New table, if any if( newchm != null ) { System.out.println("----"); newchm.print(); } } // --- print only the live objects private final void print2( ) { for( int i=0; i<_keys.length; i++ ) { long K = _keys[i]; if( K != NO_KEY ) // key is sane print2_impl(i,K,_vals[i]); } CHM newchm = _newchm; // New table, if any if( newchm != null ) { System.out.println("----"); newchm.print2(); } } // --- get_impl ---------------------------------------------------------- // Never returns a Prime nor a Tombstone. private final Object get_impl ( final long key ) { final int len = _keys.length; int idx = (int)(key & (len-1)); // First key hash // Main spin/reprobe loop, looking for a Key hit int reprobe_cnt=0; while( true ) { final long K = _keys[idx]; // Get key before volatile read, could be NO_KEY final Object V = _vals[idx]; // Get value before volatile read, could be null or Tombstone or Prime if( K == NO_KEY ) return null; // A clear miss // Key-compare if( key == K ) { // Key hit! Check for no table-copy-in-progress if( !(V instanceof Prime) ) { // No copy? if( V == TOMBSTONE) return null; // We need a volatile-read between reading a newly inserted Value // and returning the Value (so the user might end up reading the // stale Value contents). 
// VOLATILE READ before returning V @SuppressWarnings("unused") final CHM newchm = _newchm; return V; } // Key hit - but slot is (possibly partially) copied to the new table. // Finish the copy & retry in the new table. return copy_slot_and_check(idx,key).get_impl(key); // Retry in the new table } // get and put must have the same key lookup logic! But only 'put' // needs to force a table-resize for a too-long key-reprobe sequence. // Check for too-many-reprobes on get. if( ++reprobe_cnt >= reprobe_limit(len) ) // too many probes return _newchm == null // Table copy in progress? ? null // Nope! A clear miss : copy_slot_and_check(idx,key).get_impl(key); // Retry in the new table idx = (idx+1)&(len-1); // Reprobe by 1! (could now prefetch) } } // --- putIfMatch --------------------------------------------------------- // Put, Remove, PutIfAbsent, etc. Return the old value. If the returned // value is equal to expVal (or expVal is NO_MATCH_OLD) then the put can // be assumed to work (although might have been immediately overwritten). // Only the path through copy_slot passes in an expected value of null, // and putIfMatch only returns a null if passed in an expected null. private final Object putIfMatch( final long key, final Object putval, final Object expVal ) { assert putval != null; assert !(putval instanceof Prime); assert !(expVal instanceof Prime); final int len = _keys.length; int idx = (int)(key & (len-1)); // The first key // --- // Key-Claim stanza: spin till we can claim a Key (or force a resizing). int reprobe_cnt=0; long K = NO_KEY; Object V = null; while( true ) { // Spin till we get a Key slot V = _vals[idx]; // Get old value K = _keys[idx]; // Get current key if( K == NO_KEY ) { // Slot is free? // Found an empty Key slot - which means this Key has never been in // this table. No need to put a Tombstone - the Key is not here! if( putval == TOMBSTONE ) return putval; // Not-now & never-been in this table // Claim the zero key-slot if( CAS_key(idx, NO_KEY, key) ) { // Claim slot for Key _slots.add(1); // Raise key-slots-used count break; // Got it! } // CAS to claim the key-slot failed. // // This re-read of the Key points out an annoying short-coming of Java // CAS. Most hardware CAS's report back the existing value - so that // if you fail you have a *witness* - the value which caused the CAS // to fail. The Java API turns this into a boolean destroying the // witness. Re-reading does not recover the witness because another // thread can write over the memory after the CAS. Hence we can be in // the unfortunate situation of having a CAS fail *for cause* but // having that cause removed by a later store. This turns a // non-spurious-failure CAS (such as Azul has) into one that can // apparently spuriously fail - and we avoid apparent spurious failure // by not allowing Keys to ever change. K = _keys[idx]; // CAS failed, get updated value assert K != NO_KEY ; // If keys[idx] is NO_KEY, CAS shoulda worked } // Key slot was not null, there exists a Key here if( K == key ) break; // Got it! // get and put must have the same key lookup logic! Lest 'get' give // up looking too soon. //topmap._reprobes.add(1); if( ++reprobe_cnt >= reprobe_limit(len) ) { // We simply must have a new table to do a 'put'. At this point a // 'get' will also go to the new table (if any). We do not need // to claim a key slot (indeed, we cannot find a free one to claim!). 
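        // (Added, illustrative arithmetic) reprobe_limit(len) = REPROBE_LIMIT +
        // (len>>2), so a 16-slot table tolerates 10+4 = 14 probes and a
        // 1024-slot table 10+256 = 266.  Past that limit a put lands here and
        // forces the resize below, while a get for the same key has already
        // either missed or retried in the new table.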
final CHM newchm = resize(); if( expVal != null ) _nbhml.help_copy(); // help along an existing copy return newchm.putIfMatch(key,putval,expVal); } idx = (idx+1)&(len-1); // Reprobe! } // End of spinning till we get a Key slot // --- // Found the proper Key slot, now update the matching Value slot. We // never put a null, so Value slots monotonically move from null to // not-null (deleted Values use Tombstone). Thus if 'V' is null we // fail this fast cutout and fall into the check for table-full. if( putval == V ) return V; // Fast cutout for no-change // See if we want to move to a new table (to avoid high average re-probe // counts). We only check on the initial set of a Value from null to // not-null (i.e., once per key-insert). if( (V == null && tableFull(reprobe_cnt,len)) || // Or we found a Prime: resize is already in progress. The resize // call below will do a CAS on _newchm forcing the read. V instanceof Prime) { resize(); // Force the new table copy to start return copy_slot_and_check(idx,expVal).putIfMatch(key,putval,expVal); } // --- // We are finally prepared to update the existing table assert !(V instanceof Prime); // Must match old, and we do not? Then bail out now. Note that either V // or expVal might be TOMBSTONE. Also V can be null, if we've never // inserted a value before. expVal can be null if we are called from // copy_slot. if( expVal != NO_MATCH_OLD && // Do we care about expected-Value at all? V != expVal && // No instant match already? (expVal != MATCH_ANY || V == TOMBSTONE || V == null) && !(V==null && expVal == TOMBSTONE) && // Match on null/TOMBSTONE combo (expVal == null || !expVal.equals(V)) ) // Expensive equals check at the last return V; // Do not update! // Actually change the Value in the Key,Value pair if( CAS_val(idx, V, putval ) ) { // CAS succeeded - we did the update! // Both normal put's and table-copy calls putIfMatch, but table-copy // does not (effectively) increase the number of live k/v pairs. if( expVal != null ) { // Adjust sizes - a striped counter if( (V == null || V == TOMBSTONE) && putval != TOMBSTONE ) _size.add( 1); if( !(V == null || V == TOMBSTONE) && putval == TOMBSTONE ) _size.add(-1); } } else { // Else CAS failed V = _vals[idx]; // Get new value // If a Prime'd value got installed, we need to re-run the put on the // new table. Otherwise we lost the CAS to another racing put. // Simply retry from the start. if( V instanceof Prime ) return copy_slot_and_check(idx,expVal).putIfMatch(key,putval,expVal); } // Win or lose the CAS, we are done. If we won then we know the update // happened as expected. If we lost, it means "we won but another thread // immediately stomped our update with no chance of a reader reading". return (V==null && expVal!=null) ? TOMBSTONE : V; } // --- tableFull --------------------------------------------------------- // Heuristic to decide if this table is too full, and we should start a // new table. Note that if a 'get' call has reprobed too many times and // decided the table must be full, then always the estimate_sum must be // high and we must report the table is full. If we do not, then we might // end up deciding that the table is not full and inserting into the // current table, while a 'get' has decided the same key cannot be in this // table because of too many reprobes. 
The invariant is: // slots.estimate_sum >= max_reprobe_cnt >= reprobe_limit(len) private final boolean tableFull( int reprobe_cnt, int len ) { return // Do the cheap check first: we allow some number of reprobes always reprobe_cnt >= REPROBE_LIMIT && // More expensive check: see if the table is > 1/4 full. _slots.estimate_get() >= reprobe_limit(len); } // --- resize ------------------------------------------------------------ // Resizing after too many probes. "How Big???" heuristics are here. // Callers will (not this routine) will 'help_copy' any in-progress copy. // Since this routine has a fast cutout for copy-already-started, callers // MUST 'help_copy' lest we have a path which forever runs through // 'resize' only to discover a copy-in-progress which never progresses. private final CHM resize() { // Check for resize already in progress, probably triggered by another thread CHM newchm = _newchm; // VOLATILE READ if( newchm != null ) // See if resize is already in progress return newchm; // Use the new table already // No copy in-progress, so start one. First up: compute new table size. int oldlen = _keys.length; // Old count of K,V pairs allowed int sz = size(); // Get current table count of active K,V pairs int newsz = sz; // First size estimate // Heuristic to determine new size. We expect plenty of dead-slots-with-keys // and we need some decent padding to avoid endless reprobing. if( _nbhml._opt_for_space ) { // This heuristic leads to a much denser table with a higher reprobe rate if( sz >= (oldlen>>1) ) // If we are >50% full of keys then... newsz = oldlen<<1; // Double size } else { if( sz >= (oldlen>>2) ) { // If we are >25% full of keys then... newsz = oldlen<<1; // Double size if( sz >= (oldlen>>1) ) // If we are >50% full of keys then... newsz = oldlen<<2; // Double double size } } // Last (re)size operation was very recent? Then double again; slows // down resize operations for tables subject to a high key churn rate. long tm = System.currentTimeMillis(); if( newsz <= oldlen && // New table would shrink or hold steady? tm <= _nbhml._last_resize_milli+10000 && // Recent resize (less than 1 sec ago) //(q=_slots.estimate_sum()) >= (sz<<1) ) // 1/2 of keys are dead? true ) newsz = oldlen<<1; // Double the existing size // Do not shrink, ever if( newsz < oldlen ) newsz = oldlen; //System.out.println("old="+oldlen+" new="+newsz+" size()="+sz+" est_slots()="+q+" millis="+(tm-_nbhml._last_resize_milli)); // Convert to power-of-2 int log2; for( log2=MIN_SIZE_LOG; (1<<log2) < newsz; log2++ ) ; // Compute log2 of size // Now limit the number of threads actually allocating memory to a // handful - lest we have 750 threads all trying to allocate a giant // resized array. long r = _resizers; while( !_resizerUpdater.compareAndSet(this,r,r+1) ) r = _resizers; // Size calculation: 2 words (K+V) per table entry, plus a handful. We // guess at 32-bit pointers; 64-bit pointers screws up the size calc by // 2x but does not screw up the heuristic very much. int megs = ((((1<<log2)<<1)+4)<<3/*word to bytes*/)>>20/*megs*/; if( r >= 2 && megs > 0 ) { // Already 2 guys trying; wait and see newchm = _newchm; // Between dorking around, another thread did it if( newchm != null ) // See if resize is already in progress return newchm; // Use the new table already // TODO - use a wait with timeout, so we'll wakeup as soon as the new table // is ready, or after the timeout in any case. 
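      // (Added, illustrative arithmetic) For log2=20, a 1M-slot table, the
      // estimate is ((1<<20)*2+4)*8 bytes, roughly 16MB, so megs=16 and
      // threads arriving after the first two resizers sleep 8*16 = 128ms
      // below, giving the early arrivals time to allocate the new arrays.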
//synchronized( this ) { wait(8*megs); } // Timeout - we always wakeup // For now, sleep a tad and see if the 2 guys already trying to make // the table actually get around to making it happen. try { Thread.sleep(8*megs); } catch( Exception e ) { } } // Last check, since the 'new' below is expensive and there is a chance // that another thread slipped in a new thread while we ran the heuristic. newchm = _newchm; if( newchm != null ) // See if resize is already in progress return newchm; // Use the new table already // New CHM - actually allocate the big arrays newchm = new CHM(_nbhml,_size,log2); // Another check after the slow allocation if( _newchm != null ) // See if resize is already in progress return _newchm; // Use the new table already // The new table must be CAS'd in so only 1 winner amongst duplicate // racing resizing threads. Extra CHM's will be GC'd. if( CAS_newchm( newchm ) ) { // NOW a resize-is-in-progress! //notifyAll(); // Wake up any sleepers //long nano = System.nanoTime(); //System.out.println(" "+nano+" Resize from "+oldlen+" to "+(1<<log2)+" and had "+(_resizers-1)+" extras" ); //System.out.print("["+log2); } else // CAS failed? newchm = _newchm; // Reread new table return newchm; } // The next part of the table to copy. It monotonically transits from zero // to _keys.length. Visitors to the table can claim 'work chunks' by // CAS'ing this field up, then copying the indicated indices from the old // table to the new table. Workers are not required to finish any chunk; // the counter simply wraps and work is copied duplicately until somebody // somewhere completes the count. volatile long _copyIdx = 0; static private final AtomicLongFieldUpdater<CHM> _copyIdxUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyIdx"); // Work-done reporting. Used to efficiently signal when we can move to // the new table. From 0 to len(oldkvs) refers to copying from the old // table to the new. volatile long _copyDone= 0; static private final AtomicLongFieldUpdater<CHM> _copyDoneUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyDone"); // --- help_copy_impl ---------------------------------------------------- // Help along an existing resize operation. We hope its the top-level // copy (it was when we started) but this CHM might have been promoted out // of the top position. private final void help_copy_impl( final boolean copy_all ) { final CHM newchm = _newchm; assert newchm != null; // Already checked by caller int oldlen = _keys.length; // Total amount to copy final int MIN_COPY_WORK = Math.min(oldlen,1024); // Limit per-thread work // --- int panic_start = -1; int copyidx=-9999; // Fool javac to think it's initialized while( _copyDone < oldlen ) { // Still needing to copy? // Carve out a chunk of work. The counter wraps around so every // thread eventually tries to copy every slot repeatedly. // We "panic" if we have tried TWICE to copy every slot - and it still // has not happened. i.e., twice some thread somewhere claimed they // would copy 'slot X' (by bumping _copyIdx) but they never claimed to // have finished (by bumping _copyDone). Our choices become limited: // we can wait for the work-claimers to finish (and become a blocking // algorithm) or do the copy work ourselves. Tiny tables with huge // thread counts trying to copy the table often 'panic'. if( panic_start == -1 ) { // No panic? 
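        // (Added note) The claim below also detects 'panic': once copyidx has
        // wrapped past 2*oldlen, i.e. every slot has been handed out twice
        // without the copy finishing, this thread stops claiming chunks and
        // simply copies until done, trading duplicate work for the
        // non-blocking guarantee.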
copyidx = (int)_copyIdx; while( copyidx < (oldlen<<1) && // 'panic' check !_copyIdxUpdater.compareAndSet(this,copyidx,copyidx+MIN_COPY_WORK) ) copyidx = (int)_copyIdx; // Re-read if( !(copyidx < (oldlen<<1)) ) // Panic! panic_start = copyidx; // Record where we started to panic-copy } // We now know what to copy. Try to copy. int workdone = 0; for( int i=0; i<MIN_COPY_WORK; i++ ) if( copy_slot((copyidx+i)&(oldlen-1)) ) // Made an oldtable slot go dead? workdone++; // Yes! if( workdone > 0 ) // Report work-done occasionally copy_check_and_promote( workdone );// See if we can promote //for( int i=0; i<MIN_COPY_WORK; i++ ) // if( copy_slot((copyidx+i)&(oldlen-1)) ) // Made an oldtable slot go dead? // copy_check_and_promote( 1 );// See if we can promote copyidx += MIN_COPY_WORK; // Uncomment these next 2 lines to turn on incremental table-copy. // Otherwise this thread continues to copy until it is all done. if( !copy_all && panic_start == -1 ) // No panic? return; // Then done copying after doing MIN_COPY_WORK } // Extra promotion check, in case another thread finished all copying // then got stalled before promoting. copy_check_and_promote( 0 ); // See if we can promote } // --- copy_slot_and_check ----------------------------------------------- // Copy slot 'idx' from the old table to the new table. If this thread // confirmed the copy, update the counters and check for promotion. // // Returns the result of reading the volatile _newchm, mostly as a // convenience to callers. We come here with 1-shot copy requests // typically because the caller has found a Prime, and has not yet read // the _newchm volatile - which must have changed from null-to-not-null // before any Prime appears. So the caller needs to read the _newchm // field to retry his operation in the new table, but probably has not // read it yet. private final CHM copy_slot_and_check( int idx, Object should_help ) { // We're only here because the caller saw a Prime, which implies a // table-copy is in progress. assert _newchm != null; if( copy_slot(idx) ) // Copy the desired slot copy_check_and_promote(1); // Record the slot copied // Generically help along any copy (except if called recursively from a helper) if( should_help != null ) _nbhml.help_copy(); return _newchm; } // --- copy_check_and_promote -------------------------------------------- private final void copy_check_and_promote( int workdone ) { int oldlen = _keys.length; // We made a slot unusable and so did some of the needed copy work long copyDone = _copyDone; long nowDone = copyDone+workdone; assert nowDone <= oldlen; if( workdone > 0 ) { while( !_copyDoneUpdater.compareAndSet(this,copyDone,nowDone) ) { copyDone = _copyDone; // Reload, retry nowDone = copyDone+workdone; assert nowDone <= oldlen; } //if( (10*copyDone/oldlen) != (10*nowDone/oldlen) ) // System.out.print(" "+nowDone*100/oldlen+"%"+"_"+(_copyIdx*100/oldlen)+"%"); } // Check for copy being ALL done, and promote. Note that we might have // nested in-progress copies and manage to finish a nested copy before // finishing the top-level copy. We only promote top-level copies. if( nowDone == oldlen && // Ready to promote this table? _nbhml._chm == this && // Looking at the top-level table? 
// Attempt to promote _nbhml.CAS(_chm_offset,this,_newchm) ) { _nbhml._last_resize_milli = System.currentTimeMillis(); // Record resize time for next check //long nano = System.nanoTime(); //System.out.println(" "+nano+" Promote table "+oldlen+" to "+_newchm._keys.length); //System.out.print("_"+oldlen+"]"); } } // --- copy_slot --------------------------------------------------------- // Copy one K/V pair from oldkvs[i] to newkvs. Returns true if we can // confirm that the new table guaranteed has a value for this old-table // slot. We need an accurate confirmed-copy count so that we know when we // can promote (if we promote the new table too soon, other threads may // 'miss' on values not-yet-copied from the old table). We don't allow // any direct updates on the new table, unless they first happened to the // old table - so that any transition in the new table from null to // not-null must have been from a copy_slot (or other old-table overwrite) // and not from a thread directly writing in the new table. Thus we can // count null-to-not-null transitions in the new table. private boolean copy_slot( int idx ) { // Blindly set the key slot from NO_KEY to some key which hashes here, // to eagerly stop fresh put's from inserting new values in the old // table when the old table is mid-resize. We don't need to act on the // results here, because our correctness stems from box'ing the Value // field. Slamming the Key field is a minor speed optimization. long key; while( (key=_keys[idx]) == NO_KEY ) CAS_key(idx, NO_KEY, (idx+_keys.length)/*a non-zero key which hashes here*/); // --- // Prevent new values from appearing in the old table. // Box what we see in the old table, to prevent further updates. Object oldval = _vals[idx]; // Read OLD table while( !(oldval instanceof Prime) ) { final Prime box = (oldval == null || oldval == TOMBSTONE) ? TOMBPRIME : new Prime(oldval); if( CAS_val(idx,oldval,box) ) { // CAS down a box'd version of oldval // If we made the Value slot hold a TOMBPRIME, then we both // prevented further updates here but also the (absent) oldval is // vaccuously available in the new table. We return with true here: // any thread looking for a value for this key can correctly go // straight to the new table and skip looking in the old table. if( box == TOMBPRIME ) return true; // Otherwise we boxed something, but it still needs to be // copied into the new table. oldval = box; // Record updated oldval break; // Break loop; oldval is now boxed by us } oldval = _vals[idx]; // Else try, try again } if( oldval == TOMBPRIME ) return false; // Copy already complete here! // --- // Copy the value into the new table, but only if we overwrite a null. // If another value is already in the new table, then somebody else // wrote something there and that write is happens-after any value that // appears in the old table. If putIfMatch does not find a null in the // new table - somebody else should have recorded the null-not_null // transition in this copy. Object old_unboxed = ((Prime)oldval)._V; assert old_unboxed != TOMBSTONE; boolean copied_into_new = (_newchm.putIfMatch(key, old_unboxed, null) == null); // --- // Finally, now that any old value is exposed in the new table, we can // forever hide the old-table value by slapping a TOMBPRIME down. This // will stop other threads from uselessly attempting to copy this slot // (i.e., it's a speed optimization not a correctness issue). 
while( !CAS_val(idx,oldval,TOMBPRIME) ) oldval = _vals[idx]; return copied_into_new; } // end copy_slot } // End of CHM // --- Snapshot ------------------------------------------------------------ private class SnapshotV implements Iterator<TypeV>, Enumeration<TypeV> { final CHM _sschm; public SnapshotV() { CHM topchm; while( true ) { // Verify no table-copy-in-progress topchm = _chm; if( topchm._newchm == null ) // No table-copy-in-progress break; // Table copy in-progress - so we cannot get a clean iteration. We // must help finish the table copy before we can start iterating. topchm.help_copy_impl(true); } // The "linearization point" for the iteration. Every key in this table // will be visited, but keys added later might be skipped or even be // added to a following table (also not iterated over). _sschm = topchm; // Warm-up the iterator _idx = -1; next(); } int length() { return _sschm._keys.length; } long key(final int idx) { return _sschm._keys[idx]; } private int _idx; // -2 for NO_KEY, -1 for CHECK_NEW_TABLE_LONG, 0-keys.length private long _nextK, _prevK; // Last 2 keys found private TypeV _nextV, _prevV; // Last 2 values found public boolean hasNext() { return _nextV != null; } public TypeV next() { // 'next' actually knows what the next value will be - it had to // figure that out last go 'round lest 'hasNext' report true and // some other thread deleted the last value. Instead, 'next' // spends all its effort finding the key that comes after the // 'next' key. if( _idx != -1 && _nextV == null ) throw new NoSuchElementException(); _prevK = _nextK; // This will become the previous key _prevV = _nextV; // This will become the previous value _nextV = null; // We have no more next-key // Attempt to set <_nextK,_nextV> to the next K,V pair. // _nextV is the trigger: stop searching when it is != null if( _idx == -1 ) { // Check for NO_KEY _idx = 0; // Setup for next phase of search _nextK = NO_KEY; if( (_nextV=get(_nextK)) != null ) return _prevV; } while( _idx<length() ) { // Scan array _nextK = key(_idx++); // Get a key that definitely is in the set (for the moment!) if( _nextK != NO_KEY && // Found something? (_nextV=get(_nextK)) != null ) break; // Got it! _nextK is a valid Key } // Else keep scanning return _prevV; // Return current value. } public void remove() { if( _prevV == null ) throw new IllegalStateException(); _sschm.putIfMatch( _prevK, TOMBSTONE, _prevV ); _prevV = null; } public TypeV nextElement() { return next(); } public boolean hasMoreElements() { return hasNext(); } } /** Returns an enumeration of the values in this table. * @return an enumeration of the values in this table * @see #values() */ public Enumeration<TypeV> elements() { return new SnapshotV(); } // --- values -------------------------------------------------------------- /** Returns a {@link Collection} view of the values contained in this map. * The collection is backed by the map, so changes to the map are reflected * in the collection, and vice-versa. The collection supports element * removal, which removes the corresponding mapping from this map, via the * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>, * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations. * It does not support the <tt>add</tt> or <tt>addAll</tt> operations. 
* * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator that * will never throw {@link ConcurrentModificationException}, and guarantees * to traverse elements as they existed upon construction of the iterator, * and may (but is not guaranteed to) reflect any modifications subsequent * to construction. */ public Collection<TypeV> values() { return new AbstractCollection<TypeV>() { public void clear ( ) { NonBlockingHashMapLong.this.clear ( ); } public int size ( ) { return NonBlockingHashMapLong.this.size ( ); } public boolean contains( Object v ) { return NonBlockingHashMapLong.this.containsValue(v); } public Iterator<TypeV> iterator() { return new SnapshotV(); } }; } // --- keySet -------------------------------------------------------------- /** A class which implements the {@link Iterator} and {@link Enumeration} * interfaces, generified to the {@link Long} class and supporting a * <strong>non-auto-boxing</strong> {@link #nextLong} function. */ public class IteratorLong implements Iterator<Long>, Enumeration<Long> { private final SnapshotV _ss; /** A new IteratorLong */ public IteratorLong() { _ss = new SnapshotV(); } /** Remove last key returned by {@link #next} or {@link #nextLong}. */ public void remove() { _ss.remove(); } /** <strong>Auto-box</strong> and return the next key. */ public Long next () { _ss.next(); return _ss._prevK; } /** Return the next key as a primitive {@code long}. */ public long nextLong() { _ss.next(); return _ss._prevK; } /** True if there are more keys to iterate over. */ public boolean hasNext() { return _ss.hasNext(); } /** <strong>Auto-box</strong> and return the next key. */ public Long nextElement() { return next(); } /** True if there are more keys to iterate over. */ public boolean hasMoreElements() { return hasNext(); } } /** Returns an enumeration of the <strong>auto-boxed</strong> keys in this table. * <strong>Warning:</strong> this version will auto-box all returned keys. * @return an enumeration of the auto-boxed keys in this table * @see #keySet() */ public Enumeration<Long> keys() { return new IteratorLong(); } /** Returns a {@link Set} view of the keys contained in this map; with care * the keys may be iterated over <strong>without auto-boxing</strong>. The * set is backed by the map, so changes to the map are reflected in the * set, and vice-versa. The set supports element removal, which removes * the corresponding mapping from this map, via the * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, <tt>removeAll</tt>, * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not support * the <tt>add</tt> or <tt>addAll</tt> operations. * * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator that * will never throw {@link ConcurrentModificationException}, and guarantees * to traverse elements as they existed upon construction of the iterator, * and may (but is not guaranteed to) reflect any modifications subsequent * to construction. */ public Set<Long> keySet() { return new AbstractSet<Long> () { public void clear ( ) { NonBlockingHashMapLong.this.clear ( ); } public int size ( ) { return NonBlockingHashMapLong.this.size ( ); } public boolean contains( Object k ) { return NonBlockingHashMapLong.this.containsKey(k); } public boolean remove ( Object k ) { return NonBlockingHashMapLong.this.remove (k) != null; } public IteratorLong iterator() { return new IteratorLong(); } }; } /** Keys as a long array. Array may be zero-padded if keys are concurrently deleted. 
*/ public long[] keySetLong() { long[] dom = new long[size()]; IteratorLong i=(IteratorLong)keySet().iterator(); int j=0; while( j < dom.length && i.hasNext() ) dom[j++] = i.nextLong(); return dom; } // --- entrySet ------------------------------------------------------------ // Warning: Each call to 'next' in this iterator constructs a new Long and a // new NBHMLEntry. private class NBHMLEntry extends AbstractEntry<Long,TypeV> { NBHMLEntry( final Long k, final TypeV v ) { super(k,v); } public TypeV setValue(final TypeV val) { if (val == null) throw new NullPointerException(); _val = val; return put(_key, val); } } private class SnapshotE implements Iterator<Map.Entry<Long,TypeV>> { final SnapshotV _ss; public SnapshotE() { _ss = new SnapshotV(); } public void remove() { _ss.remove(); } public Map.Entry<Long,TypeV> next() { _ss.next(); return new NBHMLEntry(_ss._prevK,_ss._prevV); } public boolean hasNext() { return _ss.hasNext(); } } /** Returns a {@link Set} view of the mappings contained in this map. The * set is backed by the map, so changes to the map are reflected in the * set, and vice-versa. The set supports element removal, which removes * the corresponding mapping from the map, via the * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, <tt>removeAll</tt>, * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not support * the <tt>add</tt> or <tt>addAll</tt> operations. * * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator * that will never throw {@link ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. * * <p><strong>Warning:</strong> the iterator associated with this Set * requires the creation of {@link java.util.Map.Entry} objects with each * iteration. The org.cliffc.high_scale_lib.NonBlockingHashMap * does not normally create or use {@link java.util.Map.Entry} objects so * they will be created solely to support this iteration. Iterating using * {@link #keySet} or {@link #values} will be more efficient. In addition, * this version requires <strong>auto-boxing</strong> the keys. 
*/ public Set<Map.Entry<Long,TypeV>> entrySet() { return new AbstractSet<Map.Entry<Long,TypeV>>() { public void clear ( ) { NonBlockingHashMapLong.this.clear( ); } public int size ( ) { return NonBlockingHashMapLong.this.size ( ); } public boolean remove( final Object o ) { if (!(o instanceof Map.Entry)) return false; final Map.Entry<?,?> e = (Map.Entry<?,?>)o; return NonBlockingHashMapLong.this.remove(e.getKey(), e.getValue()); } public boolean contains(final Object o) { if (!(o instanceof Map.Entry)) return false; final Map.Entry<?,?> e = (Map.Entry<?,?>)o; TypeV v = get(e.getKey()); return v != null && v.equals(e.getValue()); } public Iterator<Map.Entry<Long,TypeV>> iterator() { return new SnapshotE(); } }; } // --- writeObject ------------------------------------------------------- // Write a NBHML to a stream private void writeObject(java.io.ObjectOutputStream s) throws IOException { s.defaultWriteObject(); // Write nothing for( long K : keySet() ) { final Object V = get(K); // Do an official 'get' s.writeLong (K); // Write the <long,TypeV> pair s.writeObject(V); } s.writeLong(NO_KEY); // Sentinel to indicate end-of-data s.writeObject(null); } // --- readObject -------------------------------------------------------- // Read a NBHML from a stream private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { s.defaultReadObject(); // Read nothing initialize(MIN_SIZE); for (;;) { final long K = s.readLong(); final TypeV V = (TypeV) s.readObject(); if( K == NO_KEY && V == null ) break; put(K,V); // Insert with an official put } } } // End NonBlockingHashMapLong class
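// Editor's sketch (not part of the original H2O source): a minimal, hedged
// usage example of the map above. It uses keySetLong() and IteratorLong as
// defined in this file, and assumes the no-arg constructor and primitive-key
// put/get overloads declared earlier in the file; the class and variable
// names below are illustrative only.
class NonBlockingHashMapLongUsageSketch {
  static void demo() {
    NonBlockingHashMapLong<String> map = new NonBlockingHashMapLong<String>();
    map.put(42L, "answer");                          // primitive long key (assumed overload)
    map.put( 7L, "lucky");
    // keySetLong() (defined above) snapshots the live keys into a long[]
    for( long k : map.keySetLong() )
      System.out.println(k + " -> " + map.get(k));   // no Long boxing for keys
    // IteratorLong walks the keys without auto-boxing
    NonBlockingHashMapLong<String>.IteratorLong it = map.new IteratorLong();
    while( it.hasNext() )
      it.nextLong();                                 // primitive key per step
  }
}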
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/nbhm/NonBlockingHashSet.java
package water.nbhm; import java.io.Serializable; import java.util.AbstractSet; import java.util.Iterator; import java.util.Set; /* * Written by Cliff Click and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain */ /** * A simple wrapper around {@link NonBlockingHashMap} making it implement the * {@link Set} interface. All operations are Non-Blocking and multi-thread safe. * * @since 1.5 * @author Cliff Click */ public class NonBlockingHashSet<E> extends AbstractSet<E> implements Serializable { private static final Object V = ""; private final NonBlockingHashMap<E,Object> _map; /** Make a new empty {@link NonBlockingHashSet}. */ public NonBlockingHashSet() { super(); _map = new NonBlockingHashMap<E,Object>(); } /** Add {@code o} to the set. * @return <tt>true</tt> if {@code o} was added to the set, <tt>false</tt> * if {@code o} was already in the set. */ public boolean add ( final E o ) { return _map.putIfAbsent(o,V) != V; } /** Add {@code o} to the set. * @return any old match for {@code o} if it was already in the set, or {@code o} otherwise. */ public E addIfAbsent( final E o ) { return (E)_map.putIfAbsent(o,V); } /** * @return <tt>true</tt> if {@code o} is in the set. */ public boolean contains ( final Object o ) { return _map.containsKey(o); } /** * @return the match for {@code o} if {@code o} is in the set. */ public E get( final E o ) { return (E)_map.get(o); } /** Remove {@code o} from the set. * @return <tt>true</tt> if {@code o} was removed from the set, <tt>false</tt> * if {@code o} was not in the set. */ public boolean remove ( final Object o ) { return _map.remove(o) == V; } /** * Current count of elements in the set. Due to concurrent racing updates, * the size is only ever approximate. Updates due to the calling thread are * immediately visible to the calling thread. * @return count of elements. */ public int size ( ) { return _map.size(); } /** Empty the set. */ public void clear ( ) { _map.clear(); } public Iterator<E> iterator( ) { return _map.keySet().iterator(); } // --- /** * Atomically make the set immutable. Future calls to mutate will throw an * IllegalStateException. Existing mutator calls in other threads racing * with this thread will either throw IllegalStateException or their * update will be visible to this thread. This implies that a simple flag * cannot make the Set immutable, because a late-arriving update in another * thread might see the immutable flag not set yet, then mutate the Set after * the {@link #readOnly} call returns. This call can be called concurrently * (and indeed until the operation completes, all calls on the Set from any * thread either complete normally or end up calling {@link #readOnly} * internally). * * <p> This call is useful in debugging multi-threaded programs where the * Set is constructed in parallel, but construction completes after some * time; and after construction the Set is only read. Making the Set * read-only will cause updates arriving after construction is supposedly * complete to throw an {@link IllegalStateException}. */ // (1) call _map's immutable() call // (2) get snapshot // (3) CAS down a local map, power-of-2 larger than _map.size()+1/8th // (4) start @ random, visit all snapshot, insert live keys // (5) CAS _map to null, needs happens-after (4) // (6) if Set call sees _map is null, needs happens-after (4) for readers public void readOnly() { throw new RuntimeException("Unimplemented"); } }
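// Editor's sketch (not part of the original H2O source): a minimal usage
// example of the set wrapper above, using only methods defined in this file.
// The class and variable names below are illustrative only.
class NonBlockingHashSetUsageSketch {
  static void demo() {
    NonBlockingHashSet<String> set = new NonBlockingHashSet<String>();
    boolean added = set.add("alpha");     // true: "alpha" was not present before
    boolean again = set.add("alpha");     // false: already present
    assert added && !again;
    if( set.contains("alpha") )           // membership check via the backing map
      set.remove("alpha");                // removes the element if still present
    set.add("beta");
    for( String s : set )                 // iteration delegates to _map.keySet()
      System.out.println(s);
    set.clear();                          // empty the set
  }
}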
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/nbhm/NonBlockingIdentityHashMap.java
package water.nbhm; import java.io.IOException; import java.io.Serializable; import java.lang.reflect.Field; import java.util.AbstractCollection; import java.util.AbstractMap; import java.util.AbstractSet; import java.util.Collection; import java.util.ConcurrentModificationException; import java.util.Enumeration; import java.util.HashMap; import java.util.Hashtable; import java.util.Iterator; import java.util.Map; import java.util.NoSuchElementException; import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import sun.misc.Unsafe; /* * Written by Cliff Click and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain */ /** * A lock-free alternate implementation of {@link java.util.concurrent.ConcurrentHashMap} * with better scaling properties and generally lower costs to mutate the Map. * It provides identical correctness properties as ConcurrentHashMap. All * operations are non-blocking and multi-thread safe, including all update * operations. {@link NonBlockingHashMap} scales substatially better than * {@link java.util.concurrent.ConcurrentHashMap} for high update rates, even with a * large concurrency factor. Scaling is linear up to 768 CPUs on a 768-CPU * Azul box, even with 100% updates or 100% reads or any fraction in-between. * Linear scaling up to all cpus has been observed on a 32-way Sun US2 box, * 32-way Sun Niagra box, 8-way Intel box and a 4-way Power box. * * This class obeys the same functional specification as {@link * java.util.Hashtable}, and includes versions of methods corresponding to * each method of <tt>Hashtable</tt>. However, even though all operations are * thread-safe, operations do <em>not</em> entail locking and there is * <em>not</em> any support for locking the entire table in a way that * prevents all access. This class is fully interoperable with * <tt>Hashtable</tt> in programs that rely on its thread safety but not on * its synchronization details. * * <p> Operations (including <tt>put</tt>) generally do not block, so may * overlap with other update operations (including other <tt>puts</tt> and * <tt>removes</tt>). Retrievals reflect the results of the most recently * <em>completed</em> update operations holding upon their onset. For * aggregate operations such as <tt>putAll</tt>, concurrent retrievals may * reflect insertion or removal of only some entries. Similarly, Iterators * and Enumerations return elements reflecting the state of the hash table at * some point at or since the creation of the iterator/enumeration. They do * <em>not</em> throw {@link ConcurrentModificationException}. However, * iterators are designed to be used by only one thread at a time. * * <p> Very full tables, or tables with high reprobe rates may trigger an * internal resize operation to move into a larger table. Resizing is not * terribly expensive, but it is not free either; during resize operations * table throughput may drop somewhat. All threads that visit the table * during a resize will 'help' the resizing but will still be allowed to * complete their operation before the resize is finished (i.e., a simple * 'get' operation on a million-entry table undergoing resizing will not need * to block until the entire million entries are copied). * * <p>This class and its views and iterators implement all of the * <em>optional</em> methods of the {@link Map} and {@link Iterator} * interfaces. 
* * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class * does <em>not</em> allow <tt>null</tt> to be used as a key or value. * * * @since 1.5 * @author Cliff Click * @param <TypeK> the type of keys maintained by this map * @param <TypeV> the type of mapped values * * @author Prashant Deva * Modified from original NonBlockingHashMap to use identity equality. * Uses System.identityHashCode() to calculate hashMap. * Key equality is compared using '=='. */ public class NonBlockingIdentityHashMap<TypeK, TypeV> extends AbstractMap<TypeK, TypeV> implements ConcurrentMap<TypeK, TypeV>, Cloneable, Serializable { private static final long serialVersionUID = 1234123412341234123L; private static final int REPROBE_LIMIT=10; // Too many reprobes then force a table-resize // --- Bits to allow Unsafe access to arrays private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); private static final int _Obase = _unsafe.arrayBaseOffset(Object[].class); private static final int _Oscale = _unsafe.arrayIndexScale(Object[].class); private static long rawIndex(final Object[] ary, final int idx) { assert idx >= 0 && idx < ary.length; return _Obase + idx * _Oscale; } // --- Setup to use Unsafe private static final long _kvs_offset; static { // <clinit> Field f = null; try { f = NonBlockingHashMap.class.getDeclaredField("_kvs"); } catch( java.lang.NoSuchFieldException e ) { throw new RuntimeException(e); } _kvs_offset = _unsafe.objectFieldOffset(f); } private final boolean CAS_kvs( final Object[] oldkvs, final Object[] newkvs ) { return _unsafe.compareAndSwapObject(this, _kvs_offset, oldkvs, newkvs ); } // --- Adding a 'prime' bit onto Values via wrapping with a junk wrapper class private static final class Prime { final Object _V; Prime( Object V ) { _V = V; } static Object unbox( Object V ) { return V instanceof Prime ? ((Prime)V)._V : V; } } // --- hash ---------------------------------------------------------------- // Helper function to spread lousy hashCodes private static final int hash(final Object key) { int h = System.identityHashCode(key); // The real hashCode call // I assume that System.identityHashCode is well implemented with a good // spreader, and a second bit-spreader is redundant. //h ^= (h>>>20) ^ (h>>>12); //h ^= (h>>> 7) ^ (h>>> 4); return h; } // --- The Hash Table -------------------- // Slot 0 is always used for a 'CHM' entry below to hold the interesting // bits of the hash table. Slot 1 holds full hashes as an array of ints. // Slots {2,3}, {4,5}, etc hold {Key,Value} pairs. The entire hash table // can be atomically replaced by CASing the _kvs field. // // Why is CHM buried inside the _kvs Object array, instead of the other way // around? The CHM info is used during resize events and updates, but not // during standard 'get' operations. I assume 'get' is much more frequent // than 'put'. 'get' can skip the extra indirection of skipping through the // CHM to reach the _kvs array. private transient Object[] _kvs; private static final CHM chm (Object[] kvs) { return (CHM )kvs[0]; } private static final int[] hashes(Object[] kvs) { return (int[])kvs[1]; } // Number of K,V pairs in the table private static final int len(Object[] kvs) { return (kvs.length-2)>>1; } // Time since last resize private transient long _last_resize_milli; // --- Minimum table size ---------------- // Pick size 8 K/V pairs, which turns into (8*2+2)*4+12 = 84 bytes on a // standard 32-bit HotSpot, and (8*2+2)*8+12 = 156 bytes on 64-bit Azul. 
private static final int MIN_SIZE_LOG=3; // private static final int MIN_SIZE=(1<<MIN_SIZE_LOG); // Must be power of 2 // --- Sentinels ------------------------- // No-Match-Old - putIfMatch does updates only if it matches the old value, // and NO_MATCH_OLD basically counts as a wildcard match. private static final Object NO_MATCH_OLD = new Object(); // Sentinel // Match-Any-not-null - putIfMatch does updates only if it find a real old // value. private static final Object MATCH_ANY = new Object(); // Sentinel // This K/V pair has been deleted (but the Key slot is forever claimed). // The same Key can be reinserted with a new value later. private static final Object TOMBSTONE = new Object(); // Prime'd or box'd version of TOMBSTONE. This K/V pair was deleted, then a // table resize started. The K/V pair has been marked so that no new // updates can happen to the old table (and since the K/V pair was deleted // nothing was copied to the new table). private static final Prime TOMBPRIME = new Prime(TOMBSTONE); // --- key,val ------------------------------------------------------------- // Access K,V for a given idx // // Note that these are static, so that the caller is forced to read the _kvs // field only once, and share that read across all key/val calls - lest the // _kvs field move out from under us and back-to-back key & val calls refer // to different _kvs arrays. private static final Object key(Object[] kvs,int idx) { return kvs[(idx<<1)+2]; } private static final Object val(Object[] kvs,int idx) { return kvs[(idx<<1)+3]; } private static final boolean CAS_key( Object[] kvs, int idx, Object old, Object key ) { return _unsafe.compareAndSwapObject( kvs, rawIndex(kvs,(idx<<1)+2), old, key ); } private static final boolean CAS_val( Object[] kvs, int idx, Object old, Object val ) { return _unsafe.compareAndSwapObject( kvs, rawIndex(kvs,(idx<<1)+3), old, val ); } // --- dump ---------------------------------------------------------------- /** Verbose printout of table internals, useful for debugging. */ public final void print() { System.out.println("========="); print2(_kvs); System.out.println("========="); } // print the entire state of the table private final void print( Object[] kvs ) { for( int i=0; i<len(kvs); i++ ) { Object K = key(kvs,i); if( K != null ) { String KS = (K == TOMBSTONE) ? "XXX" : K.toString(); Object V = val(kvs,i); Object U = Prime.unbox(V); String p = (V==U) ? "" : "prime_"; String US = (U == TOMBSTONE) ? "tombstone" : U.toString(); System.out.println(""+i+" ("+KS+","+p+US+")"); } } Object[] newkvs = chm(kvs)._newkvs; // New table, if any if( newkvs != null ) { System.out.println("----"); print(newkvs); } } // print only the live values, broken down by the table they are in private final void print2( Object[] kvs) { for( int i=0; i<len(kvs); i++ ) { Object key = key(kvs,i); Object val = val(kvs,i); Object U = Prime.unbox(val); if( key != null && key != TOMBSTONE && // key is sane val != null && U != TOMBSTONE ) { // val is sane String p = (val==U) ? "" : "prime_"; System.out.println(""+i+" ("+key+","+p+val+")"); } } Object[] newkvs = chm(kvs)._newkvs; // New table, if any if( newkvs != null ) { System.out.println("----"); print2(newkvs); } } // Count of reprobes private transient ConcurrentAutoTable _reprobes = new ConcurrentAutoTable(); /** Get and clear the current count of reprobes. Reprobes happen on key * collisions, and a high reprobe rate may indicate a poor hash function or * weaknesses in the table resizing function. 
* @return the count of reprobes since the last call to {@link #reprobes} * or since the table was created. */ public long reprobes() { long r = _reprobes.get(); _reprobes = new ConcurrentAutoTable(); return r; } // --- reprobe_limit ----------------------------------------------------- // Heuristic to decide if we have reprobed toooo many times. Running over // the reprobe limit on a 'get' call acts as a 'miss'; on a 'put' call it // can trigger a table resize. Several places must have exact agreement on // what the reprobe_limit is, so we share it here. private static final int reprobe_limit( int len ) { return REPROBE_LIMIT + (len>>2); } // --- NonBlockingHashMap -------------------------------------------------- // Constructors /** Create a new NonBlockingHashMap with default minimum size (currently set * to 8 K/V pairs or roughly 84 bytes on a standard 32-bit JVM). */ public NonBlockingIdentityHashMap( ) { this(MIN_SIZE); } /** Create a new NonBlockingHashMap with initial room for the given number of * elements, thus avoiding internal resizing operations to reach an * appropriate size. Large numbers here when used with a small count of * elements will sacrifice space for a small amount of time gained. The * initial size will be rounded up internally to the next larger power of 2. */ public NonBlockingIdentityHashMap( final int initial_sz ) { initialize(initial_sz); } private final void initialize( int initial_sz ) { if( initial_sz < 0 ) throw new IllegalArgumentException(); int i; // Convert to next largest power-of-2 if( initial_sz > 1024*1024 ) initial_sz = 1024*1024; for( i=MIN_SIZE_LOG; (1<<i) < (initial_sz<<2); i++ ) ; // Double size for K,V pairs, add 1 for CHM and 1 for hashes _kvs = new Object[((1<<i)<<1)+2]; _kvs[0] = new CHM(new ConcurrentAutoTable()); // CHM in slot 0 _kvs[1] = new int[1<<i]; // Matching hash entries _last_resize_milli = System.currentTimeMillis(); } // Version for subclassed readObject calls, to be called after the defaultReadObject protected final void initialize() { initialize(MIN_SIZE); } // --- wrappers ------------------------------------------------------------ /** Returns the number of key-value mappings in this map. * @return the number of key-value mappings in this map */ @Override public int size ( ) { return chm(_kvs).size(); } /** Returns <tt>size() == 0</tt>. * @return <tt>size() == 0</tt> */ @Override public boolean isEmpty ( ) { return size() == 0; } /** Tests if the key in the table using the <tt>equals</tt> method. * @return <tt>true</tt> if the key is in the table using the <tt>equals</tt> method * @throws NullPointerException if the specified key is null */ @Override public boolean containsKey( Object key ) { return get(key) != null; } /** Legacy method testing if some key maps into the specified value in this * table. This method is identical in functionality to {@link * #containsValue}, and exists solely to ensure full compatibility with * class {@link java.util.Hashtable}, which supported this method prior to * introduction of the Java Collections framework. * @param val a value to search for * @return <tt>true</tt> if this map maps one or more keys to the specified value * @throws NullPointerException if the specified value is null */ public boolean contains ( Object val ) { return containsValue(val); } /** Maps the specified key to the specified value in the table. Neither key * nor value can be null. * <p> The value can be retrieved by calling {@link #get} with a key that is * equal to the original key. 
* @param key key with which the specified value is to be associated * @param val value to be associated with the specified key * @return the previous value associated with <tt>key</tt>, or * <tt>null</tt> if there was no mapping for <tt>key</tt> * @throws NullPointerException if the specified key or value is null */ @Override public TypeV put ( TypeK key, TypeV val ) { return putIfMatch( key, val, NO_MATCH_OLD); } /** Atomically, do a {@link #put} if-and-only-if the key is not mapped. * Useful to ensure that only a single mapping for the key exists, even if * many threads are trying to create the mapping in parallel. * @return the previous value associated with the specified key, * or <tt>null</tt> if there was no mapping for the key * @throws NullPointerException if the specified key or value is null */ public TypeV putIfAbsent( TypeK key, TypeV val ) { return putIfMatch( key, val, TOMBSTONE ); } /** Removes the key (and its corresponding value) from this map. * This method does nothing if the key is not in the map. * @return the previous value associated with <tt>key</tt>, or * <tt>null</tt> if there was no mapping for <tt>key</tt> * @throws NullPointerException if the specified key is null */ @Override public TypeV remove ( Object key ) { return putIfMatch( key,TOMBSTONE, NO_MATCH_OLD); } /** Atomically do a {@link #remove(Object)} if-and-only-if the key is mapped * to a value which is <code>equals</code> to the given value. * @throws NullPointerException if the specified key or value is null */ public boolean remove ( Object key,Object val ) { return putIfMatch( key,TOMBSTONE, val ) == val; } /** Atomically do a <code>put(key,val)</code> if-and-only-if the key is * mapped to some value already. * @throws NullPointerException if the specified key or value is null */ public TypeV replace ( TypeK key, TypeV val ) { return putIfMatch( key, val,MATCH_ANY ); } /** Atomically do a <code>put(key,newValue)</code> if-and-only-if the key is * mapped a value which is <code>equals</code> to <code>oldValue</code>. * @throws NullPointerException if the specified key or value is null */ public boolean replace ( TypeK key, TypeV oldValue, TypeV newValue ) { return putIfMatch( key, newValue, oldValue ) == oldValue; } private final TypeV putIfMatch( Object key, Object newVal, Object oldVal ) { if (oldVal == null || newVal == null) throw new NullPointerException(); final Object res = putIfMatch( this, _kvs, key, newVal, oldVal ); assert !(res instanceof Prime); assert res != null; return res == TOMBSTONE ? null : (TypeV)res; } /** Copies all of the mappings from the specified map to this one, replacing * any existing mappings. * @param m mappings to be stored in this map */ @Override public void putAll(Map<? extends TypeK, ? extends TypeV> m) { for (Map.Entry<? extends TypeK, ? extends TypeV> e : m.entrySet()) put(e.getKey(), e.getValue()); } /** Removes all of the mappings from this map. */ @Override public void clear() { // Smack a new empty table down Object[] newkvs = new NonBlockingIdentityHashMap(MIN_SIZE)._kvs; while( !CAS_kvs(_kvs,newkvs) ) // Spin until the clear works ; } /** Returns <tt>true</tt> if this Map maps one or more keys to the specified * value. <em>Note</em>: This method requires a full internal traversal of the * hash table and is much slower than {@link #containsKey}. 
* @param val value whose presence in this map is to be tested * @return <tt>true</tt> if this map maps one or more keys to the specified value * @throws NullPointerException if the specified value is null */ @Override public boolean containsValue( final Object val ) { if( val == null ) throw new NullPointerException(); for( TypeV V : values() ) if( V == val || V.equals(val) ) return true; return false; } // This function is supposed to do something for Hashtable, and the JCK // tests hang until it gets called... by somebody ... for some reason, // any reason.... protected void rehash() { } /** * Creates a shallow copy of this hashtable. All the structure of the * hashtable itself is copied, but the keys and values are not cloned. * This is a relatively expensive operation. * * @return a clone of the hashtable. */ @Override public Object clone() { try { // Must clone, to get the class right; NBHM might have been // extended so it would be wrong to just make a new NBHM. NonBlockingIdentityHashMap<TypeK,TypeV> t = (NonBlockingIdentityHashMap<TypeK,TypeV>) super.clone(); // But I don't have an atomic clone operation - the underlying _kvs // structure is undergoing rapid change. If I just clone the _kvs // field, the CHM in _kvs[0] won't be in sync. // // Wipe out the cloned array (it was shallow anyways). t.clear(); // Now copy sanely for( TypeK K : keySet() ) { final TypeV V = get(K); // Do an official 'get' t.put(K,V); } return t; } catch (CloneNotSupportedException e) { // this shouldn't happen, since we are Cloneable throw new InternalError(); } } /** * Returns a string representation of this map. The string representation * consists of a list of key-value mappings in the order returned by the * map's <tt>entrySet</tt> view's iterator, enclosed in braces * (<tt>"{}"</tt>). Adjacent mappings are separated by the characters * <tt>", "</tt> (comma and space). Each key-value mapping is rendered as * the key followed by an equals sign (<tt>"="</tt>) followed by the * associated value. Keys and values are converted to strings as by * {@link String#valueOf(Object)}. * * @return a string representation of this map */ @Override public String toString() { Iterator<Entry<TypeK,TypeV>> i = entrySet().iterator(); if( !i.hasNext()) return "{}"; StringBuilder sb = new StringBuilder(); sb.append('{'); for (;;) { Entry<TypeK,TypeV> e = i.next(); TypeK key = e.getKey(); TypeV value = e.getValue(); sb.append(key == this ? "(this Map)" : key); sb.append('='); sb.append(value == this ? "(this Map)" : value); if( !i.hasNext()) return sb.append('}').toString(); sb.append(", "); } } // --- get ----------------------------------------------------------------- /** Returns the value to which the specified key is mapped, or {@code null} * if this map contains no mapping for the key. * <p>More formally, if this map contains a mapping from a key {@code k} to * a value {@code v} such that {@code key.equals(k)}, then this method * returns {@code v}; otherwise it returns {@code null}. (There can be at * most one such mapping.) * @throws NullPointerException if the specified key is null */ // Never returns a Prime nor a Tombstone. 
@Override public TypeV get( Object key ) { final Object V = get_impl(this,_kvs,key); assert !(V instanceof Prime); // Never return a Prime return (TypeV)V; } private static final Object get_impl( final NonBlockingIdentityHashMap topmap, final Object[] kvs, final Object key ) { final int fullhash= hash (key); // throws NullPointerException if key is null final int len = len (kvs); // Count of key/value pairs, reads kvs.length final CHM chm = chm (kvs); // The CHM, for a volatile read below; reads slot 0 of kvs int idx = fullhash & (len-1); // First key hash // Main spin/reprobe loop, looking for a Key hit int reprobe_cnt=0; while( true ) { // Probe table. Each read of 'val' probably misses in cache in a big // table; hopefully the read of 'key' then hits in cache. final Object K = key(kvs,idx); // Get key before volatile read, could be null final Object V = val(kvs,idx); // Get value before volatile read, could be null or Tombstone or Prime if( K == null ) return null; // A clear miss // We need a volatile-read here to preserve happens-before semantics on // newly inserted Keys. If the Key body was written just before inserting // into the table a Key-compare here might read the uninitalized Key body. // Annoyingly this means we have to volatile-read before EACH key compare. // . // We also need a volatile-read between reading a newly inserted Value // and returning the Value (so the user might end up reading the stale // Value contents). Same problem as with keys - and the one volatile // read covers both. final Object[] newkvs = chm._newkvs; // VOLATILE READ before key compare // Key-compare if( K == key ) { // Key hit! Check for no table-copy-in-progress if( !(V instanceof Prime) ) // No copy? return (V == TOMBSTONE) ? null : V; // Return the value // Key hit - but slot is (possibly partially) copied to the new table. // Finish the copy & retry in the new table. return get_impl(topmap,chm.copy_slot_and_check(topmap,kvs,idx,key),key); // Retry in the new table } // get and put must have the same key lookup logic! But only 'put' // needs to force a table-resize for a too-long key-reprobe sequence. // Check for too-many-reprobes on get - and flip to the new table. if( ++reprobe_cnt >= reprobe_limit(len) || // too many probes key == TOMBSTONE ) // found a TOMBSTONE key, means no more keys in this table return newkvs == null ? null : get_impl(topmap,topmap.help_copy(newkvs),key); // Retry in the new table idx = (idx+1)&(len-1); // Reprobe by 1! (could now prefetch) } } // --- putIfMatch --------------------------------------------------------- // Put, Remove, PutIfAbsent, etc. Return the old value. If the returned // value is equal to expVal (or expVal is NO_MATCH_OLD) then the put can be // assumed to work (although might have been immediately overwritten). Only // the path through copy_slot passes in an expected value of null, and // putIfMatch only returns a null if passed in an expected null. private static final Object putIfMatch( final NonBlockingIdentityHashMap topmap, final Object[] kvs, final Object key, final Object putval, final Object expVal ) { assert putval != null; assert !(putval instanceof Prime); assert !(expVal instanceof Prime); final int fullhash = hash (key); // throws NullPointerException if key null final int len = len (kvs); // Count of key/value pairs, reads kvs.length final CHM chm = chm (kvs); // Reads kvs[0] int idx = fullhash & (len-1); // --- // Key-Claim stanza: spin till we can claim a Key (or force a resizing). 
int reprobe_cnt=0; Object K=null, V=null; Object[] newkvs=null; while( true ) { // Spin till we get a Key slot V = val(kvs,idx); // Get old value (before volatile read below!) K = key(kvs,idx); // Get current key if( K == null ) { // Slot is free? // Found an empty Key slot - which means this Key has never been in // this table. No need to put a Tombstone - the Key is not here! if( putval == TOMBSTONE ) return putval; // Not-now & never-been in this table // Claim the null key-slot if( CAS_key(kvs,idx, null, key ) ) { // Claim slot for Key chm._slots.add(1); // Raise key-slots-used count break; // Got it! } // CAS to claim the key-slot failed. // // This re-read of the Key points out an annoying short-coming of Java // CAS. Most hardware CAS's report back the existing value - so that // if you fail you have a *witness* - the value which caused the CAS // to fail. The Java API turns this into a boolean destroying the // witness. Re-reading does not recover the witness because another // thread can write over the memory after the CAS. Hence we can be in // the unfortunate situation of having a CAS fail *for cause* but // having that cause removed by a later store. This turns a // non-spurious-failure CAS (such as Azul has) into one that can // apparently spuriously fail - and we avoid apparent spurious failure // by not allowing Keys to ever change. K = key(kvs,idx); // CAS failed, get updated value assert K != null; // If keys[idx] is null, CAS shoulda worked } // Key slot was not null, there exists a Key here // We need a volatile-read here to preserve happens-before semantics on // newly inserted Keys. If the Key body was written just before inserting // into the table a Key-compare here might read the uninitalized Key body. // Annoyingly this means we have to volatile-read before EACH key compare. newkvs = chm._newkvs; // VOLATILE READ before key compare if( K == key ) break; // Got it! // get and put must have the same key lookup logic! Lest 'get' give // up looking too soon. //topmap._reprobes.add(1); if( ++reprobe_cnt >= reprobe_limit(len) || // too many probes or key == TOMBSTONE ) { // found a TOMBSTONE key, means no more keys // We simply must have a new table to do a 'put'. At this point a // 'get' will also go to the new table (if any). We do not need // to claim a key slot (indeed, we cannot find a free one to claim!). newkvs = chm.resize(topmap,kvs); if( expVal != null ) topmap.help_copy(newkvs); // help along an existing copy return putIfMatch(topmap,newkvs,key,putval,expVal); } idx = (idx+1)&(len-1); // Reprobe! } // End of spinning till we get a Key slot // --- // Found the proper Key slot, now update the matching Value slot. We // never put a null, so Value slots monotonically move from null to // not-null (deleted Values use Tombstone). Thus if 'V' is null we // fail this fast cutout and fall into the check for table-full. if( putval == V ) return V; // Fast cutout for no-change // See if we want to move to a new table (to avoid high average re-probe // counts). We only check on the initial set of a Value from null to // not-null (i.e., once per key-insert). Of course we got a 'free' check // of newkvs once per key-compare (not really free, but paid-for by the // time we get here). if( newkvs == null && // New table-copy already spotted? 
// Once per fresh key-insert check the hard way ((V == null && chm.tableFull(reprobe_cnt,len)) || // Or we found a Prime, but the JMM allowed reordering such that we // did not spot the new table (very rare race here: the writing // thread did a CAS of _newkvs then a store of a Prime. This thread // reads the Prime, then reads _newkvs - but the read of Prime was so // delayed (or the read of _newkvs was so accelerated) that they // swapped and we still read a null _newkvs. The resize call below // will do a CAS on _newkvs forcing the read. V instanceof Prime) ) newkvs = chm.resize(topmap,kvs); // Force the new table copy to start // See if we are moving to a new table. // If so, copy our slot and retry in the new table. if( newkvs != null ) return putIfMatch(topmap,chm.copy_slot_and_check(topmap,kvs,idx,expVal),key,putval,expVal); // --- // We are finally prepared to update the existing table assert !(V instanceof Prime); // Must match old, and we do not? Then bail out now. Note that either V // or expVal might be TOMBSTONE. Also V can be null, if we've never // inserted a value before. expVal can be null if we are called from // copy_slot. if( expVal != NO_MATCH_OLD && // Do we care about expected-Value at all? V != expVal && // No instant match already? (expVal != MATCH_ANY || V == TOMBSTONE || V == null) && !(V==null && expVal == TOMBSTONE) && // Match on null/TOMBSTONE combo (expVal == null || !expVal.equals(V)) ) // Expensive equals check at the last return V; // Do not update! // Actually change the Value in the Key,Value pair if( CAS_val(kvs, idx, V, putval ) ) { // CAS succeeded - we did the update! // Both normal put's and table-copy calls putIfMatch, but table-copy // does not (effectively) increase the number of live k/v pairs. if( expVal != null ) { // Adjust sizes - a striped counter if( (V == null || V == TOMBSTONE) && putval != TOMBSTONE ) chm._size.add( 1); if( !(V == null || V == TOMBSTONE) && putval == TOMBSTONE ) chm._size.add(-1); } } else { // Else CAS failed V = val(kvs,idx); // Get new value // If a Prime'd value got installed, we need to re-run the put on the // new table. Otherwise we lost the CAS to another racing put. // Simply retry from the start. if( V instanceof Prime ) return putIfMatch(topmap,chm.copy_slot_and_check(topmap,kvs,idx,expVal),key,putval,expVal); } // Win or lose the CAS, we are done. If we won then we know the update // happened as expected. If we lost, it means "we won but another thread // immediately stomped our update with no chance of a reader reading". return (V==null && expVal!=null) ? TOMBSTONE : V; } // --- help_copy --------------------------------------------------------- // Help along an existing resize operation. This is just a fast cut-out // wrapper, to encourage inlining for the fast no-copy-in-progress case. We // always help the top-most table copy, even if there are nested table // copies in progress. private final Object[] help_copy( Object[] helper ) { // Read the top-level KVS only once. We'll try to help this copy along, // even if it gets promoted out from under us (i.e., the copy completes // and another KVS becomes the top-level copy). 
Object[] topkvs = _kvs; CHM topchm = chm(topkvs); if( topchm._newkvs == null ) return helper; // No copy in-progress topchm.help_copy_impl(this,topkvs,false); return helper; } // --- CHM ----------------------------------------------------------------- // The control structure for the NonBlockingIdentityHashMap private static final class CHM<TypeK,TypeV> { // Size in active K,V pairs private final ConcurrentAutoTable _size; public int size () { return (int)_size.get(); } // --- // These next 2 fields are used in the resizing heuristics, to judge when // it is time to resize or copy the table. Slots is a count of used-up // key slots, and when it nears a large fraction of the table we probably // end up reprobing too much. Last-resize-milli is the time since the // last resize; if we are running back-to-back resizes without growing // (because there are only a few live keys but many slots full of dead // keys) then we need a larger table to cut down on the churn. // Count of used slots, to tell when table is full of dead unusable slots private final ConcurrentAutoTable _slots; public int slots() { return (int)_slots.get(); } // --- // New mappings, used during resizing. // The 'new KVs' array - created during a resize operation. This // represents the new table being copied from the old one. It's the // volatile variable that is read as we cross from one table to the next, // to get the required memory orderings. It monotonically transits from // null to set (once). volatile Object[] _newkvs; private final AtomicReferenceFieldUpdater<CHM,Object[]> _newkvsUpdater = AtomicReferenceFieldUpdater.newUpdater(CHM.class,Object[].class, "_newkvs"); // Set the _next field if we can. boolean CAS_newkvs( Object[] newkvs ) { while( _newkvs == null ) if( _newkvsUpdater.compareAndSet(this,null,newkvs) ) return true; return false; } // Sometimes many threads race to create a new very large table. Only 1 // wins the race, but the losers all allocate a junk large table with // hefty allocation costs. Attempt to control the overkill here by // throttling attempts to create a new table. I cannot really block here // (lest I lose the non-blocking property) but late-arriving threads can // give the initial resizing thread a little time to allocate the initial // new table. The Right Long Term Fix here is to use array-lets and // incrementally create the new very large array. In C I'd make the array // with malloc (which would mmap under the hood) which would only eat // virtual-address and not real memory - and after Somebody wins then we // could in parallel initialize the array. Java does not allow // un-initialized array creation (especially of ref arrays!). volatile long _resizers; // count of threads attempting an initial resize private static final AtomicLongFieldUpdater<CHM> _resizerUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_resizers"); // --- // Simple constructor CHM( ConcurrentAutoTable size ) { _size = size; _slots= new ConcurrentAutoTable(); } // --- tableFull --------------------------------------------------------- // Heuristic to decide if this table is too full, and we should start a // new table. Note that if a 'get' call has reprobed too many times and // decided the table must be full, then always the estimate_sum must be // high and we must report the table is full. If we do not, then we might // end up deciding that the table is not full and inserting into the // current table, while a 'get' has decided the same key cannot be in this // table because of too many reprobes. 
The invariant is: // slots.estimate_sum >= max_reprobe_cnt >= reprobe_limit(len) private final boolean tableFull( int reprobe_cnt, int len ) { return // Do the cheap check first: we allow some number of reprobes always reprobe_cnt >= REPROBE_LIMIT && // More expensive check: see if the table is > 1/4 full. _slots.estimate_get() >= reprobe_limit(len); } // --- resize ------------------------------------------------------------ // Resizing after too many probes. "How Big???" heuristics are here. // Callers will (not this routine) will 'help_copy' any in-progress copy. // Since this routine has a fast cutout for copy-already-started, callers // MUST 'help_copy' lest we have a path which forever runs through // 'resize' only to discover a copy-in-progress which never progresses. private final Object[] resize( NonBlockingIdentityHashMap topmap, Object[] kvs) { assert chm(kvs) == this; // Check for resize already in progress, probably triggered by another thread Object[] newkvs = _newkvs; // VOLATILE READ if( newkvs != null ) // See if resize is already in progress return newkvs; // Use the new table already // No copy in-progress, so start one. First up: compute new table size. int oldlen = len(kvs); // Old count of K,V pairs allowed int sz = size(); // Get current table count of active K,V pairs int newsz = sz; // First size estimate // Heuristic to determine new size. We expect plenty of dead-slots-with-keys // and we need some decent padding to avoid endless reprobing. if( sz >= (oldlen>>2) ) { // If we are >25% full of keys then... newsz = oldlen<<1; // Double size if( sz >= (oldlen>>1) ) // If we are >50% full of keys then... newsz = oldlen<<2; // Double double size } // This heuristic in the next 2 lines leads to a much denser table // with a higher reprobe rate //if( sz >= (oldlen>>1) ) // If we are >50% full of keys then... // newsz = oldlen<<1; // Double size // Last (re)size operation was very recent? Then double again; slows // down resize operations for tables subject to a high key churn rate. long tm = System.currentTimeMillis(); long q=0; if( newsz <= oldlen && // New table would shrink or hold steady? tm <= topmap._last_resize_milli+10000 && // Recent resize (less than 1 sec ago) (q=_slots.estimate_get()) >= (sz<<1) ) // 1/2 of keys are dead? newsz = oldlen<<1; // Double the existing size // Do not shrink, ever if( newsz < oldlen ) newsz = oldlen; // Convert to power-of-2 int log2; for( log2=MIN_SIZE_LOG; (1<<log2) < newsz; log2++ ) ; // Compute log2 of size // Now limit the number of threads actually allocating memory to a // handful - lest we have 750 threads all trying to allocate a giant // resized array. long r = _resizers; while( !_resizerUpdater.compareAndSet(this,r,r+1) ) r = _resizers; // Size calculation: 2 words (K+V) per table entry, plus a handful. We // guess at 32-bit pointers; 64-bit pointers screws up the size calc by // 2x but does not screw up the heuristic very much. int megs = ((((1<<log2)<<1)+4)<<3/*word to bytes*/)>>20/*megs*/; if( r >= 2 && megs > 0 ) { // Already 2 guys trying; wait and see newkvs = _newkvs; // Between dorking around, another thread did it if( newkvs != null ) // See if resize is already in progress return newkvs; // Use the new table already // TODO - use a wait with timeout, so we'll wakeup as soon as the new table // is ready, or after the timeout in any case. 
//synchronized( this ) { wait(8*megs); } // Timeout - we always wakeup // For now, sleep a tad and see if the 2 guys already trying to make // the table actually get around to making it happen. try { Thread.sleep(8*megs); } catch( Exception e ) { } } // Last check, since the 'new' below is expensive and there is a chance // that another thread slipped in a new thread while we ran the heuristic. newkvs = _newkvs; if( newkvs != null ) // See if resize is already in progress return newkvs; // Use the new table already // Double size for K,V pairs, add 1 for CHM newkvs = new Object[((1<<log2)<<1)+2]; // This can get expensive for big arrays newkvs[0] = new CHM(_size); // CHM in slot 0 newkvs[1] = new int[1<<log2]; // hashes in slot 1 // Another check after the slow allocation if( _newkvs != null ) // See if resize is already in progress return _newkvs; // Use the new table already // The new table must be CAS'd in so only 1 winner amongst duplicate // racing resizing threads. Extra CHM's will be GC'd. if( CAS_newkvs( newkvs ) ) { // NOW a resize-is-in-progress! //notifyAll(); // Wake up any sleepers //long nano = System.nanoTime(); //System.out.println(" "+nano+" Resize from "+oldlen+" to "+(1<<log2)+" and had "+(_resizers-1)+" extras" ); //if( System.out != null ) System.out.print("["+log2); topmap.rehash(); // Call for Hashtable's benefit } else // CAS failed? newkvs = _newkvs; // Reread new table return newkvs; } // The next part of the table to copy. It monotonically transits from zero // to _kvs.length. Visitors to the table can claim 'work chunks' by // CAS'ing this field up, then copying the indicated indices from the old // table to the new table. Workers are not required to finish any chunk; // the counter simply wraps and work is copied duplicately until somebody // somewhere completes the count. volatile long _copyIdx = 0; static private final AtomicLongFieldUpdater<CHM> _copyIdxUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyIdx"); // Work-done reporting. Used to efficiently signal when we can move to // the new table. From 0 to len(oldkvs) refers to copying from the old // table to the new. volatile long _copyDone= 0; static private final AtomicLongFieldUpdater<CHM> _copyDoneUpdater = AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyDone"); // --- help_copy_impl ---------------------------------------------------- // Help along an existing resize operation. We hope its the top-level // copy (it was when we started) but this CHM might have been promoted out // of the top position. private final void help_copy_impl( NonBlockingIdentityHashMap topmap, Object[] oldkvs, boolean copy_all ) { assert chm(oldkvs) == this; Object[] newkvs = _newkvs; assert newkvs != null; // Already checked by caller int oldlen = len(oldkvs); // Total amount to copy final int MIN_COPY_WORK = Math.min(oldlen,1024); // Limit per-thread work // --- int panic_start = -1; int copyidx=-9999; // Fool javac to think it's initialized while( _copyDone < oldlen ) { // Still needing to copy? // Carve out a chunk of work. The counter wraps around so every // thread eventually tries to copy every slot repeatedly. // We "panic" if we have tried TWICE to copy every slot - and it still // has not happened. i.e., twice some thread somewhere claimed they // would copy 'slot X' (by bumping _copyIdx) but they never claimed to // have finished (by bumping _copyDone). Our choices become limited: // we can wait for the work-claimers to finish (and become a blocking // algorithm) or do the copy work ourselves. 
Tiny tables with huge // thread counts trying to copy the table often 'panic'. if( panic_start == -1 ) { // No panic? copyidx = (int)_copyIdx; while( copyidx < (oldlen<<1) && // 'panic' check !_copyIdxUpdater.compareAndSet(this,copyidx,copyidx+MIN_COPY_WORK) ) copyidx = (int)_copyIdx; // Re-read if( !(copyidx < (oldlen<<1)) ) // Panic! panic_start = copyidx; // Record where we started to panic-copy } // We now know what to copy. Try to copy. int workdone = 0; for( int i=0; i<MIN_COPY_WORK; i++ ) if( copy_slot(topmap,(copyidx+i)&(oldlen-1),oldkvs,newkvs) ) // Made an oldtable slot go dead? workdone++; // Yes! if( workdone > 0 ) // Report work-done occasionally copy_check_and_promote( topmap, oldkvs, workdone );// See if we can promote //for( int i=0; i<MIN_COPY_WORK; i++ ) // if( copy_slot(topmap,(copyidx+i)&(oldlen-1),oldkvs,newkvs) ) // Made an oldtable slot go dead? // copy_check_and_promote( topmap, oldkvs, 1 );// See if we can promote copyidx += MIN_COPY_WORK; // Uncomment these next 2 lines to turn on incremental table-copy. // Otherwise this thread continues to copy until it is all done. if( !copy_all && panic_start == -1 ) // No panic? return; // Then done copying after doing MIN_COPY_WORK } // Extra promotion check, in case another thread finished all copying // then got stalled before promoting. copy_check_and_promote( topmap, oldkvs, 0 );// See if we can promote } // --- copy_slot_and_check ----------------------------------------------- // Copy slot 'idx' from the old table to the new table. If this thread // confirmed the copy, update the counters and check for promotion. // // Returns the result of reading the volatile _newkvs, mostly as a // convenience to callers. We come here with 1-shot copy requests // typically because the caller has found a Prime, and has not yet read // the _newkvs volatile - which must have changed from null-to-not-null // before any Prime appears. So the caller needs to read the _newkvs // field to retry his operation in the new table, but probably has not // read it yet. private final Object[] copy_slot_and_check( NonBlockingIdentityHashMap topmap, Object[] oldkvs, int idx, Object should_help ) { assert chm(oldkvs) == this; Object[] newkvs = _newkvs; // VOLATILE READ // We're only here because the caller saw a Prime, which implies a // table-copy is in progress. assert newkvs != null; if( copy_slot(topmap,idx,oldkvs,_newkvs) ) // Copy the desired slot copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied // Generically help along any copy (except if called recursively from a helper) return (should_help == null) ? newkvs : topmap.help_copy(newkvs); } // --- copy_check_and_promote -------------------------------------------- private final void copy_check_and_promote( NonBlockingIdentityHashMap topmap, Object[] oldkvs, int workdone ) { assert chm(oldkvs) == this; int oldlen = len(oldkvs); // We made a slot unusable and so did some of the needed copy work long copyDone = _copyDone; assert (copyDone+workdone) <= oldlen; if( workdone > 0 ) { while( !_copyDoneUpdater.compareAndSet(this,copyDone,copyDone+workdone) ) { copyDone = _copyDone; // Reload, retry assert (copyDone+workdone) <= oldlen; } //if( (10*copyDone/oldlen) != (10*(copyDone+workdone)/oldlen) ) //System.out.print(" "+(copyDone+workdone)*100/oldlen+"%"+"_"+(_copyIdx*100/oldlen)+"%"); } // Check for copy being ALL done, and promote. Note that we might have // nested in-progress copies and manage to finish a nested copy before // finishing the top-level copy. 
We only promote top-level copies. if( copyDone+workdone == oldlen && // Ready to promote this table? topmap._kvs == oldkvs && // Looking at the top-level table? // Attempt to promote topmap.CAS_kvs(oldkvs,_newkvs) ) { topmap._last_resize_milli = System.currentTimeMillis(); // Record resize time for next check //long nano = System.nanoTime(); //System.out.println(" "+nano+" Promote table to "+len(_newkvs)); //if( System.out != null ) System.out.print("]"); } } // --- copy_slot --------------------------------------------------------- // Copy one K/V pair from oldkvs[i] to newkvs. Returns true if we can // confirm that the new table guaranteed has a value for this old-table // slot. We need an accurate confirmed-copy count so that we know when we // can promote (if we promote the new table too soon, other threads may // 'miss' on values not-yet-copied from the old table). We don't allow // any direct updates on the new table, unless they first happened to the // old table - so that any transition in the new table from null to // not-null must have been from a copy_slot (or other old-table overwrite) // and not from a thread directly writing in the new table. Thus we can // count null-to-not-null transitions in the new table. private boolean copy_slot( NonBlockingIdentityHashMap topmap, int idx, Object[] oldkvs, Object[] newkvs ) { // Blindly set the key slot from null to TOMBSTONE, to eagerly stop // fresh put's from inserting new values in the old table when the old // table is mid-resize. We don't need to act on the results here, // because our correctness stems from box'ing the Value field. Slamming // the Key field is a minor speed optimization. Object key; while( (key=key(oldkvs,idx)) == null ) CAS_key(oldkvs,idx, null, TOMBSTONE); // --- // Prevent new values from appearing in the old table. // Box what we see in the old table, to prevent further updates. Object oldval = val(oldkvs,idx); // Read OLD table while( !(oldval instanceof Prime) ) { final Prime box = (oldval == null || oldval == TOMBSTONE) ? TOMBPRIME : new Prime(oldval); if( CAS_val(oldkvs,idx,oldval,box) ) { // CAS down a box'd version of oldval // If we made the Value slot hold a TOMBPRIME, then we both // prevented further updates here but also the (absent) // oldval is vaccuously available in the new table. We // return with true here: any thread looking for a value for // this key can correctly go straight to the new table and // skip looking in the old table. if( box == TOMBPRIME ) return true; // Otherwise we boxed something, but it still needs to be // copied into the new table. oldval = box; // Record updated oldval break; // Break loop; oldval is now boxed by us } oldval = val(oldkvs,idx); // Else try, try again } if( oldval == TOMBPRIME ) return false; // Copy already complete here! // --- // Copy the value into the new table, but only if we overwrite a null. // If another value is already in the new table, then somebody else // wrote something there and that write is happens-after any value that // appears in the old table. If putIfMatch does not find a null in the // new table - somebody else should have recorded the null-not_null // transition in this copy. Object old_unboxed = ((Prime)oldval)._V; assert old_unboxed != TOMBSTONE; boolean copied_into_new = (putIfMatch(topmap, newkvs, key, old_unboxed, null) == null); // --- // Finally, now that any old value is exposed in the new table, we can // forever hide the old-table value by slapping a TOMBPRIME down. 
This // will stop other threads from uselessly attempting to copy this slot // (i.e., it's a speed optimization not a correctness issue). while( !CAS_val(oldkvs,idx,oldval,TOMBPRIME) ) oldval = val(oldkvs,idx); return copied_into_new; } // end copy_slot } // End of CHM // --- Snapshot ------------------------------------------------------------ // The main class for iterating over the NBHM. It "snapshots" a clean // view of the K/V array. private class SnapshotV implements Iterator<TypeV>, Enumeration<TypeV> { final Object[] _sskvs; public SnapshotV() { while( true ) { // Verify no table-copy-in-progress Object[] topkvs = _kvs; CHM topchm = chm(topkvs); if( topchm._newkvs == null ) { // No table-copy-in-progress // The "linearization point" for the iteration. Every key in this // table will be visited, but keys added later might be skipped or // even be added to a following table (also not iterated over). _sskvs = topkvs; break; } // Table copy in-progress - so we cannot get a clean iteration. We // must help finish the table copy before we can start iterating. topchm.help_copy_impl(NonBlockingIdentityHashMap.this,topkvs,true); } // Warm-up the iterator next(); } int length() { return len(_sskvs); } Object key(int idx) { return NonBlockingIdentityHashMap.key(_sskvs,idx); } private int _idx; // Varies from 0-keys.length private Object _nextK, _prevK; // Last 2 keys found private TypeV _nextV, _prevV; // Last 2 values found public boolean hasNext() { return _nextV != null; } public TypeV next() { // 'next' actually knows what the next value will be - it had to // figure that out last go-around lest 'hasNext' report true and // some other thread deleted the last value. Instead, 'next' // spends all its effort finding the key that comes after the // 'next' key. if( _idx != 0 && _nextV == null ) throw new NoSuchElementException(); _prevK = _nextK; // This will become the previous key _prevV = _nextV; // This will become the previous value _nextV = null; // We have no more next-key // Attempt to set <_nextK,_nextV> to the next K,V pair. // _nextV is the trigger: stop searching when it is != null while( _idx<length() ) { // Scan array _nextK = key(_idx++); // Get a key that definitely is in the set (for the moment!) if( _nextK != null && // Found something? _nextK != TOMBSTONE && (_nextV=get(_nextK)) != null ) break; // Got it! _nextK is a valid Key } // Else keep scanning return _prevV; // Return current value. } public void remove() { if( _prevV == null ) throw new IllegalStateException(); putIfMatch( NonBlockingIdentityHashMap.this, _sskvs, _prevK, TOMBSTONE, _prevV ); _prevV = null; } public TypeV nextElement() { return next(); } public boolean hasMoreElements() { return hasNext(); } } /** Returns an enumeration of the values in this table. * @return an enumeration of the values in this table * @see #values() */ public Enumeration<TypeV> elements() { return new SnapshotV(); } // --- values -------------------------------------------------------------- /** Returns a {@link Collection} view of the values contained in this map. * The collection is backed by the map, so changes to the map are reflected * in the collection, and vice-versa. The collection supports element * removal, which removes the corresponding mapping from this map, via the * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>, * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations. * It does not support the <tt>add</tt> or <tt>addAll</tt> operations. 
* * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator that * will never throw {@link ConcurrentModificationException}, and guarantees * to traverse elements as they existed upon construction of the iterator, * and may (but is not guaranteed to) reflect any modifications subsequent * to construction. */ @Override public Collection<TypeV> values() { return new AbstractCollection<TypeV>() { @Override public void clear ( ) { NonBlockingIdentityHashMap.this.clear ( ); } @Override public int size ( ) { return NonBlockingIdentityHashMap.this.size ( ); } @Override public boolean contains( Object v ) { return NonBlockingIdentityHashMap.this.containsValue(v); } @Override public Iterator<TypeV> iterator() { return new SnapshotV(); } }; } // --- keySet -------------------------------------------------------------- private class SnapshotK implements Iterator<TypeK>, Enumeration<TypeK> { final SnapshotV _ss; public SnapshotK() { _ss = new SnapshotV(); } public void remove() { _ss.remove(); } public TypeK next() { _ss.next(); return (TypeK)_ss._prevK; } public boolean hasNext() { return _ss.hasNext(); } public TypeK nextElement() { return next(); } public boolean hasMoreElements() { return hasNext(); } } /** Returns an enumeration of the keys in this table. * @return an enumeration of the keys in this table * @see #keySet() */ public Enumeration<TypeK> keys() { return new SnapshotK(); } /** Returns a {@link Set} view of the keys contained in this map. The set * is backed by the map, so changes to the map are reflected in the set, * and vice-versa. The set supports element removal, which removes the * corresponding mapping from this map, via the <tt>Iterator.remove</tt>, * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and * <tt>clear</tt> operations. It does not support the <tt>add</tt> or * <tt>addAll</tt> operations. * * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator that * will never throw {@link ConcurrentModificationException}, and guarantees * to traverse elements as they existed upon construction of the iterator, * and may (but is not guaranteed to) reflect any modifications subsequent * to construction. */ @Override public Set<TypeK> keySet() { return new AbstractSet<TypeK> () { @Override public void clear ( ) { NonBlockingIdentityHashMap.this.clear ( ); } @Override public int size ( ) { return NonBlockingIdentityHashMap.this.size ( ); } @Override public boolean contains( Object k ) { return NonBlockingIdentityHashMap.this.containsKey(k); } @Override public boolean remove ( Object k ) { return NonBlockingIdentityHashMap.this.remove (k) != null; } @Override public Iterator<TypeK> iterator() { return new SnapshotK(); } }; } // --- entrySet ------------------------------------------------------------ // Warning: Each call to 'next' in this iterator constructs a new NBHMEntry. private class NBHMEntry extends AbstractEntry<TypeK,TypeV> { NBHMEntry( final TypeK k, final TypeV v ) { super(k,v); } public TypeV setValue(final TypeV val) { if( val == null ) throw new NullPointerException(); _val = val; return put(_key, val); } } private class SnapshotE implements Iterator<Map.Entry<TypeK,TypeV>> { final SnapshotV _ss; public SnapshotE() { _ss = new SnapshotV(); } public void remove() { _ss.remove(); } public Map.Entry<TypeK,TypeV> next() { _ss.next(); return new NBHMEntry((TypeK)_ss._prevK,_ss._prevV); } public boolean hasNext() { return _ss.hasNext(); } } /** Returns a {@link Set} view of the mappings contained in this map. 
The * set is backed by the map, so changes to the map are reflected in the * set, and vice-versa. The set supports element removal, which removes * the corresponding mapping from the map, via the * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, <tt>removeAll</tt>, * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not support * the <tt>add</tt> or <tt>addAll</tt> operations. * * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator * that will never throw {@link ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. * * <p><strong>Warning:</strong> the iterator associated with this Set * requires the creation of {@link java.util.Map.Entry} objects with each * iteration. The {@link NonBlockingIdentityHashMap} does not normally create or * using {@link java.util.Map.Entry} objects so they will be created soley * to support this iteration. Iterating using {@link #keySet} or {@link * #values} will be more efficient. */ @Override public Set<Map.Entry<TypeK,TypeV>> entrySet() { return new AbstractSet<Map.Entry<TypeK,TypeV>>() { @Override public void clear ( ) { NonBlockingIdentityHashMap.this.clear( ); } @Override public int size ( ) { return NonBlockingIdentityHashMap.this.size ( ); } @Override public boolean remove( final Object o ) { if( !(o instanceof Map.Entry)) return false; final Map.Entry<?,?> e = (Map.Entry<?,?>)o; return NonBlockingIdentityHashMap.this.remove(e.getKey(), e.getValue()); } @Override public boolean contains(final Object o) { if( !(o instanceof Map.Entry)) return false; final Map.Entry<?,?> e = (Map.Entry<?,?>)o; TypeV v = get(e.getKey()); return v.equals(e.getValue()); } @Override public Iterator<Map.Entry<TypeK,TypeV>> iterator() { return new SnapshotE(); } }; } // --- writeObject ------------------------------------------------------- // Write a NBHM to a stream private void writeObject(java.io.ObjectOutputStream s) throws IOException { s.defaultWriteObject(); // Nothing to write for( Object K : keySet() ) { final Object V = get(K); // Do an official 'get' s.writeObject(K); // Write the <TypeK,TypeV> pair s.writeObject(V); } s.writeObject(null); // Sentinel to indicate end-of-data s.writeObject(null); } // --- readObject -------------------------------------------------------- // Read a CHM from a stream private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { s.defaultReadObject(); // Read nothing initialize(MIN_SIZE); for(;;) { final TypeK K = (TypeK) s.readObject(); final TypeV V = (TypeV) s.readObject(); if( K == null ) break; put(K,V); // Insert with an offical put } } } // End NonBlockingIdentityHashMap class
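A minimal usage sketch (not part of the original source) may help clarify the identity-key semantics and the weakly-consistent snapshot iteration documented above. The class name NBHMUsageSketch is hypothetical, the no-arg constructor is assumed, and the sketch is assumed to live in the water.nbhm package alongside the map.

// Hypothetical demo class, shown only to illustrate the public API above.
class NBHMUsageSketch {
  public static void main(String[] args) {
    NonBlockingIdentityHashMap<String,Integer> map =
      new NonBlockingIdentityHashMap<String,Integer>();
    String key = "alpha";                               // keys are matched by reference (==)
    map.put(key, 1);
    System.out.println(map.get(key));                   // 1   : same reference
    System.out.println(map.get(new String("alpha")));   // null: equal but not identical
    for( String k : map.keySet() )                      // weakly-consistent snapshot iteration
      System.out.println(k + " -> " + map.get(k));
  }
}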
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/nbhm/NonBlockingSetInt.java
package water.nbhm; import java.io.IOException; import java.io.Serializable; import java.lang.reflect.Field; import java.util.AbstractSet; import java.util.Iterator; import java.util.NoSuchElementException; import java.util.concurrent.atomic.AtomicInteger; import sun.misc.Unsafe; /* * Written by Cliff Click and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain */ /** * A multi-threaded bit-vector set, implemented as an array of primitive * {@code longs}. All operations are non-blocking and multi-threaded safe. * {@link #contains(int)} calls are roughly the same speed as a {load, mask} * sequence. {@link #add(int)} and {@link #remove(int)} calls are a tad more * expensive than a {load, mask, store} sequence because they must use a CAS. * The bit-vector is auto-sizing. * * <p><em>General note of caution:</em> The Set API allows the use of {@link Integer} * with silent autoboxing - which can be very expensive if many calls are * being made. Since autoboxing is silent you may not be aware that this is * going on. The built-in API takes lower-case {@code ints} and is much more * efficient. * * <p>Space: space is used in proportion to the largest element, as opposed to * the number of elements (as is the case with hash-table based Set * implementations). Space is approximately (largest_element/8 + 64) bytes. * * The implementation is a simple bit-vector using CAS for update. * * @since 1.5 * @author Cliff Click */ public class NonBlockingSetInt extends AbstractSet<Integer> implements Serializable { private static final long serialVersionUID = 1234123412341234123L; private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); // --- Bits to allow atomic update of the NBSI private static final long _nbsi_offset; static { // <clinit> Field f = null; try { f = NonBlockingSetInt.class.getDeclaredField("_nbsi"); } catch( java.lang.NoSuchFieldException e ) { } _nbsi_offset = _unsafe.objectFieldOffset(f); } private final boolean CAS_nbsi( NBSI old, NBSI nnn ) { return _unsafe.compareAndSwapObject(this, _nbsi_offset, old, nnn ); } // The actual Set of Joy, which changes during a resize event. The // Only Field for this class, so I can atomically change the entire // set implementation with a single CAS. private transient NBSI _nbsi; /** Create a new empty bit-vector */ public NonBlockingSetInt( ) { _nbsi = new NBSI(63, new ConcurrentAutoTable(), this); // The initial 1-word set } /** * Add {@code i} to the set. Uppercase {@link Integer} version of add, * requires auto-unboxing. When possible use the {@code int} version of * {@link #add(int)} for efficiency. * @throws IllegalArgumentException if i is negative. * @return <tt>true</tt> if i was added to the set. */ public boolean add ( final Integer i ) { return add(i.intValue()); } /** * Test if {@code o} is in the set. This is the uppercase {@link Integer} * version of contains, requires a type-check and auto-unboxing. When * possible use the {@code int} version of {@link #contains(int)} for * efficiency. * @return <tt>true</tt> if i was in the set. */ public boolean contains( final Object o ) { return o instanceof Integer ? contains(((Integer)o).intValue()) : false; } /** * Remove {@code o} from the set. This is the uppercase {@link Integer} * version of remove, requires a type-check and auto-unboxing. When * possible use the {@code int} version of {@link #remove(int)} for * efficiency. * @return <tt>true</tt> if i was removed to the set. 
*/ public boolean remove( final Object o ) { return o instanceof Integer ? remove (((Integer)o).intValue()) : false; } /** * Add {@code i} to the set. This is the lower-case '{@code int}' version * of {@link #add} - no autoboxing. Negative values throw * IllegalArgumentException. * @throws IllegalArgumentException if i is negative. * @return <tt>true</tt> if i was added to the set. */ public boolean add( final int i ) { if( i < 0 ) throw new IllegalArgumentException(""+i); return _nbsi.add(i); } /** * Test if {@code i} is in the set. This is the lower-case '{@code int}' * version of {@link #contains} - no autoboxing. * @return <tt>true</tt> if i was int the set. */ public boolean contains( final int i ) { return i<0 ? false : _nbsi.contains(i); } /** * Remove {@code i} from the set. This is the fast lower-case '{@code int}' * version of {@link #remove} - no autoboxing. * @return <tt>true</tt> if i was added to the set. */ public boolean remove ( final int i ) { return i<0 ? false : _nbsi.remove (i); } /** * Current count of elements in the set. Due to concurrent racing updates, * the size is only ever approximate. Updates due to the calling thread are * immediately visible to calling thread. * @return count of elements. */ public int size ( ) { return _nbsi.size( ); } /** Approx largest element in set; at least as big (but max might be smaller). */ public int length() { return _nbsi._bits.length<<6; } /** Empty the bitvector. */ public void clear ( ) { NBSI cleared = new NBSI(63, new ConcurrentAutoTable(), this); // An empty initial NBSI while( !CAS_nbsi( _nbsi, cleared ) ) // Spin until clear works ; } /** Verbose printout of internal structure for debugging. */ public void print() { _nbsi.print(0); } /** * Standard Java {@link Iterator}. Not very efficient because it * auto-boxes the returned values. */ public Iterator<Integer> iterator( ) { return new iter(); } private class iter implements Iterator<Integer> { NBSI _nbsi2; int _idx = -1; int _prev = -1; iter() { _nbsi2 = _nbsi; advance(); } public boolean hasNext() { return _idx != -2; } private void advance() { while( true ) { _idx++; // Next index while( (_idx>>6) >= _nbsi2._bits.length ) { // Index out of range? if( _nbsi2._new == null ) { // New table? 
_idx = -2; // No, so must be all done return; // } _nbsi2 = _nbsi2._new; // Carry on, in the new table } if( _nbsi2.contains(_idx) ) return; } } public Integer next() { if( _idx == -1 ) throw new NoSuchElementException(); _prev = _idx; advance(); return _prev; } public void remove() { if( _prev == -1 ) throw new IllegalStateException(); _nbsi2.remove(_prev); _prev = -1; } } // --- writeObject ------------------------------------------------------- // Write a NBSI to a stream private void writeObject(java.io.ObjectOutputStream s) throws IOException { s.defaultWriteObject(); // Nothing to write final NBSI nbsi = _nbsi; // The One Field is transient final int len = _nbsi._bits.length<<6; s.writeInt(len); // Write max element for( int i=0; i<len; i++ ) s.writeBoolean( _nbsi.contains(i) ); } // --- readObject -------------------------------------------------------- // Read a CHM from a stream private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { s.defaultReadObject(); // Read nothing final int len = s.readInt(); // Read max element _nbsi = new NBSI(len, new ConcurrentAutoTable(), this); for( int i=0; i<len; i++ ) // Read all bits if( s.readBoolean() ) _nbsi.add(i); } // --- NBSI ---------------------------------------------------------------- private static final class NBSI { // Back pointer to the parent wrapper; sorta like make the class non-static private transient final NonBlockingSetInt _non_blocking_set_int; // Used to count elements: a high-performance counter. private transient final ConcurrentAutoTable _size; // The Bits private final long _bits[]; // --- Bits to allow Unsafe access to arrays private static final int _Lbase = _unsafe.arrayBaseOffset(long[].class); private static final int _Lscale = _unsafe.arrayIndexScale(long[].class); private static long rawIndex(final long[] ary, final int idx) { assert idx >= 0 && idx < ary.length; return _Lbase + idx * _Lscale; } private final boolean CAS( int idx, long old, long nnn ) { return _unsafe.compareAndSwapLong( _bits, rawIndex(_bits, idx), old, nnn ); } // --- Resize // The New Table, only set once to non-zero during a resize. // Must be atomically set. private NBSI _new; private static final long _new_offset; static { // <clinit> Field f = null; try { f = NBSI.class.getDeclaredField("_new"); } catch( java.lang.NoSuchFieldException e ) { } _new_offset = _unsafe.objectFieldOffset(f); } private final boolean CAS_new( NBSI nnn ) { return _unsafe.compareAndSwapObject(this, _new_offset, null, nnn ); } private transient final AtomicInteger _copyIdx; // Used to count bits started copying private transient final AtomicInteger _copyDone; // Used to count words copied in a resize operation private transient final int _sum_bits_length; // Sum of all nested _bits.lengths private static final long mask( int i ) { return 1L<<(i&63); } // I need 1 free bit out of 64 to allow for resize. I do this by stealing // the high order bit - but then I need to do something with adding element // number 63 (and friends). I could use a mod63 function but it's more // efficient to handle the mod-64 case as an exception. // // Every 64th bit is put in it's own recursive bitvector. If the low 6 bits // are all set, we shift them off and recursively operate on the _nbsi64 set. private final NBSI _nbsi64; private NBSI( int max_elem, ConcurrentAutoTable ctr, NonBlockingSetInt nonb ) { super(); _non_blocking_set_int = nonb; _size = ctr; _copyIdx = ctr == null ? null : new AtomicInteger(); _copyDone = ctr == null ? 
null : new AtomicInteger(); // The main array of bits _bits = new long[(int)(((long)max_elem+63)>>>6)]; // Every 64th bit is moved off to it's own subarray, so that the // sign-bit is free for other purposes _nbsi64 = ((max_elem+1)>>>6) == 0 ? null : new NBSI((max_elem+1)>>>6, null, null); _sum_bits_length = _bits.length + (_nbsi64==null ? 0 : _nbsi64._sum_bits_length); } // Lower-case 'int' versions - no autoboxing, very fast. // 'i' is known positive. public boolean add( final int i ) { // Check for out-of-range for the current size bit vector. // If so we need to grow the bit vector. if( (i>>6) >= _bits.length ) return install_larger_new_bits(i). // Install larger pile-o-bits (duh) help_copy().add(i); // Finally, add to the new table // Handle every 64th bit via using a nested array NBSI nbsi = this; // The bit array being added into int j = i; // The bit index being added while( (j&63) == 63 ) { // Bit 64? (low 6 bits are all set) nbsi = nbsi._nbsi64; // Recurse j = j>>6; // Strip off low 6 bits (all set) } final long mask = mask(j); long old; do { old = nbsi._bits[j>>6]; // Read old bits if( old < 0 ) // Not mutable? // Not mutable: finish copy of word, and retry on copied word return help_copy_impl(i).help_copy().add(i); if( (old & mask) != 0 ) return false; // Bit is already set? } while( !nbsi.CAS( j>>6, old, old | mask ) ); _size.add(1); return true; } public boolean remove( final int i ) { if( (i>>6) >= _bits.length ) // Out of bounds? Not in this array! return _new==null ? false : help_copy().remove(i); // Handle every 64th bit via using a nested array NBSI nbsi = this; // The bit array being added into int j = i; // The bit index being added while( (j&63) == 63 ) { // Bit 64? (low 6 bits are all set) nbsi = nbsi._nbsi64; // Recurse j = j>>6; // Strip off low 6 bits (all set) } final long mask = mask(j); long old; do { old = nbsi._bits[j>>6]; // Read old bits if( old < 0 ) // Not mutable? // Not mutable: finish copy of word, and retry on copied word return help_copy_impl(i).help_copy().remove(i); if( (old & mask) == 0 ) return false; // Bit is already clear? } while( !nbsi.CAS( j>>6, old, old & ~mask ) ); _size.add(-1); return true; } public boolean contains( final int i ) { if( (i>>6) >= _bits.length ) // Out of bounds? Not in this array! return _new==null ? false : help_copy().contains(i); // Handle every 64th bit via using a nested array NBSI nbsi = this; // The bit array being added into int j = i; // The bit index being added while( (j&63) == 63 ) { // Bit 64? (low 6 bits are all set) nbsi = nbsi._nbsi64; // Recurse j = j>>6; // Strip off low 6 bits (all set) } final long mask = mask(j); long old = nbsi._bits[j>>6]; // Read old bits if( old < 0 ) // Not mutable? // Not mutable: finish copy of word, and retry on copied word return help_copy_impl(i).help_copy().contains(i); // Yes mutable: test & return bit return (old & mask) != 0; } public int size() { return (int)_size.get(); } // Must grow the current array to hold an element of size i private NBSI install_larger_new_bits( final int i ) { if( _new == null ) { // Grow by powers of 2, to avoid minor grow-by-1's. // Note: must grow by exact powers-of-2 or the by-64-bit trick doesn't work right int sz = (_bits.length<<6)<<1; // CAS to install a new larger size. Did it work? Did it fail? We // don't know and don't care. Only One can be installed, so if // another thread installed a too-small size, we can't help it - we // must simply install our new larger size as a nested-resize table. 
CAS_new(new NBSI(sz, _size, _non_blocking_set_int)); } // Return self for 'fluid' programming style return this; } // Help any top-level NBSI to copy until completed. // Always return the _new version of *this* NBSI, in case we're nested. private NBSI help_copy() { // Pick some words to help with - but only help copy the top-level NBSI. // Nested NBSI waits until the top is done before we start helping. NBSI top_nbsi = _non_blocking_set_int._nbsi; final int HELP = 8; // Tuning number: how much copy pain are we willing to inflict? // We "help" by forcing individual bit indices to copy. However, bits // come in lumps of 64 per word, so we just advance the bit counter by 64's. int idx = top_nbsi._copyIdx.getAndAdd(64*HELP); for( int i=0; i<HELP; i++ ) { int j = idx+i*64; j %= (top_nbsi._bits.length<<6); // Limit, wrap to array size; means we retry indices top_nbsi.help_copy_impl(j ); top_nbsi.help_copy_impl(j+63); // Also force the nested-by-64 bit } // Top level guy ready to promote? // Note: WE may not be the top-level guy! if( top_nbsi._copyDone.get() == top_nbsi._sum_bits_length ) // One shot CAS to promote - it may fail since we are racing; others // may promote as well if( _non_blocking_set_int.CAS_nbsi( top_nbsi, top_nbsi._new ) ) { //System.out.println("Promote at top level to size "+(_non_blocking_set_int._nbsi._bits.length<<6)); } // Return the new bitvector for 'fluid' programming style return _new; } // Help copy this one word. State Machine. // (1) If not "made immutable" in the old array, set the sign bit to make // it immutable. // (2) If non-zero in old array & zero in new, CAS new from 0 to copy-of-old // (3) If non-zero in old array & non-zero in new, CAS old to zero // (4) Zero in old, new is valid // At this point, old should be immutable-zero & new has a copy of bits private NBSI help_copy_impl( int i ) { // Handle every 64th bit via using a nested array NBSI old = this; // The bit array being copied from NBSI nnn = _new; // The bit array being copied to if( nnn == null ) return this; // Promoted already int j = i; // The bit index being added while( (j&63) == 63 ) { // Bit 64? (low 6 bits are all set) old = old._nbsi64; // Recurse nnn = nnn._nbsi64; // Recurse j = j>>6; // Strip off low 6 bits (all set) } // Transit from state 1: word is not immutable yet // Immutable is in bit 63, the sign bit. long bits = old._bits[j>>6]; while( bits >= 0 ) { // Still in state (1)? long oldbits = bits; bits |= mask(63); // Target state of bits: sign-bit means immutable if( old.CAS( j>>6, oldbits, bits ) ) { if( oldbits == 0 ) _copyDone.addAndGet(1); break; // Success - old array word is now immutable } bits = old._bits[j>>6]; // Retry if CAS failed } // Transit from state 2: non-zero in old and zero in new if( bits != mask(63) ) { // Non-zero in old? long new_bits = nnn._bits[j>>6]; if( new_bits == 0 ) { // New array is still zero new_bits = bits & ~mask(63); // Desired new value: a mutable copy of bits // One-shot CAS attempt, no loop, from 0 to non-zero. 
// If it fails, somebody else did the copy for us if( !nnn.CAS( j>>6, 0, new_bits ) ) new_bits = nnn._bits[j>>6]; // Since it failed, get the new value assert new_bits != 0; } // Transit from state 3: non-zero in old and non-zero in new // One-shot CAS attempt, no loop, from non-zero to 0 (but immutable) if( old.CAS( j>>6, bits, mask(63) ) ) _copyDone.addAndGet(1); // One more word finished copying } // Now in state 4: zero (and immutable) in old // Return the self bitvector for 'fluid' programming style return this; } private void print( int d, String msg ) { for( int i=0; i<d; i++ ) System.out.print(" "); System.out.println(msg); } private void print(int d) { StringBuffer buf = new StringBuffer(); buf.append("NBSI - _bits.len="); NBSI x = this; while( x != null ) { buf.append(" "+x._bits.length); x = x._nbsi64; } print(d,buf.toString()); x = this; while( x != null ) { for( int i=0; i<x._bits.length; i++ ) System.out.print(Long.toHexString(x._bits[i])+" "); x = x._nbsi64; System.out.println(); } if( _copyIdx.get() != 0 || _copyDone.get() != 0 ) print(d,"_copyIdx="+_copyIdx.get()+" _copyDone="+_copyDone.get()+" _words_to_cpy="+_sum_bits_length); if( _new != null ) { print(d,"__has_new - "); _new.print(d+1); } } } }
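The class Javadoc above stresses that the primitive-int entry points avoid the autoboxing cost of the Set&lt;Integer&gt; API; the short sketch below (not part of the original source, class name hypothetical, assumed to sit in the water.nbhm package) shows that path.

// Hypothetical demo class using the lower-case 'int' API documented above.
class NBSIUsageSketch {
  public static void main(String[] args) {
    NonBlockingSetInt live = new NonBlockingSetInt();  // starts as a 1-word bit-vector
    live.add(7);                            // primitive add: no Integer boxing
    live.add(1000);                         // bit-vector grows to cover element 1000
    System.out.println(live.contains(7));   // true
    live.remove(7);
    System.out.println(live.contains(7));   // false
    System.out.println(live.size());        // approximate element count (1 here)
  }
}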
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/nbhm/UtilUnsafe.java
package water.nbhm;
import java.lang.reflect.Field;
import sun.misc.Unsafe;

/**
 * Simple class to obtain access to the {@link Unsafe} object.  {@link Unsafe}
 * is required to allow efficient CAS operations on arrays.  Note that the
 * versions in {@link java.util.concurrent.atomic}, such as {@link
 * java.util.concurrent.atomic.AtomicLongArray}, require extra memory ordering
 * guarantees which are generally not needed in these algorithms and are also
 * expensive on most processors.
 */
public class UtilUnsafe {
  private UtilUnsafe() { } // dummy private constructor

  /** Fetch the Unsafe.  Use With Caution. */
  public static Unsafe getUnsafe() {
    // Not on bootclasspath
    if( UtilUnsafe.class.getClassLoader() == null ) return Unsafe.getUnsafe();
    try {
      final Field fld = Unsafe.class.getDeclaredField("theUnsafe");
      fld.setAccessible(true);
      return (Unsafe) fld.get(UtilUnsafe.class);
    } catch (Exception e) {
      throw new RuntimeException("Could not obtain access to sun.misc.Unsafe", e);
    }
  }
}
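As a rough illustration of why this helper exists, the sketch below (not part of the original source; class and field names are hypothetical) mirrors the pattern the nbhm classes above use: fetch the Unsafe once, cache a field offset in a static initializer, and CAS the field directly.

// Hypothetical example of the offset-then-CAS idiom built on UtilUnsafe.getUnsafe().
class UnsafeCasSketch {
  private static final sun.misc.Unsafe U = UtilUnsafe.getUnsafe();
  private static final long _val_offset;
  static {                              // cache the field offset once, at class init
    java.lang.reflect.Field f = null;
    try { f = UnsafeCasSketch.class.getDeclaredField("_val"); }
    catch( NoSuchFieldException e ) { throw new RuntimeException(e); }
    _val_offset = U.objectFieldOffset(f);
  }
  private volatile Object _val;         // the field we CAS
  boolean casVal( Object expected, Object updated ) {
    return U.compareAndSwapObject(this, _val_offset, expected, updated);
  }
}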
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/parser/CsvParser.java
package water.parser; import java.io.ByteArrayInputStream; import java.io.InputStream; import java.util.*; import water.fvec.ParseTime; import water.util.Log; public class CsvParser extends CustomParser { /* Constant to specify that separator is not specified. */ public static final byte AUTO_SEP = -1; public final byte CHAR_DECIMAL_SEPARATOR = '.'; public final byte CHAR_SEPARATOR; public static final byte HIVE_SEP = 1; private static final byte SKIP_LINE = 0; private static final byte EXPECT_COND_LF = 1; private static final byte EOL = 2; private static final byte TOKEN = 3; private static final byte COND_QUOTED_TOKEN = 4; private static final byte NUMBER = 5; private static final byte NUMBER_SKIP = 6; private static final byte NUMBER_SKIP_NO_DOT = 7; private static final byte NUMBER_FRACTION = 8; private static final byte NUMBER_EXP = 9; private static final byte NUMBER_EXP_NEGATIVE = 10; private static final byte NUMBER_EXP_START = 11; private static final byte NUMBER_END = 12; private static final byte STRING = 13; private static final byte COND_QUOTE = 14; private static final byte SEPARATOR_OR_EOL = 15; private static final byte WHITESPACE_BEFORE_TOKEN = 16; private static final byte STRING_END = 17; private static final byte COND_QUOTED_NUMBER_END = 18; private static final byte POSSIBLE_EMPTY_LINE = 19; private static final byte POSSIBLE_CURRENCY = 20; private static final long LARGEST_DIGIT_NUMBER = Long.MAX_VALUE/10; public CsvParser(ParserSetup setup) { super(setup); CHAR_SEPARATOR = setup._separator; } public CsvParser clone(){ return new CsvParser(_setup == null?null:_setup.clone()); } @Override public boolean parallelParseSupported(){return true;} @SuppressWarnings("fallthrough") @Override public final DataOut parallelParse(int cidx, final CustomParser.DataIn din, final CustomParser.DataOut dout) { ValueString _str = new ValueString(); byte[] bits = din.getChunkData(cidx); if( bits == null ) return dout; int offset = din.getChunkDataStart(cidx); // General cursor into the giant array of bytes final byte[] bits0 = bits; // Bits for chunk0 boolean firstChunk = true; // Have not rolled into the 2nd chunk byte[] bits1 = null; // Bits for chunk1, loaded lazily. // Starting state. Are we skipping the first (partial) line, or not? Skip // a header line, or a partial line if we're in the 2nd and later chunks. int state = (_setup._header || cidx > 0) ? SKIP_LINE : WHITESPACE_BEFORE_TOKEN; // If handed a skipping offset, then it points just past the prior partial line. 
if( offset >= 0 ) state = WHITESPACE_BEFORE_TOKEN; else offset = 0; // Else start skipping at the start int quotes = 0; long number = 0; int exp = 0; int sgn_exp = 1; boolean decimal = false; int fractionDigits = 0; int tokenStart = 0; // used for numeric token to backtrace if not successful int colIdx = 0; byte c = bits[offset]; // skip comments for the first chunk (or if not a chunk) if( cidx == 0 ) { while (c == '#' || c == '@'/*also treat as comments leading '@' from ARFF format*/) { while ((offset < bits.length) && (bits[offset] != CHAR_CR) && (bits[offset ] != CHAR_LF)) ++offset; if ((offset+1 < bits.length) && (bits[offset] == CHAR_CR) && (bits[offset+1] == CHAR_LF)) ++offset; ++offset; if (offset >= bits.length) return dout; c = bits[offset]; } } dout.newLine(); MAIN_LOOP: while (true) { NEXT_CHAR: switch (state) { // --------------------------------------------------------------------- case SKIP_LINE: if (isEOL(c)) { state = EOL; } else { break NEXT_CHAR; } continue MAIN_LOOP; // --------------------------------------------------------------------- case EXPECT_COND_LF: state = POSSIBLE_EMPTY_LINE; if (c == CHAR_LF) break NEXT_CHAR; continue MAIN_LOOP; // --------------------------------------------------------------------- case STRING: if (c == quotes) { state = COND_QUOTE; break NEXT_CHAR; } if (!isEOL(c) && ((quotes != 0) || (c != CHAR_SEPARATOR))) { _str.addChar(); break NEXT_CHAR; } // fallthrough to STRING_END // --------------------------------------------------------------------- case STRING_END: if ((c != CHAR_SEPARATOR) && (c == CHAR_SPACE)) break NEXT_CHAR; // we have parsed the string enum correctly if((_str.get_off() + _str.get_length()) > _str.get_buf().length){ // crossing chunk boundary assert _str.get_buf() != bits; _str.addBuff(bits); } if(_setup._types != null && colIdx < _setup._types.length && _str.equals(_setup._types[colIdx]._naStr)) dout.addInvalidCol(colIdx); else dout.addStrCol(colIdx, _str); _str.set(null, 0, 0); ++colIdx; state = SEPARATOR_OR_EOL; // fallthrough to SEPARATOR_OR_EOL // --------------------------------------------------------------------- case SEPARATOR_OR_EOL: if (c == CHAR_SEPARATOR) { state = WHITESPACE_BEFORE_TOKEN; break NEXT_CHAR; } if (c==CHAR_SPACE) break NEXT_CHAR; // fallthrough to EOL // --------------------------------------------------------------------- case EOL: if(quotes != 0){ System.err.println("Unmatched quote char " + ((char)quotes) + " " + (((_str.get_length()+1) < offset && _str.get_off() > 0)?new String(Arrays.copyOfRange(bits,_str.get_off()-1,offset)):"")); dout.invalidLine("Unmatched quote char " + ((char)quotes)); colIdx = 0; quotes = 0; }else if (colIdx != 0) { dout.newLine(); colIdx = 0; } state = (c == CHAR_CR) ? 
EXPECT_COND_LF : POSSIBLE_EMPTY_LINE; if( !firstChunk ) break MAIN_LOOP; // second chunk only does the first row break NEXT_CHAR; // --------------------------------------------------------------------- case POSSIBLE_CURRENCY: if (((c >= '0') && (c <= '9')) || (c == '-') || (c == CHAR_DECIMAL_SEPARATOR) || (c == '+')) { state = TOKEN; } else { _str.set(bits,offset-1,0); _str.addChar(); if (c == quotes) { state = COND_QUOTE; break NEXT_CHAR; } if ((quotes != 0) || ((!isEOL(c) && (c != CHAR_SEPARATOR)))) { state = STRING; } else { state = STRING_END; } } continue MAIN_LOOP; // --------------------------------------------------------------------- case POSSIBLE_EMPTY_LINE: if (isEOL(c)) { if (c == CHAR_CR) state = EXPECT_COND_LF; break NEXT_CHAR; } state = WHITESPACE_BEFORE_TOKEN; // fallthrough to WHITESPACE_BEFORE_TOKEN // --------------------------------------------------------------------- case WHITESPACE_BEFORE_TOKEN: if (c == CHAR_SPACE || (c == CHAR_TAB && CHAR_TAB!=CHAR_SEPARATOR)) { break NEXT_CHAR; } else if (c == CHAR_SEPARATOR) { // we have empty token, store as NaN dout.addInvalidCol(colIdx++); break NEXT_CHAR; } else if (isEOL(c)) { dout.addInvalidCol(colIdx++); state = EOL; continue MAIN_LOOP; } // fallthrough to COND_QUOTED_TOKEN // --------------------------------------------------------------------- case COND_QUOTED_TOKEN: state = TOKEN; if( CHAR_SEPARATOR!=HIVE_SEP && // Only allow quoting in CSV not Hive files ((_setup._singleQuotes && c == CHAR_SINGLE_QUOTE) || (c == CHAR_DOUBLE_QUOTE))) { assert (quotes == 0); quotes = c; break NEXT_CHAR; } // fallthrough to TOKEN // --------------------------------------------------------------------- case TOKEN: if(_setup._types != null && colIdx < _setup._types.length && _setup._types[colIdx]._type == ParserSetup.Coltype.STR){ state = STRING; // Do not attempt a number parse, just do a string parse _str.set(bits, offset, 0); continue MAIN_LOOP; } else if (((c >= '0') && (c <= '9')) || (c == '-') || (c == CHAR_DECIMAL_SEPARATOR) || (c == '+')) { state = NUMBER; number = 0; fractionDigits = 0; decimal = false; tokenStart = offset; if (c == '-') { exp = -1; break NEXT_CHAR; } else if(c == '+'){ exp = 1; break NEXT_CHAR; } else { exp = 1; } // fallthrough } else if (c == '$') { state = POSSIBLE_CURRENCY; break NEXT_CHAR; } else { state = STRING; _str.set(bits, offset, 0); continue MAIN_LOOP; } // fallthrough to NUMBER // --------------------------------------------------------------------- case NUMBER: if ((c >= '0') && (c <= '9')) { if (number >= LARGEST_DIGIT_NUMBER) state = NUMBER_SKIP; else number = (number*10)+(c-'0'); break NEXT_CHAR; } else if (c == CHAR_DECIMAL_SEPARATOR) { state = NUMBER_FRACTION; fractionDigits = offset; decimal = true; break NEXT_CHAR; } else if ((c == 'e') || (c == 'E')) { state = NUMBER_EXP_START; sgn_exp = 1; break NEXT_CHAR; } if (exp == -1) { number = -number; } exp = 0; // fallthrough to COND_QUOTED_NUMBER_END // --------------------------------------------------------------------- case COND_QUOTED_NUMBER_END: if ( c == quotes) { state = NUMBER_END; quotes = 0; break NEXT_CHAR; } // fallthrough NUMBER_END case NUMBER_END: if (c == CHAR_SEPARATOR && quotes == 0) { exp = exp - fractionDigits; dout.addNumCol(colIdx,number,exp); ++colIdx; // do separator state here too state = WHITESPACE_BEFORE_TOKEN; break NEXT_CHAR; } else if (isEOL(c)) { exp = exp - fractionDigits; dout.addNumCol(colIdx,number,exp); // do EOL here for speedup reasons colIdx = 0; dout.newLine(); state = (c == CHAR_CR) ? 
EXPECT_COND_LF : POSSIBLE_EMPTY_LINE; if( !firstChunk ) break MAIN_LOOP; // second chunk only does the first row break NEXT_CHAR; } else if ((c == '%')) { state = NUMBER_END; exp -= 2; break NEXT_CHAR; } else if ((c != CHAR_SEPARATOR) && ((c == CHAR_SPACE) || (c == CHAR_TAB))) { state = NUMBER_END; break NEXT_CHAR; } else { state = STRING; offset = tokenStart-1; _str.set(bits,tokenStart,0); break NEXT_CHAR; // parse as String token now } // --------------------------------------------------------------------- case NUMBER_SKIP: if ((c >= '0') && (c <= '9')) { exp++; break NEXT_CHAR; } else if (c == CHAR_DECIMAL_SEPARATOR) { state = NUMBER_SKIP_NO_DOT; break NEXT_CHAR; } else if ((c == 'e') || (c == 'E')) { state = NUMBER_EXP_START; sgn_exp = 1; break NEXT_CHAR; } state = COND_QUOTED_NUMBER_END; continue MAIN_LOOP; // --------------------------------------------------------------------- case NUMBER_SKIP_NO_DOT: if ((c >= '0') && (c <= '9')) { break NEXT_CHAR; } else if ((c == 'e') || (c == 'E')) { state = NUMBER_EXP_START; sgn_exp = 1; break NEXT_CHAR; } state = COND_QUOTED_NUMBER_END; continue MAIN_LOOP; // --------------------------------------------------------------------- case NUMBER_FRACTION: if ((c >= '0') && (c <= '9')) { if (number >= LARGEST_DIGIT_NUMBER) { if (decimal) fractionDigits = offset - 1 - fractionDigits; if (exp == -1) { number = -number; } exp = 0; state = NUMBER_SKIP_NO_DOT; } else { number = (number*10)+(c-'0'); } break NEXT_CHAR; } else if ((c == 'e') || (c == 'E')) { if (decimal) fractionDigits = offset - 1 - fractionDigits; state = NUMBER_EXP_START; sgn_exp = 1; break NEXT_CHAR; } state = COND_QUOTED_NUMBER_END; if (decimal) fractionDigits = offset - fractionDigits-1; if (exp == -1) { number = -number; } exp = 0; continue MAIN_LOOP; // --------------------------------------------------------------------- case NUMBER_EXP_START: if (exp == -1) { number = -number; } exp = 0; if (c == '-') { sgn_exp *= -1; break NEXT_CHAR; } else if (c == '+'){ break NEXT_CHAR; } if ((c < '0') || (c > '9')){ state = STRING; offset = tokenStart-1; _str.set(bits,tokenStart,0); break NEXT_CHAR; // parse as String token now } state = NUMBER_EXP; // fall through to NUMBER_EXP // --------------------------------------------------------------------- case NUMBER_EXP: if ((c >= '0') && (c <= '9')) { exp = (exp*10)+(c-'0'); break NEXT_CHAR; } exp *= sgn_exp; state = COND_QUOTED_NUMBER_END; continue MAIN_LOOP; // --------------------------------------------------------------------- case COND_QUOTE: if (c == quotes) { _str.addChar(); // _str.skipChar(); state = STRING; break NEXT_CHAR; } else { quotes = 0; state = STRING_END; continue MAIN_LOOP; } // --------------------------------------------------------------------- default: assert (false) : " We have wrong state "+state; } // end NEXT_CHAR ++offset; // do not need to adjust for offset increase here - the offset is set to tokenStart-1! if (offset < 0) { // Offset is negative? assert !firstChunk; // Caused by backing up from 2nd chunk into 1st chunk firstChunk = true; bits = bits0; offset += bits.length; _str.set(bits,offset,0); } else if (offset >= bits.length) { // Off end of 1st chunk? Parse into 2nd chunk // Attempt to get more data. if( firstChunk && bits1 == null ) bits1 = din.getChunkData(cidx+1); // if we can't get further we might have been the last one and we must // commit the latest guy if we had one. 
if( !firstChunk || bits1 == null ) { // No more data available or allowed // If we are mid-parse of something, act like we saw a LF to end the // current token. if ((state != EXPECT_COND_LF) && (state != POSSIBLE_EMPTY_LINE)) { c = CHAR_LF; if (!firstChunk) Log.warn("Row entry exceeded " + bits.length + " bytes in size, exceeded current parse limit."); continue MAIN_LOOP; } break MAIN_LOOP; // Else we are just done } // Now parsing in the 2nd chunk. All offsets relative to the 2nd chunk start. firstChunk = false; if (state == NUMBER_FRACTION) fractionDigits -= bits.length; offset -= bits.length; tokenStart -= bits.length; bits = bits1; // Set main parsing loop bits if( bits[0] == CHAR_LF && state == EXPECT_COND_LF ) break MAIN_LOOP; // when the first character we see is a line end } c = bits[offset]; if(isEOL(c) && state != COND_QUOTE && quotes != 0) // quoted string having newline character => fail the line! state = EOL; } // end MAIN_LOOP if (colIdx == 0) dout.rollbackLine(); // If offset is still validly within the buffer, save it so the next pass // can start from there. if( offset+1 < bits.length ) { if( state == EXPECT_COND_LF && bits[offset+1] == CHAR_LF ) offset++; if( offset+1 < bits.length ) din.setChunkDataStart(cidx+1, offset+1 ); } return dout; } // ========================================================================== // /** Setup of the parser. // * // * Simply holds the column names, their length also determines the number of // * columns, the separator used and whether the CSV file had a header or not. // */ // public static class Setup extends Iced { // public final byte _separator; // public final boolean _header; // // Row zero is column names. // // Remaining rows are parsed from the given data, until we run out // // of data or hit some arbitrary display limit. // public final String[][] _data; // public final int _numlines; // Number of lines parsed // public final byte[] _bits; // The original bits // // public Setup(byte separator, boolean header, String[][] data, int numlines, byte[] bits) { // _separator = separator; // _header = header; // _data = data; // _numlines = numlines; // _bits = bits; // } // public Setup(Setup S, boolean header) { // _separator = S._separator; // _header = header; // _data = S._data; // _numlines = S._numlines; // _bits = S._bits; // } // // public int numCols(){return _data == null?-1:_data[0].length;} // // @Override public boolean equals( Object o ) { // if( o == null || !(o instanceof Setup) ) return false; // if( o == this ) return true; // Setup s = (Setup)o; // // "Compatible" setups means same columns and same separators // return _separator == s._separator && // ((_data==null && s._data==null) || // (_data[0].length == s._data[0].length)); // } // @Override public String toString() { // return "'"+(char)_separator+"' head="+_header+" cols="+(_data==null?-2:(_data[0]==null?-1:_data[0].length)); // } // } /** Separators recognized by the parser. You can add new separators to this * list and the parser will automatically attempt to recognize them. In * case of doubt the separators are listed in descending order of * probability, with space being the last one - space must always be the * last one as it is used if all other fails because multiple spaces can be * used as a single separator. 
*/ private static byte[] separators = new byte[] { HIVE_SEP/* '^A', Hive table column separator */, ',', ';', '|', '\t', ' '/*space is last in this list, because we allow multiple spaces*/ }; /** Dermines the number of separators in given line. Correctly handles quoted * tokens. */ private static int[] determineSeparatorCounts(String from, int single_quote) { int[] result = new int[separators.length]; byte[] bits = from.getBytes(); boolean in_quote = false; for( int j=0; j< bits.length; j++ ) { byte c = bits[j]; if( (c == single_quote) || (c == CHAR_DOUBLE_QUOTE) ) in_quote ^= true; if( !in_quote || c == HIVE_SEP ) for( int i = 0; i < separators.length; ++i) if (c == separators[i]) ++result[i]; } return result; } /** Determines the tokens that are inside a line and returns them as strings * in an array. Assumes the given separator. */ private static String[] determineTokens(String from, byte separator, int single_quote) { ArrayList<String> tokens = new ArrayList(); byte[] bits = from.getBytes(); int offset = 0; int quotes = 0; while (offset < bits.length) { while ((offset < bits.length) && (bits[offset] == CHAR_SPACE)) ++offset; // skip first whitespace if(offset == bits.length)break; StringBuilder t = new StringBuilder(); byte c = bits[offset]; if ((c == CHAR_DOUBLE_QUOTE) || (c == single_quote)) { quotes = c; ++offset; } while (offset < bits.length) { c = bits[offset]; if ((c == quotes)) { ++offset; if ((offset < bits.length) && (bits[offset] == c)) { t.append((char)c); ++offset; continue; } quotes = 0; } else if ((quotes == 0) && ((c == separator) || (c == CHAR_CR) || (c == CHAR_LF))) { break; } else { t.append((char)c); ++offset; } } c = (offset == bits.length) ? CHAR_LF : bits[offset]; tokens.add(t.toString()); if ((c == CHAR_CR) || (c == CHAR_LF) || (offset == bits.length)) break; if (c != separator) return new String[0]; // an error ++offset; // Skip separator } // If we have trailing empty columns (split by seperators) such as ",,\n" // then we did not add the final (empty) column, so the column count will // be down by 1. Add an extra empty column here if( bits[bits.length-1] == separator && bits[bits.length-1] != CHAR_SPACE) tokens.add(""); return tokens.toArray(new String[tokens.size()]); } private static boolean allStrings(String [] line){ ValueString str = new ValueString(); for( String s : line ) { try { Double.parseDouble(s); return false; // Number in 1st row guesses: No Column Header } catch (NumberFormatException e) { /*Pass - determining if number is possible*/ } if( ParseTime.attemptTimeParse(str.setTo(s)) != Long.MIN_VALUE ) return false; ParseTime.attemptUUIDParse0(str.setTo(s)); ParseTime.attemptUUIDParse1(str); if( str.get_off() != -1 ) return false; // Valid UUID parse } return true; } // simple heuristic to determine if we have headers: // return true iff the first line is all strings and second line has at least one number private static boolean hasHeader(String[] l1, String[] l2) { return allStrings(l1) && !allStrings(l2); } private static byte guessSeparator(String l1, String l2, int single_quote){ int[] s1 = determineSeparatorCounts(l1, single_quote); int[] s2 = determineSeparatorCounts(l2, single_quote); // Now we have the counts - if both lines have the same number of separators // the we assume it is the separator. Separators are ordered by their // likelyhoods. 
int max = 0; for( int i = 0; i < s1.length; ++i ) { if( s1[i] == 0 ) continue; // Separator does not appear; ignore it if( s1[max] < s1[i] ) max=i; // Largest count sep on 1st line if( s1[i] == s2[i] ) { // Sep counts are equal? try { String[] t1 = determineTokens(l1, separators[i], single_quote); String[] t2 = determineTokens(l2, separators[i], single_quote); if( t1.length != s1[i]+1 || t2.length != s2[i]+1 ) continue; // Token parsing fails return separators[i]; } catch (Exception e) { /*pass; try another parse attempt*/ } } } // No sep's appeared, or no sep's had equal counts on lines 1 & 2. If no // separators have same counts, the largest one will be used as the default // one. If there's no largest one, space will be used. if( s1[max]==0 ) max=separators.length-1; // Try last separator (space) if( s1[max]!=0 ) { String[] t1 = determineTokens(l1, separators[max], single_quote); String[] t2 = determineTokens(l2, separators[max], single_quote); if( t1.length == s1[max]+1 && t2.length == s2[max]+1 ) return separators[max]; } return AUTO_SEP; } private static int guessNcols(ParserSetup setup,String [][] data){ int res = data[0].length; if(setup._header)return res; boolean samelen = true; // True if all are same length boolean longest0 = true; // True if no line is longer than 1st line for(String [] s:data) { samelen &= (s.length == res); if( s.length > res ) longest0=false; } if(samelen)return res; // All same length, take it if( longest0 ) return res; // 1st line is longer than all the rest; take it // we don't have lines of same length, pick the most common length HashMap<Integer, Integer> lengths = new HashMap<Integer, Integer>(); for(String [] s:data){ if(!lengths.containsKey(s.length))lengths.put(s.length, 1); else lengths.put(s.length, lengths.get(s.length)+1); } int maxCnt = 0; for(Map.Entry<Integer, Integer> e:lengths.entrySet()) if(e.getValue() > maxCnt){ maxCnt = e.getValue(); res = e.getKey(); } return res; } /** Determines the CSV parser setup from the first two lines. Also parses * the next few lines, tossing out comments and blank lines. * * A separator is given or it is selected if both two lines have the same ammount of them * and the tokenization then returns same number of columns. */ public static CustomParser.PSetupGuess guessSetup(byte[] bits) { return guessSetup(bits, new ParserSetup(ParserType.CSV),true); } public static CustomParser.PSetupGuess guessSetup(byte[] bits, ParserSetup setup){return guessSetup(bits,setup,false);} public static CustomParser.PSetupGuess guessSetup(byte[] bits, ParserSetup setup, boolean checkHeader) { ArrayList<String> lines = new ArrayList(); int offset = 0; while (offset < bits.length && lines.size() < 10) { int lineStart = offset; while ((offset < bits.length) && (bits[offset] != CHAR_CR) && (bits[offset] != CHAR_LF)) ++offset; int lineEnd = offset; ++offset; if ((offset < bits.length) && (bits[offset] == CHAR_LF)) ++offset; if (bits[lineStart] == '#') continue; // Ignore comment lines if (bits[lineStart] == '@') continue; // Ignore ARFF comment lines if (lineEnd>lineStart){ String str = new String(bits, lineStart,lineEnd-lineStart).trim(); if(!str.isEmpty())lines.add(str); } } if(lines.isEmpty()) return new PSetupGuess(new ParserSetup(ParserType.AUTO,CsvParser.AUTO_SEP,0,false,null,setup._singleQuotes),0,0,null,false,new String[]{"No data!"}); boolean hasHeader = false; final int single_quote = setup._singleQuotes ? 
CHAR_SINGLE_QUOTE : -1; byte sep = setup._separator; final String [][] data = new String[lines.size()][]; int ncols; if( lines.size() < 2 ) { if(sep == AUTO_SEP){ if(lines.get(0).split(",").length > 2) sep = (byte)','; else if(lines.get(0).split(" ").length > 2) sep = ' '; else { data[0] = new String[]{lines.get(0)}; return new PSetupGuess(new ParserSetup(ParserType.CSV,CsvParser.AUTO_SEP,1,false,null,setup._singleQuotes),lines.size(),0,data,false,new String[]{"Failed to guess separator."}); } } if(lines.size() == 1) data[0] = determineTokens(lines.get(0), sep, single_quote); ncols = (setup._ncols > 0)?setup._ncols:data[0].length; hasHeader = (checkHeader && allStrings(data[0])) || setup._header; } else { if(setup._separator == AUTO_SEP){ // first guess the separator sep = guessSeparator(lines.get(0), lines.get(1), single_quote); if(sep == AUTO_SEP && lines.size() > 2){ if(sep == AUTO_SEP)sep = guessSeparator(lines.get(1), lines.get(2), single_quote); if(sep == AUTO_SEP)sep = guessSeparator(lines.get(0), lines.get(2), single_quote); } if(sep == AUTO_SEP)sep = (byte)' '; } for(int i = 0; i < lines.size(); ++i) data[i] = determineTokens(lines.get(i), sep, single_quote); // we do not have enough lines to decide ncols = (setup._ncols > 0)?setup._ncols:guessNcols(setup,data); if(checkHeader){ assert !setup._header; assert setup._columnNames == null; hasHeader = hasHeader(data[0],data[1]) && (data[0].length == ncols); } else if(setup._header){ if(setup._columnNames != null){ // we know what the header looks like, check if the current file has matching header hasHeader = data[0].length == setup._columnNames.length; for(int i = 0; hasHeader && i < data[0].length; ++i) hasHeader = data[0][i].equalsIgnoreCase(setup._columnNames[i]); } else // otherwise we're told to take the first line as header whatever it might be hasHeader = true; } } ParserSetup resSetup = new ParserSetup(ParserType.CSV, sep, ncols,hasHeader, hasHeader?data[0]:null,setup._singleQuotes); ArrayList<String> errors = new ArrayList<String>(); int ilines = 0; for(int i = 0; i < data.length; ++i){ if(data[i].length != resSetup._ncols){ errors.add("error at line " + i + " : incompatible line length. Got " + data[i].length + " columns."); ++ilines; } } String [] err = null; if(!errors.isEmpty()){ err = new String[errors.size()]; errors.toArray(err); } PSetupGuess res = new PSetupGuess(resSetup,lines.size()-ilines,ilines,data,setup.isSpecified() || lines.size() > ilines, err); if(res._isValid){ // now guess the types InputStream is = new ByteArrayInputStream(bits); CsvParser p = new CsvParser(res._setup); TypeGuesserDataOut dout = new TypeGuesserDataOut(res._setup._ncols); try{ p.streamParse(is, dout); res._setup._types = dout.guessTypes(); }catch(Throwable e){} } return res; } @Override public boolean isCompatible(CustomParser p) { return (p instanceof CsvParser) && p._setup._separator == _setup._separator && p._setup._ncols == _setup._ncols; } }
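The guessSeparator logic above counts each candidate separator on the first two lines and accepts a candidate only when the non-zero counts agree and tokenizing both lines yields matching column counts. Below is a minimal standalone sketch of that count-and-compare idea (hypothetical class name; quote handling and the fall-back to the most frequent separator are omitted).

// Illustrative sketch only; not the H2O implementation.
class SeparatorGuessSketch {
    // Candidates ordered by assumed likelihood, Hive '^A' first, space last.
    static final char[] CANDIDATES = { '\u0001', ',', ';', '|', '\t', ' ' };

    // Returns the first candidate whose non-zero count matches on both lines, or (char)-1.
    static char guess(String line1, String line2) {
        for (char sep : CANDIDATES) {
            int c1 = count(line1, sep), c2 = count(line2, sep);
            if (c1 > 0 && c1 == c2) return sep;   // same non-zero count on both lines
        }
        return (char) -1;                          // no agreement: caller must fall back
    }

    private static int count(String s, char sep) {
        int n = 0;
        for (int i = 0; i < s.length(); i++) if (s.charAt(i) == sep) n++;
        return n;
    }

    public static void main(String[] args) {
        System.out.println((int) guess("a,b,c", "1,2,3"));   // ',' wins
        System.out.println((int) guess("a b c", "1 2 3"));   // only the space candidate agrees
    }
}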
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/parser/CustomParser.java
package water.parser; import java.io.IOException; import java.io.InputStream; import java.util.*; import water.*; import water.fvec.ParseDataset2.ParseProgressMonitor; public abstract class CustomParser extends Iced { public static final byte CHAR_TAB = '\t'; public static final byte CHAR_LF = 10; public static final byte CHAR_SPACE = ' '; public static final byte CHAR_CR = 13; public static final byte CHAR_DOUBLE_QUOTE = '"'; public static final byte CHAR_SINGLE_QUOTE = '\''; public final static int MAX_PREVIEW_COLS = 100; public final static int MAX_PREVIEW_LINES = 50; public final ParserSetup _setup; public CustomParser(ParserSetup setup){_setup = setup;} public static class PSetupGuess extends Iced { public final ParserSetup _setup; public final int _invalidLines; public final int _validLines; public final String [] _errors; public Key _setupFromFile; public Key _hdrFromFile; public String [][] _data; public final boolean _isValid; public PSetupGuess(ParserSetup ps, int vlines, int ilines, String [][] data, boolean isValid, String [] errors){ _setup = ps; _invalidLines = ilines; _validLines = vlines; _errors = errors; _data = data; _isValid = isValid; } public Set<String> checkDupColumnNames(){ return _setup.checkDupColumnNames(); } public final boolean hasErrors(){ return _errors != null && _errors.length > 0; } @Override public String toString(){ if(!_isValid) return "Parser setup appears to be broken, got " + _setup.toString(); else if(hasErrors()) return "Parser setup appears to work with some errors, got " + _setup.toString(); else return "Parser setup working fine, got " + _setup.toString(); } } public enum ParserType { AUTO(false),XLS(false),XLSX(false),CSV(true), SVMLight(true); public final boolean parallelParseSupported; ParserType(boolean par){parallelParseSupported = par;} } public static class ParserSetup extends Iced implements Cloneable{ public final ParserType _pType; public final byte _separator; public boolean _header; public boolean _singleQuotes; public String [] _columnNames; public final int _ncols; public enum Coltype { NUM,ZERO,STR,AUTO,INVALID; } public static class TypeInfo extends Iced{ Coltype _type; ValueString _naStr = new ValueString(""); boolean _strongGuess; public void merge(TypeInfo tinfo){ if(_type == Coltype.AUTO || !_strongGuess && tinfo._strongGuess){ // copy over stuff from the other _type = tinfo._type; _naStr = tinfo._naStr; _strongGuess = tinfo._strongGuess; } else if(tinfo._type != Coltype.AUTO && !_strongGuess){ tinfo._type = Coltype.INVALID; } // else just keep mine } } public String [][] domains; public double [] _min; public double [] _max; public int _nnums; public int _nstr; public int _missing; public int _nzeros; TypeInfo [] _types; public ParserSetup() { _pType = ParserType.AUTO; _separator = CsvParser.AUTO_SEP; _header = false; _ncols = 0; _columnNames = null; } protected ParserSetup(ParserType t) { this(t,CsvParser.AUTO_SEP,0,false,null,false); } public ParserSetup(ParserType t, byte sep, boolean header) { _pType = t; _separator = sep; _header = header; _columnNames = null; _ncols = 0; } public ParserSetup(ParserType t, byte sep, boolean header, boolean singleQuotes) { _pType = t; _separator = sep; _header = header; _columnNames = null; _ncols = 0; _singleQuotes = singleQuotes; } public ParserSetup(ParserType t, byte sep, int ncolumns, boolean header, String [] columnNames, boolean singleQuotes) { _pType = t; _separator = sep; _ncols = ncolumns; _header = header; _columnNames = columnNames; _singleQuotes = singleQuotes; } public 
boolean isSpecified(){ return _pType != ParserType.AUTO && _separator != CsvParser.AUTO_SEP && (_header || _ncols > 0); } public Set<String> checkDupColumnNames(){ HashSet<String> uniqueNames = new HashSet<String>(); HashSet<String> conflictingNames = new HashSet<String>(); if(_header){ for(String n:_columnNames){ if(!uniqueNames.contains(n)){ uniqueNames.add(n); } else { conflictingNames.add(n); } } } return conflictingNames; } @Override public ParserSetup clone(){ return new ParserSetup(_pType, _separator, _ncols,_header,null,_singleQuotes); } public boolean isCompatible(ParserSetup other){ if(other == null || _pType != other._pType)return false; if(_pType == ParserType.CSV && (_separator != other._separator || _ncols != other._ncols)) return false; if(_types == null) _types = other._types; else if(other._types != null){ for(int i = 0; i < _types.length; ++i) _types[i].merge(other._types[i]); } return true; } public CustomParser parser(){ switch(this._pType){ case CSV: return new CsvParser(this); case SVMLight: return new SVMLightParser(this); case XLS: return new XlsParser(this); default: throw H2O.unimpl(); } } @Override public String toString(){ StringBuilder sb = new StringBuilder(_pType.name()); switch(_pType){ case SVMLight: sb.append(" data with (estimated) " + _ncols + " columns."); break; case CSV: sb.append(" data with " + _ncols + " columns using '" + (char)_separator + "' (\\" + _separator + "04d) as separator."); break; case XLS: sb.append(" data with " + _ncols + " columns."); break; case AUTO: sb.append(""); break; default: throw H2O.unimpl(); } return sb.toString(); } } public boolean isCompatible(CustomParser p){return _setup == p._setup || (_setup != null && _setup.isCompatible(p._setup));} public DataOut parallelParse(int cidx, final DataIn din, final DataOut dout) {throw new UnsupportedOperationException();} public boolean parallelParseSupported(){return false;} public DataOut streamParse( final InputStream is, final DataOut dout) throws Exception { if(_setup._pType.parallelParseSupported){ StreamData din = new StreamData(is); int cidx=0; while( is.available() > 0 ) parallelParse(cidx++,din,dout); parallelParse(cidx++,din,dout); // Parse the remaining partial 32K buffer } else { throw H2O.unimpl(); } return dout; } // ------------------------------------------------------------------------ // Zipped file; no parallel decompression; decompress into local chunks, // parse local chunks; distribute chunks later. 
public DataOut streamParse( final InputStream is, final StreamDataOut dout, ParseProgressMonitor pmon) throws IOException { // All output into a fresh pile of NewChunks, one per column if(_setup._pType.parallelParseSupported){ StreamData din = new StreamData(is); int cidx=0; StreamDataOut nextChunk = dout; long lastProgress = pmon.progress(); while( is.available() > 0 ){ if (pmon.progress() > lastProgress) { lastProgress = pmon.progress(); nextChunk.close(); if(dout != nextChunk) dout.reduce(nextChunk); nextChunk = nextChunk.nextChunk(); } parallelParse(cidx++,din,nextChunk); } parallelParse(cidx++,din,nextChunk); // Parse the remaining partial 32K buffer nextChunk.close(); if(dout != nextChunk)dout.reduce(nextChunk); } else { throw H2O.unimpl(); } return dout; } protected static final boolean isWhitespace(byte c) { return (c == CHAR_SPACE) || (c == CHAR_TAB); } protected static final boolean isEOL(byte c) { return ((c == CHAR_LF) || (c == CHAR_CR)); } public interface DataIn { // Get another chunk of byte data public abstract byte[] getChunkData( int cidx ); public abstract int getChunkDataStart( int cidx ); public abstract void setChunkDataStart( int cidx, int offset ); } public interface DataOut extends Freezable { public void setColumnNames(String [] names); // Register a newLine from the parser public void newLine(); // True if already forced into a string column (skip number parsing) public boolean isString(int colIdx); // Add a number column with given digits & exp public void addNumCol(int colIdx, long number, int exp); // Add a number column with given digits & exp public void addNumCol(int colIdx, double d); // An an invalid / missing entry public void addInvalidCol(int colIdx); // Add a String column public void addStrCol( int colIdx, ValueString str ); // Final rolling back of partial line public void rollbackLine(); public void invalidLine(String err); public void invalidValue(int line, int col); } public interface StreamDataOut extends DataOut { StreamDataOut nextChunk(); StreamDataOut reduce(StreamDataOut dout); StreamDataOut close(); StreamDataOut close(Futures fs); } public static class StreamData implements CustomParser.DataIn { final transient InputStream _is; private byte[] _bits0 = new byte[2*1024*1024]; //allows for row lengths up to 2M private byte[] _bits1 = new byte[2*1024*1024]; private int _cidx0=-1, _cidx1=-1; // Chunk #s private int _coff0=-1, _coff1=-1; // Last used byte in a chunk public StreamData(InputStream is){_is = is;} @Override public byte[] getChunkData(int cidx) { if(cidx == _cidx0)return _bits0; if(cidx == _cidx1)return _bits1; assert cidx==_cidx0+1 || cidx==_cidx1+1; byte[] bits = _cidx0<_cidx1 ? _bits0 : _bits1; if( _cidx0<_cidx1 ) { _cidx0 = cidx; _coff0 = -1; } else { _cidx1 = cidx; _coff1 = -1; } // Read as much as the buffer will hold int off=0; try { while( off < bits.length ) { int len = _is.read(bits,off,bits.length-off); if( len == -1 ) break; off += len; } assert off == bits.length || _is.available() <= 0; } catch( IOException ioe ) { throw new RuntimeException(ioe); } if( off == bits.length ) return bits; // Final read is short; cache the short-read byte[] bits2 = (off == 0) ? 
null : Arrays.copyOf(bits,off); if( _cidx0==cidx ) _bits0 = bits2; else _bits1 = bits2; return bits2; } @Override public int getChunkDataStart(int cidx) { if( _cidx0 == cidx ) return _coff0; if( _cidx1 == cidx ) return _coff1; return 0; } @Override public void setChunkDataStart(int cidx, int offset) { if( _cidx0 == cidx ) _coff0 = offset; if( _cidx1 == cidx ) _coff1 = offset; } } public abstract CustomParser clone(); public String [] headers(){return null;} protected static class TypeGuesserDataOut extends Iced implements DataOut { transient private HashSet<String> [] _domains; int [] _nnums; int [] _nstrings; int [] _nzeros; int _nlines = 0; final int _ncols; public TypeGuesserDataOut(int ncols){ _ncols = ncols; _domains = new HashSet[ncols]; _nzeros = new int[ncols]; _nstrings = new int[ncols]; _nnums = new int[ncols]; for(int i = 0; i < ncols; ++i) _domains[i] = new HashSet<String>(); } // TODO: ugly quick hack, needs revisit public ParserSetup.TypeInfo[] guessTypes() { ParserSetup.TypeInfo [] res = new ParserSetup.TypeInfo[_ncols]; for(int i = 0; i < res.length; ++i) res[i] = new ParserSetup.TypeInfo(); for(int i = 0; i < _ncols; ++i){ if(_domains[i].size() <= 1) // only consider enums with multiple strings (otherwise it's probably garbage on NA) res[i]._type = ParserSetup.Coltype.NUM; else if(_nzeros[i] > 0 && (Math.abs(_nzeros[i] + _nstrings[i] - _nlines) <= 1)) { // enum with 0s for NAs res[i]._naStr = new ValueString("0"); res[i]._type = ParserSetup.Coltype.STR; res[i]._strongGuess = true; } else if(_nstrings[i] >= 9*(_nnums[i]+_nzeros[i])) { // probably generic enum res[i]._type = ParserSetup.Coltype.STR; } } return res; } @Override public void setColumnNames(String[] names) {} @Override public void newLine() { ++_nlines; } @Override public boolean isString(int colIdx) { return false; } @Override public void addNumCol(int colIdx, long number, int exp) { if(colIdx < _nnums.length) if (number == 0) ++_nzeros[colIdx]; else ++_nnums[colIdx]; } @Override public void addNumCol(int colIdx, double d) { if(colIdx < _nnums.length) if (d == 0) ++_nzeros[colIdx]; else ++_nnums[colIdx]; } @Override public void addInvalidCol(int colIdx) { } @Override public void addStrCol(int colIdx, ValueString str) { if(colIdx < _nstrings.length) { ++_nstrings[colIdx]; _domains[colIdx].add(str.toString()); } } @Override public void rollbackLine() {--_nlines;} @Override public void invalidLine(String err) {} @Override public void invalidValue(int line, int col) {} } protected static class CustomInspectDataOut extends Iced implements DataOut { public int _nlines; public int _ncols; public int _invalidLines; public boolean _header; private String [] _colNames; private String [][] _data = new String[MAX_PREVIEW_LINES][MAX_PREVIEW_COLS]; transient ArrayList<String> _errors; public CustomInspectDataOut() { for(int i = 0; i < MAX_PREVIEW_LINES;++i) Arrays.fill(_data[i],"NA"); } public String [][] data(){ String [][] res = Arrays.copyOf(_data, Math.min(MAX_PREVIEW_LINES, _nlines)); for(int i = 0; i < res.length; ++i) res[i] = Arrays.copyOf(_data[i], Math.min(MAX_PREVIEW_COLS,_ncols)); return (_data = res); } @Override public void setColumnNames(String[] names) { _colNames = names; _data[0] = names; ++_nlines; _ncols = names.length; _header = true; } @Override public void newLine() { ++_nlines; } @Override public boolean isString(int colIdx) {return false;} @Override public void addNumCol(int colIdx, long number, int exp) { if(colIdx < _ncols && _nlines < MAX_PREVIEW_LINES) _data[_nlines][colIdx] = 
Double.toString(number*PrettyPrint.pow10(exp)); } @Override public void addNumCol(int colIdx, double d) { if(colIdx < _ncols) { _ncols = Math.max(_ncols, colIdx); if (_nlines < MAX_PREVIEW_LINES && colIdx < MAX_PREVIEW_COLS) _data[_nlines][colIdx] = Double.toString(d); } } @Override public void addInvalidCol(int colIdx) { if(colIdx < _ncols && _nlines < MAX_PREVIEW_LINES) _data[_nlines][colIdx] = "NA"; } @Override public void addStrCol(int colIdx, ValueString str) { if(colIdx < _ncols && _nlines < MAX_PREVIEW_LINES) _data[_nlines][colIdx] = str.toString(); } @Override public void rollbackLine() {--_nlines;} @Override public void invalidLine(String err) { ++_invalidLines; _errors.add("Error at line: " + _nlines + ", reason: " + err); } @Override public void invalidValue(int linenum, int colnum) {} } }
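The TypeGuesserDataOut above turns per-column tallies into a type decision: a column stays numeric unless it collected more than one distinct string, a column whose zeros plus strings roughly cover every line is treated as an enum whose NAs were written as "0", and a heavy string majority also forces an enum. A detached sketch of that decision rule with the same thresholds (hypothetical names, no DataOut plumbing):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Illustrative sketch of the column-type decision rule described above.
class ColTypeGuessSketch {
    enum ColType { NUM, STR }

    static ColType guess(Set<String> domain, int nZeros, int nStrings, int nNums, int nLines) {
        if (domain.size() <= 1) return ColType.NUM;                 // a lone string is probably a stray NA token
        if (nZeros > 0 && Math.abs(nZeros + nStrings - nLines) <= 1)
            return ColType.STR;                                     // enum where "0" likely stands in for NA
        if (nStrings >= 9 * (nNums + nZeros)) return ColType.STR;   // overwhelmingly string-valued
        return ColType.NUM;
    }

    public static void main(String[] args) {
        Set<String> colors = new HashSet<>(Arrays.asList("red", "green", "blue"));
        System.out.println(guess(colors, 0, 97, 3, 100));                             // STR
        System.out.println(guess(new HashSet<>(Arrays.asList("NA")), 0, 2, 98, 100)); // NUM
    }
}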
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/parser/Enum.java
package water.parser; import java.util.*; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicInteger; import water.AutoBuffer; import water.H2O; import water.Iced; import water.nbhm.NonBlockingHashMap; /** * Class for tracking enum columns. * * Basically a wrapper around non blocking hash map. * In the first pass, we just collect set of unique strings per column * (if there are less than H2O.DATA_MAX_FACTOR_LEVELS unique elements). * * After pass1, the keys are sorted and indexed alphabetically. * In the second pass, map is used only for lookup and never updated. * * Enum objects are shared among threads on the local nodes! * * @author tomasnykodym * */ public final class Enum extends Iced implements Cloneable{ AtomicInteger _id = new AtomicInteger(); int _maxId = -1; long _nElems; volatile NonBlockingHashMap<ValueString, Integer> _map; public Enum(){_map = new NonBlockingHashMap<ValueString, Integer>();} private Enum(int id, long nElems, NonBlockingHashMap<ValueString,Integer>map){ _id = new AtomicInteger(id); _nElems = nElems; _map = map; } public Enum clone(){ NonBlockingHashMap<ValueString,Integer> map = _map; if(map != null)map = (NonBlockingHashMap<ValueString,Integer>)map.clone(); return new Enum(_id.get(),_nElems,map); } /** * Add key to this map (treated as hash set in this case). * All keys are added with value = 1. * @param str */ public int addKey(ValueString str) { // _map is shared and be cast to null (if enum is killed) -> grab local copy NonBlockingHashMap<ValueString, Integer> m = _map; if( m == null ) return Integer.MAX_VALUE; // Nuked already Integer res = m.get(str); if(res != null ) return res; // Recorded already assert str.get_length() < 65535; // Length limit so 65535 can be used as a sentinel Integer newVal = new Integer(_id.incrementAndGet()); res = m.putIfAbsent(new ValueString(str.toString()), newVal); if(res != null)return res; if(m.size() > H2O.DATA_MAX_FACTOR_LEVELS){ kill(); return Integer.MAX_VALUE; } return newVal; } public final boolean containsKey(Object key){return _map.containsKey(key);} public void addKey(String str) { addKey(new ValueString(str)); } public int getTokenId(String str) { return getTokenId(new ValueString(str)); } public String toString(){ StringBuilder sb = new StringBuilder("{"); for(Entry e: _map.entrySet())sb.append(" " + e.getKey().toString() + "->" + e.getValue().toString()); sb.append(" }"); return sb.toString(); } public long addedElems(){return _nElems;} public int getTokenId(ValueString str){ Integer I = _map.get(str); assert I != null : "missing value! " + str.toString(); return I; } public void merge(Enum other){ if( this == other ) return; if( isKilled() ) return; if( !other.isKilled() ) { // do the merge Map<ValueString, Integer> myMap = _map; Map<ValueString, Integer> otMap = other._map; if( myMap == otMap ) return; for( ValueString str : otMap.keySet() ) myMap.put(str, 1); if( myMap.size() <= H2O.DATA_MAX_FACTOR_LEVELS ) return; } kill(); // too many values, enum should be killed! 
} public int maxId(){return _maxId == -1?_id.get():_maxId;} public int size() { return _map.size(); } public boolean isKilled() { return _map == null; } public void kill() { _map = null; } // assuming single threaded public ValueString [] computeColumnDomain(){ if( isKilled() ) return null; ValueString vs[] = _map.keySet().toArray(new ValueString[_map.size()]); Arrays.sort(vs); // Alpha sort to be nice for( int j = 0; j < vs.length; ++j ) _map.put(vs[j], j); // Renumber in the map return vs; } // Since this is a *concurrent* hashtable, writing it whilst its being // updated is tricky. If the table is NOT being updated, then all is written // as expected. If the table IS being updated we only promise to write the // Keys that existed at the time the table write began. If elements are // being deleted, they may be written anyways. If the Values are changing, a // random Value is written. @Override public AutoBuffer write( AutoBuffer ab ) { if( _map == null ) return ab.put1(1); // Killed map marker ab.put1(0); // Not killed ab.put4(maxId()); for( ValueString key : _map.keySet() ) ab.put2((char)key.get_length()).putA1(key.get_buf(),key.get_length()).put4(_map.get(key)); return ab.put2((char)65535); // End of map marker } @Override public Enum read( AutoBuffer ab ) { assert _map == null || _map.size()==0; _map = null; if( ab.get1() == 1 ) return this; // Killed? _maxId = ab.get4(); _map = new NonBlockingHashMap<ValueString, Integer>(); int len = 0; while( (len = ab.get2()) != 65535 ) // Read until end-of-map marker _map.put(new ValueString(ab.getA1(len)),ab.get4()); return this; } }
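Enum above is a thin wrapper over a concurrent map: pass one interns each distinct string with putIfAbsent and kills the map once the level cap is exceeded, and computeColumnDomain then sorts the keys and renumbers them so pass two only does lookups. A small self-contained sketch of that two-pass pattern, with the JDK ConcurrentHashMap standing in for NonBlockingHashMap and an assumed level cap (no NA or serialization handling):

import java.util.Arrays;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative two-pass categorical tracker, loosely mirroring the Enum class above.
class CategoricalSketch {
    static final int MAX_LEVELS = 1_000_000;   // assumed cap, analogous in spirit to DATA_MAX_FACTOR_LEVELS
    private volatile ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
    private final AtomicInteger nextId = new AtomicInteger();

    // Pass 1: intern the string; returns a provisional id, or MAX_VALUE once the cap is blown.
    int addKey(String s) {
        ConcurrentHashMap<String, Integer> m = map;
        if (m == null) return Integer.MAX_VALUE;        // already killed
        Integer newVal = nextId.incrementAndGet();
        Integer prev = m.putIfAbsent(s, newVal);
        if (prev != null) return prev;                  // another thread interned it first
        if (m.size() > MAX_LEVELS) { map = null; return Integer.MAX_VALUE; }
        return newVal;
    }

    // Between passes: sort the levels and renumber them alphabetically.
    String[] computeDomain() {
        ConcurrentHashMap<String, Integer> m = map;
        if (m == null) return null;                     // killed: too many levels
        String[] levels = m.keySet().toArray(new String[0]);
        Arrays.sort(levels);
        for (int i = 0; i < levels.length; i++) m.put(levels[i], i);
        return levels;
    }

    // Pass 2: read-only lookups against the renumbered map.
    int id(String s) { return map.get(s); }
}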
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/parser/GuessSetup.java
package water.parser; import java.util.*; import water.*; import water.fvec.Frame; import water.fvec.ParseDataset2; import water.parser.CustomParser.PSetupGuess; import water.parser.CustomParser.ParserSetup; import water.parser.CustomParser.ParserType; import water.util.*; import water.util.Utils.IcedArrayList; abstract public class GuessSetup { public static class ParseSetupGuessException extends RuntimeException { public final PSetupGuess _gSetup; public final Key [] _failed; public ParseSetupGuessException(String msg,PSetupGuess gSetup, Key [] failed){ super(msg + (gSetup != null?", found setup: " + gSetup.toString():"")); _gSetup = gSetup; _failed = failed; } public ParseSetupGuessException(PSetupGuess gSetup, Key [] failed){ super(gSetup != null?gSetup.toString():"Failed to guess parser setup."); _gSetup = gSetup; _failed = failed; } } public static CustomParser.PSetupGuess guessSetup(ArrayList<Key> keys,Key headerKey, CustomParser.ParserSetup setup, boolean checkHeader) { String [] colNames = null; CustomParser.PSetupGuess gSetup = null; boolean headerKeyPartOfParse = false; if(headerKey != null ){ if(keys.contains(headerKey)){ headerKeyPartOfParse = true; keys.remove(headerKey); // process the header key separately } } if(keys.size() > 1){ GuessSetupTsk t = new GuessSetupTsk(setup,checkHeader); Key [] ks = new Key[keys.size()]; keys.toArray(ks); // ks = ParseDataset2.filterEmptyFiles(ks); t.invoke(ks); gSetup = t._gSetup; if(gSetup._isValid && (!t._failedSetup.isEmpty() || !t._conflicts.isEmpty())){ // run guess setup once more, this time knowing the global setup to get rid of conflicts (turns them into failures) and bogus failures (i.e. single line files with unexpected separator) GuessSetupTsk t2 = new GuessSetupTsk(gSetup._setup, !gSetup._setup._header); HashSet<Key> keySet = new HashSet<Key>(t._conflicts); keySet.addAll(t._failedSetup); Key [] keys2 = new Key[keySet.size()]; t2.invoke(keySet.toArray(keys2)); t._failedSetup = t2._failedSetup; t._conflicts = t2._conflicts; if(!gSetup._setup._header && t2._gSetup._setup._header){ gSetup._setup._header = true; gSetup._setup._columnNames = t2._gSetup._setup._columnNames; t._gSetup._hdrFromFile = t2._gSetup._hdrFromFile; } } assert t._conflicts.isEmpty(); // we should not have any conflicts here, either we failed to find any valid global setup, or conflicts should've been converted into failures in the second pass if(!t._failedSetup.isEmpty()){ Key [] fks = new Key[t._failedSetup.size()]; throw new ParseSetupGuessException("Can not parse: Got incompatible files.", gSetup, t._failedSetup.toArray(fks)); } } else if(!keys.isEmpty()) gSetup = guessSetup(Utils.getFirstUnzipedBytes(keys.get(0)),setup,checkHeader); if( gSetup == null || !gSetup._isValid){ throw new ParseSetupGuessException(gSetup,null); } if(headerKey != null){ // separate headerKey Value v = DKV.get(headerKey); if(!v.isRawData()){ // either ValueArray or a Frame, just extract the headers if(v.isFrame()){ Frame fr = v.get(); colNames = fr._names; } else throw new ParseSetupGuessException("Headers can only come from unparsed data, ValueArray or a frame. Got " + v.className(),gSetup,null); } else { // check the hdr setup by parsing first bytes CustomParser.ParserSetup lSetup = gSetup._setup.clone(); lSetup._header = true; PSetupGuess hSetup = guessSetup(Utils.getFirstUnzipedBytes(headerKey),lSetup,false); if(hSetup == null || !hSetup._isValid) { // no match with global setup, try once more with general setup (e.g. 
header file can have different separator than the rest) ParserSetup stp = new ParserSetup(); stp._header = true; hSetup = guessSetup(Utils.getFirstUnzipedBytes(headerKey),stp,false); } if(!hSetup._isValid || hSetup._setup._columnNames == null) throw new ParseSetupGuessException("Invalid header file. I did not find any column names.",gSetup,null); if(hSetup._setup._ncols != gSetup._setup._ncols) throw new ParseSetupGuessException("Header file has different number of columns than the rest!, expected " + gSetup._setup._ncols + " columns, got " + hSetup._setup._ncols + ", header: " + Arrays.toString(hSetup._setup._columnNames),gSetup,null); if(hSetup._data != null && hSetup._data.length > 1){// the hdr file had both hdr and data, it better be part of the parse and represent the global parser setup if(!headerKeyPartOfParse) throw new ParseSetupGuessException(headerKey + " can not be used as a header file. Please either parse it separately first or include the file in the parse. Raw (unparsed) files can only be used as headers if they are included in the parse or they contain ONLY the header and NO DATA.",gSetup,null); else if(gSetup._setup.isCompatible(hSetup._setup)){ gSetup = hSetup; keys.add(headerKey); // put the key back so the file is parsed! }else throw new ParseSetupGuessException("Header file is not compatible with the other files.",gSetup, null); } else if(hSetup != null && hSetup._setup._columnNames != null) colNames = hSetup._setup._columnNames; else throw new ParseSetupGuessException("Invalid header file. I did not find any column names.",gSetup,null); } } // now set the header info in the final setup if(colNames != null){ gSetup._setup._header = true; gSetup._setup._columnNames = colNames; gSetup._hdrFromFile = headerKey; } return gSetup; } public static class GuessSetupTsk extends MRTask<GuessSetupTsk> { final CustomParser.ParserSetup _userSetup; final boolean _checkHeader; boolean _empty = true; public PSetupGuess _gSetup; IcedArrayList<Key> _failedSetup; IcedArrayList<Key> _conflicts; public GuessSetupTsk(CustomParser.ParserSetup userSetup, boolean checkHeader) { _userSetup = userSetup; assert _userSetup != null; _checkHeader = checkHeader; assert !_userSetup._header || !checkHeader; } public static final int MAX_ERRORS = 64; @Override public void map(Key key) { byte [] bits = Utils.getFirstUnzipedBytes(key); if(bits.length > 0) { _empty = false; _failedSetup = new IcedArrayList<Key>(); _conflicts = new IcedArrayList<Key>(); _gSetup = GuessSetup.guessSetup(bits, _userSetup, _checkHeader); if (_gSetup == null || !_gSetup._isValid) _failedSetup.add(key); else { _gSetup._setupFromFile = key; if (_checkHeader && _gSetup._setup._header) _gSetup._hdrFromFile = key; } } } @Override public void reduce(GuessSetupTsk drt) { if (drt._empty) return; if (_gSetup == null || !_gSetup._isValid) { _empty = false; _gSetup = drt._gSetup; if (_gSetup == null) System.out.println("haha"); // if(_gSetup != null) { try { _gSetup._hdrFromFile = drt._gSetup._hdrFromFile; _gSetup._setupFromFile = drt._gSetup._setupFromFile; // } } catch (Throwable t) { t.printStackTrace(); } } else if (drt._gSetup._isValid && !_gSetup._setup.isCompatible(drt._gSetup._setup)) { if (_conflicts.contains(_gSetup._setupFromFile) && !drt._conflicts.contains(drt._gSetup._setupFromFile)) { _gSetup = drt._gSetup; // setups are not compatible, select random setup to send up (thus, the most common setup should make it to the top) _gSetup._setupFromFile = drt._gSetup._setupFromFile; _gSetup._hdrFromFile = drt._gSetup._hdrFromFile; } 
else if (!drt._conflicts.contains(drt._gSetup._setupFromFile)) { _conflicts.add(_gSetup._setupFromFile); _conflicts.add(drt._gSetup._setupFromFile); } } else if (drt._gSetup._isValid) { // merge the two setups if (!_gSetup._setup._header && drt._gSetup._setup._header) { _gSetup._setup._header = true; _gSetup._hdrFromFile = drt._gSetup._hdrFromFile; _gSetup._setup._columnNames = drt._gSetup._setup._columnNames; } if (_gSetup._data.length < CustomParser.MAX_PREVIEW_LINES) { int n = _gSetup._data.length; int m = Math.min(CustomParser.MAX_PREVIEW_LINES, n + drt._gSetup._data.length - 1); _gSetup._data = Arrays.copyOf(_gSetup._data, m); for (int i = n; i < m; ++i) { _gSetup._data[i] = drt._gSetup._data[i - n + 1]; } } } // merge failures if (_failedSetup == null) { _failedSetup = drt._failedSetup; _conflicts = drt._conflicts; } else { _failedSetup.addAll(drt._failedSetup); _conflicts.addAll(drt._conflicts); } } } public static PSetupGuess guessSetup(byte [] bits){ return guessSetup(bits,new ParserSetup(),true); } public static PSetupGuess guessSetup(byte [] bits, ParserSetup setup, boolean checkHeader ) { if(bits == null) return new PSetupGuess(new ParserSetup(), 0, 0, null,false, null); ArrayList<PSetupGuess> guesses = new ArrayList<CustomParser.PSetupGuess>(); PSetupGuess res = null; if(setup == null)setup = new ParserSetup(); switch(setup._pType){ case CSV: return CsvParser.guessSetup(bits,setup,checkHeader); case SVMLight: return SVMLightParser.guessSetup(bits); case XLS: return XlsParser.guessSetup(bits); case AUTO: try{ if((res = XlsParser.guessSetup(bits)) != null && res._isValid) if(!res.hasErrors())return res; else guesses.add(res); }catch(Exception e){} try{ if((res = SVMLightParser.guessSetup(bits)) != null && res._isValid) if(!res.hasErrors())return res; else guesses.add(res); }catch(Exception e){} try{ if((res = CsvParser.guessSetup(bits,setup,checkHeader)) != null && res._isValid) if(!res.hasErrors())return res; else guesses.add(res); }catch(Exception e){e.printStackTrace();} if(res == null || !res._isValid && !guesses.isEmpty()){ for(PSetupGuess pg:guesses) if(res == null || pg._validLines > res._validLines) res = pg; } assert res != null; return res; default: throw H2O.unimpl(); } } }
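GuessSetupTsk above guesses a setup per file and reduces the guesses pairwise, carrying along files whose guesses failed or conflict, and a second pass re-checks the conflicting files against the winning setup. A much-reduced sequential sketch of the same end result, picking the majority setup and reporting the files that disagree (hypothetical types; compatibility here is just separator plus column count, as in CsvParser.isCompatible):

import java.util.*;

// Illustrative sketch: choose the most common (separator, ncols) guess across files
// and flag the files that disagree with it.
class GuessMergeSketch {
    static final class Guess {
        final char sep; final int ncols;
        Guess(char sep, int ncols) { this.sep = sep; this.ncols = ncols; }
        boolean compatible(Guess o) { return o != null && sep == o.sep && ncols == o.ncols; }
        @Override public String toString() { return "sep='" + sep + "' ncols=" + ncols; }
    }

    static Guess merge(Map<String, Guess> perFile, List<String> incompatible) {
        Map<String, List<String>> byKey = new HashMap<>();
        for (Map.Entry<String, Guess> e : perFile.entrySet())
            byKey.computeIfAbsent(e.getValue().toString(), k -> new ArrayList<>()).add(e.getKey());
        // the majority setup wins; everything else is reported back to the caller
        List<String> best = Collections.max(byKey.values(), Comparator.comparingInt(List::size));
        Guess winner = perFile.get(best.get(0));
        for (Map.Entry<String, Guess> e : perFile.entrySet())
            if (!winner.compatible(e.getValue())) incompatible.add(e.getKey());
        return winner;
    }

    public static void main(String[] args) {
        Map<String, Guess> g = new LinkedHashMap<>();
        g.put("a.csv", new Guess(',', 5));
        g.put("b.csv", new Guess(',', 5));
        g.put("c.csv", new Guess('\t', 3));
        List<String> bad = new ArrayList<>();
        System.out.println(merge(g, bad) + " ; incompatible=" + bad);  // c.csv flagged
    }
}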
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/parser/PMMLParser.java
package water.parser; import java.io.IOException; import java.io.InputStream; import java.util.*; import water.H2O; import water.score.*; import water.util.Log; /** Parse PMML models * * Full recursive-descent style parsing. MUCH easier to track the control * flows than a SAX-style parser, and does not require the entire doc like a * DOM-style. More tightly tied to the XML structure, but in theory PMML is * a multi-vendor standard and fairly stable. * * Like a good R-D parser, uses a separate function for parsing each XML * element. Each function expects to be at a particular parse-point * (generally after the openning '&lt;' and before the tag is parsed), and * always leaves the parse just after the close-tag '&gt;'. The semantic * interpretation is then interleaved with the parsing, with higher levels * passing down needed info to lower element levels, and lower levels * directly returning results to the higher levels. * * @author <a href="mailto:cliffc@0xdata.com"></a> * @version 1.0 */ public class PMMLParser { final InputStream _is; // Stream to read from int [] _buf; // Pushback buffer int _idx; // Pushback index /** Features datatypes promoted by PMML spec. These appear before we know what * kind of model we are parsing, so must be parsed globally (for all models). */ public static enum DataTypes { DOUBLE("double"), INT("int"), BOOLEAN("boolean"), STRING("String"); final String _jname; DataTypes( String jname ) { _jname = jname; } public static DataTypes parse(String s) {return DataTypes.valueOf(s.toUpperCase()); } public String jname() { return _jname; } } // Global (per-parse) type mappings. Examples: // <DataField name="Species" optype="categorical" dataType="string"> // <DataField name="creditScore" dataType="double" optype="continuous" /> public final HashMap<String,DataTypes> _types = new HashMap(); // Global (per-parse) enum mappings. Examples: //<DataField name="Species" optype="categorical" dataType="string"> // <Value value="setosa"/> // <Value value="versicolor"/> // <Value value="virginica"/> //</DataField> public final HashMap<String,String[]> _enums = new HashMap(); public static class ParseException extends RuntimeException { public ParseException( String msg ) { super(msg); } } public static ScoreModel parse( InputStream is ) { return new PMMLParser(is).parse(); } private PMMLParser(InputStream is) { _is = is; _buf=new int[2]; } private ScoreModel parse() { skipWS().expect('<'); if( peek()=='?' ) pXMLVersion().skipWS().expect('<'); return pPMML(); } // Parse/skip XML version element private PMMLParser pXMLVersion() { expect("?xml"); while( peek() != '?' ) { // Look for closing '?>' String attr = skipWS().token(); String val = skipWS().expect('=').str(); } return expect("?>"); } // The whole PMML element. Breaks out the different model types. 
private ScoreModel pPMML() { expect("PMML").skipAttrs(); expect('>').skipWS().expect('<'); pGeneric("Header"); // Skip a generic XML subtree skipWS().expect('<'); pDataDictionary(); String mtag = skipWS().expect('<').token(); ScoreModel scm = null; if( "Scorecard" .equals(mtag) ) scm = ScorecardModel.parse(this); //if( "MiningModel".equals(mtag) ) scm = RFScoreModel.parse(this); skipWS().expect("</PMML>"); return scm; } // Skip generic XML subtree public PMMLParser pGeneric(String hdr) { String t = token(); assert hdr==null || t.equals(hdr); skipAttrs(); if( peek()=='/' ) return expect("/>"); expect('>'); while( true ) { if( get()=='<' ) { if( peek()=='/' ) return expect('/').expect(t).expect('>'); pGeneric(null); } } } // Reads the DataDictionary element, accumulating fields & types private PMMLParser pDataDictionary() { expect("DataDictionary").skipAttrs(); expect('>'); while( skipWS().expect('<').peek() != '/' ) pDataField(); return expect("/DataDictionary>"); } // Read a single field name & type, plus any enum/factor/level info private PMMLParser pDataField() { HashMap<String,String> attrs = expect("DataField").attrs(); String name = attrs.get("name"); _types.put(name, DataTypes.parse(attrs.get("dataType"))); if( peek()=='/' ) return expect("/>"); expect('>'); ArrayList<String> str = new ArrayList(); while( skipWS().expect('<').peek() != '/' ) str.add(pDataFieldValue()); String[] ss = str.toArray(new String[0]); Arrays.sort(ss,null); _enums.put(name,ss); return expect("/DataField>"); } // A single enum/level value private String pDataFieldValue() { expect("Value").skipWS().expect("value="); String str = str(); expect("/>"); return str; } // Parse out an PMML predicate. Common across several models. public Predicate pPredicate() { String t = token(); HashMap<String,String> attrs = attrs(); if( "SimplePredicate" .equals(t) ) return pSimplePredicate(attrs); if( "CompoundPredicate" .equals(t) ) return pCompoundPredicate(attrs); if( "SimpleSetPredicate".equals(t) ) return pSimpleSetPredicate(attrs); if( "True".equals(t) ) { expect("/>"); return new True(); } expect("unhandled_predicate"); return null; } private Predicate pSimplePredicate(HashMap<String,String> attrs) { expect("/>"); return Comparison.makeSimple(attrs.get("field"), Operators.valueOf(attrs.get("operator")), attrs.get("value")); } private Predicate pCompoundPredicate(HashMap<String,String> attrs) { expect(">"); CompoundPredicate cp = CompoundPredicate.make(BooleanOperators.valueOf(attrs.get("booleanOperator"))); cp._l = skipWS().expect('<').pPredicate(); cp._r = skipWS().expect('<').pPredicate(); skipWS().expect("</CompoundPredicate>"); return cp; } private Predicate pSimpleSetPredicate(HashMap<String,String> attrs) { expect('>'); IsIn in = IsIn.make(attrs.get("field"), BooleanOperators.valueOf(attrs.get("booleanOperator"))); in._values = skipWS().expect('<').pArray(); skipWS().expect("</SimpleSetPredicate>"); return in; } private String[] pArray() { HashMap<String,String> attrs = expect("Array").attrs(); expect('>'); int len = Integer.parseInt(attrs.get("n")); assert attrs.get("type").equals("string"); String[] ss = new String[len]; for( int i=0; i<len; i++ ) { int b = skipWS().peek(); // Allow both quoted and unquoted tokens ss[i] = (b=='&' || b=='"') ? 
str() : token(); } skipWS().expect("</Array>"); return ss; } public int get() { if( _idx > 0 ) return _buf[--_idx]; try { int b = _is.read(); if( b != -1 ) return b; } catch( IOException ioe ) { Log.err(ioe); } throw new ParseException("Premature EOF"); } public int peek() { if( _idx > 0 ) return _buf[_idx-1]; try { int b = _is.read(); if( b != -1 ) return push(b); } catch( IOException e ) { Log.err(e); } throw new ParseException("Premature EOF"); } int push( int b ) { return (_buf[_idx++] = b); } public int qget() { int b = get(); if( b!='&' ) return b; expect("quot;"); return '"'; } // Read from stream, skipping whitespace public PMMLParser skipWS() { int c; while( Character.isWhitespace(c=get()) ) ; push(c); return this; } // Assert correct token is found public PMMLParser expect( char tok ) { char c = (char)get(); return c == tok ? this : barf(tok,c); } public PMMLParser expect( String toks ) { for( int i=0; i<toks.length(); i++ ) expect(toks.charAt(i)); return this; } public PMMLParser barf( char tok, char c ) { StringBuilder sb = new StringBuilder(); sb.append("Expected '").append(tok).append("' but found '").append(c).append("'"); int line=0; for( int i=0; i<512; i++ ) { try { c = (char)get(); } catch( ParseException ioe ) { break; } sb.append(c); if( c=='\n' && line++ > 2 ) break; } throw new ParseException(sb.toString()); } // Read from stream a valid PMML token public String token() { int b = get(); if( !Character.isJavaIdentifierStart(b) ) throw new ParseException("Expected token start but found '"+(char)b+"'"); StringBuilder sb = new StringBuilder(); sb.append((char)b); b = get(); while( Character.isJavaIdentifierPart(b) || b==':' ) { sb.append((char)b); b = get(); } push(b); return sb.toString(); } // Read from stream a "string". Skips the trailing close-quote private String str() { int q = skipWS().qget(); if( q!='"' && q!='\'' ) throw new ParseException("Expected one of ' or \" but found '"+(char)q+"'"); StringBuilder sb = new StringBuilder(); int b = get(); while( b != q ) { sb.append((char)b); b = qget(); } return sb.toString(); } // Any number of attributes, or '/' or '>' public HashMap<String,String> attrs() { HashMap<String,String> attrs = null; while( true ) { int b = skipWS().peek(); if( b == '/' || b == '>' ) return attrs; if( attrs == null ) attrs = new HashMap(); String attr = token(); String val = skipWS().expect('=').str(); attrs.put(attr,val); } } public void skipAttrs() { while( true ) { int b = skipWS().peek(); if( b == '/' || b == '>' ) return; while( (b=get())!= '=' ) ; int q = skipWS().get(); if( q!='"' && q!='\'' ) throw new ParseException("Expected one of ' or \" but found '"+(char)q+"'"); while( (b=get())!= q ) ; } } // ------------------------------------------------------------------------- // ------------------------------------------------------------------------- // Common PMML Operators public static enum Operators { lessOrEqual, lessThan, greaterOrEqual, greaterThan, equal, isMissing; } public static enum BooleanOperators { isNotIn, and, or, isIn; } public static abstract class Predicate { public abstract boolean match(Comparable value); public abstract boolean match(String sval, double dval); public abstract StringBuilder toJavaNum( StringBuilder sb, String jname ); public StringBuilder toJavaBool( StringBuilder sb, String jname ) { throw H2O.unimpl(); } public StringBuilder toJavaStr( StringBuilder sb, String jname ) { throw H2O.unimpl(); } public static Predicate makeSimple(String field, Operators op, String cons) { if( cons==null ) { assert 
op==Operators.isMissing; return new IsMissing(field); } switch (op) { case lessOrEqual : return new LessOrEqual (field,cons); case lessThan : return new LessThan (field,cons); case greaterOrEqual: return new GreaterOrEqual(field,cons); case greaterThan : return new GreaterThan (field,cons); case equal : return new Equals (field,cons); default : throw new RuntimeException("missing "+field+" "+op+" "+cons); } } public String unique_name() { throw H2O.unimpl(); } } public static abstract class Comparison extends Predicate { // Used to define comparisons like: // "income < 10000" which _name==income, and _str=="10000", _num==10000 public final String _name;// Feature name, e.g. "bad_email" or "income" public final String _str; // Constant compare value as a String public final double _num; // Constant compare value or NaN if not applicable public final double _bool;// Constant boolean value or NaN if not applicable public Comparison(String name, String str) { _name = name; _str = str; _num = getNumber (str);// Convert to a 'double' _bool= getBoolean(str);// Convert to a 'boolean' } public String unique_name() { return _name; } } /** Less or equal */ public static class LessOrEqual extends Comparison { public LessOrEqual(String name, String str) { super(name,str); } @Override public boolean match(Comparable value) { if( !Double.isNaN(_num ) ) return getNumber (value) <= _num ; if( !Double.isNaN(_bool) ) return getBoolean(value) <= _bool; String s = getString(value); return s==null ? false : s.compareTo(_str) <= 0; } @Override public boolean match(String sval, double dval) { return dval <= _num; } @Override public String toString() { return "X<=" + _str; } @Override public StringBuilder toJavaNum( StringBuilder sb, String jname ) { return sb.append(jname).append("<=").append(_num); } } public static class LessThan extends Comparison { public LessThan(String name, String str) { super(name,str); } @Override public boolean match(Comparable value) { if( !Double.isNaN(_num ) ) return getNumber (value) < _num ; if( !Double.isNaN(_bool) ) return getBoolean(value) < _bool; String s = getString(value); return s==null ? false : s.compareTo(_str) < 0; } @Override public boolean match(String sval, double dval) { return dval < _num; } @Override public String toString() { return "X<" + _str; } @Override public StringBuilder toJavaNum( StringBuilder sb, String jname ) { return sb.append(jname).append("<").append(_num); } } public static class GreaterOrEqual extends Comparison { public GreaterOrEqual(String name, String con) { super(name,con); } @Override public boolean match(Comparable value) { if( !Double.isNaN(_num ) ) return getNumber (value) >= _num ; if( !Double.isNaN(_bool) ) return getBoolean(value) >= _bool; String s = getString(value); return s==null ? false : s.compareTo(_str) >= 0; } @Override public boolean match(String sval, double dval) { return dval >= _num; } @Override public String toString() { return "X>=" + _str; } @Override public StringBuilder toJavaNum( StringBuilder sb, String jname ) { return sb.append(jname).append(">=").append(_num); } } public static class GreaterThan extends Comparison { public GreaterThan(String name, String str) { super(name,str); } @Override public boolean match(Comparable value) { if( !Double.isNaN(_num ) ) return getNumber (value) > _num ; if( !Double.isNaN(_bool) ) return getBoolean(value) > _bool; String s = getString(value); return s==null ? 
false : s.compareTo(_str) > 0; } @Override public boolean match(String sval, double dval) { return dval > _num; } @Override public String toString() { return "X>" + _str; } @Override public StringBuilder toJavaNum( StringBuilder sb, String jname ) { return sb.append(jname).append(">").append(_num); } } public static class IsMissing extends Predicate { public final String _name; // Feature name, like 'dependents' public IsMissing( String name ) { _name=name; } @Override public boolean match(Comparable value) { return value==null; } @Override public boolean match(String sval, double dval) { return Double.isNaN(dval); } @Override public String toString() { return "isMissing"; } @Override public StringBuilder toJavaNum( StringBuilder sb, String jname ) { return sb.append("Double.isNaN("+jname+")"); } @Override public StringBuilder toJavaBool( StringBuilder sb, String jname ) { return sb.append("Double.isNaN("+jname+")"); } @Override public StringBuilder toJavaStr( StringBuilder sb, String jname ) { return sb.append(jname).append("==null"); } public String unique_name() { return _name; } } public static class Equals extends Comparison { public Equals(String name, String str) { super(name,str); } @Override public boolean match(Comparable value) { if( !Double.isNaN(_num ) ) return getNumber (value) == _num ; if( !Double.isNaN(_bool) ) return getBoolean(value) == _bool; String s = getString(value); return s==null ? false : s.compareTo(_str) == 0; } @Override public boolean match(String sval, double dval) { if( !Double.isNaN(_num ) ) return dval == _num ; if( !Double.isNaN(_bool) ) return dval == _bool; return _str.equals(sval); } @Override public String toString() { return "X==" + _str; } @Override public StringBuilder toJavaNum( StringBuilder sb, String jname ) { return sb.append(jname).append("==").append(_num); } @Override public StringBuilder toJavaBool( StringBuilder sb, String jname ) { return sb.append(jname).append("==").append(_bool); } @Override public StringBuilder toJavaStr( StringBuilder sb, String jname ) { return sb.append("\"").append(_str).append("\".equals(").append(jname).append(")"); } } public static abstract class CompoundPredicate extends Predicate { Predicate _l,_r; @Override public StringBuilder toJavaNum( StringBuilder sb, String jname ) { throw H2O.unimpl(); } public StringBuilder makeNum(StringBuilder sb, String jname, String rel) { sb.append("("); _l.toJavaNum(sb,jname); sb.append(" ").append(rel).append(" "); _r.toJavaNum(sb,jname); sb.append(")"); return sb; } public StringBuilder makeStr(StringBuilder sb, String jname, String rel) { sb.append("("); _l.toJavaStr(sb,jname); sb.append(" ").append(rel).append(" "); _r.toJavaStr(sb,jname); sb.append(")"); return sb; } public static CompoundPredicate make(BooleanOperators op) { switch( op ) { case and: return new And(); case or : return new Or(); default : return null; } } public String unique_name() { return _l.unique_name(); } } public static class And extends CompoundPredicate { @Override public final boolean match(Comparable value) { return _l.match(value) && _r.match(value); } @Override public final boolean match(String sval, double dval) { return _l.match(sval,dval) && _r.match(sval,dval); } @Override public String toString() { return "(" + _l.toString() + " and " + _r.toString() + ")"; } @Override public StringBuilder toJavaNum( StringBuilder sb, String jname ) { return makeNum(sb,jname,"&&"); } @Override public StringBuilder toJavaStr( StringBuilder sb, String jname ) { return makeStr(sb,jname,"&&"); } } public 
static class Or extends CompoundPredicate { @Override public final boolean match(Comparable value) { return _l.match(value) || _r.match(value); } @Override public final boolean match(String sval, double dval) { return _l.match(sval,dval) || _r.match(sval,dval); } @Override public String toString() { return "(" + _l.toString() + " or " + _r.toString() + ")"; } @Override public StringBuilder toJavaNum( StringBuilder sb, String jname ) { return makeNum(sb,jname,"||"); } @Override public StringBuilder toJavaStr( StringBuilder sb, String jname ) { return makeStr(sb,jname,"||"); } } public static class IsIn extends Predicate { public final String _name; // Feature name, like 'state' public String[] _values; public IsIn(String name, String[] values) { _name=name; _values = values; } @Override public boolean match(Comparable value) { for( String t : _values ) if (t.equals(value)) return true; return false; } @Override public boolean match(String sval, double dval) { for( String t : _values ) if (t.equals(sval)) return true; return false; } @Override public String toString() { String x = ""; for( String s: _values ) x += s + " "; return "X is in {" + x + "}"; } @Override public StringBuilder toJavaNum( StringBuilder sb, String jname ) { throw H2O.unimpl(); } @Override public StringBuilder toJavaStr( StringBuilder sb, String jname ) { for( String s : _values ) sb.append("\"").append(s).append("\".equals(").append(jname).append(") || "); return sb.append("false"); } public static IsIn make(String name, BooleanOperators op) { switch( op ) { case isIn : return new IsIn (name,null); case isNotIn: return new IsNotIn(name,null); default : return null; } } public String unique_name() { return _name; } } public static class IsNotIn extends IsIn { public IsNotIn(String name, String[] values) { super(name,values); } @Override public boolean match(Comparable value) { return ! super.match(value); } @Override public boolean match(String sval, double dval) { return ! 
super.match(sval,dval); } @Override public String toString() { return "!("+super.toString()+")"; } @Override public StringBuilder toJavaNum( StringBuilder sb, String jname ) { throw H2O.unimpl(); } @Override public StringBuilder toJavaStr( StringBuilder sb, String jname ) { sb.append("!("); super.toJavaStr(sb,jname); return sb.append(")"); } } public static class True extends Predicate { @Override public boolean match(Comparable value) { return true; } @Override public boolean match(String sval, double dval) { return true; } @Override public String toString() { return "true"; } @Override public StringBuilder toJavaNum( StringBuilder sb, String jname ) { return sb.append("true"); } @Override public StringBuilder toJavaBool( StringBuilder sb, String jname ) { return sb.append("true"); } @Override public StringBuilder toJavaStr( StringBuilder sb, String jname ) { return sb.append("true"); } @Override public String unique_name() { return ""; } } // Happy Helper Methods for the generated code public static double getNumber( HashMap<String,Comparable> row, String s ) { return getNumber(row.get(s)); } public static double getNumber( Comparable o ) { // hint to the jit to do a instanceof breakdown tree if( o instanceof Double ) return ((Double)o).doubleValue(); if( o instanceof Long ) return ((Long )o).doubleValue(); if( o instanceof Number ) return ((Number)o).doubleValue(); if( o instanceof String ) { try { return Double.valueOf((String)o); } catch( Throwable t ) { } } return Double.NaN; } public static double getBoolean( HashMap<String,Comparable> row, String s ) { return getBoolean(row.get(s)); } public static double getBoolean( Comparable o ) { if( o instanceof Boolean ) return ((Boolean)o) ? 1.0 : 0.0; if( o instanceof String ) { try { if( "true" .equalsIgnoreCase((String) o) ) return 1.0; if( "false".equalsIgnoreCase((String) o) ) return 0.0; } catch( Throwable t ) { Log.err(t); } } return Double.NaN; } public static String getString( HashMap<String,Comparable> row, String s ) { return getString(row.get(s)); } public static String getString( Comparable o ) { if( o instanceof String ) return (String)o; return o == null ? null : o.toString(); } }
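PMMLParser reads its stream through a two-slot pushback buffer: get() consumes a byte, peek() reads ahead and pushes the byte back, and expect()/skipWS() are layered on top of those. A self-contained sketch of that reader pattern using plain java.io (hypothetical class, no XML or PMML semantics):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

// Illustrative pushback reader, mirroring the get/peek/expect helpers above.
class PushbackSketch {
    private final InputStream is;
    private final int[] buf = new int[2];   // at most two pushed-back bytes
    private int idx;

    PushbackSketch(InputStream is) { this.is = is; }

    int get() throws IOException {
        if (idx > 0) return buf[--idx];     // serve pushed-back bytes first
        int b = is.read();
        if (b == -1) throw new IOException("premature EOF");
        return b;
    }

    int peek() throws IOException {         // read ahead, then push the byte back
        int b = get();
        buf[idx++] = b;
        return b;
    }

    PushbackSketch skipWS() throws IOException {
        int c;
        while (Character.isWhitespace(c = get())) ;
        buf[idx++] = c;                      // the first non-whitespace byte goes back
        return this;
    }

    PushbackSketch expect(char tok) throws IOException {
        int c = get();
        if (c != tok) throw new IOException("expected '" + tok + "' but found '" + (char) c + "'");
        return this;
    }

    public static void main(String[] args) throws IOException {
        PushbackSketch p = new PushbackSketch(new ByteArrayInputStream("  <tag>".getBytes()));
        p.skipWS().expect('<');
        System.out.println((char) p.peek());  // prints 't'
    }
}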
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/parser/SVMLightParser.java
package water.parser; import java.io.ByteArrayInputStream; import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import water.Iced; import water.PrettyPrint; /** * Parser for SVM light format. * @author tomasnykodym * */ public class SVMLightParser extends CustomParser{ private static final byte SKIP_LINE = 0; private static final byte EXPECT_COND_LF = 1; private static final byte EOL = 2; private static final byte TOKEN = 3; private static final byte SKIP_TOKEN = 4; private static final byte NUMBER = 5; private static final byte NUMBER_FRACTION = 6; private static final byte NUMBER_EXP = 7; private static final byte INVALID_NUMBER = 8; private static final byte NUMBER_EXP_START = 9; private static final byte NUMBER_END = 10; private static final byte WHITESPACE_BEFORE_TOKEN = 11; private static final byte POSSIBLE_EMPTY_LINE = 12; private static final byte QID0 = 13; private static final byte QID1 = 14; // line global states private static final int TGT = 1; private static final int COL = 2; private static final int VAL = 3; private static final long LARGEST_DIGIT_NUMBER = 1000000000000000000L; final static char DECIMAL_SEP = '.'; public SVMLightParser(ParserSetup setup) {super(setup);} @Override public SVMLightParser clone(){return new SVMLightParser(_setup);} @Override public boolean parallelParseSupported(){return true;} /** * Try to parse the bytes as svm light format, return SVMParser instance if the input is in svm light format, null otherwise. * @param bytes * @return SVMLightPArser instance or null */ public static PSetupGuess guessSetup(byte [] bytes){ // find the last eof int i = bytes.length-1; while(i > 0 && bytes[i] != '\n')--i; assert i >= 0; InputStream is = new ByteArrayInputStream(Arrays.copyOf(bytes,i)); SVMLightParser p = new SVMLightParser(new ParserSetup(ParserType.SVMLight, CsvParser.AUTO_SEP, false)); InspectDataOut dout = new InspectDataOut(); try{p.streamParse(is, dout);}catch(Exception e){throw new RuntimeException(e);} return new PSetupGuess(new ParserSetup(ParserType.SVMLight, CsvParser.AUTO_SEP, dout._ncols,false,null,false),dout._nlines,dout._invalidLines,dout.data(),dout._ncols > 0 && dout._nlines > 0 && dout._nlines > dout._invalidLines,dout.errors()); } @Override public boolean isCompatible(CustomParser p){return p instanceof SVMLightParser;} @SuppressWarnings("fallthrough") @Override public final DataOut parallelParse(int cidx, final CustomParser.DataIn din, final CustomParser.DataOut dout) { ValueString _str = new ValueString(); byte[] bits = din.getChunkData(cidx); if( bits == null ) return dout; final byte[] bits0 = bits; // Bits for chunk0 boolean firstChunk = true; // Have not rolled into the 2nd chunk byte[] bits1 = null; // Bits for chunk1, loaded lazily. int offset = 0; // General cursor into the giant array of bytes // Starting state. Are we skipping the first (partial) line, or not? Skip // a header line, or a partial line if we're in the 2nd and later chunks. int lstate = (cidx > 0)? 
SKIP_LINE : WHITESPACE_BEFORE_TOKEN; int gstate = TGT; long number = 0; int zeros = 0; int exp = 0; int sgn_exp = 1; boolean decimal = false; int fractionDigits = 0; int colIdx = 0; byte c = bits[offset]; // skip comments for the first chunk (or if not a chunk) if( cidx == 0 ) { while (c == '#') { while ((offset < bits.length) && (bits[offset] != CHAR_CR) && (bits[offset ] != CHAR_LF)) ++offset; if ((offset+1 < bits.length) && (bits[offset] == CHAR_CR) && (bits[offset+1] == CHAR_LF)) ++offset; ++offset; if (offset >= bits.length) return dout; c = bits[offset]; } } //dout.newLine(); int linestart = 0; // String linePrefix = ""; MAIN_LOOP: while (true) { NEXT_CHAR: switch (lstate) { // --------------------------------------------------------------------- case SKIP_LINE: if (!isEOL(c)) break NEXT_CHAR; // fall through case EOL: if (colIdx != 0) { colIdx = 0; linestart = offset+1; if(lstate != SKIP_LINE) dout.newLine(); } if( !firstChunk ) break MAIN_LOOP; // second chunk only does the first row lstate = (c == CHAR_CR) ? EXPECT_COND_LF : POSSIBLE_EMPTY_LINE; gstate = TGT; linestart = offset; break NEXT_CHAR; // --------------------------------------------------------------------- case EXPECT_COND_LF: lstate = POSSIBLE_EMPTY_LINE; if (c == CHAR_LF) break NEXT_CHAR; continue MAIN_LOOP; // --------------------------------------------------------------------- // --------------------------------------------------------------------- // --------------------------------------------------------------------- case POSSIBLE_EMPTY_LINE: if (isEOL(c)) { if (c == CHAR_CR) lstate = EXPECT_COND_LF; break NEXT_CHAR; } lstate = WHITESPACE_BEFORE_TOKEN; // fallthrough to WHITESPACE_BEFORE_TOKEN // --------------------------------------------------------------------- case WHITESPACE_BEFORE_TOKEN: if (isWhitespace(c)) break NEXT_CHAR; if (isEOL(c)){ lstate = EOL; continue MAIN_LOOP; } // fallthrough to TOKEN case TOKEN: if (((c >= '0') && (c <= '9')) || (c == '-') || (c == DECIMAL_SEP) || (c == '+')) { lstate = NUMBER; number = 0; fractionDigits = 0; decimal = false; if (c == '-') { exp = -1; break NEXT_CHAR; } else if(c == '+'){ exp = 1; break NEXT_CHAR; } else { exp = 1; } // fallthrough } else if(c == 'q'){ lstate = QID0; } else { // failed, skip the line // TODO dout.invalidLine("Unexpected character, expected number or qid, got '" + new String(Arrays.copyOfRange(bits, offset,Math.min(bits.length,offset+5))) + "...'"); lstate = SKIP_LINE; continue MAIN_LOOP; } // fallthrough to NUMBER // --------------------------------------------------------------------- case NUMBER: if ((c >= '0') && (c <= '9')) { number = (number*10)+(c-'0'); if (number >= LARGEST_DIGIT_NUMBER) lstate = INVALID_NUMBER; break NEXT_CHAR; } else if (c == DECIMAL_SEP) { lstate = NUMBER_FRACTION; fractionDigits = offset; decimal = true; break NEXT_CHAR; } else if ((c == 'e') || (c == 'E')) { lstate = NUMBER_EXP_START; sgn_exp = 1; break NEXT_CHAR; } if (exp == -1) { number = -number; } exp = 0; // fallthrough NUMBER_END case NUMBER_END: exp = exp - fractionDigits; switch(gstate){ case COL: if(c == ':'){ if(exp == 0 && number >= colIdx && (int)number == number){ colIdx = (int)number; gstate = VAL; lstate = WHITESPACE_BEFORE_TOKEN; } else { // wrong col Idx, just skip the token and try to continue // col idx is either too small (according to spec, cols must come in strictly increasing order) // or too small (col ids currently must fit into int) String err = ""; if(number <= colIdx) err = "Columns come in non-increasing sequence. 
Got " + number + " after " + colIdx + "."; else if(exp != 0) err = "Got non-integer as column id: " + number*PrettyPrint.pow10(exp); else err = "column index out of range, " + number + " does not fit into integer."; dout.invalidLine("invalid column id:" + err); lstate = SKIP_LINE; } } else { // we're probably out of sync, skip the rest of the line dout.invalidLine("unexpected character after column id: " + c); lstate = SKIP_LINE; // TODO output error } break NEXT_CHAR; case TGT: case VAL: dout.addNumCol(colIdx++,number,exp); lstate = WHITESPACE_BEFORE_TOKEN; gstate = COL; continue MAIN_LOOP; } // --------------------------------------------------------------------- case NUMBER_FRACTION: if(c == '0'){ ++zeros; break NEXT_CHAR; } if ((c > '0') && (c <= '9')) { if (number < LARGEST_DIGIT_NUMBER) { number = (number*PrettyPrint.pow10i(zeros+1))+(c-'0'); } else { dout.invalidLine("number " + number + " is out of bounds."); lstate = SKIP_LINE; } zeros = 0; break NEXT_CHAR; } else if ((c == 'e') || (c == 'E')) { if (decimal) fractionDigits = offset - zeros - 1 - fractionDigits; lstate = NUMBER_EXP_START; sgn_exp = 1; zeros = 0; break NEXT_CHAR; } lstate = NUMBER_END; if (decimal) fractionDigits = offset - zeros - fractionDigits-1; if (exp == -1) { number = -number; } exp = 0; zeros = 0; continue MAIN_LOOP; // --------------------------------------------------------------------- case NUMBER_EXP_START: if (exp == -1) { number = -number; } exp = 0; if (c == '-') { sgn_exp *= -1; break NEXT_CHAR; } else if (c == '+'){ break NEXT_CHAR; } if ((c < '0') || (c > '9')){ lstate = INVALID_NUMBER; continue MAIN_LOOP; } lstate = NUMBER_EXP; // fall through to NUMBER_EXP // --------------------------------------------------------------------- case NUMBER_EXP: if ((c >= '0') && (c <= '9')) { exp = (exp*10)+(c-'0'); break NEXT_CHAR; } exp *= sgn_exp; lstate = NUMBER_END; continue MAIN_LOOP; // --------------------------------------------------------------------- case INVALID_NUMBER: if(gstate == TGT) { // invalid tgt -> skip the whole row lstate = SKIP_LINE; dout.invalidLine("invalid number (expecting target)"); continue MAIN_LOOP; } if(gstate == VAL){ // add invalid value and skip until whitespace or eol dout.addInvalidCol(colIdx++); gstate = COL; } case QID0: if(c == 'i'){ lstate = QID1; break NEXT_CHAR; } else { lstate = SKIP_TOKEN; break NEXT_CHAR; } case QID1: if(c == 'd'){ lstate = SKIP_TOKEN; // skip qid for now break NEXT_CHAR; } else { // TODO report an error lstate = SKIP_TOKEN;; break NEXT_CHAR; } // fall through case SKIP_TOKEN: if(isEOL(c)) lstate = EOL; else if(isWhitespace(c)) lstate = WHITESPACE_BEFORE_TOKEN; break NEXT_CHAR; default: assert (false) : " We have wrong state "+lstate; } // end NEXT_CHAR ++offset; // do not need to adjust for offset increase here - the offset is set to tokenStart-1! if (offset < 0) { // Offset is negative? assert !firstChunk; // Caused by backing up from 2nd chunk into 1st chunk firstChunk = true; bits = bits0; offset += bits.length; _str.set(bits,offset,0); } else if (offset >= bits.length) { // Off end of 1st chunk? Parse into 2nd chunk // Attempt to get more data. if( firstChunk && bits1 == null ){ bits1 = din.getChunkData(cidx+1); // linePrefix = new String(Arrays.copyOfRange(bits, linestart, bits.length)); linestart = 0; } // if we can't get further we might have been the last one and we must // commit the latest guy if we had one. 
if( !firstChunk || bits1 == null ) { // No more data available or allowed // If we are mid-parse of something, act like we saw a LF to end the // current token. if ((lstate != EXPECT_COND_LF) && (lstate != POSSIBLE_EMPTY_LINE)) { c = CHAR_LF; continue MAIN_LOOP; } break MAIN_LOOP; // Else we are just done } // Now parsing in the 2nd chunk. All offsets relative to the 2nd chunk start. firstChunk = false; if (lstate == NUMBER_FRACTION) fractionDigits -= bits.length; offset -= bits.length; bits = bits1; // Set main parsing loop bits if( bits[0] == CHAR_LF && lstate == EXPECT_COND_LF ) break MAIN_LOOP; // when the first character we see is a line end } c = bits[offset]; } // end MAIN_LOOP return dout; } private static class InspectDataOut extends Iced implements DataOut { public int _nlines; public int _ncols; public int _invalidLines; public final static int MAX_COLS = 100; public final static int MAX_LINES = 10; private String [][] _data = new String[MAX_LINES][MAX_COLS]; transient ArrayList<String> _errors = new ArrayList<String>(); public InspectDataOut() { for(int i = 0; i < MAX_LINES;++i) Arrays.fill(_data[i],"0"); } public String [][] data(){ if(_data.length <= _nlines && _data[0].length <= _ncols) return _data; String [][] res = Arrays.copyOf(_data, Math.min(MAX_LINES, _nlines)); for(int i = 0; i < res.length; ++i) res[i] = Arrays.copyOf(_data[i], Math.min(MAX_COLS,_ncols)); return (_data = res); } @Override public void setColumnNames(String[] names) {} @Override public void newLine() { ++_nlines; } @Override public boolean isString(int colIdx) {return false;} @Override public void addNumCol(int colIdx, long number, int exp) { _ncols = Math.max(_ncols,colIdx); if(colIdx < MAX_COLS && _nlines < MAX_LINES) _data[_nlines][colIdx] = Double.toString(number*PrettyPrint.pow10(exp)); } @Override public void addNumCol(int colIdx, double d) { _ncols = Math.max(_ncols,colIdx); if(colIdx < MAX_COLS) _data[_nlines][colIdx] = Double.toString(d); } @Override public void addInvalidCol(int colIdx) {} @Override public void addStrCol(int colIdx, ValueString str) {} @Override public void rollbackLine() {--_nlines;} @Override public void invalidLine(String error) { ++_invalidLines; if(_errors.size() < 10) _errors.add("error at line " + (_nlines +_invalidLines) + ", cause: " + error); } @Override public void invalidValue(int linenum, int colnum) {} public String [] errors(){ String [] res = new String[_errors.size()]; return _errors.toArray(res); } } }
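// ---------------------------------------------------------------------------
// Usage sketch added for illustration; not part of the original H2O source.
// It feeds a few SVMLight-formatted rows ("<target> <col>:<value> ...", column
// ids strictly increasing) to the static guessSetup() defined above.  The sample
// data and class name are made up; the water.parser classes are assumed on the classpath.
package water.parser;

public class SVMLightParserExample {
  public static void main(String[] args) {
    String rows = "1 1:0.5 3:2.0\n"
                + "0 2:1.5 4:-3.25\n"
                + "1 1:1e-2 5:7\n";          // trailing newline so the last line is complete
    byte[] bytes = rows.getBytes();
    // guessSetup() runs the parser over the bytes and reports the guessed
    // column count, parsed preview rows and any per-line errors.
    System.out.println(SVMLightParser.guessSetup(bytes));
  }
}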
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/parser/ValueString.java
package water.parser; import java.util.ArrayList; import water.Iced; public final class ValueString extends Iced implements Comparable<ValueString> { private byte [] _buf; private int _off; private int _len; public ValueString() {} public ValueString(byte [] buf, int off, int len){ _buf = buf; _off = off; _len = len; } public ValueString(String from) { _buf = from.getBytes(); _off = 0; _len = get_buf().length; } public ValueString(byte [] buf){ this(buf,0,buf.length); } @Override public int compareTo( ValueString o ) { int len = Math.min(_len,o._len); for( int i=0; i<len; i++ ) { int x = (0xFF&_buf[_off+i]) - (0xFF&o._buf[o._off+i]); if( x != 0 ) return x; } return _len - o._len; } @Override public int hashCode(){ int hash = 0; int n = get_off() + get_length(); for (int i = get_off(); i < n; ++i) hash = 31 * hash + get_buf()[i]; return hash; } void addChar(){_len++;} void addBuff(byte [] bits){ byte [] buf = new byte[get_length()]; int l1 = get_buf().length-get_off(); System.arraycopy(get_buf(), get_off(), buf, 0, l1); System.arraycopy(bits, 0, buf, l1, get_length()-l1); _off = 0; _buf = buf; } // WARNING: LOSSY CONVERSION!!! // Converting to a String will truncate all bytes with high-order bits set, // even if they are otherwise a valid member of the field/ValueString. // Converting back to a ValueString will then make something with fewer // characters than what you started with, and will fail all equals() tests. @Override public String toString(){ return new String(_buf,_off,_len); } public static String[] toString( ValueString vs[] ) { if( vs==null ) return null; String[] ss = new String[vs.length]; for( int i=0; i<vs.length; i++ ) ss[i] = vs[i].toString(); return ss; } void set(byte [] buf, int off, int len){ _buf = buf; _off = off; _len = len; } public ValueString setTo(String what) { _buf = what.getBytes(); _off = 0; _len = _buf.length; return this; } public void setOff(int off) { _off=off; } @Override public boolean equals(Object o){ if(!(o instanceof ValueString)) return false; ValueString str = (ValueString)o; if(str.get_length() != get_length())return false; for(int i = 0; i < get_length(); ++i) if(get_buf()[get_off()+i] != str.get_buf()[str.get_off()+i]) return false; return true; } public final byte [] get_buf() {return _buf;} public final int get_off() {return _off;} public final int get_length() {return _len;} }
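// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: ValueString is a cheap
// window (buf/off/len) over a byte[] so the parsers can compare tokens without
// allocating Strings.  The sample buffer and class name below are made up.
import water.parser.ValueString;

public class ValueStringExample {
  public static void main(String[] args) {
    byte[] buf = "year,income,state".getBytes();
    ValueString col1 = new ValueString(buf, 0, 4);   // window over "year"
    ValueString col2 = new ValueString(buf, 5, 6);   // window over "income"
    System.out.println(col1.equals(new ValueString("year")));   // true: byte-wise compare
    System.out.println(col1.compareTo(col2));                   // > 0: 'y' sorts after 'i'
    // Parsers reuse a single instance to avoid allocation:
    ValueString scratch = new ValueString();
    scratch.setTo("state");
    System.out.println(scratch);
  }
}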
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/parser/XlsParser.java
package water.parser; import java.io.*; import java.util.ArrayList; import org.apache.poi.hssf.eventusermodel.*; import org.apache.poi.hssf.eventusermodel.dummyrecord.LastCellOfRowDummyRecord; import org.apache.poi.hssf.eventusermodel.dummyrecord.MissingCellDummyRecord; import org.apache.poi.hssf.record.*; import org.apache.poi.poifs.filesystem.POIFSFileSystem; import water.util.Log; import water.util.Log.Tag.Sys; public class XlsParser extends CustomParser implements HSSFListener { private transient POIFSFileSystem _fs; private transient FormatTrackingHSSFListener _formatListener; private transient final ValueString _str = new ValueString(); private transient CustomParser.DataOut _dout; public XlsParser(){super(new ParserSetup(ParserType.XLS,CsvParser.AUTO_SEP,0,false,null,false));} public XlsParser(CustomParser.ParserSetup setup){super(null);} public XlsParser clone(){return new XlsParser(_setup);} @Override public DataOut streamParse( final InputStream is, final DataOut dout) throws Exception { _dout = dout; _firstRow = true; try { _fs = new POIFSFileSystem(is); MissingRecordAwareHSSFListener listener = new MissingRecordAwareHSSFListener(this); _formatListener = new FormatTrackingHSSFListener(listener); HSSFEventFactory factory = new HSSFEventFactory(); HSSFRequest request = new HSSFRequest(); request.addListenerForAllRecords(_formatListener); factory.processWorkbookEvents(request, _fs); } finally { try { is.close(); } catch (IOException e) { } } return dout; } /** * Try to parse the bits as svm light format, return SVMParser instance if the input is in svm light format, null otherwise. * @param bits * @return SVMLightPArser instance or null */ public static PSetupGuess guessSetup(byte [] bits){ InputStream is = new ByteArrayInputStream(bits); XlsParser p = new XlsParser(); CustomInspectDataOut dout = new CustomInspectDataOut(); try{p.streamParse(is, dout);}catch(Exception e){} return new PSetupGuess(new ParserSetup(ParserType.XLS,CsvParser.AUTO_SEP,dout._ncols, dout._header,dout._header?dout.data()[0]:null,false),dout._nlines,dout._invalidLines,dout.data(),dout._nlines > dout._invalidLines,null); } transient ArrayList<String> _columnNames = new ArrayList(); boolean _firstRow; @Override public void processRecord(Record record) { int curCol = -1; double curNum = Double.NaN; ValueString curStr = null; switch( record.getSid() ) { case BoundSheetRecord.sid: case BOFRecord.sid: // we just run together multiple sheets break; case SSTRecord.sid: _sstRecord = (SSTRecord) record; break; case BlankRecord.sid: BlankRecord brec = (BlankRecord) record; curCol = brec.getColumn(); curStr = _str.setTo(""); break; case BoolErrRecord.sid: BoolErrRecord berec = (BoolErrRecord) record; curCol = berec.getColumn(); curStr = _str.setTo(""); break; case FormulaRecord.sid: FormulaRecord frec = (FormulaRecord) record; curCol = frec.getColumn(); curNum = frec.getValue(); if( Double.isNaN(curNum) ) { // Formula result is a string // This is stored in the next record _outputNextStringRecord = true; _nextCol = frec.getColumn(); } break; case StringRecord.sid: if( _outputNextStringRecord ) { // String for formula StringRecord srec = (StringRecord) record; curStr = _str.setTo(srec.getString()); curCol = _nextCol; _outputNextStringRecord = false; } break; case LabelRecord.sid: LabelRecord lrec = (LabelRecord) record; curCol = lrec.getColumn(); curStr = _str.setTo(lrec.getValue()); break; case LabelSSTRecord.sid: LabelSSTRecord lsrec = (LabelSSTRecord) record; if( _sstRecord == null ) { Log.warn(Sys.EXCEL,"[ExcelParser] 
Missing SST record"); } else { curCol = lsrec.getColumn(); curStr = _str.setTo(_sstRecord.getString(lsrec.getSSTIndex()).toString()); } break; case NoteRecord.sid: Log.warn(Sys.EXCEL,"Warning cell notes are unsupported"); break; case NumberRecord.sid: NumberRecord numrec = (NumberRecord) record; curCol = numrec.getColumn(); curNum = numrec.getValue(); break; case RKRecord.sid: Log.warn(Sys.EXCEL,"Warning RK records are unsupported"); break; default: break; } // Handle missing column if( record instanceof MissingCellDummyRecord ) { MissingCellDummyRecord mc = (MissingCellDummyRecord) record; curCol = mc.getColumn(); curNum = Double.NaN; } // Handle end of row if( record instanceof LastCellOfRowDummyRecord ) { if (_firstRow) { _firstRow = false; String[] arr = new String[_columnNames.size()]; arr = _columnNames.toArray(arr); _dout.setColumnNames(arr); } else { _dout.newLine(); curCol = -1; } } if (curCol == -1) return; if (_firstRow) { _columnNames.add(curStr == null ? ("C" + (curCol+1)) : curStr.toString()); } else { if (curStr == null) if (Double.isNaN(curNum)) _dout.addInvalidCol(curCol); else _dout.addNumCol(curCol, curNum); else _dout.addStrCol(curCol, curStr); } } private transient SSTRecord _sstRecord; private int _nextCol; private boolean _outputNextStringRecord; @Override public boolean isCompatible(CustomParser p) { return p instanceof XlsParser; } }
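// ---------------------------------------------------------------------------
// Usage sketch, not part of the original source: streaming a legacy .xls
// workbook through the HSSF-event-based guessSetup() above.  The file path is a
// placeholder; Apache POI and the water.parser classes are assumed on the classpath.
import java.nio.file.Files;
import java.nio.file.Paths;
import water.parser.XlsParser;

public class XlsParserExample {
  public static void main(String[] args) throws Exception {
    byte[] bits = Files.readAllBytes(Paths.get("data.xls"));   // hypothetical workbook
    // guessSetup() replays every cell record and reports the column count,
    // whether the first row looks like a header, and a preview of the data.
    System.out.println(XlsParser.guessSetup(bits));
  }
}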
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/persist/HdfsLoader.java
package water.persist; import java.io.File; import water.Boot; import water.H2O; import water.util.Log; import com.google.common.base.Objects; import com.google.common.base.Strings; public class HdfsLoader { private static final String DEFAULT_HDFS_VERSION = "cdh4"; private static final String MAPRFS_HDFS_VERSION = "mapr2.1.3"; public static void loadJars() { if (H2O.OPT_ARGS.hdfs_skip != null) { // When H2O is launched by hadoop itself, it should use the HDFS library that // the hadoop mapper task picks up by default. // // Do not load any hadoop jar that is packed with H2O. Log.info("H2O was started by Hadoop; inheriting HDFS library from mapper task."); return; } if (H2O.OPT_ARGS.hdfs_version != null) { Log.info("HDFS version specified on the command line: " + H2O.OPT_ARGS.hdfs_version); } // Load the HDFS backend for existing hadoop installations. // FIX! hadoop/mapr supports other variants? also why isn't port an option on mapr, and why volume? // port should be optional // understands -hdfs=hdfs://server:port OR -hdfs=maprfs:///mapr/node_name/volume // -hdfs-root=root // -hdfs-config=config file String version = Objects.firstNonNull(H2O.OPT_ARGS.hdfs_version, DEFAULT_HDFS_VERSION); // If HDFS URI is MapR-fs - Switch to MapR version of hadoop // FIX! shouldn't we just use whatever the hdfs_version specifies previously? if( "mapr".equals(version) || Strings.nullToEmpty(H2O.OPT_ARGS.hdfs).startsWith("maprfs:///") ) { version = MAPRFS_HDFS_VERSION; } try { if( Boot._init.fromJar() ) { File f = new File(version); if( f.exists() ) { Boot._init.addExternalJars(f); } else { Boot._init.addInternalJars("hadoop/" + version + "/"); } } } catch( Exception e ) { Log.err(e); Log.die("[hdfs] Unable to initialize hadoop version " + version + " please use different version."); } } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/persist/Persist.java
package water.persist; import java.io.File; import java.io.IOException; import java.net.URI; import java.util.Arrays; import water.*; import water.api.Constants.Schemes; import water.util.Log; public abstract class Persist<T> { // All available back-ends, C.f. Value for indexes public static final Persist[] I = new Persist[8]; public static final long UNKNOWN = 0; public static void initialize() {} static { Persist ice = null; URI uri = H2O.ICE_ROOT; if( uri != null ) { // Otherwise class loaded for reflection boolean windowsPath = uri.toString().matches("^[a-zA-Z]:.*"); // System.out.println("TOM uri getPath(): " + uri.getPath()); // System.out.println("TOM windowsPath: " + (windowsPath ? "true" : "false")); if ( windowsPath ) { ice = new PersistFS(new File(uri.toString())); } else if ((uri.getScheme() == null) || Schemes.FILE.equals(uri.getScheme())) { ice = new PersistFS(new File(uri.getPath())); } else if( Schemes.HDFS.equals(uri.getScheme()) ) { ice = new PersistHdfs(uri); } // System.out.println("TOM ice is null: " + ((ice == null) ? "true" : "false")); // TODO ice on other back-ends? // else if( Schemes.S3.equals(uri.getScheme()) ) { // ice = new PersistS3(uri); // } else if( Schemes.NFS.equals(uri.getScheme()) ) { // ice = new PersistNFS(uri); // } I[Value.ICE ] = ice; I[Value.HDFS ] = new PersistHdfs(); I[Value.S3 ] = new PersistS3(); I[Value.NFS ] = new PersistNFS(); I[Value.TACHYON] = new PersistTachyon(); // By popular demand, clear out ICE on startup instead of trying to preserve it if( H2O.OPT_ARGS.keepice == null ) ice.clear(); else ice.loadExisting(); } } public static Persist getIce() { return I[Value.ICE]; } public abstract String getPath(); public abstract void clear(); /** * Load all Key/Value pairs that can be found on the backend. */ public abstract void loadExisting(); /** * Value should already be persisted to disk. A racing delete can trigger a failure where we get a * null return, but no crash (although one could argue that a racing load and delete is a bug no * matter what). */ public abstract byte[] load(Value v); public abstract void store(Value v); public abstract void delete(Value v); public long getUsableSpace() { return UNKNOWN; } public long getTotalSpace() { return UNKNOWN; } //the filename can be either byte encoded if it starts with % followed by // a number, or is a normal key name with special characters encoded in // special ways. // It is questionable whether we need this because the only keys we have on // ice are likely to be arraylet chunks static String getIceName(Value v) { return getIceName(v._key, (byte) 'V'); } static String getIceName(Key k, byte type) { return getIceDirectory(k) + File.separator + key2Str(k, type); } static String getIceDirectory(Key key) { return "not_an_arraylet"; } // Verify bijection of key/file-name mappings. private static String key2Str(Key k, byte type) { String s = key2Str_impl(k, type); Key x; assert (x = str2Key_impl(s)).equals(k) : "bijection fail " + k + "." + (char) type + " <-> " + s + " <-> " + x; return s; } // Verify bijection of key/file-name mappings. 
static Key str2Key(String s) { Key k = str2Key_impl(s); assert key2Str_impl(k, decodeType(s)).equals(s) : "bijection fail " + s + " <-> " + k; return k; } private static byte decodeType(String s) { String ext = s.substring(s.lastIndexOf('.') + 1); return (byte) ext.charAt(0); } // Convert a Key to a suitable filename string private static String key2Str_impl(Key k, byte type) { // check if we are system key StringBuilder sb = new StringBuilder(k._kb.length / 2 + 4); int i = 0; if( k._kb[0] < 32 ) { // System keys: hexalate all the leading non-ascii bytes sb.append('%'); int j = k._kb.length - 1; // Backwards scan for 1st non-ascii while( j >= 0 && k._kb[j] >= 32 && k._kb[j] < 128 ) j--; for( ; i <= j; i++ ) { byte b = k._kb[i]; int nib0 = ((b >>> 4) & 15) + '0'; if( nib0 > '9' ) nib0 += 'A' - 10 - '0'; int nib1 = ((b >>> 0) & 15) + '0'; if( nib1 > '9' ) nib1 += 'A' - 10 - '0'; sb.append((char) nib0).append((char) nib1); } sb.append('%'); } // Escape the special bytes from 'i' to the end return escapeBytes(k._kb, i, sb).append('.').append((char) type).toString(); } private static StringBuilder escapeBytes(byte[] bytes, int i, StringBuilder sb) { for( ; i < bytes.length; i++ ) { char b = (char)bytes[i], c=0; switch( b ) { case '%': c='%'; break; case '.': c='d'; break; case '/': c='s'; break; case ':': c='c'; break; case '"': c='q'; break; case '>': c='g'; break; case '\\':c='b'; break; case '\0':c='z'; break; } if( c!=0 ) sb.append('%').append(c); else sb.append(b); } return sb; } // Convert a filename string to a Key private static Key str2Key_impl(String s) { String key = s.substring(0, s.lastIndexOf('.')); // Drop extension byte[] kb = new byte[(key.length() - 1) / 2]; int i = 0, j = 0; if( (key.length() > 2) && (key.charAt(0) == '%') && (key.charAt(1) >= '0') && (key.charAt(1) <= '9') ) { // Dehexalate until '%' for( i = 1; i < key.length(); i += 2 ) { if( key.charAt(i) == '%' ) break; char b0 = (char) (key.charAt(i + 0) - '0'); if( b0 > 9 ) b0 += '0' + 10 - 'A'; char b1 = (char) (key.charAt(i + 1) - '0'); if( b1 > 9 ) b1 += '0' + 10 - 'A'; kb[j++] = (byte) ((b0 << 4) | b1); // De-hexelated byte } i++; // Skip the trailing '%' } // a normal key - ASCII with special characters encoded after % sign for( ; i < key.length(); ++i ) { byte b = (byte) key.charAt(i); if( b == '%' ) { switch( key.charAt(++i) ) { case '%': b = '%'; break; case 'c': b = ':'; break; case 'd': b = '.'; break; case 'g': b = '>'; break; case 'q': b = '"'; break; case 's': b = '/'; break; case 'b': b = '\\'; break; case 'z': b = '\0'; break; default: Log.warn("Invalid format of filename " + s + " at index " + i); } } if( j >= kb.length ) kb = Arrays.copyOf(kb, Math.max(2, j * 2)); kb[j++] = b; } // now in kb we have the key name return Key.make(Arrays.copyOf(kb, j)); } /** Return default URI of server to fetch data */ public String getDefaultURI() { return null; } /** Create a client to communicate with default URI server */ public final T createClient() throws IOException { return createClient(getDefaultURI()); } /** Create a client for given URI. */ public T createClient(String uri) throws IOException { throw H2O.unimpl(); } }
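// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: a standalone copy of the
// escape table used by escapeBytes()/str2Key_impl() above, showing how special
// bytes in a Key name are made filesystem-safe.  The real key2Str() additionally
// hexalates leading non-ASCII bytes of system keys and appends a '.' + type suffix.
public class KeyEscapeSketch {
  static String escape(String keyName) {
    StringBuilder sb = new StringBuilder();
    for (char b : keyName.toCharArray()) {
      char c = 0;
      switch (b) {
        case '%': c = '%'; break;
        case '.': c = 'd'; break;
        case '/': c = 's'; break;
        case ':': c = 'c'; break;
        case '"': c = 'q'; break;
        case '>': c = 'g'; break;
        case '\\': c = 'b'; break;
        case '\0': c = 'z'; break;
      }
      if (c != 0) sb.append('%').append(c); else sb.append(b);
    }
    return sb.toString();
  }
  public static void main(String[] args) {
    // Prints "hdfs%c%s%sserver%c9000%sdata%dcsv"
    System.out.println(escape("hdfs://server:9000/data.csv"));
  }
}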
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/persist/PersistFS.java
package water.persist; import java.io.*; import water.*; import water.util.Log; import water.util.Utils; /** * Persistence backend using local file system. */ public final class PersistFS extends Persist { public final File _root; public final File _dir; PersistFS(File root) { _root = root; _dir = new File(root, "ice" + H2O.API_PORT); // Make the directory as-needed root.mkdirs(); if( !(root.isDirectory() && root.canRead() && root.canWrite()) ) { Log.die("ice_root not a read/writable directory"); } } @Override public String getPath() { return _dir.toString(); } @Override public void clear() { clear(_dir); } private void clear(File f) { File[] cs = f.listFiles(); if( cs != null ) { for( File c : cs ) { if( c.isDirectory() ) clear(c); c.delete(); } } } @Override public void loadExisting() { loadExisting(_dir); } private void loadExisting(File f) { for( File c : f.listFiles() ) { if( c.isDirectory() ) { loadExisting(c); // Recursively keep loading K/V pairs } else { Key k = str2Key(c.getName()); Value ice = new Value(k, (int) c.length()); ice.setdsk(); H2O.putIfAbsent_raw(k, ice); } } } private File getFile(Value v) { return new File(_dir, getIceName(v)); } @Override public byte[] load(Value v) { File f = getFile(v); if( f.length() < v._max ) { // Should be fully on disk... // or it's a racey delete of a spilled value assert !v.isPersisted() : f.length() + " " + v._max + " " + v._key; return null; // No value } try { FileInputStream s = new FileInputStream(f); try { AutoBuffer ab = new AutoBuffer(s.getChannel(), true, Value.ICE); byte[] b = ab.getA1(v._max); ab.close(); return b; } finally { s.close(); } } catch( IOException e ) { // Broken disk / short-file??? throw new RuntimeException(Log.err("File load failed: ", e)); } } // Store Value v to disk. @Override public void store(Value v) { assert !v.isPersisted(); new File(_dir, getIceDirectory(v._key)).mkdirs(); // Nuke any prior file. FileOutputStream s = null; try { s = new FileOutputStream(getFile(v)); } catch( FileNotFoundException e ) { String info = "Key: " + v._key.toString() + "\nEncoded: " + getFile(v); throw new RuntimeException(Log.err("Encoding a key to a file failed!\n" + info, e)); } try { byte[] m = v.memOrLoad(); // we are not single threaded anymore assert m != null && m.length == v._max : "Trying to save partial file: value key=" + v._key + ", length to save=" + m + ", value max size=" + v._max; // Assert not saving partial files new AutoBuffer(s.getChannel(), false, Value.ICE).putA1(m, m.length).close(); v.setdsk(); // Set as write-complete to disk } finally { Utils.close(s); } } @Override public void delete(Value v) { assert !v.isPersisted(); // Upper layers already cleared out File f = getFile(v); f.delete(); } @Override public long getUsableSpace() { return _root.getUsableSpace(); } @Override public long getTotalSpace() { return _root.getTotalSpace(); } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/persist/PersistHdfs.java
package water.persist; import java.io.*; import java.net.SocketTimeoutException; import java.net.URI; import java.util.ArrayList; import java.util.concurrent.Callable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.s3.S3Exception; import water.*; import water.Job.ProgressMonitor; import water.api.Constants; import water.api.Constants.Extensions; import water.fvec.*; import water.util.*; import water.util.Log.Tag.Sys; import com.google.common.base.Strings; import com.google.common.io.ByteStreams; import dontweave.gson.*; public final class PersistHdfs extends Persist { public static final Configuration CONF; private final Path _iceRoot; // Returns String with path for given key. private static String getPathForKey(Key k) { final int off = k._kb[0]==Key.DVEC ? Vec.KEY_PREFIX_LEN : 0; return new String(k._kb,off,k._kb.length-off); } static { Configuration conf = null; if( H2O.OPT_ARGS.hdfs_config != null ) { conf = new Configuration(); File p = new File(H2O.OPT_ARGS.hdfs_config); if( !p.exists() ) Log.die("Unable to open hdfs configuration file " + p.getAbsolutePath()); conf.addResource(new Path(p.getAbsolutePath())); Log.debug(Sys.HDFS_, "resource ", p.getAbsolutePath(), " added to the hadoop configuration"); Log.info(Sys.HDFS_, "resource ", p.getAbsolutePath(), " added to the hadoop configuration"); } else { conf = new Configuration(); if( !Strings.isNullOrEmpty(H2O.OPT_ARGS.hdfs) ) { // setup default remote Filesystem - for version 0.21 and higher conf.set("fs.defaultFS", H2O.OPT_ARGS.hdfs); // To provide compatibility with version 0.20.0 it is necessary to setup the property // fs.default.name which was in newer version renamed to 'fs.defaultFS' conf.set("fs.default.name", H2O.OPT_ARGS.hdfs); } } CONF = conf; } // Loading HDFS files PersistHdfs() { _iceRoot = null; } // Loading/Writing ice to HDFS PersistHdfs(URI uri) { try { _iceRoot = new Path(uri + "/ice" + H2O.SELF_ADDRESS.getHostAddress() + "-" + H2O.API_PORT); // Make the directory as-needed FileSystem fs = FileSystem.get(_iceRoot.toUri(), CONF); fs.mkdirs(_iceRoot); } catch( Exception e ) { throw Log.errRTExcept(e); } } @Override public String getPath() { return _iceRoot != null ? _iceRoot.toString() : null; } @Override public void loadExisting() { // TODO? 
throw new UnsupportedOperationException(); } @Override public void clear() { assert this == getIce(); run(new Callable() { @Override public Object call() throws Exception { FileSystem fs = FileSystem.get(_iceRoot.toUri(), CONF); fs.delete(_iceRoot, true); return null; } }, false, 0); } private static class H2OHdfsInputStream extends RIStream { final FileSystem _fs; final Path _path; public H2OHdfsInputStream(Path p, long offset, ProgressMonitor pmon) throws IOException { super(offset, pmon); _path = p; _fs = FileSystem.get(p.toUri(), CONF); setExpectedSz(_fs.getFileStatus(p).getLen()); open(); } @Override protected InputStream open(long offset) throws IOException { FSDataInputStream is = _fs.open(_path); is.seek(offset); return is; } } public static InputStream openStream(Key k, ProgressMonitor pmon) throws IOException { H2OHdfsInputStream res = null; Path p = new Path(k.toString()); try { res = new H2OHdfsInputStream(p, 0, pmon); } catch( IOException e ) { try { Thread.sleep(1000); } catch( Exception ex ) {} Log.warn("Error while opening HDFS key " + k.toString() + ", will wait and retry."); res = new H2OHdfsInputStream(p, 0, pmon); } return res; } @Override public byte[] load(final Value v) { final byte[] b = MemoryManager.malloc1(v._max); long skip = 0; Key k = v._key; if(k._kb[0] == Key.DVEC) skip = FileVec.chunkOffset(k); // The offset final Path p = _iceRoot == null?new Path(getPathForKey(k)):new Path(_iceRoot, getIceName(v)); final long skip_ = skip; run(new Callable() { @Override public Object call() throws Exception { FileSystem fs = FileSystem.get(p.toUri(), CONF); FSDataInputStream s = null; try { s = fs.open(p); // NOTE: // The following line degrades performance of HDFS load from S3 API: s.readFully(skip,b,0,b.length); // Google API's simple seek has better performance // Load of 300MB file via Google API ~ 14sec, via s.readFully ~ 5min (under the same condition) ByteStreams.skipFully(s, skip_); ByteStreams.readFully(s, b); assert v.isPersisted(); } finally { Utils.close(s); } return null; } }, true, v._max); return b; } @Override public void store(Value v) { // Should be used only if ice goes to HDFS assert this == getIce(); assert !v.isPersisted(); byte[] m = v.memOrLoad(); assert (m == null || m.length == v._max); // Assert not saving partial files store(new Path(_iceRoot, getIceName(v)), m); v.setdsk(); // Set as write-complete to disk } public static void store(final Path path, final byte[] data) { run(new Callable() { @Override public Object call() throws Exception { FileSystem fs = FileSystem.get(path.toUri(), CONF); fs.mkdirs(path.getParent()); FSDataOutputStream s = fs.create(path); try { s.write(data); } finally { s.close(); } return null; } }, false, data.length); } @Override public void delete(final Value v) { assert this == getIce(); assert !v.isPersisted(); // Upper layers already cleared out run(new Callable() { @Override public Object call() throws Exception { Path p = new Path(_iceRoot, getIceName(v)); FileSystem fs = FileSystem.get(p.toUri(), CONF); fs.delete(p, true); return null; } }, false, 0); } private static class Size { int _value; } private static void run(Callable c, boolean read, int size) { // Count all i/o time from here, including all retry overheads long start_io_ms = System.currentTimeMillis(); while( true ) { try { long start_ns = System.nanoTime(); // Blocking i/o call timing - without counting repeats c.call(); TimeLine.record_IOclose(start_ns, start_io_ms, read ? 
1 : 0, size, Value.HDFS); break; // Explicitly ignore the following exceptions but // fail on the rest IOExceptions } catch( EOFException e ) { ignoreAndWait(e, false); } catch( SocketTimeoutException e ) { ignoreAndWait(e, false); } catch( S3Exception e ) { // Preserve S3Exception before IOException // Since this is tricky code - we are supporting different HDFS version // New version declares S3Exception as IOException // But old versions (0.20.xxx) declares it as RuntimeException // So we have to catch it before IOException !!! ignoreAndWait(e, false); } catch( IOException e ) { ignoreAndWait(e, true); } catch( Exception e ) { throw Log.errRTExcept(e); } } } private static void ignoreAndWait(final Exception e, boolean printException) { H2O.ignore(e, "Hit HDFS reset problem, retrying...", printException); try { Thread.sleep(500); } catch( InterruptedException ie ) {} } /* * Load all files in a folder. */ public static void addFolder(Path p, JsonArray succeeded, JsonArray failed) throws IOException { FileSystem fs = FileSystem.get(p.toUri(), PersistHdfs.CONF); if(!fs.exists(p)){ JsonObject o = new JsonObject(); o.addProperty(Constants.FILE, p.toString()); o.addProperty(Constants.ERROR, "Path does not exist!"); failed.add(o); return; } addFolder(fs, p, succeeded, failed); } public static void addFolder2(Path p, ArrayList<String> keys,ArrayList<String> failed) throws IOException { FileSystem fs = FileSystem.get(p.toUri(), PersistHdfs.CONF); if(!fs.exists(p)){ failed.add("Path does not exist: '" + p.toString() + "'"); return; } addFolder2(fs, p, keys, failed); } private static void addFolder2(FileSystem fs, Path p, ArrayList<String> keys, ArrayList<String> failed) { try { if( fs == null ) return; Futures futures = new Futures(); for( FileStatus file : fs.listStatus(p) ) { Path pfs = file.getPath(); if( file.isDir() ) { addFolder2(fs, pfs, keys, failed); } else { long size = file.getLen(); Key res; if( pfs.getName().endsWith(Extensions.JSON) ) { throw H2O.unimpl(); } else if( pfs.getName().endsWith(Extensions.HEX) ) { // Hex file? throw H2O.unimpl(); } else { Key k = null; keys.add((k = HdfsFileVec.make(file, futures)).toString()); Log.info("PersistHdfs: DKV.put(" + k + ")"); } } } } catch( Exception e ) { Log.err(e); failed.add(p.toString()); } } private static void addFolder(FileSystem fs, Path p, JsonArray succeeded, JsonArray failed) { try { if( fs == null ) return; for( FileStatus file : fs.listStatus(p) ) { Path pfs = file.getPath(); if( file.isDir() ) { addFolder(fs, pfs, succeeded, failed); } else { Key k = Key.make(pfs.toString()); long size = file.getLen(); Value val = new Value(k, (int) size, Value.HDFS); // Plain Value val.setdsk(); DKV.put(k, val); Log.info("PersistHdfs: DKV.put(" + k + ")"); JsonObject o = new JsonObject(); o.addProperty(Constants.KEY, k.toString()); o.addProperty(Constants.FILE, pfs.toString()); o.addProperty(Constants.VALUE_SIZE, file.getLen()); succeeded.add(o); } } } catch( Exception e ) { Log.err(e); JsonObject o = new JsonObject(); o.addProperty(Constants.FILE, p.toString()); o.addProperty(Constants.ERROR, e.getMessage()); failed.add(o); } } }
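// ---------------------------------------------------------------------------
// Hypothetical usage sketch, not part of the original source: importing every
// file under an HDFS folder into H2O keys via the public addFolder2() above.
// The namenode URI and folder are placeholders, and a running H2O node with
// HDFS configured (-hdfs / -hdfs_config) is assumed.
import java.util.ArrayList;
import org.apache.hadoop.fs.Path;
import water.persist.PersistHdfs;

public class HdfsImportExample {
  public static void main(String[] args) throws Exception {
    ArrayList<String> keys   = new ArrayList<String>();
    ArrayList<String> failed = new ArrayList<String>();
    PersistHdfs.addFolder2(new Path("hdfs://namenode:8020/datasets/airlines"), keys, failed);
    System.out.println("imported keys: " + keys);
    System.out.println("failed paths : " + failed);
  }
}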
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/persist/PersistNFS.java
package water.persist; import java.io.*; import java.nio.channels.FileChannel; import water.*; // Persistence backend for network file system. // Just for loading or storing files. // // @author cliffc public final class PersistNFS extends Persist { static final String KEY_PREFIX = "nfs:"; public static final int KEY_PREFIX_LENGTH = KEY_PREFIX.length(); // file implementation ------------------------------------------------------- public static Key decodeFile(File f) { String kname = KEY_PREFIX + File.separator + f.toString(); assert (kname.length() <= 512); // all NFS keys are NFS-kind keys return Key.make(kname.getBytes()); } // Returns the file for given key. private static File getFileForKey(Key k) { final int len = KEY_PREFIX_LENGTH+1; // Strip key prefix & leading slash final int off = k._kb[0]==Key.DVEC ? water.fvec.Vec.KEY_PREFIX_LEN : 0; String s = new String(k._kb,len+off,k._kb.length-(len+off)); return new File(s); } public static InputStream openStream(Key k) throws IOException { return new FileInputStream(getFileForKey(k)); } // Read up to 'len' bytes of Value. Value should already be persisted to // disk. A racing delete can trigger a failure where we get a null return, // but no crash (although one could argue that a racing load&delete is a bug // no matter what). @Override public byte[] load(Value v) { long skip = 0; Key k = v._key; // Convert a chunk into a long-offset from the base file. if( k._kb[0] == Key.DVEC ) skip = water.fvec.NFSFileVec.chunkOffset(k); // The offset try { FileInputStream s = null; try { s = new FileInputStream(getFileForKey(k)); FileChannel fc = s.getChannel(); fc.position(skip); AutoBuffer ab = new AutoBuffer(fc, true, Value.NFS); byte[] b = ab.getA1(v._max); ab.close(); assert v.isPersisted(); return b; } finally { if( s != null ) s.close(); } } catch( IOException e ) { // Broken disk / short-file??? H2O.ignore(e); return null; } } // Store Value v to disk. @Override public void store(Value v) { // Only the home node does persistence on NFS if( !v._key.home() ) return; // A perhaps useless cutout: the upper layers should test this first. if( v.isPersisted() ) return; try { File f = getFileForKey(v._key); f.mkdirs(); FileOutputStream s = new FileOutputStream(f); try { byte[] m = v.memOrLoad(); assert (m == null || m.length == v._max); // Assert not saving partial files if( m != null ) new AutoBuffer(s.getChannel(), false, Value.NFS).putA1(m, m.length).close(); v.setdsk(); // Set as write-complete to disk } finally { s.close(); } } catch( IOException e ) { H2O.ignore(e); } } // TODO needed if storing ice to S3 @Override public String getPath() { throw new UnsupportedOperationException(); } @Override public void clear() { throw new UnsupportedOperationException(); } @Override public void loadExisting() { throw new UnsupportedOperationException(); } @Override public void delete(Value v) { throw new UnsupportedOperationException(); } }
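// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: turning a local file
// into an NFS-backed key with decodeFile() and reading it back through
// openStream().  The path is a placeholder and an in-process H2O node is
// assumed, since Key.make needs the cloud to be up.
import java.io.File;
import java.io.InputStream;
import water.Key;
import water.persist.PersistNFS;

public class NfsKeyExample {
  public static void main(String[] args) throws Exception {
    File f = new File("/data/train.csv");          // hypothetical local file
    Key k = PersistNFS.decodeFile(f);              // key name is "nfs:" + separator + path
    InputStream is = PersistNFS.openStream(k);
    System.out.println("first byte of " + k + ": " + is.read());
    is.close();
  }
}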
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/persist/PersistS3.java
package water.persist; import java.io.*; import java.net.SocketTimeoutException; import java.util.Arrays; import java.util.Properties; import water.*; import water.Job.ProgressMonitor; import water.api.Constants.Extensions; import water.fvec.FileVec; import water.fvec.Vec; import water.util.Log; import water.util.RIStream; import com.amazonaws.*; import com.amazonaws.auth.*; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3Client; import com.amazonaws.services.s3.model.*; import com.google.common.base.Objects; import com.google.common.io.ByteStreams; /** Persistence backend for S3 */ public final class PersistS3 extends Persist { private static final String HELP = "You can specify a credentials properties file with the -aws_credentials command line switch."; private static final String KEY_PREFIX = "s3://"; private static final int KEY_PREFIX_LEN = KEY_PREFIX.length(); private static final Object _lock = new Object(); private static volatile AmazonS3 _s3; public static AmazonS3 getClient() { if( _s3 == null ) { synchronized( _lock ) { if( _s3 == null ) { try { _s3 = new AmazonS3Client(new H2OAWSCredentialsProviderChain(), s3ClientCfg()); } catch( Throwable e ) { StringBuilder msg = new StringBuilder(); msg.append(e.getMessage() + "\n"); msg.append("Unable to load S3 credentials."); if( H2O.OPT_ARGS.aws_credentials == null ) msg.append(HELP); throw Log.err(new RuntimeException(msg.toString())); } } } } return _s3; } /** Modified version of default credentials provider which includes H2O-specific * credentials provider. */ public static class H2OAWSCredentialsProviderChain extends AWSCredentialsProviderChain { public H2OAWSCredentialsProviderChain() { super(new H2OArgCredentialsProvider(), new InstanceProfileCredentialsProvider(), new EnvironmentVariableCredentialsProvider(), new SystemPropertiesCredentialsProvider()); } } /** A simple credentials provider reading file-based credentials from given * command argument <code>--aws_credentials</code>. 
*/ static class H2OArgCredentialsProvider implements AWSCredentialsProvider { // Default location of the AWS credentials file public static final String DEFAULT_CREDENTIALS_LOCATION = "AwsCredentials.properties"; @Override public AWSCredentials getCredentials() { File credentials = new File(Objects.firstNonNull(H2O.OPT_ARGS.aws_credentials, DEFAULT_CREDENTIALS_LOCATION)); try { return new PropertiesCredentials(credentials); } catch (IOException e) { throw new AmazonClientException("Unable to load AWS credentials from file " + credentials); } } @Override public void refresh() {} @Override public String toString() { return getClass().getSimpleName(); } } public static final class H2SO3InputStream extends RIStream { Key _k; long _to; String[] _bk; @Override protected InputStream open(long offset) { return getClient().getObject(new GetObjectRequest(_bk[0], _bk[1]).withRange(offset, _to)).getObjectContent(); } public H2SO3InputStream(Key k, ProgressMonitor pmon) { this(k, pmon, 0, Long.MAX_VALUE); } public H2SO3InputStream(Key k, ProgressMonitor pmon, long from, long to) { super(from, pmon); _k = k; _to = Math.min(DKV.get(k).length() - 1, to); _bk = decodeKey(k); open(); } } public static InputStream openStream(Key k, ProgressMonitor pmon) throws IOException { return new H2SO3InputStream(k, pmon); } public static Key loadKey(S3ObjectSummary obj) throws IOException { Key k = encodeKey(obj.getBucketName(), obj.getKey()); long size = obj.getSize(); Value val = new Value(k, (int) size, Value.S3); // Plain Value val.setdsk(); DKV.put(k, val); return k; } // file implementation ------------------------------------------------------- // Read up to 'len' bytes of Value. Value should already be persisted to // disk. A racing delete can trigger a failure where we get a null return, // but no crash (although one could argue that a racing load&delete is a bug // no matter what). @Override public byte[] load(Value v) { long start_io_ms = System.currentTimeMillis(); byte[] b = MemoryManager.malloc1(v._max); Key k = v._key; long skip = 0; // Skip offset based on chunk number if(k._kb[0] == Key.DVEC) skip = FileVec.chunkOffset(k); // The offset // Too complicate matters, S3 likes to reset connections when H2O hits it // too hard. We "fix" this by just trying again, assuming we're getting // hit with a bogus resource limit (H2O doing a parse looks like a DDOS to // Amazon S3). S3ObjectInputStream s = null; while( true ) { // Loop, in case we get premature EOF's try { long start_ns = System.nanoTime(); // Blocking i/o call timing - without counting repeats s = getObjectForKey(k, skip, v._max).getObjectContent(); ByteStreams.readFully(s, b); // delegate work to Google (it reads the byte buffer in a cycle as we did) assert v.isPersisted(); TimeLine.record_IOclose(start_ns, start_io_ms, 1/* read */, v._max, Value.S3); return b; // Explicitly ignore the following exceptions but // fail on the rest IOExceptions } catch( EOFException e ) { ignoreAndWait(e, false); } catch( SocketTimeoutException e ) { ignoreAndWait(e, false); } catch( IOException e ) { ignoreAndWait(e, true); } finally { try { if( s != null ) s.close(); } catch( IOException e ) {} } } } private static void ignoreAndWait(final Exception e, boolean printException) { H2O.ignore(e, "Hit the S3 reset problem, waiting and retrying...", printException); try { Thread.sleep(500); } catch( InterruptedException ie ) {} } // Store Value v to disk. 
@Override public void store(Value v) { if( !v._key.home() ) return; throw H2O.unimpl(); // VA only } /** * Creates the key for given S3 bucket and key. Returns the H2O key, or null if the key cannot be * created. * * @param bucket * Bucket name * @param key * Key name (S3) * @return H2O key pointing to the given bucket and key. */ public static Key encodeKey(String bucket, String key) { Key res = encodeKeyImpl(bucket, key); // assert checkBijection(res, bucket, key); return res; } /** * Decodes the given H2O key to the S3 bucket and key name. Returns the array of two strings, * first one is the bucket name and second one is the key name. * * @param k * Key to be decoded. * @return Pair (array) of bucket name and key name. */ public static String[] decodeKey(Key k) { return decodeKeyImpl(k); // assert checkBijection(k, res[0], res[1]); // return res; } // private static boolean checkBijection(Key k, String bucket, String key) { // Key en = encodeKeyImpl(bucket, key); // String[] de = decodeKeyImpl(k); // boolean res = Arrays.equals(k._kb, en._kb) && bucket.equals(de[0]) && key.equals(de[1]); // assert res : "Bijection failure:" + "\n\tKey 1:" + k + "\n\tKey 2:" + en + "\n\tBkt 1:" + bucket + "\n\tBkt 2:" // + de[0] + "\n\tStr 1:" + key + "\n\tStr 2:" + de[1] + ""; // return res; // } private static Key encodeKeyImpl(String bucket, String key) { return Key.make(KEY_PREFIX + bucket + '/' + key); } private static String[] decodeKeyImpl(Key k) { String s = new String((k._kb[0] == Key.DVEC)?Arrays.copyOfRange(k._kb, Vec.KEY_PREFIX_LEN, k._kb.length):k._kb); assert s.startsWith(KEY_PREFIX) && s.indexOf('/') >= 0 : "Attempting to decode non s3 key: " + k; s = s.substring(KEY_PREFIX_LEN); int dlm = s.indexOf('/'); String bucket = s.substring(0, dlm); String key = s.substring(dlm + 1); return new String[] { bucket, key }; } // Gets the S3 object associated with the key that can read length bytes from offset private static S3Object getObjectForKey(Key k, long offset, long length) throws IOException { String[] bk = decodeKey(k); GetObjectRequest r = new GetObjectRequest(bk[0], bk[1]); r.setRange(offset, offset + length - 1); // Range is *inclusive* according to docs??? return getClient().getObject(r); } // Gets the object metadata associated with given key. 
private static ObjectMetadata getObjectMetadataForKey(Key k) { String[] bk = decodeKey(k); assert (bk.length == 2); return getClient().getObjectMetadata(bk[0], bk[1]); } /** S3 socket timeout property name */ public final static String S3_SOCKET_TIMEOUT_PROP = "water.s3.socketTimeout"; /** S3 connection timeout property name */ public final static String S3_CONNECTION_TIMEOUT_PROP = "water.s3.connectionTimeout"; /** S3 maximal error retry number */ public final static String S3_MAX_ERROR_RETRY_PROP = "water.s3.maxErrorRetry"; /** S3 maximal http connections */ public final static String S3_MAX_HTTP_CONNECTIONS_PROP = "water.s3.maxHttpConnections"; static ClientConfiguration s3ClientCfg() { ClientConfiguration cfg = new ClientConfiguration(); Properties prop = System.getProperties(); if( prop.containsKey(S3_SOCKET_TIMEOUT_PROP) ) cfg.setSocketTimeout(Integer.getInteger(S3_SOCKET_TIMEOUT_PROP)); if( prop.containsKey(S3_CONNECTION_TIMEOUT_PROP) ) cfg.setConnectionTimeout(Integer .getInteger(S3_CONNECTION_TIMEOUT_PROP)); if( prop.containsKey(S3_MAX_ERROR_RETRY_PROP) ) cfg.setMaxErrorRetry(Integer.getInteger(S3_MAX_ERROR_RETRY_PROP)); if( prop.containsKey(S3_MAX_HTTP_CONNECTIONS_PROP) ) cfg.setMaxConnections(Integer .getInteger(S3_MAX_HTTP_CONNECTIONS_PROP)); cfg.setProtocol(Protocol.HTTP); return cfg; } // TODO needed if storing ice to S3 @Override public String getPath() { throw new UnsupportedOperationException(); } @Override public void clear() { throw new UnsupportedOperationException(); } @Override public void loadExisting() { throw new UnsupportedOperationException(); } @Override public void delete(Value v) { throw new UnsupportedOperationException(); } }
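// ---------------------------------------------------------------------------
// Minimal sketch, not part of the original source: the bucket/key <-> H2O Key
// mapping used above.  Bucket and object names are placeholders; an in-process
// H2O node is assumed for Key.make.  Client tuning goes through the system
// properties listed above, e.g. -Dwater.s3.socketTimeout=60000.
import water.Key;
import water.persist.PersistS3;

public class S3KeyExample {
  public static void main(String[] args) {
    Key k = PersistS3.encodeKey("my-bucket", "datasets/iris.csv");  // "s3://my-bucket/datasets/iris.csv"
    String[] bk = PersistS3.decodeKey(k);
    System.out.println("bucket = " + bk[0] + ", key = " + bk[1]);
  }
}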
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/persist/PersistTachyon.java
package water.persist; import java.io.IOException; import java.io.InputStream; import java.util.Arrays; import com.google.common.io.ByteStreams; import tachyon.client.ReadType; import tachyon.client.TachyonFS; import water.*; import water.Job.ProgressMonitor; import water.fvec.FileVec; import water.fvec.Vec; import water.util.*; public class PersistTachyon extends Persist<TachyonFS> { public static final String PREFIX = "tachyon://"; public static final String DEFAULT_CLIENT_URI = "tachyon://localhost:19998"; private final String _defaultUri; PersistTachyon() { this(DEFAULT_CLIENT_URI); } PersistTachyon(String uri) { _defaultUri = uri; } public static InputStream openStream(Key k, ProgressMonitor pmon) { return new H2OTachyonInputStream(k, pmon); } @Override public byte[] load(Value v) { Key k = v._key; // key for value if (k._kb[0] != Key.DVEC) throw H2O.unimpl(); // Load only from values stored in vector long skip = FileVec.chunkOffset(k); // Compute skip for this value long start_io_ms = System.currentTimeMillis(); final byte[] b = MemoryManager.malloc1(v._max); String[] keyComp = decodeKey(k); String clientUri = keyComp[0]; String fpath = keyComp[1]; TachyonFS tfs = null; InputStream is = null; try { tfs = (TachyonFS) (Persist.I[Value.TACHYON].createClient(clientUri)); long start_ns = System.nanoTime(); // Blocking i/o call timing - without counting repeats is = tfs.getFile(fpath).getInStream(ReadType.NO_CACHE); ByteStreams.skipFully(is, skip); ByteStreams.readFully(is, b); TimeLine.record_IOclose(start_ns, start_io_ms, 1/* read */, v._max, Value.TACHYON); return b; } catch (IOException e) { throw new RuntimeException(Log.err("File load failed: ", e)); } finally { if (is!=null) Utils.close(is); } } public static final class H2OTachyonInputStream extends RIStream { final private Key key; final private String clientURI; final private String fpath; protected H2OTachyonInputStream(Key k, long from, ProgressMonitor pmon) { super(from, pmon); key = k; String[] c = decodeKey(k); clientURI = c[0]; fpath = c[1]; } public H2OTachyonInputStream(Key k, ProgressMonitor pmon) { this(k, 0, pmon); } @Override protected InputStream open(long offset) throws IOException { TachyonFS tfs = (TachyonFS) (Persist.I[Value.TACHYON].createClient(clientURI)); InputStream is = tfs.getFile(fpath).getInStream(ReadType.NO_CACHE); is.skip(offset); return is; } } /** Split key name composed of tachyon://<client-uri>/filename into two parts: * - client-uri without tachyon:// prefix * - filename * And returns both components. */ private static String[] decodeKey(Key k) { String s = new String((k.isChunkKey()) ? 
Arrays.copyOfRange(k._kb, Vec.KEY_PREFIX_LEN, k._kb.length):k._kb); return decode(s); } public static String[] decode(String s) { assert s.startsWith(PREFIX) : "Unsupported key name for tachyon: " + s; s = s.substring(PREFIX.length()); int nextSlash = s.indexOf('/'); String clientUri, filename; if (nextSlash!=-1) { clientUri = s.substring(0, nextSlash); filename = s.substring(nextSlash); } else { clientUri = s; filename = "/"; } return new String[] { clientUri, filename }; } // // Un-implemented methods // @Override public String getPath() { throw new UnsupportedOperationException(); } @Override public void clear() { throw new UnsupportedOperationException(); } @Override public void loadExisting() { throw new UnsupportedOperationException(); } @Override public void store(Value v) { throw new UnsupportedOperationException(); } @Override public void delete(Value v) { throw new UnsupportedOperationException(); } // // Methods providing low-level client implementation // @Override public String getDefaultURI() { return _defaultUri; } @Override public TachyonFS createClient(String uri) throws IOException { if (!uri.startsWith(PREFIX)) uri = PREFIX + uri; return TachyonFS.get(uri); } }
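// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: how a tachyon:// key
// name splits into client URI and file path via the public decode() above.
// The master host and file path are placeholders.
import water.persist.PersistTachyon;

public class TachyonKeyExample {
  public static void main(String[] args) {
    String[] parts = PersistTachyon.decode("tachyon://tachyon-master:19998/datasets/iris.csv");
    System.out.println("client URI: " + parts[0]);  // "tachyon-master:19998"
    System.out.println("file path : " + parts[1]);  // "/datasets/iris.csv"
  }
}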
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/schemas/API.java
package water.schemas;

import java.lang.annotation.*;

/** API Annotation
 *
 *  API annotations are used to document *input field* behaviors for the
 *  external REST API.  Each input field to some web page is described by a
 *  matching Java field, plus these annotations.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.FIELD})
@Documented
public @interface API {

  // A short help text to appear beside the input
  String help();

  // The following are markers for *input* fields.
  // If at least one of these annotations appears, this is an input field.
  // If none appear, this is NOT an input field.

  // A list of field names that this field depends on
  String[] dependsOn() default {};

  // A short boolean expression that can be executed *on the front end* to
  // validate inputs without requiring as much chatty traffic with the server.
  // The language here is TBD, but will be easily eval'd by JavaScript.
  //
  // For example, a "this field is required" test can be done by checking that
  // the URL string is not empty in the front end.
  //
  // The Big Hammer Notation for overriding all other validation schemes in the
  // API language is to call out a ?validation URL:
  //   "Cloud?validation=some_java_func" calls
  //   boolean CloudV1Handler.some_java_func(CloudV1 cv1)
  String validation() default "";

  // A short JS-like expression, same as "validation" above, that returns a
  // selection of valid values.  Used for e.g. drop-down menus where response
  // times are interactive.
  String values() default "";
}
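// ---------------------------------------------------------------------------
// Hypothetical example, not part of the original source, of how schema fields
// would carry the @API annotation.  Field names and the validation expression
// are made up; only help() is required, and a field counts as an *input* once
// validation(), values() or dependsOn() is non-empty.
import water.schemas.API;

public class ExampleSchemaFields {
  @API(help = "Key of the frame to summarize",
       validation = "frame_key != ''")          // front-end-evaluated check marks this as an input
  public String frame_key;

  @API(help = "Number of rows actually summarized")
  public long rows;                              // no input markers: treated as output only
}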
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/schemas/HTTP404V1.java
package water.schemas; import java.util.Arrays; import water.AutoBuffer; import water.H2O; import water.api.Handler; import water.util.RString; // import water.util.DocGen.HTML; public class HTTP404V1 extends Schema { // This Schema has no inputs // Output fields @API(help="Error message") final String errmsg; @API(help="Error url") final String errurl; public HTTP404V1( String msg, String url ) { errmsg = msg; errurl = url; } @Override public HTTP404V1 fillInto( Handler h ) { throw H2O.fail(); } @Override public HTTP404V1 fillFrom( Handler h ) { throw H2O.fail(); } /* @Override public HTML writeHTML_impl( HTML ab ) { ab.bodyHead(); ab.title("HTTP 404 - Not Found"); ab.p("<div class='alert alert-error'>").p(errmsg).p("</div>"); return ab.bodyTail(); } */ }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/schemas/HTTP500V1.java
package water.schemas;

import java.util.Arrays;
import water.H2O;
import water.api.Handler;

public class HTTP500V1 extends Schema {
  final String error;
  final String stackTrace;

  public HTTP500V1( Exception e ) {
    error = e.getClass().getSimpleName()+": "+e.getMessage();
    stackTrace = Arrays.toString(e.getStackTrace());
  }

  @Override public HTTP500V1 fillInto( Handler h ) { throw H2O.fail(); }
  @Override public HTTP500V1 fillFrom( Handler h ) { throw H2O.fail(); }
}
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/schemas/ModelBuildersMetadataV1.java
package water.schemas; import water.api.handlers.ModelBuildersMetadataHandlerV1; public class ModelBuildersMetadataV1 extends Schema<ModelBuildersMetadataHandlerV1, ModelBuildersMetadataV1> { /* // Output fields @API(help="List of model builders.") List<ModelBuilder> modelBuilders; */ // Version&Schema-specific filling into the handler public ModelBuildersMetadataV1 fillInto( ModelBuildersMetadataHandlerV1 h ) { throw new UnsupportedOperationException("ModelBuildersMetadataV1.fillInto"); } // Version&Schema-specific filling from the handler public ModelBuildersMetadataV1 fillFrom( ModelBuildersMetadataHandlerV1 h ) { throw new UnsupportedOperationException("ModelBuildersMetadataV1.fillFrom"); } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/schemas/Schema.java
package water.schemas; import java.lang.reflect.*; import java.util.ArrayList; import java.util.Properties; import water.*; import water.api.Handler; import water.fvec.Frame; /** Base Schema Class * * All Schemas inherit from here. Schemas have a State section (broken into * Input fields and Output fields) and an Adapter section to fill the State to * and from URLs and JSON. The base Adapter logic is here, and will by * default copy same-named fields to and from Schemas to concrete Iced objects. * * Schema Fields must have a single API annotation describing in they are an * input field or not (all fields will be output by default), and any extra * requirements on the input (prior field dependencies and other validation * checks). Transient & Static fields are ignored. */ public abstract class Schema<H extends Handler<H,S>,S extends Schema<H,S>> extends Iced { private final transient int _version; protected final int getVersion() { return _version; } protected Schema() { // Check version number String n = this.getClass().getSimpleName(); assert n.charAt(n.length()-2)=='V' : "Schema classname does not end in a 'V' and a version #"; _version = n.charAt(n.length()-1)-'0'; assert 0 <= _version && _version <= 9 : "Schema classname does not contain version"; } // Version&Schema-specific filling into the handler abstract public S fillInto( H h ); // Version&Schema-specific filling from the handler abstract public S fillFrom( H h ); // This Schema accepts a Frame as it's first & main argument, used by the // Frame Inspect & Parse pages to give obvious options for Modeling, Summary, // export-to-CSV etc options. Return a URL or null if not appropriate. public String acceptsFrame( Frame fr ) { return null; } // Fill self from parms. Limited to dumb primitive parsing and simple // reflective field filling. Ignores fields not in the Schema. Throws IAE // if the primitive parameter cannot be parsed as the primitive field type. // Dupped args are handled by Nano, as 'parms' can only have a single arg // mapping for a given name. // Also does various sanity checks for broken Schemas. Fields must not be // private. Input fields get filled here, so must not be final. 
public S fillFrom( Properties parms ) { // Get passed-in fields, assign into Schema Class clz = getClass(); for( String key : parms.stringPropertyNames() ) { try { Field f = clz.getDeclaredField(key); // No such field error, if parm is junk int mods = f.getModifiers(); if( Modifier.isTransient(mods) || Modifier.isStatic(mods) ) // Attempting to set a transient or static; treat same as junk fieldname throw new IllegalArgumentException("Unknown argument "+key); // Only support a single annotation which is an API, and is required API api = (API)f.getAnnotations()[0]; // Must have one of these set to be an input field if( api.validation().length()==0 && api.values ().length()==0 && api.dependsOn ().length ==0 ) throw new IllegalArgumentException("Attempting to set output field "+key); // Primitive parse by field type f.set(this,parse(parms.getProperty(key),f.getType())); } catch( NoSuchFieldException nsfe ) { // Convert missing-field to IAE throw new IllegalArgumentException("Unknown argument "+key); } catch( ArrayIndexOutOfBoundsException aioobe ) { // Come here if missing annotation throw new RuntimeException("Broken internal schema; missing API annotation: "+key); } catch( IllegalAccessException iae ) { // Come here if field is final or private throw new RuntimeException("Broken internal schema; cannot be private nor final: "+key); } } // Here every thing in 'parms' was set into some field - so we have already // checked for unknown or extra parms. // Confirm required fields are set do { for( Field f : clz.getDeclaredFields() ) { int mods = f.getModifiers(); if( Modifier.isTransient(mods) || Modifier.isStatic(mods) ) continue; // Ignore transient & static API api = (API)f.getAnnotations()[0]; if( api.validation().length() > 0 ) { // TODO: execute "validation language" in the BackEnd, which includes a "required check", if any if( parms.getProperty(f.getName()) == null ) throw new IllegalArgumentException("Required field "+f.getName()+" not specified"); } } clz = clz.getSuperclass(); } while( Iced.class.isAssignableFrom(clz.getSuperclass()) ); return (S)this; } // URL parameter parse private <E> Object parse( String s, Class fclz ) { if( fclz.equals(String.class) ) return s; // Strings already the right primitive type if( fclz.isArray() ) { // An array? read(s, 0 ,'[',fclz); read(s,s.length()-1,']',fclz); String[] splits = s.substring(1,s.length()-1).split(","); Class<E> afclz = (Class<E>)fclz.getComponentType(); E[] a= (E[])Array.newInstance(afclz,splits.length); for( int i=0; i<splits.length; i++ ) a[i] = (E)parse(splits[i],afclz); return a; } if( fclz.equals(Key.class) ) return Key.make(s); throw new RuntimeException("Unimplemented schema fill from "+fclz.getSimpleName()); } private int read( String s, int x, char c, Class fclz ) { if( peek(s,x,c) ) return x+1; throw new IllegalArgumentException("Expected '"+c+"' while reading a "+fclz.getSimpleName()+", but found "+s); } private boolean peek( String s, int x, char c ) { return x < s.length() && s.charAt(x) == c; } }
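// ---------------------------------------------------------------------------
// Hypothetical sketch, not part of the original source: the parameter formats the
// reflective fillFrom(Properties)/parse() pair above accepts.  The field names are
// made up and no concrete schema is instantiated here; the comments describe what
// a concrete version-N schema would do with these properties.
import java.util.Properties;

public class SchemaFillSketch {
  public static void main(String[] args) {
    Properties parms = new Properties();
    parms.setProperty("frame_key", "my_frame.hex");      // Key-typed field: parsed via Key.make(s)
    parms.setProperty("columns", "[age,income,state]");  // array field: must be wrapped in [ ... ], comma-split
    // A concrete schema's fillFrom(parms) would reflectively assign both fields,
    // throwing IllegalArgumentException for unknown names, attempts to set output
    // fields, or required inputs that are missing.
    System.out.println(parms);
  }
}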
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/score/ScoreModel.java
package water.score;

import java.util.HashMap;
import java.util.HashSet;

import water.util.Log;
import water.util.Log.Tag.Sys;

/**
 * Embedded Scoring model
 */
public abstract class ScoreModel {
  public final String _name;
  public final String _colNames[];      // Column names

  ScoreModel( String name, String colNames[] ) {
    _name = name;
    _colNames = colNames;
  }

  // Convert an XML name to a java name
  protected static String xml2jname( String xml ) {
    // Convert pname to a valid java name
    StringBuilder nn = new StringBuilder();
    char[] cs = xml.toCharArray();
    if( !Character.isJavaIdentifierStart(cs[0]) ) nn.append('X');
    for( char c : cs ) {
      if( !Character.isJavaIdentifierPart(c) ) {
        nn.append('_');
      } else {
        nn.append(c);
      }
    }
    String jname = nn.toString();
    return jname;
  }

  // The list of JIT'd classes, each a specific subclass of ScorecardModel
  // representing the optimized version of a particular set of scoring rules.
  final static HashSet<String> CLASS_NAMES = new HashSet<String>();

  // Make a unique class name for jit'd subclasses of ScoreModel
  protected static String uniqueClassName(String name) {
    // Make a unique class name
    String cname = xml2jname(name);
    if( CLASS_NAMES.contains(cname) ) {
      int i=0;
      while( CLASS_NAMES.contains(cname+i) ) i++;
      cname = cname+i;
    }
    CLASS_NAMES.add(cname);
    return cname;
  }

  // A mapping from the dense columns desired by the model to the given
  // feature list, computed by asking the model for a mapping (given a list of
  // features).  Some features may be unused and won't appear in the mapping.
  // If the data row features list does not mention all the features the model
  // needs, then this map will contain a -1 for the missing feature index.
  public int[] columnMapping( String[] features ) {
    int[] map = new int[_colNames.length];
    for( int i=0; i<_colNames.length; i++ ) {
      map[i] = -1;              // Assume it is missing
      for( int j=0; j<features.length; j++ ) {
        if( _colNames[i].equals(features[j]) ) {
          if( map[i] != -1 )
            throw new IllegalArgumentException("duplicate feature "+_colNames[i]);
          map[i] = j;
        }
      }
      if( map[i] == -1 )
        Log.warn(Sys.SCORM,"Model feature "+_colNames[i]+" not in the provided feature list from the data");
    }
    return map;
  }

  /** Score this model on the specified row of data, where the data is
   *  specified as a collection of K/V pairs - Values are one of String or
   *  Boolean or Number (or subclasses of Number) */
  public abstract double score(final HashMap<String, Comparable> row );

  /** Score this model on the specified row of data, where the data is
   *  specified as the members of arrays.  MAP is used to map between the SS/DS
   *  columns and the columns desired by the Model; this map can be made by a
   *  single call to columnMapping.  SS/DS hold either String values (for
   *  enum/categorical data) or a primitive double.  This format exchanges a
   *  HashMap lookup for a bare array access, and can be faster (perhaps much
   *  faster) for models that are already quick to score. */
  public abstract double score(int[] MAP, String[] SS, double[] DS);
}
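The column-mapping contract above can be shown with a standalone sketch of the same matching loop; the feature names are invented, and the duplicate-feature check and missing-feature warning of the real method are omitted for brevity.

// Illustrative stand-alone version of ScoreModel.columnMapping: for each model
// column, find its index in the data's feature list, or -1 if it is missing.
public class ColumnMappingSketch {
  static int[] columnMapping(String[] modelCols, String[] dataFeatures) {
    int[] map = new int[modelCols.length];
    for (int i = 0; i < modelCols.length; i++) {
      map[i] = -1;                                  // assume missing
      for (int j = 0; j < dataFeatures.length; j++)
        if (modelCols[i].equals(dataFeatures[j]))
          map[i] = j;                               // remember the data column index
    }
    return map;
  }

  public static void main(String[] args) {
    String[] modelCols    = {"age", "income", "zip"};  // columns the model wants
    String[] dataFeatures = {"zip", "age", "name"};    // columns the data row provides
    int[] map = columnMapping(modelCols, dataFeatures);
    for (int m : map) System.out.print(m + " ");       // prints: 1 -1 0
  }
}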
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/score/ScorecardModel.java
package water.score; import java.lang.reflect.Constructor; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import javassist.*; import water.parser.PMMLParser.DataTypes; import water.parser.PMMLParser.Predicate; import water.parser.PMMLParser; import water.score.ScoreModel; import water.util.Log; import water.util.Log.Tag.Sys; /** * Scorecard model - decision table. */ public class ScorecardModel extends ScoreModel { /** Initial score */ final double _initialScore; /** The rules to each for each feature, they map 1-to-1 with the Model's * column list. */ final RuleTable _rules[]; /** Score this model on the specified row of data. */ public double score(final HashMap<String, Comparable> row ) { // By default, use the scoring interpreter. The Builder JITs a new // subclass with an overloaded 'score(row)' call which has a JIT'd version // of the rules. i.e., calling 'score(row)' on the returned ScorecardModel // instance runs the fast version, but you can cast to the base version if // you want the interpreter. return score_interpreter(row); } // Use the rule interpreter public double score_interpreter(final HashMap<String, Comparable> row ) { double score = _initialScore; for( int i=0; i<_rules.length; i++ ) score += _rules[i].score(row.get(_colNames[i])); return score; } public double score(int[] MAP, String[] SS, double[] DS) { return score_interpreter(MAP,SS,DS); } private double score_interpreter(int[] MAP, String[] SS, double[] DS) { double score = _initialScore; for( int i=0; i<_rules.length; i++ ) { int idx = MAP[i]; String ss = idx==-1 ? null : SS[idx]; double dd = idx==-1 ? Double.NaN : DS[idx]; double s = _rules[i].score(ss,dd); score += s; } return score; } // JIT a score method with signature 'double score(HashMap row)' public void makeScoreHashMethod(CtClass scClass) { // Map of previously extracted PMML names, and their java equivs HashMap<String,String> vars = new HashMap<String,String>(); StringBuilder sb = new StringBuilder(); sb.append("double score( java.util.HashMap row ) {\n"+ " double score = "+_initialScore+";\n"); try { for( int i=0; i<_rules.length; i++ ) _rules[i].makeFeatureHashMethod(sb,vars,scClass); sb.append(" return score;\n}\n"); CtMethod happyMethod = CtMethod.make(sb.toString(),scClass); scClass.addMethod(happyMethod); } catch( Exception re ) { Log.err(Sys.SCORM,"Crashing:"+sb.toString(), new RuntimeException(re)); } } public void makeScoreAryMethod(CtClass scClass) { // Map of previously extracted PMML names, and their java equivs HashMap<String,String> vars = new HashMap<String,String>(); StringBuilder sb = new StringBuilder(); sb.append("double score( int[] MAP, java.lang.String[] SS, double[] DS ) {\n"+ " double score = "+_initialScore+";\n"); try { for( int i=0; i<_rules.length; i++ ) _rules[i].makeFeatureAryMethod(sb,vars,scClass,i); sb.append(" return score;\n}\n"); CtMethod happyMethod = CtMethod.make(sb.toString(),scClass); scClass.addMethod(happyMethod); } catch( Exception re ) { Log.err(Sys.SCORM,"Crashing:"+sb.toString(), new RuntimeException(re)); } } // Return the java-equivalent from the PMML variable name, creating and // installing it as needed. If the value is created, we also emit Java code // to emit it at runtime. 
public static String getName( String pname, DataTypes type, StringBuilder sb ) { String jname = xml2jname(pname); // Emit the code to do the load return jname; } /** Feature decision table */ public static class RuleTable { final String _name; final Rule[] _rule; final DataTypes _type; final double _baseScore; public RuleTable(String name, DataTypes type, Rule[] decisions, double baseScore) { _name = name; _type = type; _rule = decisions; _baseScore = baseScore; } public void makeFeatureHashMethod( StringBuilder sbParent, HashMap<String,String> vars, CtClass scClass ) { if( _type == null ) { Log.warn("Ignore untyped feature "+_name); return; } String jname = xml2jname(_name); StringBuilder sb = new StringBuilder(); sb.append("double ").append(jname).append("( java.util.HashMap row ) {\n"+ " double score = 0;\n"); switch( _type ) { case STRING : sb.append(" String " ); break; case BOOLEAN: sb.append(" double "); break; default : sb.append(" double " ); break; } sb.append(jname); switch( _type ) { case STRING : sb.append(" = water.parser.PMMLParser.getString (row,\""); break; case BOOLEAN: sb.append(" = water.parser.PMMLParser.getBoolean(row,\"" ); break; default : sb.append(" = water.parser.PMMLParser.getNumber (row,\"" ); break; } sb.append(_name).append("\");\n"); sb.append(" if( false ) ;\n"); for (Rule r : _rule) if( _type == DataTypes.STRING) r.toJavaStr(sb,jname); else if( _type == DataTypes.BOOLEAN) r.toJavaBool(sb,jname); else r.toJavaNum(sb,jname); // close the dangling 'else' from all the prior rules sb.append(" return score;\n}\n"); sbParent.append(" score += ").append(jname).append("(row);\n"); // Now install the method try { CtMethod happyMethod = CtMethod.make(sb.toString(),scClass); scClass.addMethod(happyMethod); } catch( Exception re ) { Log.err(Sys.SCORM,"Crashing:"+sb.toString(), new RuntimeException(re)); } } public void makeFeatureAryMethod( StringBuilder sbParent, HashMap<String,String> vars, CtClass scClass, int fidx ) { if( _type == null ) return; // Untyped, ignore String jname = xml2jname(_name); StringBuilder sb = new StringBuilder(); sb.append("double ").append(jname); sb.append("( int[]MAP, java.lang.String[]SS, double[]DS ) {\n"+ " double score = 0;\n"+ " int didx=MAP[").append(fidx).append("];\n"); switch( _type ) { case STRING : sb.append(" String " ); break; case BOOLEAN: sb.append(" boolean "); break; default : sb.append(" double " ); break; } sb.append(jname); switch( _type ) { case STRING : sb.append(" = didx==-1 ? null : SS[didx];\n"); break; case BOOLEAN: sb.append(" = didx==-1 ? false : DS[didx]==1.0;\n"); break; default : sb.append(" = didx==-1 ? 
Double.NaN : DS[didx];\n" ); break; } sb.append(" if( false ) ;\n"); for (Rule r : _rule) if( _type == DataTypes.STRING) r.toJavaStr(sb,jname); else if( _type == DataTypes.BOOLEAN) r.toJavaBool(sb,jname); else r.toJavaNum(sb,jname); // close the dangling 'else' from all the prior rules sb.append(" return score;\n}\n"); sbParent.append(" score += ").append(jname).append("(MAP,SS,DS);\n"); // Now install the method try { CtMethod happyMethod = CtMethod.make(sb.toString(),scClass); scClass.addMethod(happyMethod); } catch( Exception re ) { Log.err(Sys.SCORM,"Crashing:"+sb.toString(), new RuntimeException(re)); } } // The rule interpreter double score(Comparable value) { double score = 0; for (Rule r : _rule) { if( r.match(value) ) { score += r._score; break; } } return score; } double score(String s, double d) { double score = 0; for (Rule r : _rule) { if( r.match(s,d) ) { score += r._score; break; } } return score; } @Override public String toString() { return "RuleTable [_name=" + _name + ", _rule=" + Arrays.toString(_rule) + ", _type=" + _type + " baseScore="+_baseScore+"]"; } } /** Scorecard decision rule */ public static class Rule { final double _score; final Predicate _predicate; public Rule(double score, Predicate pred) { assert pred != null; _score = score; _predicate = pred; } boolean match(Comparable value) { return _predicate.match(value); } boolean match(String s, double d) { return _predicate.match(s,d); } @Override public String toString() { return _predicate.toString() + " => " + _score; } public StringBuilder toJavaNum( StringBuilder sb, String jname ) { sb.append(" else if( "); return _predicate.toJavaNum(sb,jname).append(" ) score += ").append(_score).append(";\n"); } public StringBuilder toJavaBool( StringBuilder sb, String jname ) { sb.append(" else if( "); return _predicate.toJavaBool(sb,jname).append(" ) score += ").append(_score).append(";\n"); } public StringBuilder toJavaStr( StringBuilder sb, String jname ) { sb.append(" else if( "); return _predicate.toJavaStr(sb,jname).append(" ) score += ").append(_score).append(";\n"); } String unique_name() { return _predicate.unique_name(); } } @Override public String toString() { return super.toString()+", _initialScore=" + _initialScore; } private ScorecardModel(String name, String[] colNames, double initialScore, RuleTable[] rules) { super(name,colNames); assert colNames.length==rules.length; _initialScore = initialScore; _rules = rules; } protected ScorecardModel(ScorecardModel base) { this(base._name,base._colNames,base._initialScore,base._rules); } /** Scorecard model builder: JIT a subclass with the fast version wired in to * 'score(row)' */ public static ScorecardModel make(final String name, final double initialScore, RuleTable[] rules) { // Get the list of features String[] colNames = new String[rules.length]; for( int i=0; i<rules.length; i++ ) colNames[i] = rules[i]._name; // javassist support for rewriting class files ClassPool _pool = ClassPool.getDefault(); try { // Make a javassist class in the java hierarchy String cname = uniqueClassName(name); CtClass scClass = _pool.makeClass(cname); CtClass baseClass = _pool.get("water.score.ScorecardModel"); // Full Name Lookup scClass.setSuperclass(baseClass); // Produce the scoring method(s) ScorecardModel scm = new ScorecardModel(name, colNames,initialScore, rules); scm.makeScoreHashMethod(scClass); scm.makeScoreAryMethod(scClass); // Produce a 1-arg constructor String cons = " public "+cname+"(water.score.ScorecardModel base) { super(base); }"; CtConstructor happyConst 
= CtNewConstructor.make(cons,scClass); scClass.addConstructor(happyConst); Class myClass = scClass.toClass(ScorecardModel.class.getClassLoader(), null); Constructor<ScorecardModel> co = myClass.getConstructor(ScorecardModel.class); ScorecardModel jitted_scm = co.newInstance(scm); return jitted_scm; } catch( Exception e ) { Log.err(Sys.SCORM,"Javassist failed",e); } return null; } // ------------------------------------------------------------------------- public static ScorecardModel parse( PMMLParser pmml ) { HashMap<String,String> attrs = pmml.attrs(); pmml.expect('>'); pmml.skipWS().expect('<').pGeneric("MiningSchema"); pmml.skipWS().expect('<').pGeneric("Output"); pmml.skipWS().expect('<'); RuleTable[] rules = pCharacteristics(pmml); pmml.skipWS().expect("</Scorecard>"); String is = attrs.get("initialScore"); double initialScore = is==null?0:PMMLParser.getNumber(is); return make(attrs.get("modelName"), initialScore, rules); } private static RuleTable[] pCharacteristics( PMMLParser pmml ) { pmml.expect("Characteristics>"); ArrayList<RuleTable> rts = new ArrayList(); while( pmml.skipWS().expect('<').peek() != '/' ) rts.add(pCharacteristic(pmml)); pmml.expect("/Characteristics>"); return rts.toArray(new RuleTable[0]); } private static RuleTable pCharacteristic( PMMLParser pmml ) { HashMap<String,String> attrs = pmml.expect("Characteristic").attrs(); pmml.expect('>'); ArrayList<Rule> rules = new ArrayList(); while( pmml.skipWS().expect('<').peek() != '/' ) rules.add(pAttribute(pmml)); pmml.expect("/Characteristic>"); String name = rules.get(0).unique_name(); DataTypes t = pmml._types.get(name); String bls = attrs.get("baselineScore"); double baseScore = bls == null?0:PMMLParser.getNumber(bls); return new RuleTable(name,t,rules.toArray(new Rule[0]),baseScore); } private static Rule pAttribute( PMMLParser pmml ) { HashMap<String,String> attrs = pmml.expect("Attribute").attrs(); pmml.expect('>').skipWS().expect('<'); Predicate pred = pmml.pPredicate(); pmml.skipWS().expect("</Attribute>"); String ps = attrs.get("partialScore"); double partialScore = ps==null?0:PMMLParser.getNumber(ps); return new Rule(partialScore,pred); } }
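The interpreter path (initialScore plus the first matching rule per feature) is the easy part to illustrate. The sketch below is not the PMML-driven code above; it substitutes simple numeric-range rules with made-up scores, purely to show how the partial scores accumulate.

import java.util.ArrayList;

// Illustrative only: interpreter-style scorecard evaluation with invented rules.
public class ScorecardSketch {
  // A rule: if lo <= value < hi, contribute partialScore.
  static class Rule {
    final double _lo, _hi, _partialScore;
    Rule(double lo, double hi, double partialScore) { _lo = lo; _hi = hi; _partialScore = partialScore; }
    boolean match(double v) { return _lo <= v && v < _hi; }
  }

  // The first matching rule in a feature's table contributes its partial score.
  static double scoreFeature(ArrayList<Rule> table, double value) {
    for (Rule r : table)
      if (r.match(value)) return r._partialScore;
    return 0;
  }

  public static void main(String[] args) {
    double initialScore = 100;
    ArrayList<Rule> ageRules = new ArrayList<Rule>();
    ageRules.add(new Rule(0, 25, -20));
    ageRules.add(new Rule(25, 60, 10));
    ageRules.add(new Rule(60, Double.POSITIVE_INFINITY, 0));
    ArrayList<Rule> incomeRules = new ArrayList<Rule>();
    incomeRules.add(new Rule(50000, Double.POSITIVE_INFINITY, 30));
    incomeRules.add(new Rule(0, 50000, -10));
    double score = initialScore
                 + scoreFeature(ageRules, 40)        // matches [25,60)      -> +10
                 + scoreFeature(incomeRules, 52000); // matches [50000,inf)  -> +30
    System.out.println(score); // 140.0
  }
}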
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/serial/AutoBufferSerializer.java
package water.serial;

import java.io.IOException;

import water.AutoBuffer;
import water.Freezable;

public abstract class AutoBufferSerializer<T extends Freezable> implements Serializer<T, AutoBuffer, AutoBuffer> {

  @Override public void save(T m, AutoBuffer output) {
    postSave(m, m.write( preSave(m,output)));
  }

  @Override public T load(T e, AutoBuffer input) {
    // Check model compatibility
    T r = e.read( preLoad(e,input));
    postLoad(r,input);
    return e;
  }

  @Override public T load(AutoBuffer input) throws IOException {
    throw new UnsupportedOperationException();
  }

  /** Hook which is called before the model is serialized. */
  protected AutoBuffer preSave (T m, AutoBuffer ab) { return ab; }
  /** Hook which is called after the model is serialized. */
  protected AutoBuffer postSave(T m, AutoBuffer ab) { return ab; }
  /** Hook which is called before the model is loaded from <code>AutoBuffer</code>. */
  protected AutoBuffer preLoad (T m, AutoBuffer ab) { return ab; }
  /** Hook which is called after the model is loaded from <code>AutoBuffer</code>. */
  protected AutoBuffer postLoad(T m, AutoBuffer ab) { return ab; }
}
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/serial/AutoBufferWithoutTypeIds.java
package water.serial; import java.nio.channels.FileChannel; import water.*; import water.util.Log; /** Simple wrapper around {@link AutoBuffer} * which uses class names instead of type ids. * * @see AutoBuffer * @see TypeMap */ class AutoBufferWithoutTypeIds extends AutoBuffer { public AutoBufferWithoutTypeIds() { super(); } public AutoBufferWithoutTypeIds(byte[] b) { super(b); } public AutoBufferWithoutTypeIds(FileChannel fc, boolean read) { super(fc,read, (byte) 0); } private static String NULL = "^"; private <T extends Freezable> T newInstance(String klazz) { try { return (T) Class.forName(klazz).newInstance(); } catch( Exception e ) { throw Log.errRTExcept(e); } } @Override public AutoBuffer put(Iced f) { return put((Freezable) f); } @Override public AutoBuffer put(Freezable f) { if( f == null ) return putStr(NULL); putStr(f.getClass().getName()); return f.write(this); } @Override public <T extends Freezable> T get(Class<T> t) { String klazz = getStr(); if (NULL.equals(klazz)) return null; return newInstance(klazz).read(this); } @Override public <T extends Iced> T get() { String klazz = getStr(); if (NULL.equals(klazz)) return null; return newInstance(klazz).read(this); } @Override public AutoBuffer putA(Iced[] fs) { return super.putA(fs); } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/serial/Model2FileBinarySerializer.java
package water.serial; import java.io.*; import java.nio.channels.FileChannel; import water.*; import water.util.Utils; /** * Model serializer targeting file based output. */ public class Model2FileBinarySerializer extends BinarySerializer<Model, File, File> { @Override public void save(Model m, File f) throws IOException { assert m!=null : "Model cannot be null!"; FileOutputStream fo = null; AutoBuffer ab = null; try { fo = new FileOutputStream(f); m.getModelSerializer().save(m, saveHeader( m, ab=ab4write(fo.getChannel()) ) ); } catch( FileNotFoundException e ) { throw new IllegalArgumentException("Cannot open given file!", e); } finally { if (ab!=null) ab.close(); Utils.close(fo); } } @Override public Model load(File f) throws IOException { FileInputStream fi = null; AutoBuffer ab = null; Model m = null; try { fi = new FileInputStream(f); m = loadHeader(ab=ab4read(fi.getChannel())); m.getModelSerializer().load(m, ab); if (m._key!=null) { DKV.put(m._key, m); } } catch( FileNotFoundException e ) { throw new IllegalArgumentException("Cannot open given file!", e); } finally { if (ab!=null) ab.close(); Utils.close(fi); } return m; } @Override public Model load(Model m, File f) throws IOException { throw new UnsupportedOperationException(); } private AutoBuffer ab4read (FileChannel fc) { return new AutoBufferWithoutTypeIds(fc, true); } private AutoBuffer ab4write (FileChannel fc) { return new AutoBufferWithoutTypeIds(fc, false); } }
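A plausible round-trip usage of this serializer, assuming an already-trained water.Model instance and a running H2O node (the load path writes the model back into the K-V store). The helper class and method below are illustrative, not part of the codebase.

import java.io.File;
import java.io.IOException;
import water.Model;
import water.serial.Model2FileBinarySerializer;

// Sketch: save a model to a local file and read it back.
class ModelFileRoundTrip {
  static Model roundTrip(Model model, File f) throws IOException {
    Model2FileBinarySerializer ser = new Model2FileBinarySerializer();
    ser.save(model, f);   // writes the header (type hash + class name) followed by the model bytes
    return ser.load(f);   // re-instantiates the model and puts it back into the K-V store
  }
}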
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/serial/Model2HDFSBinarySerializer.java
package water.serial;

import java.io.IOException;

import org.apache.hadoop.fs.*;

import water.*;

/**
 * Model serializer targeting HDFS-based output.
 */
public class Model2HDFSBinarySerializer extends BinarySerializer<Model, Path, Path> {
  private final FileSystem _hfs;
  private final boolean _force;

  public Model2HDFSBinarySerializer(FileSystem fs, boolean force) {
    _hfs = fs;
    _force = force;
  }

  @Override public void save(Model m, Path f) throws IOException {
    assert m!=null : "Model cannot be null!";
    AutoBuffer ab = null;
    // Save given model to autobuffer
    m.getModelSerializer().save(m, saveHeader( m,ab=ab4write() ) );
    // Spill it to disk
    _hfs.mkdirs(f.getParent());
    FSDataOutputStream os = _hfs.create(f, _force);
    try {
      os.write(ab.buf());
    } finally {
      os.close();
    }
  }

  @Override public Model load(Path f) throws IOException {
    FSDataInputStream is = _hfs.open(f);
    byte buf[] = MemoryManager.malloc1((int) _hfs.getContentSummary(f).getLength());
    try {
      is.readFully(buf);
    } finally {
      is.close();
    }
    AutoBuffer ab=ab4read(buf);
    Model m = loadHeader(ab);
    m.getModelSerializer().load(m, ab);
    if (m._key!=null) {
      DKV.put(m._key, m);
    }
    return m;
  }

  @Override public Model load(Model m, Path f) throws IOException {
    throw new UnsupportedOperationException();
  }

  private AutoBuffer ab4read (byte[] b) { return new AutoBufferWithoutTypeIds(b); }
  private AutoBuffer ab4write()         { return new AutoBufferWithoutTypeIds(); }
}
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/serial/Serializer.java
package water.serial;

import java.io.IOException;
import java.lang.reflect.Field;

import water.AutoBuffer;
import water.Model;

/**
 * A simple serializer interface.
 */
interface Serializer<T, O, I> {
  /**
   * Save given object into given target.
   * @param e object to serialize
   * @param output serialization destination
   * @throws IOException
   */
  public void save(T e, O output) throws IOException;

  /**
   * Load object from given source.
   * @param e object to be filled
   * @param input serialization source
   * @return the filled object
   * @throws IOException
   */
  public T load(T e, I input) throws IOException;

  public T load(I input) throws IOException;
}

abstract class BinarySerializer<T,O,I> implements Serializer<Model, O, I> {

  protected int id(T m) {
    int r = m.getClass().getCanonicalName().hashCode();
    for (Field f : m.getClass().getDeclaredFields()) r ^= f.getName().hashCode();
    return r;
  }

  protected AutoBuffer saveHeader(T m, AutoBuffer ab) {
    ab.put4(id(m));
    ab.putStr(m.getClass().getName());
    return ab;
  }

  protected T loadHeader(AutoBuffer ab) {
    int smId = ab.get4();        // type hash
    String smCN = ab.getStr();   // type name
    // Load it
    Class klazz = null;
    T m = null;
    try {
      klazz = Class.forName(smCN);
      m = (T) klazz.newInstance();
    } catch( Exception e ) {
      throw new IllegalArgumentException("Cannot instantiate the type " + smCN, e);
    }
    int amId = id(m);
    if (amId != smId)
      throw new IllegalArgumentException("Trying to load incompatible model! Actual model id = "
                                         + amId + ", stored id = " + smId + ", type=" + smCN);
    return m;
  }
}
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/ByteBufferInputStream.java
package water.util; import java.io.*; import java.nio.ByteBuffer; import java.util.List; public final class ByteBufferInputStream extends InputStream { private final List<ByteBuffer> _buffers; private int _current; public ByteBufferInputStream(List<ByteBuffer> buffers) { this._buffers = buffers; } @Override public int read() throws IOException { return buffer().get() & 0xff; } @Override public int read(byte[] b, int off, int len) throws IOException { if( len == 0 ) return 0; ByteBuffer buffer = buffer(); int remaining = buffer.remaining(); if( len > remaining ) { buffer.get(b, off, remaining); return remaining; } buffer.get(b, off, len); return len; } private ByteBuffer buffer() throws IOException { while( _current < _buffers.size() ) { ByteBuffer buffer = _buffers.get(_current); if( buffer.hasRemaining() ) return buffer; _current++; } throw new EOFException(); } }
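A small, self-contained usage example of the stream above; note that it reports end-of-data with an EOFException rather than the conventional -1 return.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import water.util.ByteBufferInputStream;

class ByteBufferInputStreamExample {
  public static void main(String[] args) throws IOException {
    ByteBuffer a = ByteBuffer.wrap(new byte[]{1, 2, 3});
    ByteBuffer b = ByteBuffer.wrap(new byte[]{4, 5});
    ByteBufferInputStream in = new ByteBufferInputStream(Arrays.asList(a, b));
    byte[] out = new byte[5];
    int off = 0;
    while (off < out.length)
      off += in.read(out, off, out.length - off); // may return fewer bytes at a buffer boundary
    System.out.println(Arrays.toString(out));     // [1, 2, 3, 4, 5]
  }
}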
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/Check.java
package water.util; import java.lang.reflect.Field; import java.lang.reflect.Modifier; import java.util.List; import java.util.Map.Entry; import java.util.regex.Matcher; import java.util.regex.Pattern; import com.google.common.collect.Lists; import dontweave.gson.*; public class Check { private static final Pattern JSON_PATTERN = Pattern.compile("[_a-z0-9]*[/_a-z]*"); private static final List<String> RESERVED_WORDS = Lists.newArrayList( // python reserved words "and", "assert", "break", "class", "continue", "def", "del", "elif", "else", "except", "exec", "finally", "for", "from", "global", "if", "import", "in", "is", "not", "or", "pass", "print", "raise", "return", "try", "while", // "lambda", - while lambda is a python reserved word, this word is also // the main term-of-the-art in GLM. People expect to see 'lambda' in // reference to GLM. // java reserved words "public", "private", "protected", "static", "true", "false", "final", "volatile", "transient", "package", "catch" ); public static boolean paramName(String s) { Matcher m = JSON_PATTERN.matcher(s); assert m.matches() : "Name " + s + " does not match convention: " + JSON_PATTERN; assert !RESERVED_WORDS.contains(s) : "Name " + s + " is a reserved word"; return true; } public static boolean staticFinalStrings(Class<?> c) { try { for( Field f : c.getFields() ) { if( !Modifier.isFinal (f.getModifiers()) ) continue; if( !Modifier.isStatic(f.getModifiers()) ) continue; if( !f.getType().equals(String.class) ) continue; Check.paramName((String) f.get(null)); } return true; } catch( Exception e ) { throw Log.errRTExcept(e); } } public static boolean jsonKeyNames(JsonArray a) { if( a == null ) return true; for(JsonElement v : a) { if( v.isJsonObject() ) { Check.jsonKeyNames(v.getAsJsonObject()); } else if( v.isJsonArray() ) { Check.jsonKeyNames(v.getAsJsonArray()); } } return true; } public static boolean jsonKeyNames(JsonObject o) { if( o == null ) return true; for(Entry<String, JsonElement> e : o.entrySet()) { Check.paramName(e.getKey()); JsonElement v = e.getValue(); if( v.isJsonObject() ) { Check.jsonKeyNames(v.getAsJsonObject()); } else if( v.isJsonArray() ) { Check.jsonKeyNames(v.getAsJsonArray()); } } return true; } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/ChunkSummary.java
package water.util; import water.H2O; import water.MRTask2; import water.PrettyPrint; import water.fvec.Chunk; import water.fvec.Vec; /** * Simple summary of how many chunks of each type are in a Frame */ public class ChunkSummary extends MRTask2<ChunkSummary> { // static list of chunks for which statistics are to be gathered final transient static String[] chunkTypes = new String[]{ "C0L", "C0D", "CBS", "C1", "C1N", "C1S", "C2", "C2S", "C4", "C4S", "C4F", "C8", "C16", "CXI", "CXD", "CX0", "C8D", //leave this as last -> no compression }; // OUTPUT private long[] chunk_counts; private long total_chunk_count; private long[] chunk_byte_sizes; private long total_chunk_byte_size; private long[] byte_size_per_node; //averaged over all chunks private float byte_size_per_node_mean; private float byte_size_per_node_min; private float byte_size_per_node_max; private float byte_size_per_node_stddev; @Override public void map(Chunk[] cs) { chunk_counts = new long[chunkTypes.length]; chunk_byte_sizes = new long[chunkTypes.length]; byte_size_per_node = new long[H2O.CLOUD.size()]; for (Chunk c : cs) { boolean found = false; for (int j = 0; j < chunkTypes.length; ++j) { if (c.getClass().getSimpleName().equals(chunkTypes[j] + "Chunk")) { found = true; chunk_counts[j]++; chunk_byte_sizes[j] += c.byteSize(); byte_size_per_node[H2O.SELF.index()] += c.byteSize(); } } if (!found) { throw H2O.unimpl(); } } } @Override public void reduce(ChunkSummary mrt) { if (mrt.chunk_counts == chunk_counts) return; for (int j = 0; j < chunkTypes.length; ++j) { chunk_counts[j] += mrt.chunk_counts[j]; chunk_byte_sizes[j] += mrt.chunk_byte_sizes[j]; } for (int i = 0; i<H2O.CLOUD.size(); ++i) { byte_size_per_node[i] += mrt.byte_size_per_node[i]; } } @Override protected void postGlobal() { //special case for 0 rows (no map was ever called) if (chunk_counts == null || chunk_byte_sizes == null || byte_size_per_node == null) return; // compute counts and sizes total_chunk_byte_size = 0; total_chunk_count = 0; for (int j = 0; j < chunkTypes.length; ++j) { total_chunk_byte_size += chunk_byte_sizes[j]; total_chunk_count += chunk_counts[j]; } long check = 0; for (Vec v : _fr.vecs()) check += v.nChunks(); assert(total_chunk_count == check); assert(total_chunk_byte_size == _fr.byteSize()); // compute min, max, mean byte_size_per_node_min = Float.MAX_VALUE; byte_size_per_node_max = Float.MIN_VALUE; byte_size_per_node_mean = 0; for (long aByte_size_per_node : byte_size_per_node) { byte_size_per_node_min = Math.min(aByte_size_per_node, byte_size_per_node_min); byte_size_per_node_max = Math.max(aByte_size_per_node, byte_size_per_node_max); byte_size_per_node_mean += aByte_size_per_node; } byte_size_per_node_mean /= byte_size_per_node.length; // compute standard deviation (doesn't have to be single pass...) byte_size_per_node_stddev = 0; for (long aByte_size_per_node : byte_size_per_node) { byte_size_per_node_stddev += Math.pow(aByte_size_per_node - byte_size_per_node_mean, 2); } byte_size_per_node_stddev /= byte_size_per_node.length; byte_size_per_node_stddev = (float)Math.sqrt(byte_size_per_node_stddev); } String display(long val) { return String.format("%10s", val == 0 ? " 0 B" : PrettyPrint.bytes(val)); } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("Internal FluidVec compression/distribution summary:\n"); sb.append("Chunk type count fraction size rel. 
size\n"); for (int j = 0; j < chunkTypes.length; ++j) { if (chunk_counts != null && chunk_counts[j] > 0) sb.append(String.format("%8s %10d %10.3f %% %10s %10.3f %%\n", chunkTypes[j], chunk_counts[j], (float) chunk_counts[j] / total_chunk_count * 100., display(chunk_byte_sizes[j]), (float) chunk_byte_sizes[j] / total_chunk_byte_size * 100.)); } // if more than 50% is double data, inform the user to consider compressing to single precision if (chunk_byte_sizes != null && (float)chunk_byte_sizes[chunk_byte_sizes.length-1] / total_chunk_byte_size > 0.5 && !H2O.SINGLE_PRECISION) { sb.append("** Warning: Significant amount of double precision data (C8DChunk),\n" + " consider launching with -single_precision to reduce memory consumption **\n"); } // if standard deviation is more than 20% of mean, then show detailed per-node distribution if (byte_size_per_node != null && byte_size_per_node_stddev > 0.2 * byte_size_per_node_mean) { sb.append("** Note: Dataset is not well distributed, consider rebalancing **\n"); for (int i = 0; i < byte_size_per_node.length; ++i) { sb.append(" size on node " + i + " : " + display(byte_size_per_node[i]) + "\n"); } } // display chunk distribution if (byte_size_per_node != null && byte_size_per_node.length > 1) { sb.append(" mean size per node : " + display((long) byte_size_per_node_mean) + "\n"); sb.append(" min size per node : " + display((long) byte_size_per_node_min) + "\n"); sb.append(" max size per node : " + display((long) byte_size_per_node_max) + "\n"); sb.append("stddev of node size : " + display((long) byte_size_per_node_stddev) + "\n"); } sb.append(" Total memory usage : " + display(total_chunk_byte_size) + "\n"); return sb.toString(); } }
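The per-node statistics in postGlobal reduce to a single mean/min/max/population-standard-deviation pass over per-node byte counts. The standalone sketch below repeats that arithmetic on made-up sizes, including the 20%-of-mean skew check used in toString.

// Illustrative arithmetic only: mean, min, max and (population) standard
// deviation over per-node byte sizes, as computed in postGlobal above.
public class NodeSizeStats {
  public static void main(String[] args) {
    long[] bytesPerNode = {800000000L, 750000000L, 1200000000L}; // made-up sizes
    double min = Double.MAX_VALUE, max = -Double.MAX_VALUE, mean = 0;
    for (long b : bytesPerNode) {
      min = Math.min(b, min);
      max = Math.max(b, max);
      mean += b;
    }
    mean /= bytesPerNode.length;
    double var = 0;
    for (long b : bytesPerNode) var += Math.pow(b - mean, 2);
    double stddev = Math.sqrt(var / bytesPerNode.length);
    // The summary flags a skewed distribution when stddev > 20% of the mean.
    System.out.printf("mean=%.0f stddev=%.0f skewed=%b%n", mean, stddev, stddev > 0.2 * mean);
  }
}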
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/Counter.java
package water.util;

import water.api.Constants;
import dontweave.gson.JsonObject;

public class Counter {
  // Start _max at -MAX_VALUE (not MIN_VALUE, which is the smallest positive
  // double) so that all-negative inputs still produce a correct maximum.
  double _min = Double.MAX_VALUE, _max = -Double.MAX_VALUE;
  int _count;
  double _total;

  public void add(double what) {
    _total += what;
    _min = Math.min(what, _min);
    _max = Math.max(what, _max);
    ++_count;
  }

  public double mean() {
    return _total / _count;
  }

  public JsonObject toJson() {
    JsonObject json = new JsonObject();
    json.addProperty(Constants.MIN, _min);
    json.addProperty(Constants.MEAN, mean());
    json.addProperty(Constants.MAX, _max);
    return json;
  }

  @Override public String toString() {
    return _count==0 ? " / / " : String.format("%4.1f / %4.1f / %4.1f", _min, mean(), _max);
  }
}
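A short usage example; the values are arbitrary.

import water.util.Counter;

class CounterExample {
  public static void main(String[] args) {
    Counter c = new Counter();
    for (double v : new double[]{3.0, 7.0, 5.0}) c.add(v);
    System.out.println(c);        // " 3.0 /  5.0 /  7.0"  (min / mean / max)
    System.out.println(c.mean()); // 5.0
  }
}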
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/CrossValUtils.java
package water.util; import hex.NFoldFrameExtractor; import water.*; import water.fvec.Frame; import water.fvec.Vec; public class CrossValUtils { /** * Cross-Validate a ValidatedJob * @param job (must contain valid entries for n_folds, validation, destination_key, source, response) */ public static void crossValidate(Job.ValidatedJob job) { if (job.state != Job.JobState.RUNNING) return; //don't do cross-validation if the full model builder failed if (job.validation != null) throw new IllegalArgumentException("Cannot provide validation dataset and n_folds > 0 at the same time."); if (job.n_folds <= 1) throw new IllegalArgumentException("n_folds must be >= 2 for cross-validation."); final String basename = job.destination_key.toString(); long[] offsets = new long[job.n_folds +1]; Frame[] cv_preds = new Frame[job.n_folds]; try { for (int i = 0; i < job.n_folds; ++i) { if (job.state != Job.JobState.RUNNING) break; Key[] destkeys = new Key[]{Key.make(basename + "_xval" + i + "_train"), Key.make(basename + "_xval" + i + "_holdout")}; NFoldFrameExtractor nffe = new NFoldFrameExtractor(job.source, job.n_folds, i, destkeys, Key.make() /*key used for locking only*/); H2O.submitTask(nffe); Frame[] splits = nffe.getResult(); // Cross-validate individual splits try { job.crossValidate(splits, cv_preds, offsets, i); //this removes the enum-ified response! job._cv_count++; } finally { // clean-up the results if (!job.keep_cross_validation_splits) for(Frame f : splits) f.delete(); } } if (job.state != Job.JobState.RUNNING) return; final int resp_idx = job.source.find(job._responseName); Vec response = job.source.vecs()[resp_idx]; boolean put_back = UKV.get(job.response._key) == null; // In the case of rebalance, rebalance response will be deleted if (put_back) { job.response = response; if (job.classification) job.response = job.response.toEnum(); DKV.put(job.response._key, job.response); //put enum-ified response back to K-V store } ((Model)UKV.get(job.destination_key)).scoreCrossValidation(job, job.source, response, cv_preds, offsets); if (put_back) UKV.remove(job.response._key); } finally { // clean-up prediction frames for splits for(Frame f: cv_preds) if (f!=null) f.delete(); } } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/D3Plot.java
package water.util; import water.api.RequestBuilders; /** * Helper class to plot simple 2D scatter plots. * Input: x and y are two equal-sized float arrays with X and Y coordinates to be plotted. */ public class D3Plot { private float[] x; private float[] y; private String xaxislabel = "x axis"; private String yaxislabel = "y axis"; private String title = "Missing Title"; private boolean ordinal_interpolation = false; private boolean hide_toggle = true; // default values are usually fine - might want to add a formatting method later private String link = "Toggle view of plot"; private int width = 1000; private int height = 400; private int padding = 40; private int font_size = 11; public D3Plot(float[] x, float[] y, String xaxislabel, String yaxislabel, String title, boolean ordinal_interpolation, boolean hide_toggle) { this.yaxislabel = yaxislabel; this.xaxislabel = xaxislabel; this.title = title; this.x = x; this.y = y; this.link = "Toggle view of plot of " + title; assert(x.length == y.length); this.ordinal_interpolation = ordinal_interpolation; this.hide_toggle = hide_toggle; } public D3Plot(float[] x, float[] y, String xaxislabel, String yaxislabel, String title) { this.yaxislabel = yaxislabel; this.xaxislabel = xaxislabel; this.title = title; this.x = x; this.y = y; this.link = "Toggle view of plot of " + title; assert(x.length == y.length); } // populate the StringBuilder object with the Javascript code to display a 2D plot in a HTML page public void generate(StringBuilder sb) { final String plot = title.replaceAll(" ", ""); sb.append("<script type=\"text/javascript\" src='/h2o/js/d3.v3.min.js'></script>"); sb.append("<div>"); sb.append("<script>\n"); sb.append("$(document).on(\"click\", \"#pl" + plot + "\", function() { $(\"#plot" + plot + "\").toggleClass(\"hide\");});\n"); sb.append("</script>\n"); if (hide_toggle) { sb.append("<button class = 'btn btn-inverse btn-mini' id = \"pl" + plot +"\">" + link + "</button>\n"); sb.append("<div class=\"hide\" id=\"" + "plot" + plot + "\">"); } else { sb.append("<div id=\"" + "plot" + plot + "\">"); } sb.append("<style type=\"text/css\">"); sb.append(".axis path," + ".axis line {\n" + "fill: none;\n" + "stroke: black;\n" + "shape-rendering: crispEdges;\n" + "}\n" + ".axis text {\n" + "font-family: sans-serif;\n" + "font-size: " + font_size + "px;\n" + "}\n"); sb.append("</style>"); sb.append("<div id=\"" + "plot" + plot + "\" style=\"display:inline;\">"); sb.append("<script type=\"text/javascript\">"); sb.append("//Width and height\n"); sb.append("var w = " + width + ";\n"+ "var h = " + height + ";\n"+ "var padding = " + padding + ";\n" ); sb.append("var dataset = ["); for(int c = 0; c < x.length; c++) { if (c == 0) { sb.append("["+String.valueOf(x[c])+",").append(RequestBuilders.ElementBuilder.format(y[c])).append("]"); } sb.append(", ["+String.valueOf(x[c])+",").append(RequestBuilders.ElementBuilder.format(y[c])).append("]"); } sb.append("];"); sb.append( "//Create scale functions\n"+ "var xScale = d3.scale.linear()\n"+ ".domain([0, d3.max(dataset, function(d) { return d[0]; })])\n"+ ".range([padding, w - padding * 2]);\n"+ "var yScale = d3.scale.linear()"+ ".domain([0, d3.max(dataset, function(d) { return d[1]; })])\n"+ ".range([h - padding, padding]);\n"+ "var rScale = d3.scale.linear()"+ ".domain([0, d3.max(dataset, function(d) { return d[1]; })])\n"+ ".range([2, 5]);\n"+ "var lineFunction = d3.svg.line().interpolate(\"ordinal\")\n"+ ".x(function(d) {return xScale(d[0]); })\n"+ ".y(function(d) { return yScale(d[1]); });\n"+ "//Define X 
axis\n"+ "var xAxis = d3.svg.axis()\n"+ ".scale(xScale)\n"+ ".orient(\"bottom\")\n"+ ".ticks(5);\n"+ "//Define Y axis\n"+ "var yAxis = d3.svg.axis()\n"+ ".scale(yScale)\n"+ ".orient(\"left\")\n"+ ".ticks(5);\n"+ "//Create SVG element\n"+ "var svg = d3.select(\"#" + "plot" + plot + "\")\n"+ ".append(\"svg\")\n"+ ".attr(\"width\", w)\n"+ ".attr(\"height\", h);\n"+ "//Create circles\n"+ "svg.selectAll(\"circle\")\n"+ ".data(dataset)\n"+ ".enter()\n"+ ".append(\"circle\")\n"+ ".attr(\"cx\", function(d) {\n"+ "return xScale(d[0]);\n"+ "})\n"+ ".attr(\"cy\", function(d) {\n"+ "return yScale(d[1]);\n"+ "})\n"+ ".attr(\"r\", function(d) {\n"+ "return 2;\n"+//rScale(d[1]);\n"+ "});\n"+ "/*"+ "//Create labels\n"+ "svg.selectAll(\"text\")"+ ".data(dataset)"+ ".enter()"+ ".append(\"text\")"+ ".text(function(d) {"+ "return d[0] + \",\" + d[1];"+ "})"+ ".attr(\"x\", function(d) {"+ "return xScale(d[0]);"+ "})"+ ".attr(\"y\", function(d) {"+ "return yScale(d[1]);"+ "})"+ ".attr(\"font-family\", \"sans-serif\")"+ ".attr(\"font-size\", \"11px\")"+ ".attr(\"fill\", \"red\");"+ "*/\n"+ "//Create X axis\n"+ "svg.append(\"g\")"+ ".attr(\"class\", \"axis\")"+ ".attr(\"transform\", \"translate(0,\" + (h - padding) + \")\")"+ ".call(xAxis);\n"+ "//X axis label\n"+ "d3.select('#" + "plot" + plot + " svg')"+ ".append(\"text\")"+ ".attr(\"x\",w/2)"+ ".attr(\"y\",h - 5)"+ ".attr(\"text-anchor\", \"middle\")"+ ".text(\"" + xaxislabel + "\");\n"+ "//Create Y axis\n"+ "svg.append(\"g\")"+ ".attr(\"class\", \"axis\")"+ ".attr(\"transform\", \"translate(\" + padding + \",0)\")"+ ".call(yAxis);\n"+ "//Y axis label\n"+ "d3.select('#" + "plot" + plot + " svg')"+ ".append(\"text\")"+ ".attr(\"x\",150)"+ ".attr(\"y\",-2)"+ ".attr(\"transform\", \"rotate(90)\")"+ //".attr(\"transform\", \"translate(0,\" + (h - padding) + \")\")"+ ".attr(\"text-anchor\", \"middle\")"+ ".text(\"" + yaxislabel + "\");\n"+ "//Title\n"+ "d3.select('#" + "plot" + plot + " svg')"+ ".append(\"text\")"+ ".attr(\"x\",w/2)"+ ".attr(\"y\",padding - 20)"+ ".attr(\"text-anchor\", \"middle\")"+ ".text(\"" + title + "\");\n"); if (ordinal_interpolation) { sb.append("var linesGroup = svg.append(\"g\").attr(\"class\", \"line\");\n"+ "linesGroup.append(\"path\")\n"+ ".attr(\"d\", lineFunction(dataset))\n"+ ".attr(\"class\", \"lines\")\n"+ ".attr(\"fill\", \"none\")\n"+ ".attr(\"stroke\", function(d, i) {\n"+ "return linedata.color;\n"+ "});\n"); } sb.append("</script>"); sb.append("</div>"); sb.append("</script>"); sb.append("</div>"); sb.append("<style>"); sb.append(".line {\n" + " fill: none;\n" + " stroke: steelblue;\n" + " stroke-width: 1.5px;\n" + " }"); sb.append("</style>"); sb.append("</div>"); } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/FSUtils.java
package water.util;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class FSUtils {
  public static boolean isHdfs (String path) { return path.startsWith("hdfs://"); }
  public static boolean isS3N  (String path) { return path.startsWith("s3n://"); }
  public static boolean isS3   (String path) { return path.startsWith("s3://"); }
  public static boolean isHTTP (String path) { return path.startsWith("http://"); }
  public static boolean isHTTPS(String path) { return path.startsWith("https://"); }
  public static boolean isH2O  (String path) { return path.startsWith("h2o://"); }

  public static boolean isBareS3NBucketWithoutTrailingSlash(String s) {
    Pattern p = Pattern.compile("s3n://[^/]*");
    Matcher m = p.matcher(s);
    boolean b = m.matches();
    return b;
  }
}
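A quick illustration of the scheme checks above; the paths are made up.

import water.util.FSUtils;

class FSUtilsExample {
  public static void main(String[] args) {
    System.out.println(FSUtils.isHdfs("hdfs://namenode/data/train.csv"));                 // true
    System.out.println(FSUtils.isS3N("s3n://my-bucket/train.csv"));                       // true
    System.out.println(FSUtils.isBareS3NBucketWithoutTrailingSlash("s3n://my-bucket"));   // true
    System.out.println(FSUtils.isBareS3NBucketWithoutTrailingSlash("s3n://my-bucket/"));  // false
  }
}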
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/FileIntegrityChecker.java
package water.util; import java.io.File; import java.util.ArrayList; import water.*; import water.fvec.*; import water.persist.PersistNFS; public class FileIntegrityChecker extends DRemoteTask<FileIntegrityChecker> { final String _root; // Root of directory final String[] _files; // File names found locally final long [] _sizes; // File sizes found locally int[][] _ok; // OUTPUT: files which are globally compatible @Override public void lcompute() { _ok = new int[_files.length][H2O.CLOUD.size()]; for (int i = 0; i < _files.length; ++i) { File f = new File(_files[i]); if (f.exists() && (f.length()==_sizes[i])) _ok[i][H2O.SELF.index()] = 1; } tryComplete(); } @Override public void reduce(FileIntegrityChecker o) { if( _ok == null ) _ok = o._ok; else Utils.add(_ok,o._ok); } @Override public byte priority() { return H2O.GUI_PRIORITY; } private void addFolder(File folder, ArrayList<File> filesInProgress ) { if( !folder.canRead() ) return; if (folder.isDirectory()) { for (File f: folder.listFiles()) { if( !f.canRead() ) continue; // Ignore unreadable files if( f.isHidden() && !folder.isHidden() ) continue; // Do not dive into hidden dirs unless asked if (f.isDirectory()) addFolder(f,filesInProgress); else filesInProgress.add(f); } } else { filesInProgress.add(folder); } } public static FileIntegrityChecker check(File r) { return new FileIntegrityChecker(r).invokeOnAllNodes(); } public FileIntegrityChecker(File root) { _root = PersistNFS.decodeFile(new File(root.getAbsolutePath())).toString(); ArrayList<File> filesInProgress = new ArrayList(); addFolder(root,filesInProgress); _files = new String[filesInProgress.size()]; _sizes = new long[filesInProgress.size()]; for (int i = 0; i < _files.length; ++i) { File f = filesInProgress.get(i); _files[i] = f.getAbsolutePath(); _sizes[i] = f.length(); } } public int size() { return _files.length; } public String getFileName(int i) { return _files[i]; } // Sync this directory with H2O. Record all files that appear to be visible // to the entire cloud, and give their Keys. List also all files which appear // on this H2O instance but are not consistent around the cluster, and Keys // which match the directory name but are not on disk. public Key syncDirectory(ArrayList<String> files, ArrayList<String> keys, ArrayList<String> fails, ArrayList<String> dels) { Futures fs = new Futures(); Key k = null; // Find all Keys which match ... for( int i = 0; i < _files.length; ++i ) { boolean failed = false; for (int j = 0; j < H2O.CLOUD.size(); ++j) { if (_ok[i][j] == 0) { failed = true; fails.add("missing file " + _files[i] + " at node " + H2O.CLOUD._memary[j]); } } if(!failed){ File f = new File(_files[i]); k = PersistNFS.decodeFile(f); if( files != null ) files.add(_files[i]); if( keys != null ) keys .add(k.toString()); if(DKV.get(k) != null)dels.add(k.toString()); new Frame(k).delete_and_lock(null); NFSFileVec nfs = DKV.get(NFSFileVec.make(f, fs)).get(); Frame fr = new Frame(k,new String[] { "0" }, new Vec[] { nfs }); fr.update(null); fr.unlock(null); } } fs.blockForPending(); return k; } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/FrameUtils.java
package water.util; import java.io.File; import water.Futures; import water.Key; import water.fvec.*; public class FrameUtils { /** Create a frame with single column represented by given vector. * * @param name name of the column * @param vec column data * @return a new frame */ public static Frame frame(String name, Vec vec) { return new Frame().add(name, vec); } /** * Create a new frame based on given column data. * @param names name of frame columns * @param vecs columns data represented by individual data * @return a new frame composed of given vectors. */ public static Frame frame(String[] names, Vec[] vecs) { return new Frame(names, vecs); } /** * Create a new frame based on given row data. * @param names names of frame columns * @param rows data given in the form of rows * @return new frame which contains columns named according given names and including given data */ public static Frame frame(String[] names, double[]... rows) { assert names == null || names.length == rows[0].length; Futures fs = new Futures(); Vec[] vecs = new Vec[rows[0].length]; Key keys[] = Vec.VectorGroup.VG_LEN1.addVecs(vecs.length); for( int c = 0; c < vecs.length; c++ ) { AppendableVec vec = new AppendableVec(keys[c]); NewChunk chunk = new NewChunk(vec, 0); for( int r = 0; r < rows.length; r++ ) chunk.addNum(rows[r][c]); chunk.close(0, fs); vecs[c] = vec.close(fs); } fs.blockForPending(); return new Frame(names, vecs); } /** Parse given file into the form of frame represented by the given key. * * @param okey destination key for parsed frame * @param files files to parse * @return a new frame */ public static Frame parseFrame(Key okey, File ...files) { assert files.length > 0 : "Ups. No files to parse!"; for (File f : files) if (!f.exists()) throw new RuntimeException("File not found " + f); // Create output key if not specified if(okey == null) okey = Key.make(files[0].getName()); Key[] fkeys = new Key[files.length]; int cnt = 0; for (File f : files) fkeys[cnt++] = NFSFileVec.make(f); return parseFrame(okey, fkeys); } public static Frame parseFrame(Key okey, Key ...ikeys) { assert okey != null; return ParseDataset2.parse(okey, ikeys); } /** * Compute a chunk summary (how many chunks of each type, relative size, total size) * @param fr * @return chunk summary */ public static ChunkSummary chunkSummary(Frame fr) { return new ChunkSummary().doAll(fr); } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/IndentingAppender.java
package water.util; import java.io.*; public class IndentingAppender implements Appendable, Flushable, Closeable { private final String _indent; private final Appendable _a; private boolean _pendingNewline = false; private int _l = 0; public IndentingAppender(Appendable base) { _a = base; _indent = " "; } public IndentingAppender incrementIndent() { ++_l; return this; } public IndentingAppender decrementIndent() { --_l; return this; } public IndentingAppender appendln(CharSequence csq) throws IOException { return append(csq, 0, csq.length()).append('\n'); } @Override public IndentingAppender append(CharSequence csq) throws IOException { return append(csq, 0, csq.length()); } @Override public IndentingAppender append(CharSequence csq, int start, int end) throws IOException { for( int i = start; i < end; ++i ) append(csq.charAt(i)); return this; } @Override public IndentingAppender append(char c) throws IOException { handlePending(); if( c == '\n' ) { _pendingNewline = true; } else { _a.append(c); } return this; } @Override public void flush() throws IOException { handlePending(); if( _a instanceof Flushable ) ((Flushable) _a).flush(); } @Override public void close() throws IOException { flush(); if( _a instanceof Closeable ) ((Closeable) _a).close(); } private void handlePending() throws IOException { if( _pendingNewline ) { _a.append('\n'); for( int i = 0; i < _l; ++i ) _a.append(_indent); } _pendingNewline = false; } }
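A small usage example that writes nested, indented output into a StringBuilder; the final flush emits the pending newline.

import java.io.IOException;
import water.util.IndentingAppender;

class IndentingAppenderExample {
  public static void main(String[] args) throws IOException {
    StringBuilder out = new StringBuilder();
    IndentingAppender ia = new IndentingAppender(out);
    ia.appendln("class Foo {").incrementIndent();
    ia.appendln("int x;");
    ia.decrementIndent().appendln("}");
    ia.flush();            // emit any pending newline
    System.out.print(out);
    // class Foo {
    //   int x;
    // }
  }
}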
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/JCodeGen.java
package water.util; import water.fvec.Frame; import water.fvec.Vec; public class JCodeGen { /** Generates data sample as a dedicated class with static <code>double[][]</code> member. */ public static SB toClass(SB sb, String classSig, String varname, Frame f, int nrows, String comment) { sb.p(classSig).p(" {").nl().ii(1); toStaticVar(sb, varname, f, nrows, comment).di(1); return sb.p("}").nl(); } /** * Outputs given frame as static variable with given name. */ public static SB toStaticVar(SB sb, String varname, Frame f, int nrows, String comment) { if (comment!=null) sb.i(1).p("// ").p(comment).nl(); sb.i(1).p("public static final double[][] ").p(varname).p(" = new double[][] {").nl(); if (f!=null) { Vec[] vecs = f.vecs(); for( int row = 0; row < Math.min(nrows,f.numRows()); row++ ) { sb.i(2).p(row > 0 ? "," : "").p("new double[] {"); for( int v = 0; v < vecs.length; v++ ) sb.p(v > 0 ? "," : "").p(vecs[v].at(row)); sb.p("}").nl(); } } sb.i(1).p("};").nl(); return sb; } public static SB toStaticVar(SB sb, String varname, int value) { return toStaticVar(sb, varname, value, null); } public static SB toStaticVar(SB sb, String varname, int value, String comment) { if (comment!=null) sb.i(1).p("// ").p(comment).nl(); return sb.i(1).p("public static final int ").p(varname).p(" = ").p(value).p(';').nl(); } public static SB toStaticVar(SB sb, String varname, String[] values) { return toStaticVar(sb, varname, values, null); } public static SB toStaticVar(SB sb, String varname, String[] values, String comment) { if (comment!=null) sb.i(1).p("// ").p(comment).nl(); sb.i(1).p("public static final String[] ").p(varname).p(" = "); if (values == null) return sb.p("null;").nl(); sb.p("{").p("\""+values[0]+"\""); for (int i = 1; i < values.length; ++i) sb.p(",").p("\""+values[i]+"\""); return sb.p("};").nl(); } public static SB toStaticVar(SB sb, String varname, int[] values) { return toStaticVar(sb, varname, values, null); } public static SB toStaticVar(SB sb, String varname, int[] values, String comment) { if (comment!=null) sb.i(1).p("// ").p(comment).nl(); sb.i(1).p("public static final int[] ").p(varname).p(" = "); if (values == null || values.length == 0) return sb.p("null;").nl(); sb.p("{").p(values[0]); for (int i = 1; i < values.length; ++i) sb.p(",").p(values[i]); return sb.p("};").nl(); } public static SB toStaticVar(SB sb, String varname, float[] values) { return toStaticVar(sb, varname, values, null); } public static SB toStaticVar(SB sb, String varname, float[] values, String comment) { if (comment!=null) sb.i(1).p("// ").p(comment).nl(); sb.i(1).p("public static final float[] ").p(varname).p(" = "); if (values == null) return sb.p("null;").nl(); sb.p("{").pj(values[0]); for (int i = 1; i < values.length; ++i) sb.p(",").pj(values[i]); return sb.p("};").nl(); } public static SB toStaticVar(SB sb, String varname, double[] values) { return toStaticVar(sb, varname, values, null); } public static SB toStaticVar(SB sb, String varname, double[] values, String comment) { if (comment!=null) sb.i(1).p("// ").p(comment).nl(); sb.i(1).p("public static final double[] ").p(varname).p(" = "); if (values == null) return sb.p("null;").nl(); sb.p("{").pj(values[0]); for (int i = 1; i < values.length; ++i) sb.p(",").pj(values[i]); return sb.p("};").nl(); } /** * Generates a new class with one static member called <em>VALUES</em> which * is filled by values of given array. * <p>The generator can generate more classes to avoid limit of class constant * pool holding all generated literals</p>. 
* * @param sb output * @param className name of generated class * @param values array holding values which should be hold in generated field VALUES. * @return output buffer */ public static SB toClassWithArray(SB sb, String modifiers, String className, String[] values) { sb.i().p(modifiers!=null ? modifiers+" ": "").p("class ").p(className).p(" {").nl().ii(1); sb.i().p("public static final String[] VALUES = "); if (values==null) sb.p("null;").nl(); else { sb.p("new String[").p(values.length).p("];").nl(); // Static part int s = 0; int remain = values.length; int its = 0; SB sb4fillers = new SB().ci(sb); sb.i().p("static {").ii(1).nl(); while (remain>0) { String subClzName = className + "_" + its++; int len = Math.min(MAX_STRINGS_IN_CONST_POOL, remain); toClassWithArrayFill(sb4fillers, subClzName, values, s, len); sb.i().p(subClzName).p(".fill(VALUES);").nl(); s += len; remain -= len; } sb.di(1).i().p("}").nl(); sb.p(sb4fillers); } return sb.di(1).p("}").nl(); } /** * * @param sb * @param className * @param values * @return */ public static SB toClassWithArray(SB sb, String modifiers, String className, float[] values) { sb.i().p(modifiers != null ? modifiers + " " : "").p("class ").p(className).p(" {").nl().ii(1); sb.i().p("public static final float[] VALUES = "); if (values == null) { sb.p("null;").nl(); } else { sb.p("new float[").p(values.length).p("];").nl(); // Static part int s = 0; int remain = values.length; int its = 0; SB sb4fillers = new SB().ci(sb); sb.i().p("static {").ii(1).nl(); while (remain>0) { String subClzName = className + "_" + its++; int len = Math.min(MAX_STRINGS_IN_CONST_POOL, remain); toClassWithArrayFill(sb4fillers, subClzName, values, s, len); sb.i().p(subClzName).p(".fill(VALUES);").nl(); s += len; remain -= len; } sb.di(1).i().p("}").nl(); sb.p(sb4fillers); } return sb.di(1).p("}").nl(); } /** Maximum number of string generated per class (static initializer) */ public static int MAX_STRINGS_IN_CONST_POOL = 3000; public static SB toClassWithArrayFill(SB sb, String clzName, String[] values, int start, int len) { sb.i().p("static final class ").p(clzName).p(" {").ii(1).nl(); sb.i().p("static final void fill(String[] sa) {").ii(1).nl(); for (int i=0; i<len; i++) { sb.i().p("sa[").p(start+i).p("] = ").ps(values[start+i]).p(";").nl(); } sb.di(1).i().p("}").nl(); sb.di(1).i().p("}").nl(); return sb; } public static SB toClassWithArrayFill(SB sb, String clzName, float[] values, int start, int len) { sb.i().p("static final class ").p(clzName).p(" {").ii(1).nl(); sb.i().p("static final void fill(float[] fa) {").ii(1).nl(); for (int i=0; i<len; i++) { sb.i().p("fa[").p(start+i).p("] = ").pj(values[start + i]).p(";").nl(); } sb.di(1).i().p("}").nl(); sb.di(1).i().p("}").nl(); return sb; } public static SB toField(SB sb, String modifiers, String type, String fname, String finit) { sb.i().p(modifiers).s().p(type).s().p(fname); if (finit!=null) sb.p(" = ").p(finit); sb.p(";").nl(); return sb; } /** * Transform given string to legal java Identifier (see Java grammar http://docs.oracle.com/javase/specs/jls/se7/html/jls-3.html#jls-3.8) * */ public static String toJavaId(String s) { StringBuilder sb = new StringBuilder(s); return Utils.replace(sb, "+-*/ !@#$%^&()={}[]|\\;:'\"<>,.?/", "_______________________________").toString(); } }
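toJavaId is the simplest entry point to try in isolation. The expected output below assumes Utils.replace maps each character of the first string to the same-position character of the second (an underscore), which the equal-length argument strings suggest but which is not shown in this file.

import water.util.JCodeGen;

class JCodeGenExample {
  public static void main(String[] args) {
    // Sanitize an arbitrary column name into a legal Java identifier.
    System.out.println(JCodeGen.toJavaId("sepal length (cm)")); // e.g. sepal_length__cm_
  }
}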
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/JStackCollectorTask.java
package water.util; import java.util.Map; import java.util.Map.Entry; import water.DRemoteTask; import water.H2O; public class JStackCollectorTask extends DRemoteTask<JStackCollectorTask> { public String[] _result; // for each node in the cloud it contains all threads stack traces @Override public void reduce(JStackCollectorTask that) { if( _result == null ) _result = that._result; else for (int i=0; i<_result.length; ++i) if (_result[i] == null) _result[i] = that._result[i]; } @Override public void lcompute() { _result = new String[H2O.CLOUD.size()]; Map<Thread, StackTraceElement[]> allStackTraces = Thread.getAllStackTraces(); StringBuilder sb = new StringBuilder(); for (Entry<Thread,StackTraceElement[]> el : allStackTraces.entrySet()) { append(sb, el.getKey()); append(sb, el.getValue()); sb.append('\n'); } _result[H2O.SELF.index()] = sb.toString(); tryComplete(); } @Override public byte priority() { return H2O.GUI_PRIORITY; } private void append(final StringBuilder sb, final Thread t) { sb.append('"').append(t.getName()).append('"'); if (t.isDaemon()) sb.append(" daemon"); sb.append(" prio=").append(t.getPriority()); sb.append(" tid=").append(t.getId()); sb.append(" java.lang.Thread.State: ").append(t.getState()); sb.append('\n'); } private void append(final StringBuilder sb, final StackTraceElement[] trace) { for (int i=0; i < trace.length; i++) sb.append("\tat ").append(trace[i]).append('\n'); } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/JsonUtil.java
package water.util; import java.util.*; import java.util.Map.Entry; import com.google.common.base.Objects; import com.google.common.collect.Maps; import dontweave.gson.*; public class JsonUtil { private static final Map<JsonPrimitive, JsonPrimitive> SPECIAL = Maps.newHashMap(); static { SPECIAL.put(new JsonPrimitive(Double.NaN), new JsonPrimitive("NaN")); SPECIAL.put(new JsonPrimitive(Double.POSITIVE_INFINITY), new JsonPrimitive("Infinity")); SPECIAL.put(new JsonPrimitive(Double.NEGATIVE_INFINITY), new JsonPrimitive("-Infinity")); } public static JsonObject escape(JsonObject json) { JsonObject res = new JsonObject(); for( Entry<String, JsonElement> e : json.entrySet() ) res.add(e.getKey(), escape(e.getValue())); return res; } public static JsonArray escape(JsonArray json) { JsonArray res = new JsonArray(); for( JsonElement v : json ) res.add(escape(v)); return res; } public static JsonElement escape(JsonElement v) { if( v.isJsonObject() ) return escape(v.getAsJsonObject()); if( v.isJsonArray() ) return escape(v.getAsJsonArray()); return Objects.firstNonNull(SPECIAL.get(v), v); } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/LinuxProcFileReader.java
package water.util; import java.io.*; import java.util.ArrayList; import java.util.BitSet; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.commons.lang.SystemUtils; /** * Linux /proc file reader. * * Read tick information for the system and the current process in order to provide * stats on the cloud page about CPU utilization. * * Tick counts are monotonically increasing since boot. * * Find definitions of /proc file info here. * http://man7.org/linux/man-pages/man5/proc.5.html */ public class LinuxProcFileReader { private String _systemData; private String _processData; private String _processStatus; private String _pid; private long _systemIdleTicks = -1; private long _systemTotalTicks = -1; private long _processTotalTicks = -1; private long _processRss = -1; private int _processCpusAllowed = -1; private int _processNumOpenFds = -1; private ArrayList<long[]> _cpuTicks = null; /** * Constructor. */ public LinuxProcFileReader() { } /** * @return ticks the system was idle. in general: idle + busy == 100% */ public long getSystemIdleTicks() { assert _systemIdleTicks > 0; return _systemIdleTicks; } /** * @return ticks the system was up. */ public long getSystemTotalTicks() { assert _systemTotalTicks > 0; return _systemTotalTicks; } /** * @return ticks this process was running. */ public long getProcessTotalTicks() { assert _processTotalTicks > 0; return _processTotalTicks; } /** * Array of ticks. * [cpu number][tick type] * * tick types are: * * [0] user ticks * [1] system ticks * [2] other ticks (i/o) * [3] idle ticks * * @return ticks array for each cpu of the system. */ public long[][] getCpuTicks() { assert _cpuTicks != null; return _cpuTicks.toArray(new long[0][0]); } /** * @return resident set size (RSS) of this process. */ public long getProcessRss() { assert _processRss > 0; return _processRss; } /** * @return number of CPUs allowed by this process. */ public int getProcessCpusAllowed() { if(!SystemUtils.IS_OS_LINUX) return Runtime.getRuntime().availableProcessors(); assert _processCpusAllowed > 0; return _processCpusAllowed; } /** * @return number of currently open fds of this process. */ public int getProcessNumOpenFds() { assert _processNumOpenFds > 0; return _processNumOpenFds; } /** * @return process id for this node as a String. */ public String getProcessID() { return _pid; } /** * Read and parse data from /proc/stat and /proc/&lt;pid&gt;/stat. * If this doesn't work for some reason, the values will be -1. */ public void read() { String pid = "-1"; try { pid = getProcessId(); _pid = pid; } catch (Exception xe) {} File f = new File ("/proc/stat"); if (! f.exists()) { return; } try { readSystemProcFile(); readProcessProcFile(pid); readProcessNumOpenFds(pid); readProcessStatusFile(pid); parseSystemProcFile(_systemData); parseProcessProcFile(_processData); parseProcessStatusFile(_processStatus); } catch (Exception xe) {} } /** * @return true if all the values are ok to use; false otherwise. 
*/ public boolean valid() { return ((_systemIdleTicks >= 0) && (_systemTotalTicks >= 0) && (_processTotalTicks >= 0) && (_processNumOpenFds >= 0)); } /** * @return number of set bits in hexadecimal string (chars must be 0-F) */ public static int numSetBitsHex(String s) { // Look-up table for num set bits in 4-bit char final int[] bits_set = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4}; int nset = 0; for(int i = 0; i < s.length(); i++) { Character ch = s.charAt(i); int x = Integer.parseInt(ch.toString(), 16); nset += bits_set[x]; } return nset; } private static String getProcessId() throws Exception { // Note: may fail in some JVM implementations // therefore fallback has to be provided // something like '<pid>@<hostname>', at least in SUN / Oracle JVMs final String jvmName = java.lang.management.ManagementFactory.getRuntimeMXBean().getName(); final int index = jvmName.indexOf('@'); if (index < 1) { // part before '@' empty (index = 0) / '@' not found (index = -1) throw new Exception ("Can't get process Id"); } return Long.toString(Long.parseLong(jvmName.substring(0, index))); } private String readFile(File f) throws Exception { char[] buffer = new char[16 * 1024]; FileReader fr = new FileReader(f); int bytesRead = 0; while (true) { int n = fr.read(buffer, bytesRead, buffer.length - bytesRead); if (n < 0) { fr.close(); return new String (buffer, 0, bytesRead); } else if (n == 0) { // This is weird. fr.close(); throw new Exception("LinuxProcFileReader readFile read 0 bytes"); } bytesRead += n; if (bytesRead >= buffer.length) { fr.close(); throw new Exception("LinuxProcFileReader readFile unexpected buffer full"); } } } private void readSystemProcFile() { try { _systemData = readFile(new File("/proc/stat")); } catch (Exception xe) {} } /** * @param s String containing contents of proc file. */ private void parseSystemProcFile(String s) { if (s == null) return; try { BufferedReader reader = new BufferedReader(new StringReader(s)); String line = reader.readLine(); // Read aggregate cpu values { Pattern p = Pattern.compile("cpu\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+).*"); Matcher m = p.matcher(line); boolean b = m.matches(); if (!b) { return; } long systemUserTicks = Long.parseLong(m.group(1)); long systemNiceTicks = Long.parseLong(m.group(2)); long systemSystemTicks = Long.parseLong(m.group(3)); _systemIdleTicks = Long.parseLong(m.group(4)); _systemTotalTicks = systemUserTicks + systemNiceTicks + systemSystemTicks + _systemIdleTicks; } // Read individual cpu values _cpuTicks = new ArrayList<long[]>(); line = reader.readLine(); while (line != null) { Pattern p = Pattern.compile("cpu(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+).*"); Matcher m = p.matcher(line); boolean b = m.matches(); if (! b) { break; } // Copying algorithm from http://gee.cs.oswego.edu/dl/code/ // See perfbar.c in gtk_perfbar package. 
// int cpuNum = Integer.parseInt(m.group(1)); long cpuUserTicks = 0; long cpuSystemTicks = 0; long cpuOtherTicks = 0; long cpuIdleTicks = 0; cpuUserTicks += Long.parseLong(m.group(2)); cpuOtherTicks += Long.parseLong(m.group(3)); cpuSystemTicks += Long.parseLong(m.group(4)); cpuIdleTicks += Long.parseLong(m.group(5)); cpuOtherTicks += Long.parseLong(m.group(6)); cpuSystemTicks += Long.parseLong(m.group(7)); cpuSystemTicks += Long.parseLong(m.group(8)); long[] oneCpuTicks = {cpuUserTicks, cpuSystemTicks, cpuOtherTicks, cpuIdleTicks}; _cpuTicks.add(oneCpuTicks); line = reader.readLine(); } } catch (Exception xe) {} } private void readProcessProcFile(String pid) { try { String s = "/proc/" + pid + "/stat"; _processData = readFile(new File(s)); } catch (Exception xe) {} } private void parseProcessProcFile(String s) { if (s == null) return; try { BufferedReader reader = new BufferedReader(new StringReader(s)); String line = reader.readLine(); Pattern p = Pattern.compile( "(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)" + "\\s+" + "(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)" + "\\s+" + "(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)" + "\\s+" + "(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)" + "\\s+" + "(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)" + ".*"); Matcher m = p.matcher(line); boolean b = m.matches(); if (! b) { return; } long processUserTicks = Long.parseLong(m.group(14)); long processSystemTicks = Long.parseLong(m.group(15)); _processTotalTicks = processUserTicks + processSystemTicks; _processRss = Long.parseLong(m.group(24)); } catch (Exception xe) {} } private void readProcessNumOpenFds(String pid) { try { String s = "/proc/" + pid + "/fd"; File f = new File(s); String[] arr = f.list(); if (arr != null) { _processNumOpenFds = arr.length; } } catch (Exception xe) {} } private void readProcessStatusFile(String pid) { try { String s = "/proc/" + pid + "/status"; _processStatus = readFile(new File(s)); } catch (Exception xe) {} } private void parseProcessStatusFile(String s) { if(s == null) return; try { Pattern p = Pattern.compile("Cpus_allowed:\\s+([A-Fa-f0-9]+)"); Matcher m = p.matcher(s); boolean b = m.find(); if (! b) { return; } _processCpusAllowed = numSetBitsHex(m.group(1)); } catch (Exception xe) {} } /** * Main is purely for command-line testing. 
*/ public static void main(String[] args) { final String sysTestData = "cpu 43559117 24094 1632164 1033740407 245624 29 200080 0 0 0\n"+ "cpu0 1630761 1762 62861 31960072 40486 15 10614 0 0 0\n"+ "cpu1 1531923 86 62987 32118372 13190 0 6806 0 0 0\n"+ "cpu2 1436788 332 66513 32210723 10867 0 6772 0 0 0\n"+ "cpu3 1428700 1001 64574 32223156 8751 0 6811 0 0 0\n"+ "cpu4 1424410 152 62649 32232602 6552 0 6836 0 0 0\n"+ "cpu5 1427172 1478 58744 32233938 5471 0 6708 0 0 0\n"+ "cpu6 1418433 348 60957 32241807 5301 0 6639 0 0 0\n"+ "cpu7 1404882 182 60640 32258150 3847 0 6632 0 0 0\n"+ "cpu8 1485698 3593 67154 32101739 38387 0 9016 0 0 0\n"+ "cpu9 1422404 1601 66489 32193865 15133 0 8800 0 0 0\n"+ "cpu10 1383939 3386 69151 32233567 11219 0 8719 0 0 0\n"+ "cpu11 1376904 3051 65256 32246197 8307 0 8519 0 0 0\n"+ "cpu12 1381437 1496 68003 32237894 6966 0 8676 0 0 0\n"+ "cpu13 1376250 1527 66598 32247951 7020 0 8554 0 0 0\n"+ "cpu14 1364352 1573 65520 32262764 5093 0 8531 0 0 0\n"+ "cpu15 1359076 1176 64380 32269336 5219 0 8593 0 0 0\n"+ "cpu16 1363844 6 29612 32344252 4830 2 4366 0 0 0\n"+ "cpu17 1477797 1019 70211 32190189 6278 0 3731 0 0 0\n"+ "cpu18 1285849 30 29219 32428612 3549 0 3557 0 0 0\n"+ "cpu19 1272308 0 27306 32445340 2089 0 3541 0 0 0\n"+ "cpu20 1326369 5 29152 32386824 2458 0 4416 0 0 0\n"+ "cpu21 1320883 28 31886 32384709 2327 1 4869 0 0 0\n"+ "cpu22 1259498 1 26954 32458931 2247 0 3511 0 0 0\n"+ "cpu23 1279464 0 26694 32439550 1914 0 3571 0 0 0\n"+ "cpu24 1229977 19 32308 32471217 4191 0 4732 0 0 0\n"+ "cpu25 1329079 92 79253 32324092 5267 0 4821 0 0 0\n"+ "cpu26 1225922 30 34837 32475220 4000 0 4711 0 0 0\n"+ "cpu27 1261848 56 43928 32397341 3552 0 5625 0 0 0\n"+ "cpu28 1226707 20 36281 32463498 3935 4 5943 0 0 0\n"+ "cpu29 1379751 19 35593 32317723 2872 4 5913 0 0 0\n"+ "cpu30 1247661 0 32636 32455845 2033 0 4775 0 0 0\n"+ "cpu31 1219016 10 33804 32484916 2254 0 4756 0 0 0\n"+ "intr 840450413 1194 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 55 0 0 0 0 0 0 45 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 593665 88058 57766 41441 62426 61320 39848 39787 522984 116724 99144 95021 113975 99093 78676 78144 0 168858 168858 168858 162 2986764 4720950 3610168 5059579 3251008 2765017 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 00 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 00 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 00 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 00 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 00 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 00 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 00 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 00 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n"+ "ctxt 1506565570\n"+ "btime 1385196580\n"+ "processes 1226464\n"+ "procs_running 21\n"+ "procs_blocked 0\n"+ "softirq 793917930 0 156954983 77578 492842649 1992553 0 7758971 51856558 228040 82206598\n"; final String procTestData = "16790 (java) S 1 16789 16789 0 -1 4202496 6714145 0 0 0 4773058 5391 0 0 20 0 110 0 33573283 64362651648 6467228 18446744073709551615 1073741824 1073778376 140734614041280 140734614032416 140242897981768 0 0 3 16800972 18446744073709551615 0 0 17 27 0 0 0 0 0\n"; LinuxProcFileReader lpfr = new LinuxProcFileReader(); lpfr.parseSystemProcFile(sysTestData); lpfr.parseProcessProcFile(procTestData); System.out.println("System idle ticks: " + lpfr.getSystemIdleTicks()); System.out.println("System total ticks: " + lpfr.getSystemTotalTicks()); System.out.println("Process total ticks: " + lpfr.getProcessTotalTicks()); System.out.println("Process RSS: " + lpfr.getProcessRss()); System.out.println("Number of cpus: " + lpfr.getCpuTicks().length); } }
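A minimal sketch of how the Cpus_allowed mask from /proc/<pid>/status turns into a CPU count, reusing the public helper above; the mask values are illustrative.

import water.util.LinuxProcFileReader;

public class CpusAllowedSketch {
  public static void main(String[] args) {
    // "ff" has eight set bits -> 8 CPUs allowed; "3" has two set bits -> 2 CPUs
    System.out.println(LinuxProcFileReader.numSetBitsHex("ff")); // prints 8
    System.out.println(LinuxProcFileReader.numSetBitsHex("3"));  // prints 2
  }
}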
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/Log.java
package water.util; import java.io.*; import java.lang.management.ManagementFactory; import java.util.ArrayList; import java.util.Locale; import org.apache.log4j.Level; import org.apache.log4j.LogManager; import org.apache.log4j.PropertyConfigurator; import water.*; import water.api.Constants.Schemes; import water.util.Log.Tag.Kind; import water.util.Log.Tag.Sys; /** Log for H2O. This class should be loaded before we start to print as it wraps around * System.{out,err}. * * There are three kinds of message: INFO, WARN and ERRR, for general information, * events that look wrong, and runtime exceptions. * WARN messages and uncaught exceptions are printed on Standard output. Some * INFO messages are also printed on standard output. Many more messages are * printed to the log file in the ice directory and to the K/V store. * * Messages can come from a number of subsystems, Sys.RANDF for instance * denotes the Random forest implementation. Subsystem names are five letter * mnemonics to keep formatting nicely even. * * To print messages from a subsystem to the log file, set a property on the command line * -Dlog.RANDF=true * -Dlog.RANDF=false // turn off * or call the API function * Log.setFlag(Sys.RANDF); * Log.unsetFlag(Sys.RANDF); // turn off * * * OOME: when the VM is low on memory, OutOfMemoryError can be thrown in the * logging framework while it is trying to print a message. In this case the * first message that fails is recorded for later printout, and a number of * messages can be discarded. The framework will attempt to print the recorded * message later, and report the number of dropped messages, but this done in * a best effort and lossy manner. Basically when an OOME occurs during * logging, no guarantees are made about the messages. **/ public abstract class Log { /** Tags for log messages */ public static interface Tag { /** Which subsystem of h2o? */ public static enum Sys implements Tag { RANDF, GBM__, DRF__, GENLM, KMEAN, PARSE, STORE, WATER, HDFS_, HTTPD, CLEAN, CONFM, EXCEL, SCORM, LOCKS, HTLOG; boolean _enable; } /** What kind of message? */ public static enum Kind implements Tag { TRAC, DEBG, INFO, WARN, ERRR, FATL; } } static { for(Kind k : Kind.values()) assert k.name().length() == Kind.INFO.name().length(); for(Sys s : Sys.values()) assert s.name().length() == Sys.RANDF.name().length(); } public static final Kind[] KINDS = Kind.values(); public static final Sys[] SYSS = Sys.values(); private static final String NL = System.getProperty("line.separator"); static public void wrap() { ///Turning off wrapping for now... If this breaks stuff will put it back on. /// System.setOut(new Wrapper(System.out)); System.setErr(new Wrapper(System.err)); } /** Local log file */ static String LOG_DIR = null; /** Key for the log in the KV store */ public static Key LOG_KEY = null; /** Time from when this class loaded. */ static final Timer time = new Timer(); /** Some guess at the process ID. */ public static final long PID = getPid(); /** Additional logging for debugging. */ private static String _longHeaders; private static boolean printAll; /** Per subsystem debugging flags. 
*/ static { String pa = System.getProperty("log.printAll"); printAll = (pa!=null && pa.equals("true")); setFlag(Sys.WATER); setFlag(Sys.RANDF); setFlag(Sys.HTTPD); for(Sys s : Sys.values()) { String str = System.getProperty("log."+s); if (str == null) continue; if (str.equals("false")) unsetFlag(s); else setFlag(s); } } /** Check if a subsystem will print debug message to the LOG file */ public static boolean flag(Sys t) { return t._enable || printAll; } /** Set the debug flag. */ public static void setFlag(Sys t) { t._enable = true; } /** Unset the debug flag. */ public static void unsetFlag(Sys t) { t._enable = false; } /** * Events are created for all calls to the logging API. **/ static class Event { Kind kind; Sys sys; Timer when; long msFromStart; Throwable ouch; Object[] messages; Object message; String thread; /**True if we have yet finished printing this event.*/ volatile boolean printMe; private volatile static Timer lastGoodTimer = new Timer(); private volatile static Event lastEvent = new Event(); private volatile static int missed; static Event make(Tag.Sys sys, Tag.Kind kind, Throwable ouch, Object[] messages) { return make0(sys, kind, ouch, messages , null); } static Event make(Tag.Sys sys, Tag.Kind kind, Throwable ouch, Object message) { return make0(sys, kind, ouch, null , message); } static private Event make0(Tag.Sys sys, Tag.Kind kind, Throwable ouch, Object[] messages, Object message) { Event result = null; try { result = new Event(); result.init(sys, kind, ouch, messages, message, lastGoodTimer = new Timer()); } catch (OutOfMemoryError e){ synchronized (Event.class){ if (lastEvent.printMe) { missed++; return null; }// Giving up; record the number of lost messages result = lastEvent; result.init(sys, kind, ouch, messages, null, lastGoodTimer); } } return result; } private void init(Tag.Sys sys, Tag.Kind kind, Throwable ouch, Object[] messages, Object message, Timer t) { this.kind = kind; this.ouch = ouch; this.messages = messages; this.message = message; this.sys = sys; this.when = t; this.printMe = true; } public String toString() { StringBuilder buf = longHeader(new StringBuilder(120)); int headroom = buf.length(); buf.append(body(headroom)); return buf.toString(); } public String toShortString() { StringBuilder buf = shortHeader(new StringBuilder(120)); int headroom = buf.length(); buf.append(body(headroom)); return buf.toString(); } private String body(int headroom) { //if( body != null ) return body; // the different message have different padding ... can't quite cache. StringBuilder buf = new StringBuilder(120); if (messages!=null) for( Object m : messages ) buf.append(m.toString()); else if (message !=null ) buf.append(message.toString()); // --- "\n" vs NL --- // Embedded strings often use "\n" to denote a new-line. This is either // 1 or 2 chars ON OUTPUT depending Unix vs Windows, but always 1 char in // the incoming string. We search & split the incoming string based on // the 1 character "\n", but we build result strings with NL (a String of // length 1 or 2). i.e. 
// GOOD: String.indexOf("\n"); SB.append( NL ) // BAD : String.indexOf( NL ); SB.append("\n") if( buf.indexOf("\n") != -1 ) { String s = buf.toString(); String[] lines = s.split("\n"); if (lines.length > 0) { //gracefully handle s = "\n" StringBuilder buf2 = new StringBuilder(2 * buf.length()); buf2.append(lines[0]); for (int i = 1; i < lines.length; i++) { buf2.append(NL).append("+"); for (int j = 1; j < headroom; j++) buf2.append(" "); buf2.append(lines[i]); } buf = buf2; } } if( ouch != null ) { buf.append(NL); Writer wr = new StringWriter(); PrintWriter pwr = new PrintWriter(wr); ouch.printStackTrace(pwr); String mess = wr.toString(); String[] lines = mess.split("\n"); for( int i = 0; i < lines.length; i++ ) { buf.append("+"); for( int j = 1; j < headroom; j++ ) buf.append(" "); buf.append(lines[i]); if( i != lines.length - 1 ) buf.append(NL); } } return buf.toString(); } private StringBuilder longHeader(StringBuilder buf) { String headers = _longHeaders; if(headers == null) { String host = H2O.SELF_ADDRESS != null ? H2O.SELF_ADDRESS.getHostAddress() : ""; headers = fixedLength(host + ":" + H2O.API_PORT + " ", 22) + fixedLength(PID + " ", 6); if(H2O.SELF_ADDRESS != null) _longHeaders = headers; } buf.append(when.startAsString()).append(" ").append(headers); if( thread == null ) thread = fixedLength(Thread.currentThread().getName() + " ", 10); buf.append(thread); buf.append(kind.toString()).append(" ").append(sys.toString()).append(": "); return buf; } private StringBuilder shortHeader(StringBuilder buf) { buf.append(when.startAsShortString()).append(" "); if(H2O.DEBUG) { String host = H2O.SELF_ADDRESS != null ? H2O.SELF_ADDRESS.getHostAddress() : ""; buf.append(fixedLength(host + ":" + H2O.API_PORT + " ", 18)); } if( thread == null ) thread = fixedLength(Thread.currentThread().getName() + " ", 8); buf.append(thread); if(!H2O.DEBUG) buf.append(kind.toString()).append(" ").append(sys.toString()).append(": "); return buf; } } /** Write different versions of E to the three outputs. */ private static void write(Event e, boolean printOnOut, boolean logToKV) { try { write0(e,printOnOut,logToKV); if (Event.lastEvent.printMe || Event.missed > 0) { synchronized(Event.class){ if ( Event.lastEvent.printMe) { Event ev = Event.lastEvent; write0(ev,true,logToKV); Event.lastEvent = new Event(); } if (Event.missed > 0) { if (Event.lastEvent.printMe==false) { Event.lastEvent.init(Sys.WATER, Kind.WARN, null, null, "Logging framework dropped a message", Event.lastGoodTimer); Event.missed--; } } } } } catch (OutOfMemoryError xe) { synchronized (Event.class){ if (Event.lastEvent.printMe == false) Event.lastEvent = e; else Event.missed++; } } } private static org.apache.log4j.Logger _logger = null; public static String getLogDir() { if (LOG_DIR == null) { return "unknown-log-dir"; } return LOG_DIR; } /** * @return The common prefix for all of the different log files for this process. */ public static String getLogPathFileNameStem() { String ip; if (H2O.SELF_ADDRESS == null) { ip = "UnknownIP"; } else { ip = H2O.SELF_ADDRESS.getHostAddress(); } // Somehow, the above process for producing an IP address has a slash // in it, which is mystifying. Remove it. int port = H2O.API_PORT; String portString = Integer.toString(port); String logFileName = getLogDir() + File.separator + "h2o_" + ip + "_" + portString; return logFileName; } /** * @return This is what shows up in the Web UI when clicking on show log file. 
*/ public static String getLogPathFileName() { return getLogPathFileNameStem() + "-2-debug.log"; } private static org.apache.log4j.Logger getLog4jLogger() { return _logger; } private static void setLog4jProperties(String logDirParent, java.util.Properties p) { LOG_DIR = logDirParent + File.separator + "h2ologs"; String logPathFileName = getLogPathFileNameStem(); // H2O-wide logging p.setProperty("log4j.rootLogger", "TRACE, R1, R2, R3, R4, R5, R6"); p.setProperty("log4j.appender.R1", "org.apache.log4j.RollingFileAppender"); p.setProperty("log4j.appender.R1.Threshold", "TRACE"); p.setProperty("log4j.appender.R1.File", logPathFileName + "-1-trace.log"); p.setProperty("log4j.appender.R1.MaxFileSize", "1MB"); p.setProperty("log4j.appender.R1.MaxBackupIndex", "3"); p.setProperty("log4j.appender.R1.layout", "org.apache.log4j.PatternLayout"); p.setProperty("log4j.appender.R1.layout.ConversionPattern", "%m%n"); p.setProperty("log4j.appender.R2", "org.apache.log4j.RollingFileAppender"); p.setProperty("log4j.appender.R2.Threshold", "DEBUG"); p.setProperty("log4j.appender.R2.File", logPathFileName + "-2-debug.log"); p.setProperty("log4j.appender.R2.MaxFileSize", "3MB"); p.setProperty("log4j.appender.R2.MaxBackupIndex", "3"); p.setProperty("log4j.appender.R2.layout", "org.apache.log4j.PatternLayout"); p.setProperty("log4j.appender.R2.layout.ConversionPattern", "%m%n"); p.setProperty("log4j.appender.R3", "org.apache.log4j.RollingFileAppender"); p.setProperty("log4j.appender.R3.Threshold", "INFO"); p.setProperty("log4j.appender.R3.File", logPathFileName + "-3-info.log"); p.setProperty("log4j.appender.R3.MaxFileSize", "2MB"); p.setProperty("log4j.appender.R3.MaxBackupIndex", "3"); p.setProperty("log4j.appender.R3.layout", "org.apache.log4j.PatternLayout"); p.setProperty("log4j.appender.R3.layout.ConversionPattern", "%m%n"); p.setProperty("log4j.appender.R4", "org.apache.log4j.RollingFileAppender"); p.setProperty("log4j.appender.R4.Threshold", "WARN"); p.setProperty("log4j.appender.R4.File", logPathFileName + "-4-warn.log"); p.setProperty("log4j.appender.R4.MaxFileSize", "256KB"); p.setProperty("log4j.appender.R4.MaxBackupIndex", "3"); p.setProperty("log4j.appender.R4.layout", "org.apache.log4j.PatternLayout"); p.setProperty("log4j.appender.R4.layout.ConversionPattern", "%m%n"); p.setProperty("log4j.appender.R5", "org.apache.log4j.RollingFileAppender"); p.setProperty("log4j.appender.R5.Threshold", "ERROR"); p.setProperty("log4j.appender.R5.File", logPathFileName + "-5-error.log"); p.setProperty("log4j.appender.R5.MaxFileSize", "256KB"); p.setProperty("log4j.appender.R5.MaxBackupIndex", "3"); p.setProperty("log4j.appender.R5.layout", "org.apache.log4j.PatternLayout"); p.setProperty("log4j.appender.R5.layout.ConversionPattern", "%m%n"); p.setProperty("log4j.appender.R6", "org.apache.log4j.RollingFileAppender"); p.setProperty("log4j.appender.R6.Threshold", "FATAL"); p.setProperty("log4j.appender.R6.File", logPathFileName + "-6-fatal.log"); p.setProperty("log4j.appender.R6.MaxFileSize", "256KB"); p.setProperty("log4j.appender.R6.MaxBackupIndex", "3"); p.setProperty("log4j.appender.R6.layout", "org.apache.log4j.PatternLayout"); p.setProperty("log4j.appender.R6.layout.ConversionPattern", "%m%n"); // HTTPD logging p.setProperty("log4j.logger.water.api.RequestServer", "TRACE, HTTPD"); p.setProperty("log4j.additivity.water.api.RequestServer", "false"); p.setProperty("log4j.appender.HTTPD", "org.apache.log4j.RollingFileAppender"); p.setProperty("log4j.appender.HTTPD.Threshold", "TRACE"); 
p.setProperty("log4j.appender.HTTPD.File", logPathFileName + "-httpd.log"); p.setProperty("log4j.appender.HTTPD.MaxFileSize", "1MB"); p.setProperty("log4j.appender.HTTPD.MaxBackupIndex", "3"); p.setProperty("log4j.appender.HTTPD.layout", "org.apache.log4j.PatternLayout"); p.setProperty("log4j.appender.HTTPD.layout.ConversionPattern", "%m%n"); // Turn down the logging for some class hierarchies. p.setProperty("log4j.logger.org.apache.http", "WARN"); p.setProperty("log4j.logger.com.amazonaws", "WARN"); p.setProperty("log4j.logger.org.apache.hadoop", "WARN"); p.setProperty("log4j.logger.org.jets3t.service", "WARN"); // See the following document for information about the pattern layout. // http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/PatternLayout.html // // Uncomment this line to find the source of unwanted messages. // p.setProperty("log4j.appender.R1.layout.ConversionPattern", "%p %C %m%n"); } private static org.apache.log4j.Logger createLog4jLogger(String logDirParent) { synchronized (water.util.Log.class) { if (_logger != null) { return _logger; } // If a log4j properties file was specified on the command-line, use it. // Otherwise, create some default properties on the fly. String log4jProperties = System.getProperty ("log4j.properties"); if (log4jProperties != null) { PropertyConfigurator.configure(log4jProperties); // TODO: Need some way to set LOG_DIR here for LogCollectorTask to work. } else { java.util.Properties p = new java.util.Properties(); setLog4jProperties(logDirParent, p); PropertyConfigurator.configure(p); } _logger = LogManager.getLogger(Log.class.getName()); } return _logger; } public static void setLogLevel(int log_level) throws IllegalArgumentException { Level l; switch(log_level) { case 1: l = Level.TRACE; break; case 2: l = Level.DEBUG; break; case 3: l = Level.INFO; break; case 4: l = Level.WARN; break; case 5: l = Level.ERROR; break; case 6: l = Level.FATAL; break; default: throw new IllegalArgumentException("Log level " + log_level + " is invalid"); } _logger.setLevel(l); System.out.println("Set log level to " + l); _logger.info("Set log level to " + l); } static volatile boolean loggerCreateWasCalled = false; static private Object startupLogEventsLock = new Object(); static volatile private ArrayList<Event> startupLogEvents = new ArrayList<Event>(); private static void log0(org.apache.log4j.Logger l4j, Event e) { if (e.sys == Sys.HTLOG) { // As a special additional log, put HTLOG requests in their own file. // HTLOG are requests from RequestServer that haven't been filtered out. // HTLOG requests should only come at INFO. e.sys = Sys.HTTPD; String s = "tid(" + Thread.currentThread().getId() + ") " + e.toString(); org.apache.log4j.Logger httpdLogger = LogManager.getLogger("water.api.RequestServer"); if (e.kind == Kind.INFO) { httpdLogger.info(s); } else { httpdLogger.error(s); } return; } String s = e.toString(); if (e.kind == Kind.FATL) { l4j.fatal(s); } else if (e.kind == Kind.ERRR) { l4j.error(s); } else if (e.kind == Kind.WARN) { l4j.warn(s); } else if (e.kind == Kind.INFO) { l4j.info(s); } else if (e.kind == Kind.DEBG) { l4j.debug(s); } else if (e.kind == Kind.TRAC) { l4j.trace(s); } else { // Choose error by default if we can't figure out the right logging level. l4j.error(s); } } /** the actual write code. */ private static void write0(Event e, boolean printOnOut, boolean logToKV) { org.apache.log4j.Logger l4j = getLog4jLogger(); // If no logger object exists, try to build one. 
// Disable for debug, causes problems for multiple nodes per VM if ((l4j == null) && !loggerCreateWasCalled && !H2O.DEBUG) { if (H2O.SELF != null) { File dir; boolean windowsPath = H2O.ICE_ROOT.toString().matches("^[a-zA-Z]:.*"); // Use ice folder if local, or default if (windowsPath) dir = new File(H2O.ICE_ROOT.toString()); else if( H2O.ICE_ROOT.getScheme() == null || Schemes.FILE.equals(H2O.ICE_ROOT.getScheme()) ) dir = new File(H2O.ICE_ROOT.getPath()); else dir = new File(H2O.DEFAULT_ICE_ROOT()); loggerCreateWasCalled = true; l4j = createLog4jLogger(dir.toString()); } } // Log if we can, buffer if we cannot. if (l4j == null) { // Calling toString has side-effects about how the output looks. So call // it early here, even if we're just going to buffer the event. e.toString(); // buffer. synchronized (startupLogEventsLock) { if (startupLogEvents != null) { startupLogEvents.add(e); } else { // there is an inherent race condition here where we might drop a message // during startup. this is only a danger in multithreaded situations. // it's ok, just be aware of it. } } } else { // drain buffer if it exists. for performance reasons, don't enter // lock unless the buffer exists. if (startupLogEvents != null) { synchronized (startupLogEventsLock) { for (int i = 0; i < startupLogEvents.size(); i++) { Event bufferedEvent = startupLogEvents.get(i); log0(l4j, bufferedEvent); } startupLogEvents = null; } } // log. log0(l4j, e); } // if( Paxos._cloudLocked && logToKV ) logToKV(e.when.startAsString(), e.thread, e.kind, e.sys, e.body(0)); if(printOnOut || printAll) unwrap(System.out, e.toShortString()); e.printMe = false; } /** We also log events to the store. */ private static void logToKV(final String date, final String thr, final Kind kind, final Sys sys, final String msg) { // Make the LOG_KEY lazily, since we cannot make it before the cloud forms if( LOG_KEY == null ) if( !Paxos._cloudLocked ) return; // No K/V logging before cloud formed synchronized(Log.class) { if( LOG_KEY == null ) LOG_KEY = Key.make("Log", (byte) 0, Key.BUILT_IN_KEY); } final long pid = PID; // Run locally final H2ONode h2o = H2O.SELF; // Run locally new TAtomic<LogStr>() { @Override public LogStr atomic(LogStr l) { return new LogStr(l, date, h2o, pid, thr, kind, sys, msg); } }.fork(LOG_KEY); } /** Record an exception to the log file and store. */ static public <T extends Throwable> T err(Sys t, String msg, T exception) { Event e = Event.make(t, Kind.ERRR, exception, msg ); write(e,true,false); return exception; } /** Record a message to the log file and store. */ static public void err(Sys t, String msg) { Event e = Event.make(t, Kind.ERRR, null, msg ); write(e,true,false); } /** Record an exception to the log file and store. */ static public <T extends Throwable> T err(String msg, T exception) { return err(Sys.WATER, msg, exception); } /** Record a message to the log file and store. */ static public void err(String msg) { err(Sys.WATER, msg); } /** Record an exception to the log file and store. */ static public <T extends Throwable> T err(Sys t, T exception) { return err(t, "", exception); } /** Record an exception to the log file and store. */ static public <T extends Throwable> T err(T exception) { return err(Sys.WATER, "", exception); } /** Record an exception to the log file and store and return a new * RuntimeException that wraps around the exception. 
*/ static public RuntimeException errRTExcept(Throwable exception) { return new RuntimeException(err(Sys.WATER, "", exception)); } /** Log a warning to standard out, the log file and the store. */ static public <T extends Throwable> T warn(Sys t, String msg, T exception) { Event e = Event.make(t, Kind.WARN, exception, msg); write(e,true,true); return exception; } /** Log a warning to standard out, the log file and the store. */ static public Throwable warn(Sys t, String msg) { return warn(t, msg, null); } /** Log a warning to standard out, the log file and the store. */ static public Throwable warn(String msg) { return warn(Sys.WATER, msg, null); } /** Log an information message to standard out, the log file and the store. */ static public void info_no_stdout(Sys t, Object... objects) { Event e = Event.make(t, Kind.INFO, null, objects); write(e,false,true); } static public void info_no_DKV(Sys t, Object... objects) { Event e = Event.make(t, Kind.INFO, null, objects); write(e,false,false); } /** Log an information message to standard out, the log file and the store. */ static public void info(Sys t, Object... objects) { Event e = Event.make(t, Kind.INFO, null, objects); write(e,true,true); } /** Log an information message to standard out, the log file and the store. */ static public void info_no_stdout(Object... objects) { info_no_stdout(Sys.WATER, objects); } /** Log an information message to standard out, the log file and the store. */ static public void info(Object... objects) { info(Sys.WATER, objects); } /** Log a debug message to the log file and the store if the subsystem's flag is set. */ static public void debug(Object... objects) { if (flag(Sys.WATER) == false) return; Event e = Event.make(Sys.WATER, Kind.DEBG, null, objects); write(e,false,true); } /** Log a debug message to the log file and the store if the subsystem's flag is set. */ static public void debug(Sys t, Object... objects) { if (flag(t) == false) return; Event e = Event.make( t, Kind.DEBG, null, objects); write(e,false,true); } /** Log a debug message to the log file and the store if the subsystem's flag is set. */ static public void trace(Object... objects) { if (flag(Sys.WATER) == false) return; Event e = Event.make(Sys.WATER, Kind.TRAC, null, objects); write(e,false,true); } /** Temporary log statement. Search for references to make sure they have been removed. */ static public void tmp(Object... objects) { info(objects); } public static String fixedLength(String s, int length) { String r = padRight(s, length); if( r.length() > length ) { int a = Math.max(r.length() - length + 1, 0); int b = Math.max(a, r.length()); r = "#" + r.substring(a, b); } return r; } public static String padRight(String stringToPad, int size) { StringBuilder strb = new StringBuilder(stringToPad); while( strb.length() < size ) if( strb.length() < size ) strb.append(' '); return strb.toString(); } /// ==== FROM OLD LOG ==== // Survive "die" calls - used in some debugging modes public static boolean _dontDie; // Return process ID, or -1 if not supported private static long getPid() { try { String n = ManagementFactory.getRuntimeMXBean().getName(); int i = n.indexOf('@'); if( i == -1 ) return -1; return Long.parseLong(n.substring(0, i)); } catch( Throwable t ) { return -1; } } // Print to the original STDERR & die public static void die(String s) { System.err.println(s); if( !_dontDie ) H2O.exit(-1); } /** Print a message to the stream without the logging information. 
*/ public static void unwrap(PrintStream stream, String s) { if( stream instanceof Wrapper ) ((Wrapper) stream).printlnParent(s); else stream.println(s); } public static PrintStream unwrap(PrintStream stream){ return stream instanceof Wrapper ? ((Wrapper)stream).parent: stream; } public static void log(File file, PrintStream stream) throws Exception { BufferedReader reader = new BufferedReader(new FileReader(file)); try { for( ;; ) { String line = reader.readLine(); if( line == null ) break; stream.println(line); } } finally { reader.close(); } } public static final class Wrapper extends PrintStream { PrintStream parent; Wrapper(PrintStream parent) { super(parent); this.parent=parent; } private static String log(Locale l, boolean nl, String format, Object... args) { String msg = String.format(l, format, args); Event e = Event.make(Sys.WATER,Kind.INFO,null, msg); Log.write(e,false,true); return e.toShortString()+NL; } @Override public PrintStream printf(String format, Object... args) { super.print(log(null, false, format, args)); return this; } @Override public PrintStream printf(Locale l, String format, Object... args) { super.print(log(l, false, format, args)); return this; } @Override public void println(String x) { super.print(log(null, true, "%s", x)); } void printlnParent(String s) { super.println(s); } } // Class to hold a ring buffer of log messages in the K/V store public static class LogStr extends Iced { public static final int MAX = 1024; // Number of log entries public final int _idx; // Index into the ring buffer public final byte _kinds[]; public final byte _syss[]; public final String _dates[]; public final H2ONode _h2os[]; public final long _pids[]; public final String _thrs[]; public final String _msgs[]; LogStr(LogStr l, String date, H2ONode h2o, long pid, String thr, Kind kind, Sys sys, String msg) { _dates = l == null ? new String[MAX] : l._dates; _h2os = l == null ? new H2ONode[MAX] : l._h2os; _pids = l == null ? new long[MAX] : l._pids; _thrs = l == null ? new String[MAX] : l._thrs; _kinds = l == null ? new byte[MAX] : l._kinds; _syss = l == null ? new byte[MAX] : l._syss; _msgs = l == null ? new String[MAX] : l._msgs; _idx = l == null ? 0 : (l._idx + 1) & (MAX - 1); _dates[_idx] = date; _h2os[_idx] = h2o; _pids[_idx] = pid; _thrs[_idx] = thr; _kinds[_idx] = (byte) kind.ordinal(); _syss[_idx] = (byte) sys.ordinal(); _msgs[_idx] = msg; } } /** * POST stands for "Power on self test". * Stamp a POST code to /tmp. * This is for bringup, when no logging or stdout I/O is reliable. * (Especially when embedded, such as in hadoop mapreduce, for example.) * * @param n POST code. * @param s String to emit. */ // private static final Object postLock = new Object(); public static void POST(int n, String s) { // DO NOTHING UNLESS ENABLED BY REMOVING THIS RETURN! return; // synchronized (postLock) { // File f = new File ("/tmp/h2o.POST"); // if (! f.exists()) { // boolean success = f.mkdirs(); // if (! 
success) { // try { System.err.print ("Exiting from POST now!"); } catch (Exception _) {} // H2O.exit (0); // } // } // // f = new File ("/tmp/h2o.POST/" + n); // try { // f.createNewFile(); // FileWriter fstream = new FileWriter(f.getAbsolutePath(), true); // BufferedWriter out = new BufferedWriter(fstream); // out.write(s + "\n"); // out.close(); // } // catch (Exception e) { // try { System.err.print ("Exiting from POST now!"); } catch (Exception _) {} // H2O.exit (0); // } // } } public static void POST(int n, Exception e) { if (e.getMessage() != null) { POST(n, e.getMessage()); } POST(n, e.toString()); StackTraceElement[] els = e.getStackTrace(); for (int i = 0; i < els.length; i++) { POST(n, els[i].toString()); } } public static void main(String[]args) { Log.info("hi"); Log.info("h","i"); unwrap(System.out,"hi"); unwrap(System.err,"hi"); Log.info("ho ",new Object(){ int i; public String toString() { if (i++ ==0) throw new OutOfMemoryError(); else return super.toString(); } } ); Log.info("ha ",new Object(){ int i; public String toString() { if (i++ ==0) throw new OutOfMemoryError(); else return super.toString(); } } ); Log.info("hi"); Log.info("hi"); Log.info("hi"); } }
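A minimal usage sketch of the logging API defined above; it assumes a running H2O JVM so the log directory and K/V store behave as described, and the messages are illustrative.

import water.util.Log;
import water.util.Log.Tag.Sys;

public class LogUsageSketch {
  public static void main(String[] args) {
    Log.info(Sys.HTTPD, "GET /Jobs.json");          // INFO goes to stdout, the log files and the K/V store
    Log.warn("heap is nearly full");                // WARN, Sys.WATER by default
    Log.setFlag(Sys.PARSE);                         // enable DEBG output for the PARSE subsystem
    Log.debug(Sys.PARSE, "chunk boundaries: ", 42); // written only because the flag above is set
    try {
      throw new IllegalStateException("boom");
    } catch (IllegalStateException e) {
      Log.err("something failed", e);               // records the stack trace as well
    }
  }
}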
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/LogCollectorTask.java
package water.util; import java.io.*; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; import water.DRemoteTask; import water.H2O; public class LogCollectorTask extends DRemoteTask { final int MB = 1 << 20; final int MAX_SIZE = 25 * MB; public byte[][] _result; public LogCollectorTask() {} private transient ByteArrayOutputStream baos = null; @Override public void lcompute() { _result = new byte[H2O.CLOUD._memary.length][]; int idx = H2O.SELF.index(); baos = new ByteArrayOutputStream(); ZipOutputStream zos = new ZipOutputStream(baos); try { zipDir(Log.LOG_DIR, zos); } catch (IOException e) { H2O.ignore(e); } finally { try { zos.close(); baos.close(); } catch (Exception xe) { // do nothing } byte[] arr = baos.toByteArray(); _result[idx] = arr; tryComplete(); } } // Recursively zip the contents of dir2zip into zos, stopping early once MAX_SIZE bytes have been collected. private void zipDir(String dir2zip, ZipOutputStream zos) throws IOException { try { //create a new File object based on the directory we have to zip. File zipDir = new File(dir2zip); //get a listing of the directory content String[] dirList = zipDir.list(); byte[] readBuffer = new byte[4096]; int bytesIn = 0; //loop through dirList, and zip the files for(int i=0; i<dirList.length; i++) { File f = new File(zipDir, dirList[i]); if(f.isDirectory()) { //if the File object is a directory, call this //function again to add its content recursively String filePath = f.getPath(); zipDir(filePath, zos); //loop again continue; } //if we reached here, the File object f was not a directory //create a FileInputStream on top of f FileInputStream fis = new FileInputStream(f); // create a new zip entry ZipEntry anEntry = new ZipEntry(f.getPath()); anEntry.setTime(f.lastModified()); //place the zip entry in the ZipOutputStream object zos.putNextEntry(anEntry); //now write the content of the file to the ZipOutputStream boolean stopEarlyBecauseTooMuchData = false; while((bytesIn = fis.read(readBuffer)) != -1) { zos.write(readBuffer, 0, bytesIn); if (baos.size() > MAX_SIZE) { stopEarlyBecauseTooMuchData = true; break; } } //close the Stream fis.close(); zos.closeEntry(); if (stopEarlyBecauseTooMuchData) { Log.warn("LogCollectorTask stopEarlyBecauseTooMuchData"); break; } } } catch(Exception e) { // best effort: silently skip anything we cannot read } } @Override public void reduce(DRemoteTask drt) { LogCollectorTask another = (LogCollectorTask) drt; if( _result == null ) _result = another._result; else for (int i=0; i<_result.length; ++i) if (_result[i] == null) _result[i] = another._result[i]; } @Override public byte priority() { return H2O.GUI_PRIORITY; } }
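A hedged sketch of consuming the per-node results: once the task has been distributed and reduced (the exact fork/invoke call belongs to the DRemoteTask API and is not shown here), _result[i] holds node i's zipped log bundle. The output file names below are illustrative.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import water.util.LogCollectorTask;

public class LogBundleWriter {
  // Write each node's zip into dir, skipping nodes that contributed nothing.
  public static void writeBundles(LogCollectorTask finished, File dir) throws IOException {
    for (int i = 0; i < finished._result.length; i++) {
      byte[] zipped = finished._result[i];
      if (zipped == null) continue;
      FileOutputStream fos = new FileOutputStream(new File(dir, "node-" + i + "-logs.zip"));
      try { fos.write(zipped); } finally { fos.close(); }
    }
  }
}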
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/MRUtils.java
package water.util; import static water.util.Utils.getDeterRNG; import water.*; import water.fvec.*; import java.util.Random; public class MRUtils { /** * Sample rows from a frame. * Can be unlucky for small sampling fractions - will continue calling itself until at least 1 row is returned. * @param fr Input frame * @param rows Approximate number of rows to sample (across all chunks) * @param seed Seed for RNG * @return Sampled frame */ public static Frame sampleFrame(Frame fr, final long rows, final long seed) { if (fr == null) return null; final float fraction = rows > 0 ? (float)rows / fr.numRows() : 1.f; if (fraction >= 1.f) return fr; Frame r = new MRTask2() { @Override public void map(Chunk[] cs, NewChunk[] ncs) { final Random rng = getDeterRNG(seed + cs[0].cidx()); int count = 0; for (int r = 0; r < cs[0]._len; r++) if (rng.nextFloat() < fraction || (count == 0 && r == cs[0]._len-1) ) { count++; for (int i = 0; i < ncs.length; i++) { ncs[i].addNum(cs[i].at0(r)); } } } }.doAll(fr.numCols(), fr).outputFrame(fr.names(), fr.domains()); if (r.numRows() == 0) { Log.warn("You asked for " + rows + " rows (out of " + fr.numRows() + "), but you got none (seed=" + seed + ")."); Log.warn("Let's try again. You've gotta ask yourself a question: \"Do I feel lucky?\""); return sampleFrame(fr, rows, seed+1); } return r; } /** * Row-wise shuffle of a frame (only shuffles rows inside of each chunk) * @param fr Input frame * @return Shuffled frame */ public static Frame shuffleFramePerChunk(Frame fr, final long seed) { return shuffleFramePerChunk(null, fr, seed); } public static Frame shuffleFramePerChunk(Key outputFrameKey, Frame fr, final long seed) { Frame r = new MRTask2() { @Override public void map(Chunk[] cs, NewChunk[] ncs) { long[] idx = new long[cs[0]._len]; for (int r=0; r<idx.length; ++r) idx[r] = r; Utils.shuffleArray(idx, seed); for (int r=0; r<idx.length; ++r) { for (int i = 0; i < ncs.length; i++) { ncs[i].addNum(cs[i].at0((int)idx[r])); } } } }.doAll(fr.numCols(), fr).outputFrame(outputFrameKey, fr.names(), fr.domains()); return r; } /** * Global redistribution of a Frame (balancing of chunks), done by calling process (all-to-one + one-to-all) * @param fr Input frame * @param seed RNG seed * @param shuffle whether to shuffle the data globally * @return Shuffled frame */ public static Frame shuffleAndBalance(final Frame fr, int splits, long seed, final boolean local, final boolean shuffle) { if( (fr.vecs()[0].nChunks() < splits || shuffle) && fr.numRows() > splits) { Vec[] vecs = fr.vecs().clone(); Log.info("Load balancing dataset, splitting it into up to " + splits + " chunks."); long[] idx = null; if (shuffle) { idx = new long[splits]; for (int r=0; r<idx.length; ++r) idx[r] = r; Utils.shuffleArray(idx, seed); } Key keys[] = new Vec.VectorGroup().addVecs(vecs.length); final long rows_per_new_chunk = (long)(Math.ceil((double)fr.numRows()/splits)); //loop over cols (same indexing for each column) Futures fs = new Futures(); for(int col=0; col<vecs.length; col++) { AppendableVec vec = new AppendableVec(keys[col]); // create outgoing chunks for this col NewChunk[] outCkg = new NewChunk[splits]; for(int i=0; i<splits; ++i) outCkg[i] = new NewChunk(vec, i); //loop over all incoming chunks for( int ckg = 0; ckg < vecs[col].nChunks(); ckg++ ) { final Chunk inCkg = vecs[col].chunkForChunkIdx(ckg); // loop over local rows of incoming chunks (fast path) for (int row = 0; row < inCkg._len; ++row) { int outCkgIdx = (int)((inCkg._start + row) / rows_per_new_chunk); // destination chunk idx if 
(shuffle) outCkgIdx = (int)(idx[outCkgIdx]); //shuffle: choose a different output chunk assert(outCkgIdx >= 0 && outCkgIdx < splits); outCkg[outCkgIdx].addNum(inCkg.at0(row)); } } for(int i=0; i<outCkg.length; ++i) outCkg[i].close(i, fs); Vec t = vec.close(fs); t._domain = vecs[col]._domain; vecs[col] = t; } fs.blockForPending(); Log.info("Load balancing done."); return new Frame(fr.names(), vecs); } return fr; } /** * Compute the class distribution from a class label vector * (not counting missing values) * * Usage 1: Label vector is categorical * ------------------------------------ * Vec label = ...; * assert(label.isEnum()); * long[] dist = new ClassDist(label).doAll(label).dist(); * * Usage 2: Label vector is numerical * ---------------------------------- * Vec label = ...; * int num_classes = ...; * assert(label.isInt()); * long[] dist = new ClassDist(num_classes).doAll(label).dist(); * */ public static class ClassDist extends ClassDistHelper { public ClassDist(final Vec label) { super(label.domain().length); } public ClassDist(int n) { super(n); } public final long[] dist() { return _ys; } public final float[] rel_dist() { float[] rel = new float[_ys.length]; for (int i=0; i<_ys.length; ++i) rel[i] = (float)_ys[i]; final float sum = Utils.sum(rel); assert(sum != 0.); Utils.div(rel, sum); return rel; } } private static class ClassDistHelper extends MRTask2<ClassDist> { private ClassDistHelper(int nclass) { _nclass = nclass; } final int _nclass; protected long[] _ys; @Override public void map(Chunk ys) { _ys = new long[_nclass]; for( int i=0; i<ys._len; i++ ) if( !ys.isNA0(i) ) _ys[(int)ys.at80(i)]++; } @Override public void reduce( ClassDist that ) { Utils.add(_ys,that._ys); } } /** * Stratified sampling for classifiers * @param fr Input frame * @param label Label vector (must be enum) * @param sampling_ratios Optional: array containing the requested sampling ratios per class (in order of domains), will be overwritten if it contains all 0s * @param maxrows Maximum number of rows in the returned frame * @param seed RNG seed for sampling * @param allowOversampling Allow oversampling of minority classes * @param verbose Whether to print verbose info * @return Sampled frame, with approximately the same number of samples from each class (or given by the requested sampling ratios) */ public static Frame sampleFrameStratified(final Frame fr, Vec label, float[] sampling_ratios, long maxrows, final long seed, final boolean allowOversampling, final boolean verbose) { if (fr == null) return null; assert(label.isEnum()); assert(maxrows >= label.domain().length); long[] dist = new ClassDist(label).doAll(label).dist(); assert(dist.length > 0); Log.info("Doing stratified sampling for data set containing " + fr.numRows() + " rows from " + dist.length + " classes. Oversampling: " + (allowOversampling ? "on" : "off")); if (verbose) { for (int i=0; i<dist.length;++i) { Log.info("Class " + label.domain(i) + ": count: " + dist[i] + " prior: " + (float)dist[i]/fr.numRows()); } } // create sampling_ratios for class balance with max. 
maxrows rows (fill existing array if not null) if (sampling_ratios == null || (Utils.minValue(sampling_ratios) == 0 && Utils.maxValue(sampling_ratios) == 0)) { // compute sampling ratios to achieve class balance if (sampling_ratios == null) { sampling_ratios = new float[dist.length]; } assert(sampling_ratios.length == dist.length); for (int i=0; i<dist.length;++i) { if (dist[i] == 0) { Log.warn("No rows of class " + label.domain()[i] + " found."); } sampling_ratios[i] = dist[i] == 0 ? 1 // don't sample if there's no rows of a certain class (avoid division by 0) : ((float)fr.numRows() / label.domain().length) / dist[i]; // prior^-1 / num_classes assert(sampling_ratios[i] >= 0); } final float inv_scale = Utils.minValue(sampling_ratios); //majority class has lowest required oversampling factor to achieve balance if (!Float.isNaN(inv_scale) && !Float.isInfinite(inv_scale)) Utils.div(sampling_ratios, inv_scale); //want sampling_ratio 1.0 for majority class (no downsampling) } for (float s : sampling_ratios) assert(!Float.isNaN(s) && !Float.isInfinite(s)); if (!allowOversampling) { for (int i=0; i<sampling_ratios.length; ++i) { sampling_ratios[i] = Math.min(1.0f, sampling_ratios[i]); } } for (float s : sampling_ratios) assert(!Float.isNaN(s) && !Float.isInfinite(s)); // given these sampling ratios, and the original class distribution, this is the expected number of resulting rows float numrows = 0; for (int i=0; i<sampling_ratios.length; ++i) { numrows += sampling_ratios[i] * dist[i]; } final long actualnumrows = Math.min(maxrows, Math.round(numrows)); //cap #rows at maxrows assert(actualnumrows >= 0); //can have no matching rows in case of sparse data where we had to fill in a makeZero() vector Log.info("Stratified sampling to a total of " + String.format("%,d", actualnumrows) + " rows" + (actualnumrows < numrows ? 
" (limited by max_after_balance_size).":".")); if (actualnumrows != numrows) { assert(numrows > 0); Utils.mult(sampling_ratios, (float)actualnumrows/numrows); //adjust the sampling_ratios by the global rescaling factor if (verbose) Log.info("Downsampling majority class by " + (float)actualnumrows/numrows + " to limit number of rows to " + String.format("%,d", maxrows)); } if (Utils.minIndex(sampling_ratios) == Utils.maxIndex(sampling_ratios)) { Log.info("All classes are sampled with sampling ratio: " + Utils.minValue(sampling_ratios)); } else { for (int i=0;i<label.domain().length;++i) { Log.info("Class '" + label.domain()[i].toString() + "' sampling ratio: " + sampling_ratios[i]); } } return sampleFrameStratified(fr, label, sampling_ratios, seed, verbose); } /** * Stratified sampling * @param fr Input frame * @param label Label vector (from the input frame) * @param sampling_ratios Given sampling ratios for each class, in order of domains * @param seed RNG seed * @param debug Whether to print debug info * @return Stratified frame */ public static Frame sampleFrameStratified(final Frame fr, Vec label, final float[] sampling_ratios, final long seed, final boolean debug) { return sampleFrameStratified(fr, label, sampling_ratios, seed, debug, 0); } // internal version with repeat counter // currently hardcoded to do up to 10 tries to get a row from each class, which can be impossible for certain wrong sampling ratios private static Frame sampleFrameStratified(final Frame fr, Vec label, final float[] sampling_ratios, final long seed, final boolean debug, int count) { if (fr == null) return null; assert(label.isEnum()); assert(sampling_ratios != null && sampling_ratios.length == label.domain().length); for (float s : sampling_ratios) assert(!Float.isNaN(s)); final int labelidx = fr.find(label); //which column is the label? assert(labelidx >= 0); final boolean poisson = false; //beta feature Frame r = new MRTask2() { @Override public void map(Chunk[] cs, NewChunk[] ncs) { final Random rng = getDeterRNG(seed + cs[0].cidx()); for (int r = 0; r < cs[0]._len; r++) { if (cs[labelidx].isNA0(r)) continue; //skip missing labels final int label = (int)cs[labelidx].at80(r); assert(sampling_ratios.length > label && label >= 0); int sampling_reps; if (poisson) { sampling_reps = Utils.getPoisson(sampling_ratios[label], rng); } else { final float remainder = sampling_ratios[label] - (int)sampling_ratios[label]; sampling_reps = (int)sampling_ratios[label] + (rng.nextFloat() < remainder ? 
1 : 0); } for (int i = 0; i < ncs.length; i++) { for (int j = 0; j < sampling_reps; ++j) { ncs[i].addNum(cs[i].at0(r)); } } } } }.doAll(fr.numCols(), fr).outputFrame(fr.names(), fr.domains()); assert(r.numCols() == fr.numCols()); // Confirm the validity of the distribution long[] dist = new ClassDist(r.vecs()[labelidx]).doAll(r.vecs()[labelidx]).dist(); // if there are no training labels in the test set, then there is no point in sampling the test set if (dist == null) { r.delete(); return fr; } if (debug) { long sumdist = Utils.sum(dist); Log.info("After stratified sampling: " + sumdist + " rows."); for (int i=0; i<dist.length;++i) { Log.info("Class " + r.vecs()[labelidx].domain(i) + ": count: " + dist[i] + " sampling ratio: " + sampling_ratios[i] + " actual relative frequency: " + (float)dist[i] / sumdist * dist.length); } } // Re-try if we didn't get at least one example from each class if (Utils.minValue(dist) == 0 && count < 10) { Log.info("Re-doing stratified sampling because not all classes were represented (unlucky draw)."); r.delete(); return sampleFrameStratified(fr, label, sampling_ratios, seed+1, debug, ++count); } // shuffle intra-chunk Frame shuffled = shuffleFramePerChunk(r, seed+0x580FF13); r.delete(); return shuffled; } /** * Compute the L2 norm for each row of the frame * @param fr Input frame * @return Vec containing L2 values for each row, is in K-V store */ public static Vec getL2(final Frame fr, final double[] scale) { // add workspace vec at end final int idx = fr.numCols(); assert(scale.length == idx) : "Mismatch for number of columns"; fr.add("L2", fr.anyVec().makeZero()); Vec res; try { new MRTask2() { @Override public void map(Chunk[] cs) { for (int r = 0; r < cs[0]._len; r++) { double norm2 = 0; for (int i = 0; i < idx; i++) norm2 += Math.pow(cs[i].at0(r) * scale[i], 2); cs[idx].set0(r, Math.sqrt(norm2)); } } }.doAll(fr); } finally { res = fr.remove(idx); } res.rollupStats(); return res; } }
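A hedged sketch tying the samplers above together: first down-sample a frame to roughly 10,000 rows, then rebalance the class counts of its enum response column. The variable names and row cap are illustrative, and the response is assumed to be the last column of the frame.

import water.fvec.Frame;
import water.fvec.Vec;
import water.util.MRUtils;

public class SamplingSketch {
  static Frame balanced(Frame train, long seed) {
    Frame small = MRUtils.sampleFrame(train, 10000, seed);
    Vec response = small.vecs()[small.numCols() - 1];   // must be an enum column
    // null sampling_ratios -> ratios are computed to equalize the classes;
    // maxrows caps the result, and oversampling of minority classes is allowed here
    return MRUtils.sampleFrameStratified(small, response, null, 10000, seed, true, true);
  }
}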
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/ModelUtils.java
package water.util; import java.util.*; import water.H2O; /** * Shared static code to support modeling, prediction, and scoring. * * <p>Used by interpreted models as well as by generated model code.</p> * * <p><strong>WARNING:</strong> The class should have no other H2O dependencies * since it is provided for generated code as h2o-model.jar which contains * only a few files.</p> * */ public class ModelUtils { /** List of default thresholds */ public static float[] DEFAULT_THRESHOLDS = new float [] { 0.00f, 0.01f, 0.02f, 0.03f, 0.04f, 0.05f, 0.06f, 0.07f, 0.08f, 0.09f, 0.10f, 0.11f, 0.12f, 0.13f, 0.14f, 0.15f, 0.16f, 0.17f, 0.18f, 0.19f, 0.20f, 0.21f, 0.22f, 0.23f, 0.24f, 0.25f, 0.26f, 0.27f, 0.28f, 0.29f, 0.30f, 0.31f, 0.32f, 0.33f, 0.34f, 0.35f, 0.36f, 0.37f, 0.38f, 0.39f, 0.40f, 0.41f, 0.42f, 0.43f, 0.44f, 0.45f, 0.46f, 0.47f, 0.48f, 0.49f, 0.50f, 0.51f, 0.52f, 0.53f, 0.54f, 0.55f, 0.56f, 0.57f, 0.58f, 0.59f, 0.60f, 0.61f, 0.62f, 0.63f, 0.64f, 0.65f, 0.66f, 0.67f, 0.68f, 0.69f, 0.70f, 0.71f, 0.72f, 0.73f, 0.74f, 0.75f, 0.76f, 0.77f, 0.78f, 0.79f, 0.80f, 0.81f, 0.82f, 0.83f, 0.84f, 0.85f, 0.86f, 0.87f, 0.88f, 0.89f, 0.90f, 0.91f, 0.92f, 0.93f, 0.94f, 0.95f, 0.96f, 0.97f, 0.98f, 0.99f, 1.00f }; /** * Utility function to get a best prediction from an array of class * prediction distribution. It returns index of max value if predicted * values are unique. In the case of tie, the implementation solve it in * pseudo-random way. * @param preds an array of prediction distribution. Length of arrays is equal to a number of classes+1. * @return the best prediction (index of class, zero-based) */ public static int getPrediction( float[] preds, double data[] ) { int best=1, tieCnt=0; // Best class; count of ties for( int c=2; c<preds.length; c++) { if( preds[best] < preds[c] ) { best = c; // take the max index tieCnt=0; // No ties } else if (preds[best] == preds[c]) { tieCnt++; // Ties } } if( tieCnt==0 ) return best-1; // Return zero-based best class // Tie-breaking logic float res = preds[best]; // One of the tied best results long hash = 0; // hash for tie-breaking if( data != null ) for( double d : data ) hash ^= Double.doubleToRawLongBits(d) >> 6; // drop 6 least significants bits of mantisa (layout of long is: 1b sign, 11b exp, 52b mantisa) int idx = (int)hash%(tieCnt+1); // Which of the ties we'd like to keep for( best=1; best<preds.length; best++) if( res == preds[best] && --idx < 0 ) return best-1; // Return best throw H2O.fail(); // Should Not Reach Here } /** * Create labels from per-class probabilities with pseudo-random tie-breaking, if needed. 
* @param numK Number of top probabilities to make labels for * @param preds Predictions (first element is ignored here: placeholder for a label) * @param data Data to break ties (typically, the test set data for this row) * @return Array of predicted labels */ public static int[] getPredictions( int numK, float[] preds, double data[] ) { assert(numK <= preds.length-1); int[] labels = new int[numK]; // create a sorted mapping from probability to label(s) TreeMap<Float, List<Integer> > prob_idx = new TreeMap<Float, List<Integer> >(new Comparator<Float>() { @Override public int compare(Float o1, Float o2) { if (o1 > o2) return -1; if (o2 > o1) return 1; return 0; } }); for (int i = 1; i < preds.length; ++i) { final Float prob = preds[i]; final int label = i-1; assert(prob >= 0 && prob <= 1) : "prob is not inside [0,1]: " + prob; if (prob_idx.containsKey(prob)) { prob_idx.get(prob).add(label); //add all ties } else { // add prob to top K probs only if either: // 1) don't have K probs yet // 2) prob is greater than the smallest prob in the store -> evict the smallest if (prob_idx.size() < numK || prob > prob_idx.lastKey()) { List<Integer> li = new LinkedList<Integer>(); li.add(label); prob_idx.put(prob, li); } // keep size small, only need the best numK probabilities (max-heap) if (prob_idx.size()>numK) { prob_idx.remove(prob_idx.lastKey()); } } } assert(!prob_idx.isEmpty()); assert(prob_idx.size() <= numK); //have at most numK probabilities, maybe less if there are ties int i = 0; //which label we are filling in while (i < numK && !prob_idx.isEmpty()) { final Map.Entry p_id = prob_idx.firstEntry(); final Float prob = (Float)p_id.getKey(); //max prob. final List<Integer> indices = (List<Integer>)p_id.getValue(); //potential candidate labels if there are ties if (i + indices.size() <= numK) for (Integer id : indices) labels[i++] = id; else { // Tie-breaking logic: pick numK-i classes (indices) from the list of indices. // if data == null, then pick the first numK-i indices, otherwise break ties pseudo-randomly. while (i<numK) { assert(!indices.isEmpty()); long hash = 0; if( data != null ) for( double d : data ) hash ^= Double.doubleToRawLongBits(d+i) >> 6; // drop 6 least significant bits of mantissa (layout of long is: 1b sign, 11b exp, 52b mantissa) labels[i++] = indices.remove((int)(Math.abs(hash)%indices.size())); } assert(i==numK); } prob_idx.remove(prob); } assert(i==numK); return labels; } public static int getPrediction(float[] preds, int row) { int best=1, tieCnt=0; // Best class; count of ties for( int c=2; c<preds.length; c++) { if( preds[best] < preds[c] ) { best = c; // take the max index tieCnt=0; // No ties } else if (preds[best] == preds[c]) { tieCnt++; // Ties } } if( tieCnt==0 ) return best-1; // Return zero-based best class // Tie-breaking logic float res = preds[best]; // One of the tied best results int idx = row%(tieCnt+1); // Which of the ties we'd like to keep for( best=1; best<preds.length; best++) if( res == preds[best] && --idx < 0 ) return best-1; // Return best throw H2O.fail(); // Should Not Reach Here } /** * Correct a given list of class probabilities produced as a prediction by a model back to prior class distribution * * <p>The implementation is based on Eq. (27) in <a href="http://gking.harvard.edu/files/0s.pdf">the paper</a>. 
* * @param scored list of class probabilities beginning at index 1 * @param priorClassDist original class distribution * @param modelClassDist class distribution used for model building (e.g., data was oversampled) * @return corrected list of probabilities */ public static float[] correctProbabilities(float[] scored, float[] priorClassDist, float[] modelClassDist) { double probsum=0; for( int c=1; c<scored.length; c++ ) { final double original_fraction = priorClassDist[c-1]; final double oversampled_fraction = modelClassDist[c-1]; assert(!Double.isNaN(scored[c])); if (original_fraction != 0 && oversampled_fraction != 0) scored[c] *= original_fraction / oversampled_fraction; probsum += scored[c]; } if (probsum>0) for (int i=1;i<scored.length;++i) scored[i] /= probsum; return scored; } /** * Sample out-of-bag rows with given rate with help of given sampler. * It returns array of sampled rows. The first element of array contains a number * of sampled rows. The returned array can be larger than number of returned sampled * elements. * * @param nrows number of rows to sample from. * @param rate sampling rate * @param sampler random "dice" * @return an array contains numbers of sampled rows. The first element holds a number of sampled rows. The array length * can be greater than number of sampled rows. */ public static int[] sampleOOBRows(int nrows, float rate, Random sampler) { return sampleOOBRows(nrows, rate, sampler, new int[2+Math.round((1f-rate)*nrows*1.2f+0.5f)]); } /** * In-situ version of {@link #sampleOOBRows(int, float, Random)}. * * @param oob an initial array to hold sampled rows. Can be internally reallocated. * @return an array containing sampled rows. * * @see #sampleOOBRows(int, float, Random) */ public static int[] sampleOOBRows(int nrows, float rate, Random sampler, int[] oob) { int oobcnt = 0; // Number of oob rows Arrays.fill(oob, 0); for(int row = 0; row < nrows; row++) { if (sampler.nextFloat() >= rate) { // it is out-of-bag row oob[1+oobcnt++] = row; if (1+oobcnt>=oob.length) oob = Arrays.copyOf(oob, Math.round(1.2f*nrows+0.5f)+2); } } oob[0] = oobcnt; return oob; } }
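To make the prior correction in correctProbabilities concrete, here is a hedged, self-contained sketch of the same rescale-and-renormalize step on a toy three-class prediction; the array values are invented for illustration, and index 0 is the predicted-label placeholder exactly as in the method above.

import java.util.Arrays;

public class PriorCorrectionSketch {
  /** Rescale class probabilities (indices 1..n) from the model's class distribution back to the prior. */
  static float[] correct(float[] scored, float[] prior, float[] model) {
    double sum = 0;
    for (int c = 1; c < scored.length; c++) {
      if (prior[c - 1] != 0 && model[c - 1] != 0)
        scored[c] *= prior[c - 1] / model[c - 1];              // undo the oversampling of class c-1
      sum += scored[c];
    }
    if (sum > 0)
      for (int c = 1; c < scored.length; c++) scored[c] /= sum; // renormalize so probabilities sum to 1
    return scored;
  }

  public static void main(String[] args) {
    float[] scored = { 0f, 0.5f, 0.3f, 0.2f };                 // index 0 is the predicted-label slot
    float[] prior  = { 0.8f, 0.1f, 0.1f };                     // original class frequencies
    float[] model  = { 0.34f, 0.33f, 0.33f };                  // balanced distribution used for training
    System.out.println(Arrays.toString(correct(scored, prior, model)));
  }
}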
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/OSUtils.java
package water.util; import java.lang.management.ManagementFactory; import javax.management.MBeanServer; import javax.management.ObjectName; public class OSUtils { /** Safe call to obtain the size of total physical memory. * * <p>It is platform dependent and returns the size of the machine's physical * memory in bytes.</p> * * @return total size of machine physical memory in bytes, or -1 if the attribute is not available. */ public static long getTotalPhysicalMemory() { long memory = -1; try { MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer(); Object attribute = mBeanServer.getAttribute(new ObjectName("java.lang","type","OperatingSystem"), "TotalPhysicalMemorySize"); return (Long) attribute; } catch (Throwable e) { e.printStackTrace(); } return memory; } }
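A minimal usage sketch, assuming the class above is on the classpath; the gigabyte conversion and the printing are illustrative only.

public class PhysicalMemoryDemo {
  public static void main(String[] args) {
    long bytes = water.util.OSUtils.getTotalPhysicalMemory();
    if (bytes < 0) {
      System.out.println("TotalPhysicalMemorySize attribute not available on this JVM/OS");
    } else {
      System.out.printf("Physical memory: %.1f GB%n", bytes / (1024.0 * 1024.0 * 1024.0));
    }
  }
}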
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/ParamUtils.java
package water.util; import java.util.HashSet; import java.util.Set; import water.api.DocGen.FieldDoc; import water.api.ParamImportance; /** A helper class providing queries over Iced object parameters. */ public class ParamUtils { /** * Names of the model parameters which will always be shown in a short description of * the model (e.g., for a tree model it would include ntrees and depth). */ public static Set<String> getCriticalParamNames(FieldDoc[] doc) { return getParamNames(doc, ParamImportance.CRITICAL); } /** * Names of the model parameters which will also be shown in a longer description of * the model (e.g., learning rate). */ public static Set<String> getSecondaryParamNames(FieldDoc[] doc) { return getParamNames(doc, ParamImportance.SECONDARY); } /** * Names of the model parameters which will be shown only in an expert view of * the model (e.g., for Deep Learning it would include initial_weight_scale). */ public static Set<String> getExpertParamNames(FieldDoc[] doc) { return getParamNames(doc, ParamImportance.EXPERT); } public static Set<String> getParamNames(FieldDoc[] doc, ParamImportance filter) { HashSet<String> r = new HashSet<String>(); for (FieldDoc d : doc) if (d.importance()==filter) r.add(d.name()); return r; } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/ProfileCollectorTask.java
package water.util; import water.DRemoteTask; import water.H2O; import water.Iced; import water.api.DocGen; import water.api.Request; import java.util.*; import java.util.Map.Entry; public class ProfileCollectorTask extends DRemoteTask<ProfileCollectorTask> { public ProfileCollectorTask(int stack_depth) { _stack_depth = stack_depth; } public static class NodeProfile extends Iced { static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code. NodeProfile(int len) { stacktraces = new String[len]; counts = new int[len]; } @Request.API(help="Stack traces") public String[] stacktraces; @Request.API(help="Stack trace counts") public int[] counts; } public NodeProfile[] _result; public final int _stack_depth; @Override public void reduce(ProfileCollectorTask that) { if( _result == null ) _result = that._result; else for (int i=0; i<_result.length; ++i) if (_result[i] == null) _result[i] = that._result[i]; } @Override public void lcompute() { int idx = H2O.SELF.index(); _result = new NodeProfile[H2O.CLOUD.size()]; Map<String, Integer> countedStackTraces = new HashMap<String, Integer>(); final int repeats = 100; for (int i=0; i<repeats; ++i) { Map<Thread, StackTraceElement[]> allStackTraces = Thread.getAllStackTraces(); for (Entry<Thread, StackTraceElement[]> el : allStackTraces.entrySet()) { StringBuilder sb = new StringBuilder(); int j=0; for (StackTraceElement ste : el.getValue()) { String val = ste.toString(); // filter out unimportant stuff if( j==0 && ( val.equals("sun.misc.Unsafe.park(Native Method)") || val.equals("java.lang.Object.wait(Native Method)") || val.equals("java.lang.Thread.sleep(Native Method)") || val.equals("java.lang.Thread.yield(Native Method)") || val.equals("java.net.PlainSocketImpl.socketAccept(Native Method)") || val.equals("sun.nio.ch.ServerSocketChannelImpl.accept0(Native Method)") || val.equals("sun.nio.ch.DatagramChannelImpl.receive0(Native Method)") || val.equals("java.lang.Thread.dumpThreads(Native Method)") ) ) { break; } sb.append(ste.toString()); sb.append("\n"); j++; if (j==_stack_depth) break; } String st = sb.toString(); boolean found = false; for (Entry<String, Integer> entry : countedStackTraces.entrySet()) { if (entry.getKey().equals(st)) { entry.setValue(entry.getValue() + 1); found = true; break; } } if (!found) countedStackTraces.put(st, 1); } try { Thread.sleep(1); } catch (InterruptedException e) { e.printStackTrace(); } } int i=0; _result[idx] = new NodeProfile(countedStackTraces.size()); for (Entry<String, Integer> entry : countedStackTraces.entrySet()) { _result[idx].stacktraces[i] = entry.getKey(); _result[idx].counts[i] = entry.getValue(); i++; } // sort it Map<Integer, String> sorted = new TreeMap<Integer, String>(Collections.reverseOrder()); for (int j=0; j<_result[idx].counts.length; ++j) { if (_result[idx].stacktraces[j] != null && _result[idx].stacktraces[j].length() > 0) sorted.put(_result[idx].counts[j], _result[idx].stacktraces[j]); } // overwrite results String[] sorted_stacktraces = new String[sorted.entrySet().size()]; int[] sorted_counts = new int[sorted.entrySet().size()]; i=0; for (Map.Entry<Integer, String> e : sorted.entrySet()) { sorted_stacktraces[i] = e.getValue(); sorted_counts[i] = e.getKey(); i++; } _result[idx].stacktraces = sorted_stacktraces; _result[idx].counts = sorted_counts; tryComplete(); } @Override public byte priority() { return H2O.GUI_PRIORITY; } }
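The sampling idea above - poll every thread's stack trace in a loop and count how often each truncated trace shows up - can be demonstrated outside the DRemoteTask machinery. A single-JVM sketch under assumed depth and repeat settings (both hypothetical), without the idle-frame filtering of the original:

import java.util.HashMap;
import java.util.Map;

public class StackSamplingSketch {
  public static void main(String[] args) throws InterruptedException {
    final int depth = 5, repeats = 100;
    Map<String, Integer> counts = new HashMap<String, Integer>();
    for (int r = 0; r < repeats; r++) {
      for (StackTraceElement[] trace : Thread.getAllStackTraces().values()) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < Math.min(depth, trace.length); i++)
          sb.append(trace[i]).append('\n');                    // keep only the top 'depth' frames
        String key = sb.toString();
        Integer c = counts.get(key);
        counts.put(key, c == null ? 1 : c + 1);                // count identical truncated traces
      }
      Thread.sleep(1);                                         // sampling interval, as in the task above
    }
    for (Map.Entry<String, Integer> e : counts.entrySet())
      if (e.getValue() > 1) System.out.println(e.getValue() + "x\n" + e.getKey());
  }
}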
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/RIStream.java
package water.util; import java.io.IOException; import java.io.InputStream; import water.Job.ProgressMonitor; import com.google.common.base.Throwables; public abstract class RIStream extends InputStream { InputStream _is; ProgressMonitor _pmon; public final int _retries = 5; String [] _bk; private long _off; boolean _knownSize; long _expectedSz; protected RIStream( long off, ProgressMonitor pmon){ _off = off; } public final long off(){return _off;} public final long expectedSz(){ return _knownSize?_expectedSz:-1; } public void setExpectedSz(long sz){ _knownSize = true; _expectedSz = sz; } public final void open(){ assert _is == null; try{ _is = open(_off); } catch(IOException e){ throw new RuntimeException(e); } } protected abstract InputStream open(long offset) throws IOException; public void closeQuietly(){ try{close();} catch(Exception e){} // ignore any errors } private void try2Recover(int attempt, IOException e) { if(attempt == _retries) Throwables.propagate(e); Log.warn("[H2OS3InputStream] Attempt("+attempt + ") to recover from " + e.getMessage() + "), off = " + _off); try{_is.close();}catch(IOException ex){} _is = null; if(attempt > 0) try {Thread.sleep(256 << attempt);}catch(InterruptedException ex){} open(); return; } private void updateOffset(int off) { if(_knownSize)assert (off + _off) <= _expectedSz; _off += off; } @Override public boolean markSupported(){ return false; } @Override public void mark(int readLimit){throw new UnsupportedOperationException();} @Override public void reset(){throw new UnsupportedOperationException();} private void checkEof() throws IOException { if(_knownSize && _off < _expectedSz) throw new IOException("premature end of file reported, expected " + _expectedSz + " bytes, but got eof after " + _off + " bytes"); } @Override public final int available() throws IOException { int attempts = 0; while(true){ try { int res = _is.available(); if(res == 0) checkEof(); return _is.available(); } catch (IOException e) { try2Recover(attempts++,e); } } } @Override public int read() throws IOException { int attempts = 0; while(true){ try{ int res = _is.read(); if(res == -1) checkEof(); if(res != -1){ updateOffset(1); if(_pmon != null)_pmon.update(1); } return res; }catch (IOException e){ try2Recover(attempts++,e); } } } @Override public int read(byte [] b) throws IOException { int attempts = 0; while(true){ try { int res = _is.read(b); if(res == -1) checkEof(); if(res > 0){ updateOffset(res); if(_pmon != null)_pmon.update(res); } return res; } catch(IOException e) { try2Recover(attempts++,e); } } } @Override public int read(byte [] b, int off, int len) throws IOException { int attempts = 0; while(true){ try { int res = _is.read(b,off,len); if(res == -1) checkEof(); if(res > 0){ updateOffset(res); if(_pmon != null)_pmon.update(res); } return res; } catch(IOException e) { try2Recover(attempts++,e); } } } @Override public void close() throws IOException { if(_is != null){ _is.close(); _is = null; } } @Override public long skip(long n) throws IOException { int attempts = 0; while(true){ try{ long res = _is.skip(n); if(res > 0){ updateOffset((int)res); if(_pmon != null)_pmon.update(res); } return res; } catch (IOException e) { try2Recover(attempts++,e); } } } }
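The recovery pattern in RIStream (close the stream, back off exponentially, reopen at the remembered offset, give up after a fixed number of attempts) can be sketched on its own. This is a hedged standalone version; the StreamOpener interface is a hypothetical stand-in for the abstract open(long offset) above.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class RetryReadSketch {
  /** Stand-in for RIStream.open(long): reopen the underlying stream at a byte offset. */
  interface StreamOpener { InputStream open(long offset) throws IOException; }

  /** Read one byte, recovering from transient IOExceptions with exponential backoff. */
  static int readWithRetry(StreamOpener opener, long[] offset, int retries) throws IOException {
    InputStream is = opener.open(offset[0]);
    for (int attempt = 0; ; attempt++) {
      try {
        int b = is.read();
        if (b != -1) offset[0]++;                              // remember how far we got
        return b;
      } catch (IOException e) {
        if (attempt == retries) throw e;                       // out of retries, give up
        try { is.close(); } catch (IOException ignore) { }
        try { Thread.sleep(256L << attempt); } catch (InterruptedException ignore) { } // 256ms, 512ms, ...
        is = opener.open(offset[0]);                           // reopen at the last good offset
      }
    }
  }

  public static void main(String[] args) throws IOException {
    final byte[] data = "hello".getBytes();
    StreamOpener opener = new StreamOpener() {                 // in-memory stand-in for a remote source
      public InputStream open(long off) { return new ByteArrayInputStream(data, (int) off, data.length - (int) off); }
    };
    long[] off = { 0 };
    for (int b; (b = readWithRetry(opener, off, 5)) != -1; ) System.out.print((char) b);
    System.out.println();
  }
}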
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/RString.java
package water.util; import com.google.common.collect.ArrayListMultimap; import dontweave.gson.*; import java.io.IOException; import java.net.URLEncoder; import java.util.List; import java.util.Map.Entry; import java.util.NoSuchElementException; import water.Key; /** * List that has labels to it (something like copyable iterators) and some very * basic functionality for it. * * Since it is not a public class only the things we require are filled in. The * labels do not expect or deal with improper use, so make sure you know what * you are doing when using directly this class. */ class LabelledStringList { // Inner item of the list, single linked private static class Item { String value; Item next; Item(String value, Item next) { this.value = value; this.next = next; } } // Label to the list, which acts as a restricted form of an iterator. Notably // a label can be used to add items in the middle of the list and also to // delete all items in between two labels. public class Label { // element before the label Item _prev; // Creates the label from given inner list item so that the label points // right after it. If null, label points at the very beginnig of the list. Label(Item prev) { _prev = prev; } // Creates a new copy of the label that points to the same place @Override public Label clone() { return new Label(_prev); } // Inserts new string after the label public void insert(String value) { if( _prev == null ) { _begin = new Item(value, _begin); } else { _prev.next = new Item(value, _prev.next); } ++_noOfElements; _length += value.length(); } // Inserts new string after the label and then advances the label. Thus in // theory inserting before the label. public void insertAndAdvance(String value) { insert(value); if( _prev == null ) { _prev = _begin; } else { _prev = _prev.next; } } // Removes the element after the label. public void remove() throws NoSuchElementException { if( _prev == null ) { if( _begin == null ) { throw new NoSuchElementException(); } _length -= _begin.value.length(); _begin = _begin.next; } else { if( _prev.next == null ) { throw new NoSuchElementException(); } _length -= _prev.next.value.length(); _prev.next = _prev.next.next; } --_noOfElements; } // Removes all elements between the label and the other label. The other // label must come after the first label, otherwise everything after the // label will be deleted. public void removeTill(Label other) { if( _prev == null ) { if( other._prev == null ) { return; } while( ((_begin != null) && (_begin.next != other._prev.next)) ) { _length -= _begin.value.length(); _begin = _begin.next; --_noOfElements; } } else { if( other._prev == null ) { clear(); _prev = null; } else { Item end = other._prev.next; while( (_prev.next != null) && (_prev.next != end) ) { remove(); } } } other._prev = _prev; } } // first item private Item _begin; // length in characters of the total stored string private int _length; // number of String elemets stored private int _noOfElements; // Creates an empty string list public LabelledStringList() { _length = 0; _noOfElements = 0; } // Returns a label to the first item public Label begin() { return new Label(null); } // Returns the number of elements stored in the list public int length() { return _noOfElements; } // Clears all elements in the list (all labels should be cleared by the // user when calling this method). 
public void clear() { _begin = null; _length = 0; _noOfElements = 0; } // Concatenates all parts of the string and returns them as single string @Override public String toString() { StringBuilder s = new StringBuilder(_length); Item i = _begin; while( i != null ) { s.append(i.value.toString()); i = i.next; } return s.toString(); } } /** * A replaceable string that allows very easy and simple replacements. * * %placeholder is normally inserted * * %$placeholder is inserted in URL encoding for UTF-8 charset. This should be * used for all hrefs. * */ public class RString { // A placeholder information with replcement group and start and end labels. private static class Placeholder { LabelledStringList.Label start; LabelledStringList.Label end; RString group; // Creates new placeholder public Placeholder(LabelledStringList.Label start, LabelledStringList.Label end) { this.start = start; this.end = end; this.group = null; } // Creates new placeholder for replacement group public Placeholder(LabelledStringList.Label start, LabelledStringList.Label end, String from) { this.start = start; this.end = end; this.group = new RString(from, this); } } // Placeholders ArrayListMultimap<String, Placeholder> _placeholders; // Parts of the final string (replacements and originals together). LabelledStringList _parts; // Parent placeholder if the RString is a replacement group. Placeholder _parent; // Passes only valid placeholder name characters static private boolean isIdentChar(char x) { return (x == '$') || ((x >= 'a') && (x <= 'z')) || ((x >= 'A') && (x <= 'Z')) || ((x >= '0') && (x <= '9')) || (x == '_'); } // Creates a string that is itself a replacement group. private RString(final String from, Placeholder parent) { this(from); _parent = parent; } // Creates the RString from given normal string. The placeholders must begin // with % sign and if the placeholder name is followed by { }, the placeholder // is treated as a replacement group. Replacement groups cannot be tested. // Only letters, numbers and underscore can form a placeholder name. In the // constructor the string is parsed into parts and placeholders so that all // replacements in the future are very quick (hashmap lookup in fact). public RString(final String from) { _parts = new LabelledStringList(); _placeholders = ArrayListMultimap.create(); LabelledStringList.Label cur = _parts.begin(); int start = 0; int end = 0; while( true ) { start = from.indexOf("%", end); if( start == -1 ) { cur.insertAndAdvance(from.substring(end, from.length())); break; } ++start; if( start == from.length() ) { throw new ArrayIndexOutOfBoundsException(); } if( from.charAt(start) == '%' ) { cur.insertAndAdvance(from.substring(end, start)); end = start + 1; } else { cur.insertAndAdvance(from.substring(end, start - 1)); end = start; while( (end < from.length()) && (isIdentChar(from.charAt(end))) ) { ++end; } String pname = from.substring(start, end); if( (end == from.length()) || (from.charAt(end) != '{') ) { // it is a normal placeholder _placeholders.put(pname, new Placeholder(cur.clone(), cur.clone())); } else { // it is another RString start = end + 1; end = from.indexOf("}", end); if( end == -1 ) { throw new ArrayIndexOutOfBoundsException("Missing } after replacement group"); } _placeholders.put(pname, new Placeholder(cur.clone(), cur.clone(), from.substring(start, end))); ++end; } } } } // Returns the sstring with all replaced material. 
@Override public String toString() { return _parts.toString(); } // Removes all replacements from the string (keeps the placeholders so that // they can be used again. public void clear() { for( Placeholder p : _placeholders.values() ) { p.start.removeTill(p.end); } } public void replace(JsonObject json) { for(Entry<String, JsonElement> obj : json.entrySet()) { JsonElement v = obj.getValue(); if( v.isJsonPrimitive() && ((JsonPrimitive)v).isString() ) { replace(obj.getKey(), v.getAsString()); } else if( v.isJsonArray() ) { for( JsonElement e : (JsonArray)v ) { assert e instanceof JsonObject; RString sub = restartGroup(obj.getKey()); sub.replace((JsonObject) e); sub.append(); } } else { replace(obj.getKey(), v); } } } public void replace(String what, Key key) { replace(what, key.user_allowed() ? key.toString() : "<code>"+key.toString()+"</code>"); } // Replaces the given placeholder with an object. On a single placeholder, // multiple replaces can be called in which case they are appended one after // another in order. public void replace(String what, Object with) { if (what.charAt(0)=='$') throw new RuntimeException("$ is now control char that denotes URL encoding!"); for (Placeholder p : _placeholders.get(what)) p.end.insertAndAdvance(with.toString()); for (Placeholder p : _placeholders.get("$"+what)) try { p.end.insertAndAdvance(URLEncoder.encode(with.toString(),"UTF-8")); } catch (IOException e) { p.end.insertAndAdvance(e.toString()); } } // Returns a replacement group of the given name and clears it so that it // can be filled again. public RString restartGroup(String what) { List<Placeholder> all = _placeholders.get(what); assert all.size() == 1; Placeholder result = all.get(0); if( result.group == null ) { throw new NoSuchElementException("Element " + what + " is not a group."); } result.group.clear(); return result.group; } // If the RString itself is a replacement group, adds its contents to the // placeholder. public void append() { if( _parent == null ) { throw new UnsupportedOperationException("Cannot append if no parent is specified."); } _parent.end.insertAndAdvance(toString()); } }
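A small usage sketch of the template class above, assuming it is on the classpath; the template strings and values are made up. A %name placeholder is replaced verbatim, %$name is inserted URL-encoded, and %group{...} defines a replacement group that can be filled and appended repeatedly.

public class RStringDemo {
  public static void main(String[] args) {
    water.util.RString link = new water.util.RString("<a href='/view?key=%$key'>%label</a>");
    link.replace("key", "my key/with spaces");   // inserted URL-encoded because of the $ form
    link.replace("label", "Inspect");            // inserted as-is
    System.out.println(link);

    // A replacement group: the body inside { } is filled and appended once per iteration.
    water.util.RString list = new water.util.RString("<ul>%row{<li>%item</li>}</ul>");
    for (String s : new String[]{ "a", "b", "c" }) {
      water.util.RString row = list.restartGroup("row");
      row.replace("item", s);
      row.append();
    }
    System.out.println(list);
  }
}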
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/RemoveAllKeysTask.java
package water.util; import water.*; public class RemoveAllKeysTask extends DRemoteTask { public RemoveAllKeysTask() {} @Override public void lcompute() { int keysetSize = H2O.localKeySet().size(); int numNodes = H2O.CLOUD._memary.length; int nodeIdx = H2O.SELF.index(); Log.info("Removing "+keysetSize+" keys on this node; nodeIdx("+nodeIdx+") numNodes("+numNodes+")"); // Now remove all keys. Futures fs = new Futures(); for( Key key : H2O.localKeySet() ) DKV.remove(key, fs); fs.blockForPending(); Log.info("Keys remaining: "+H2O.store_size()); tryComplete(); } @Override public void reduce(DRemoteTask drt) { } @Override public byte priority() { return H2O.GUI_PRIORITY; } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/SB.java
package water.util; // Tight/tiny StringBuilder wrapper. // Short short names on purpose; so they don't obscure the printing. // Can't believe this wasn't done long long ago. public class SB { public final StringBuilder _sb; private int _indent = 0; public SB( ) { _sb = new StringBuilder( ); } public SB(String s) { _sb = new StringBuilder(s); } public SB ps( String s ) { _sb.append("\""); pj(s); _sb.append("\""); return this; } public SB p( String s ) { _sb.append(s); return this; } public SB p( float s ) { if( Float.isNaN(s) ) _sb.append( "Float.NaN"); else _sb.append(s); return this; } public SB p( double s ) { if( Double.isNaN(s) ) _sb.append("Double.NaN"); else _sb.append(s); return this; } public SB p( char s ) { _sb.append(s); return this; } public SB p( int s ) { _sb.append(s); return this; } // Not spelled "p" on purpose: too easy to accidentally say "p(1.0)" and // suddenly call the the autoboxed version. public SB pobj( Object s ) { _sb.append(s.toString()); return this; } public SB i( int d ) { for( int i=0; i<d+_indent; i++ ) p(" "); return this; } public SB i( ) { return i(0); } public SB s() { _sb.append(' '); return this; } // Java specific append of double public SB pj( double s ) { if (Double.isInfinite(s)) _sb.append("Double.").append(s>0? "POSITIVE_INFINITY" : "NEGATIVE_INFINITY"); else if (Double.isNaN(s)) _sb.append("Double.NaN"); else _sb.append(s); return this; } // Java specific append of float public SB pj( float s ) { if (Float.isInfinite(s)) _sb.append("Float.").append(s>0? "POSITIVE_INFINITY" : "NEGATIVE_INFINITY"); else if (Float.isNaN(s)) _sb.append("Float.NaN"); else _sb.append(s).append('f'); return this; } /* Append Java string - escape all " and \ */ public SB pj( String s ) { _sb.append(Utils.escapeJava(s)); return this; } // Increase indentation public SB ii( int i) { _indent += i; return this; } // Decrease indentation public SB di( int i) { _indent -= i; return this; } // Copy indent from given string buffer public SB ci( SB sb) { _indent = sb._indent; return this; } public SB nl( ) { return p('\n'); } // Convert a String[] into a valid Java String initializer public SB toJavaStringInit( String[] ss ) { if (ss==null) return p("null"); p('{'); for( int i=0; i<ss.length-1; i++ ) p('"').pj(ss[i]).p("\","); if( ss.length > 0 ) p('"').pj(ss[ss.length-1]).p('"'); return p('}'); } public SB toJavaStringInit( float[] ss ) { if (ss==null) return p("null"); p('{'); for( int i=0; i<ss.length-1; i++ ) pj(ss[i]).p(','); if( ss.length > 0 ) pj(ss[ss.length-1]); return p('}'); } public SB toJSArray(float[] nums) { p('['); for (int i=0; i<nums.length; i++) { if (i>0) p(','); p(nums[i]); } return p(']'); } public SB toJSArray(String[] ss) { p('['); for (int i=0; i<ss.length; i++) { if (i>0) p(','); p('"').p(ss[i]).p('"'); } return p(']'); } // Mostly a fail, since we should just dump into the same SB. public SB p( SB sb ) { _sb.append(sb._sb); return this; } @Override public String toString() { return _sb.toString(); } }
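A hedged usage sketch of SB, assuming it is available on the classpath; it emits a tiny fragment of Java source, which is what the wrapper is used for in generated model code (the fragment itself is made up).

public class SBDemo {
  public static void main(String[] args) {
    water.util.SB sb = new water.util.SB();
    sb.p("static final String[] NAMES = ").toJavaStringInit(new String[]{ "a", "b" }).p(';').nl();
    sb.ii(2);                                                  // increase indentation for the next lines
    sb.i().p("double d = ").pj(Double.NaN).p(';').nl();        // pj emits Double.NaN, not "NaN"
    sb.i().p("float f = ").pj(1.5f).p(';').nl();               // pj emits 1.5f with the float suffix
    System.out.print(sb);
  }
}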
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/TimelineSnapshot.java
package water.util; import java.net.InetAddress; import java.util.*; import water.*; /** * Wrapper around timeline snapshot. Implements iterator interface (events are * ordered according to send/receive dependencies across the nodes and trivial time * dependencies inside node) * * @author tomas */ public final class TimelineSnapshot implements Iterable<TimelineSnapshot.Event>, Iterator<TimelineSnapshot.Event> { final long[][] _snapshot; final Event[] _events; final HashMap<Event, Event> _edges; final public HashMap<Event, ArrayList<Event>> _sends; final H2O _cloud; boolean _processed; public TimelineSnapshot(H2O cloud, long[][] snapshot) { _cloud = cloud; _snapshot = snapshot; _edges = new HashMap<Event, Event>(); _sends = new HashMap<Event, ArrayList<Event>>(); _events = new Event[snapshot.length]; // DEBUG: print out the event stack as we got it // System.out.println("# of nodes: " + _events.length); // for (int j = 0; j < TimeLine.length(); ++j) { // System.out.print("row# " + j + ":"); // for (int i = 0; i < _events.length; ++i) { // System.out.print(" || " + new Event(i, j)); // } // System.out.println(" ||"); // } for (int i = 0; i < _events.length; ++i) { // For a new Snapshot, most of initial entries are all zeros. Skip them // until we start finding entries... which will be the oldest entries. // The timeline is age-ordered (per-thread, we hope the threads are // fairly consistent) _events[i] = new Event(i, 0); if (_events[i].isEmpty()) { if (!_events[i].next()) _events[i] = null; } if (_events[i] != null) processEvent(_events[i]); assert (_events[i] == null) || (_events[i]._eventIdx < TimeLine.MAX_EVENTS); } // now build the graph (i.e. go through all the events once) for (@SuppressWarnings("unused") Event e : this) ; _processed = true; for (int i = 0; i < _events.length; ++i) { // For a new Snapshot, most of initial entries are all zeros. Skip them // until we start finding entries... which will be the oldest entries. // The timeline is age-ordered (per-thread, we hope the threads are // fairly consistent) _events[i] = new Event(i, 0); if (_events[i].isEmpty()) { if (!_events[i].next()) _events[i] = null; } assert (_events[i] == null) || (_events[i]._eventIdx < TimeLine.MAX_EVENTS); } } // convenience wrapper around event stored in snapshot // contains methods to access event data, move to the next previous event // and to test whether two events form valid sender/receiver pair // // it is also needed to keep track of send/recv dependencies when iterating // over events in timeline public class Event { public final int _nodeId; // Which node/column# in the snapshot final long[] _val; // The column from the snapshot int _eventIdx; // Which row in the snapshot // For send-packets, the column# is the cloud-wide idx of the sender, and // the packet contains the reciever. Vice-versa for received packets, // where the column# is the cloud-wide idx of the receiver, and the packet // contains the sender. H2ONode _packh2o; // The H2O in the packet boolean _blocked; public Event(int nodeId, int eventIdx) { _nodeId = nodeId; _eventIdx = eventIdx; _val = _snapshot[nodeId]; computeH2O(false); } @Override public final int hashCode() { return (_nodeId <<10)^_eventIdx; } @Override public final boolean equals(Object o) { Event e = (Event)o; return _nodeId==e._nodeId && _eventIdx==e._eventIdx; } // (re)compute the correct H2ONode, if the _eventIdx changes. 
private boolean computeH2O(boolean b) { H2ONode h2o = null; if( dataLo() != 0 ) { // Dead/initial packet InetAddress inet = addrPack(); if( !inet.isMulticastAddress() ) { // Is multicast? h2o = H2ONode.intern(inet,portPack()); if( isSend() && h2o == recoH2O() ) // Another multicast indicator: sending to self h2o = null; // Flag as multicast } } _packh2o = h2o; return b; // For flow-coding } public final int send_recv() { return TimeLine.send_recv(_val, _eventIdx); } public final int dropped () { return TimeLine.dropped (_val, _eventIdx); } public final boolean isSend() { return send_recv() == 0; } public final boolean isRecv() { return send_recv() == 1; } public final boolean isDropped() { return dropped() != 0; } public final InetAddress addrPack() { return TimeLine.inet(_val, _eventIdx); } public final long dataLo() { return TimeLine.l0(_val, _eventIdx); } public final long dataHi() { return TimeLine.l8(_val, _eventIdx); } public final long ns() { return TimeLine.ns(_val, _eventIdx); } public final boolean isTCP(){return (ns() & 4) != 0;} public final long ms() { return TimeLine.ms(_val, _eventIdx) + recoH2O()._heartbeat._jvm_boot_msec; } public H2ONode packH2O() { return _packh2o; } // H2O in packet public H2ONode recoH2O() { return _cloud._memary[_nodeId]; } // H2O recording packet public final int portPack() { int i = (int) dataLo(); // 1st byte is UDP type, so shift right by 8. // Next 2 bytes are UDP port #, so mask by 0xFFFF. return ((0xFFFF) & (i >> 8)); } public final String addrString() { return _packh2o==null ? "multicast" : _packh2o.toString(); } public final String ioflavor() { int flavor = is_io(); return flavor == -1 ? (isTCP()?"TCP":"UDP") : Value.nameOfPersist(flavor); } public final int is_io() { int udp_type = (int) (dataLo() & 0xff); // First byte is UDP packet type return UDP.udp.i_o.ordinal() == udp_type ? (int)((dataLo()>>24)&0xFF) : -1; } // ms doing I/O public final int ms_io() { return (int)(dataLo()>>32); } public final int size_io() { return (int)dataHi(); } public String toString() { int udp_type = (int) (dataLo() & 0xff); // First byte is UDP packet type UDP.udp udpType = UDP.getUdp(udp_type); String operation = isSend() ? " SEND " : " RECV "; String host1 = addrString(); String host2 = recoH2O().toString(); String networkPart = isSend() ? (host2 + " -> " + host1) : (host1 + " -> " + host2); return "Node(" + _nodeId + ": " + ns() + ") " + udpType.toString() + operation + networkPart + (isDropped()?" DROPPED ":"") + ", data = '" + Long.toHexString(this.dataLo()) + ',' + Long.toHexString(this.dataHi()) + "'"; } /** * Check if two events form valid sender/receiver pair. * * Two events are valid sender/receiver pair iff the ports, adresses and * payload match. 
* * @param other * @return true iff the two events form valid sender/receiver pair */ final boolean match(Event ev) { // check we're matching send and receive if (send_recv() == ev.send_recv()) return false; // compare the packet payload matches long myl0 = dataLo(); long evl0 = ev.dataLo(); int my_udp_type = (int) (myl0 & 0xff); // first byte is udp type int ev_udp_type = (int) (evl0 & 0xff); // first byte is udp type if (my_udp_type != ev_udp_type) return false; UDP.udp e = UDP.getUdp(my_udp_type); switch (e) { case rebooted: case timeline: // compare only first 3 bytes here (udp type and port), // but port# is checked below as part of address break; case ack: case ackack: case fetchack: case exec: case heartbeat: // compare 3 ctrl bytes + 4 bytes task # // if ((myl0 & 0xFFFFFFFFFFFFFFl) != (evl0 & 0xFFFFFFFFFFFFFFl)) if( (int)(myl0>>24) != (int)(evl0>>24)) return false; break; case i_o: // Shows up as I/O-completing recorded packets return false; default: throw new RuntimeException("unexpected udp packet type " + e.toString()); } // Check that port numbers are compatible. Really check that the // H2ONode's are compatible. The port#'s got flipped during recording to // allow this check (and a null _packh2o is a multicast). if( _packh2o!=null && _packh2o.index()!=ev._nodeId ) return false; if( ev._packh2o!=null && ev._packh2o.index()!= _nodeId ) return false; return true; } public final boolean isEmpty() { return (_eventIdx < TimeLine.length()) ? TimeLine.isEmpty(_val, _eventIdx) : false; } public final Event clone() { return new Event(_nodeId, _eventIdx); } boolean prev(int minIdx) { int min = Math.max(minIdx, -1); if (_eventIdx <= minIdx) return false; while (--_eventIdx > min) if (!isEmpty()) return computeH2O(true); return computeH2O(false); } boolean prev() { return prev(-1); } Event previousEvent(int minIdx) { Event res = new Event(_nodeId, _eventIdx); return (res.prev(minIdx)) ? res : null; } Event previousEvent() { return previousEvent(-1); } boolean next(int maxIdx) { int max = Math.min(maxIdx, TimeLine.length()); if (_eventIdx >= max) return false; while (++_eventIdx < max) if (!isEmpty()) return computeH2O(true); return computeH2O(false); } boolean next() { return next(TimeLine.length()); } Event nextEvent(int maxIdx) { Event res = new Event(_nodeId, _eventIdx); return (res.next(maxIdx)) ? res : null; } Event nextEvent() { return nextEvent(TimeLine.length()); } /** * Used to determine ordering of events not bound by any dependency. * * Events compared according to following rules: * Receives go before sends. Since we are only here with unbound events, * unbound receives means their sender has already appeared and they * should go adjacent to their sender. 
* For two sends, pick the one with receives with smallest timestamp (ms) * otherwise pick the sender with smallest timestamp (ms) * * @param ev other Event to compare * @return */ public final int compareTo(Event ev) { if( ev == null ) return -1; if( ev == this ) return 0; if( ev.equals(this) ) return 0; int res = ev.send_recv() - send_recv(); // recvs should go before sends if( res != 0 ) return res; if (isSend()) { // compare by the time of receivers long myMinMs = Long.MAX_VALUE; long evMinMs = Long.MAX_VALUE; ArrayList<Event> myRecvs = _sends.get(this); ArrayList<Event> evRecvs = _sends.get(ev ); for (Event e : myRecvs) if (e.ms() < myMinMs) myMinMs = e.ms(); for (Event e : evRecvs) if (e.ms() < evMinMs) evMinMs = e.ms(); res = (int) (myMinMs - evMinMs); if( myMinMs == Long.MAX_VALUE && evMinMs != Long.MAX_VALUE ) res = -1; if( myMinMs != Long.MAX_VALUE && evMinMs == Long.MAX_VALUE ) res = 1; } if (res == 0) res = (int) (ms() - ev.ms()); if( res == 0 ) res = (int) (ns() - ev.ns()); return res; } } /** * Check whether two events can be put together in sender/recv relationship. * * Events must match, also each sender can have only one receiver per node. * * @param senderCnd * @param recvCnd * @return */ private boolean isSenderRecvPair(Event senderCnd, Event recvCnd) { if (senderCnd.isSend() && recvCnd.isRecv() && senderCnd.match(recvCnd)) { ArrayList<Event> recvs = _sends.get(senderCnd); if (recvs.isEmpty() || senderCnd.packH2O()==null ) { for (Event e : recvs) if (e._nodeId == recvCnd._nodeId) return false; return true; } } return false; } /** * Process new event. For sender, check if there are any blocked receives * waiting for this send. For receiver, try to find matching sender, otherwise * block. * * @param idx */ void processEvent(Event e) { assert !_processed; // Event e = _events[idx]; if (e.isSend()) { _sends.put(e, new ArrayList<TimelineSnapshot.Event>()); for (Event otherE : _events) { if ((otherE != null) && (otherE != e) && (!otherE.equals(e)) && otherE._blocked && otherE.match(e)) { _edges.put(otherE, e); _sends.get(e).add(otherE); otherE._blocked = false; } } } else { // look for matching send, otherwise set _blocked assert !_edges.containsKey(e); int senderIdx = e.packH2O().index(); if (senderIdx < 0) { // binary search did not find member, should not happen? // no possible sender - return and do not block Log.warn("no sender found! port = " + e.portPack() + ", ip = " + e.addrPack().toString()); return; } Event senderCnd = _events[senderIdx]; if (senderCnd != null) { if (isSenderRecvPair(senderCnd, e)) { _edges.put(e, senderCnd.clone()); _sends.get(senderCnd).add(e); return; } senderCnd = senderCnd.clone(); while (senderCnd.prev()) { if (isSenderRecvPair(senderCnd, e)) { _edges.put(e, senderCnd); _sends.get(senderCnd).add(e); return; } } } e._blocked = true; } assert (e == null) || (e._eventIdx < TimeLine.MAX_EVENTS); } @Override public Iterator<TimelineSnapshot.Event> iterator() { return this; } /** * Just check if there is any non null non-issued event. 
*/ @Override public boolean hasNext() { for (int i = 0; i < _events.length; ++i) if (_events[i] != null && (!_events[i].isEmpty() || _events[i].next())) { assert (_events[i] == null) || ((_events[i]._eventIdx < TimeLine.MAX_EVENTS) && !_events[i].isEmpty()); return true; } else { assert (_events[i] == null) || ((_events[i]._eventIdx < TimeLine.MAX_EVENTS) && !_events[i].isEmpty()); _events[i] = null; } return false; } public Event getDependency(Event e) { return _edges.get(e); } /** * Get the next event of the timeline according to the ordering. Ordering is * performed in this method. Basically there are n ordered stream of events * with possible dependenencies caused by send/rcv relation. * * Sends are always eligible to be scheduled. Receives are eligible only if * their matching send was already issued. In situation when current events of * all streams are blocked (should not happen!) the oldest one is unblocked * and issued. * * Out of all eligible events, the smallest one (according to Event.compareTo) * is picked. */ @Override public TimelineSnapshot.Event next() { if (!hasNext()) throw new NoSuchElementException(); int selectedIdx = -1; for (int i = 0; i < _events.length; ++i) { if (_events[i] == null || _events[i]._blocked) continue; if (_events[i].isRecv()) { // check edge dependency Event send = _edges.get(_events[i]); if ((send != null) && (_events[send._nodeId] != null) && send._eventIdx >= _events[send._nodeId]._eventIdx) continue; } selectedIdx = ((selectedIdx == -1) || _events[i] .compareTo(_events[selectedIdx]) < 0) ? i : selectedIdx; } if (selectedIdx == -1) { // we did not select anything -> all event streams // must be blocked return the oldest one (assuming // corresponding send was in previous snapshot) // System.out.println("*** all blocked ***"); selectedIdx = 0; long selectedNs = (_events[selectedIdx] != null) ? _events[selectedIdx] .ns() : Long.MAX_VALUE; long selectedMs = (_events[selectedIdx] != null) ? _events[selectedIdx] .ms() : Long.MAX_VALUE; for (int i = 1; i < _events.length; ++i) { if (_events[i] == null) continue; if ((_events[i].ms() < selectedMs) && (_events[i].ns() < selectedNs)) { selectedIdx = i; selectedNs = _events[i].ns(); selectedMs = _events[i].ms(); } } } assert (selectedIdx != -1); assert (_events[selectedIdx] != null) && ((_events[selectedIdx]._eventIdx < TimeLine.MAX_EVENTS) && !_events[selectedIdx] .isEmpty()); Event res = _events[selectedIdx]; _events[selectedIdx] = _events[selectedIdx].nextEvent(); if (_events[selectedIdx] != null && !_processed) processEvent(_events[selectedIdx]); // DEBUG // if (_processed) // if (res.isRecv()) // System.out.println("# " + res + " PAIRED WITH " // + (_edges.containsKey(res) ? _edges.get(res) : "*** NONE ****")); // else // System.out.println("# " + res + " receivers: " // + _sends.get(res).toString()); return res; } @Override public void remove() { throw new UnsupportedOperationException(); } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/UIUtils.java
package water.util; import water.Key; import water.Model; import water.api.RequestStatics.RequestType; public class UIUtils { /** Return the query link to this page */ public static <T> String qlink(Class<T> page, Key k, String content) { return qlink(page, "source", k, content ); } public static <T> String qlink(Class<T> page, String keyPlaceholder, Key k, String content) { return link(page, RequestType.query, keyPlaceholder, k.toString(), content); } public static <T> String link(Class<T> page, String keyPlaceholder, String k, String content) { return link(page, RequestType.www, keyPlaceholder, k, content); } public static <T> String link(Class<T> page, RequestType rtype, String keyPlaceholder, String k, String content) { RString rs = new RString("<a href='/2/%page%rtype?%keyPlaceholder=%$key'>%content</a>"); rs.replace("keyPlaceholder", keyPlaceholder); rs.replace("rtype", rtype._suffix); rs.replace("page", page.getSimpleName()); rs.replace("key", k); rs.replace("content", content); return rs.toString(); } public static <T extends Model> String builderModelLink(Class<T> model, Key source, String response, String content) { return builderModelLink(model, source, response, content, null); } public static <T extends Model> String builderModelLink(Class<T> model, Key source, String response, String content, String onClick) { String name = model.getSimpleName(); name = name.substring(0, name.indexOf("Model")); RString rs = new RString("<a href='/2/%page.query?source=%$source&response=%response' %onclick >%content</a>"); rs.replace("page", name); rs.replace("source", source!=null ? source.toString() : ""); rs.replace("response", response); rs.replace("content", content); rs.replace("onclick", onClick!=null ? "onclick=\""+onClick+"\"" : ""); return rs.toString(); } public static <T extends Model> String builderLink(Class<T> model, Key source, String response, Key checkpoint, String content) { String name = model.getSimpleName(); name = name.substring(0, name.indexOf("Model")); RString rs = new RString("<a href='/2/%page.query?source=%$source&response=%response&checkpoint=%$checkpoint'>%content</a>"); rs.replace("page", name); rs.replace("source", source!=null ? source.toString() : ""); rs.replace("response", response); rs.replace("content", content); rs.replace("checkpoint", checkpoint!=null ? checkpoint.toString() : ""); return rs.toString(); } }
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/UnsafeUtils.java
package water.util; import sun.misc.Unsafe; import water.nbhm.UtilUnsafe; public class UnsafeUtils { private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); private static final long _Bbase = _unsafe.arrayBaseOffset(byte[].class); public static int get2 ( byte[] buf, int off ) { return _unsafe.getShort (buf, _Bbase+off); } public static int get4 ( byte[] buf, int off ) { return _unsafe.getInt (buf, _Bbase+off); } public static long get8 ( byte[] buf, int off ) { return _unsafe.getLong (buf, _Bbase+off); } public static float get4f( byte[] buf, int off ) { return _unsafe.getFloat (buf, _Bbase+off); } public static double get8d( byte[] buf, int off ) { return _unsafe.getDouble(buf, _Bbase+off); } public static int set2 (byte[] buf, int off, short x ) {_unsafe.putShort (buf, _Bbase+off, x); return 2;} public static int set4 (byte[] buf, int off, int x ) {_unsafe.putInt (buf, _Bbase+off, x); return 4;} public static int set4f(byte[] buf, int off, float f ) {_unsafe.putFloat (buf, _Bbase+off, f); return 4;} public static int set8 (byte[] buf, int off, long x ) {_unsafe.putLong (buf, _Bbase+off, x); return 8;} public static int set8d(byte[] buf, int off, double x) {_unsafe.putDouble(buf, _Bbase+off, x); return 8;} }
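A small round-trip sketch using the helpers above, assuming they are on the classpath; it packs a few primitives into a byte[] at explicit offsets and reads them back. The buffer layout is made up, and the setters return the number of bytes written, which is used to advance the offset.

public class UnsafePackDemo {
  public static void main(String[] args) {
    byte[] buf = new byte[2 + 4 + 8];                          // short | int | double, back to back
    int off = 0;
    off += water.util.UnsafeUtils.set2 (buf, off, (short) 42);
    off += water.util.UnsafeUtils.set4 (buf, off, 123456);
    off += water.util.UnsafeUtils.set8d(buf, off, 3.14159);
    // Read the fields back from the same offsets.
    System.out.println(water.util.UnsafeUtils.get2 (buf, 0));  // 42
    System.out.println(water.util.UnsafeUtils.get4 (buf, 2));  // 123456
    System.out.println(water.util.UnsafeUtils.get8d(buf, 6));  // 3.14159
  }
}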
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/UserSpecifiedNetwork.java
package water.util; import java.net.InetAddress; import java.util.ArrayList; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * Data structure for holding network info specified by the user on the command line. */ public class UserSpecifiedNetwork { int _o1; int _o2; int _o3; int _o4; int _bits; /** * Create object from user specified data. * @param o1 First octet * @param o2 Second octet * @param o3 Third octet * @param o4 Fourth octet * @param bits Bits on the left to compare */ public UserSpecifiedNetwork(int o1, int o2, int o3, int o4, int bits) { _o1 = o1; _o2 = o2; _o3 = o3; _o4 = o4; _bits = bits; } private boolean oValid(int o) { if (o < 0) return false; if (o > 255) return false; return true; } private boolean valid() { if (! (oValid(_o1))) return false; if (! (oValid(_o2))) return false; if (! (oValid(_o3))) return false; if (! (oValid(_o4))) return false; if (_bits < 0) return false; if (_bits > 32) return false; return true; } /** * Test if an internet address lives on this user specified network. * @param ia Address to test. * @return true if the address is on the network; false otherwise. */ public boolean inetAddressOnNetwork(InetAddress ia) { int i = (_o1 << 24) | (_o2 << 16) | (_o3 << 8) | (_o4 << 0); byte[] barr = ia.getAddress(); if (barr.length != 4) { return false; } int j = (((int)barr[0] & 0xff) << 24) | (((int)barr[1] & 0xff) << 16) | (((int)barr[2] & 0xff) << 8) | (((int)barr[3] & 0xff) << 0); // Do mask math in 64-bit to handle 32-bit wrapping cases. long mask1 = ((long)1 << (32 - _bits)); long mask2 = mask1 - 1; long mask3 = ~mask2; int mask4 = (int) (mask3 & 0xffffffff); if ((i & mask4) == (j & mask4)) { return true; } return false; } public static ArrayList<UserSpecifiedNetwork> calcArrayList(String networkOpt) { ArrayList<UserSpecifiedNetwork> networkList = new ArrayList<UserSpecifiedNetwork>(); if (networkOpt == null) return networkList; String[] networks; if (networkOpt.contains(",")) { networks = networkOpt.split(","); } else { networks = new String[1]; networks[0] = networkOpt; } for (int j = 0; j < networks.length; j++) { String n = networks[j]; Pattern p = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+)/(\\d+)"); Matcher m = p.matcher(n); boolean b = m.matches(); if (! b) { Log.err("network invalid: " + n); return null; } assert (m.groupCount() == 5); int o1 = Integer.parseInt(m.group(1)); int o2 = Integer.parseInt(m.group(2)); int o3 = Integer.parseInt(m.group(3)); int o4 = Integer.parseInt(m.group(4)); int bits = Integer.parseInt(m.group(5)); UserSpecifiedNetwork usn = new UserSpecifiedNetwork(o1, o2, o3, o4, bits); if (! usn.valid()) { Log.err("network invalid: " + n); return null; } networkList.add(usn); } return networkList; } }
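A hedged usage sketch of the CIDR test above, assuming the class is available; the network specs and addresses are invented. calcArrayList parses comma-separated a.b.c.d/bits entries and inetAddressOnNetwork applies the mask comparison shown in the code.

import java.net.InetAddress;
import java.util.ArrayList;

public class NetworkFilterDemo {
  public static void main(String[] args) throws Exception {
    ArrayList<water.util.UserSpecifiedNetwork> nets =
        water.util.UserSpecifiedNetwork.calcArrayList("10.1.0.0/16,192.168.1.0/24");
    for (String ip : new String[]{ "10.1.42.7", "192.168.2.5" }) {
      InetAddress ia = InetAddress.getByName(ip);
      boolean ok = false;
      for (water.util.UserSpecifiedNetwork n : nets) ok |= n.inetAddressOnNetwork(ia);
      System.out.println(ip + " on an allowed network: " + ok); // expected: true, then false
    }
  }
}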
0
java-sources/ai/h2o/h2o-classic/2.8/water
java-sources/ai/h2o/h2o-classic/2.8/water/util/Utils.java
package water.util; import hex.rng.H2ORandomRNG; import hex.rng.H2ORandomRNG.RNGKind; import hex.rng.H2ORandomRNG.RNGType; import hex.rng.MersenneTwisterRNG; import hex.rng.XorShiftRNG; import sun.misc.Unsafe; import water.*; import water.api.DocGen; import water.api.DocGen.FieldDoc; import water.fvec.Chunk; import water.fvec.ParseDataset2.Compression; import water.fvec.Vec; import water.nbhm.UtilUnsafe; import java.io.*; import java.net.Socket; import java.security.SecureRandom; import java.text.DecimalFormat; import java.util.*; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.zip.GZIPInputStream; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; import java.util.zip.ZipInputStream; import static java.lang.Double.isNaN; public class Utils { /** Returns the index of the largest value in the array. * In case of a tie, an the index is selected randomly. */ public static int maxIndex(int[] from, Random rand) { assert rand != null; int result = 0; int maxCount = 0; // count of maximal element for a 1 item reservoir sample for( int i = 1; i < from.length; ++i ) { if( from[i] > from[result] ) { result = i; maxCount = 1; } else if( from[i] == from[result] ) { if( rand.nextInt(++maxCount) == 0 ) result = i; } } return result; } public static int maxIndex(float[] from, Random rand) { assert rand != null; int result = 0; int maxCount = 0; // count of maximal element for a 1 item reservoir sample for( int i = 1; i < from.length; ++i ) { if( from[i] > from[result] ) { result = i; maxCount = 1; } else if( from[i] == from[result] ) { if( rand.nextInt(++maxCount) == 0 ) result = i; } } return result; } public static int maxIndex(int[] from) { int result = 0; for (int i = 1; i<from.length; ++i) if (from[i]>from[result]) result = i; return result; } public static int maxIndex(long[] from) { int result = 0; for (int i = 1; i<from.length; ++i) if (from[i]>from[result]) result = i; return result; } public static int maxIndex(float[] from) { int result = 0; for (int i = 1; i<from.length; ++i) if (from[i]>from[result]) result = i; return result; } public static int minIndex(int[] from) { int result = 0; for (int i = 1; i<from.length; ++i) if (from[i]<from[result]) result = i; return result; } public static int minIndex(float[] from) { int result = 0; for (int i = 1; i<from.length; ++i) if (from[i]<from[result]) result = i; return result; } public static double maxValue(double[] from) { double result = from[0]; for (int i = 1; i<from.length; ++i) if (from[i]>result) result = from[i]; return result; } public static float maxValue(float[] from) { return maxValue(from, 0, from.length); } public static float maxValue(float[] from, int start, int end) { float result = from[start]; for (int i = start+1; i<end; ++i) if (from[i]>result) result = from[i]; return result; } public static double minValue(double[] from) { double result = from[0]; for (int i = 1; i<from.length; ++i) if (from[i]<result) result = from[i]; return result; } public static float minValue(float[] from) { float result = from[0]; for (int i = 1; i<from.length; ++i) if (from[i]<result) result = from[i]; return result; } public static long maxValue(long[] from) { long result = from[0]; for (int i = 1; i<from.length; ++i) if (from[i]>result) result = from[i]; return result; } public static long minValue(long[] from) { long result = from[0]; for (int i = 1; i<from.length; ++i) if (from[i]<result) result = from[i]; return result; } /** * Compare two numbers to see if they are within one ulp of the smaller decade. 
* Order of the arguments does not matter. * * @param a First number * @param b Second number * @return true if a and b are essentially equal, false otherwise. */ public static boolean equalsWithinOneSmallUlp(float a, float b) { if (Float.isInfinite(a) || Float.isInfinite(b) && (a<b || b<a)) return false; float ulp_a = Math.ulp(a); float ulp_b = Math.ulp(b); float small_ulp = Math.min(ulp_a, ulp_b); float absdiff_a_b = Math.abs(a - b); // subtraction order does not matter, due to IEEE 754 spec return absdiff_a_b <= small_ulp; } public static boolean equalsWithinOneSmallUlp(double a, double b) { if (Double.isInfinite(a) || Double.isInfinite(b) && (a<b || b<a)) return false; double ulp_a = Math.ulp(a); double ulp_b = Math.ulp(b); double small_ulp = Math.min(ulp_a, ulp_b); double absdiff_a_b = Math.abs(a - b); // subtraction order does not matter, due to IEEE 754 spec return absdiff_a_b <= small_ulp; } public static boolean compareDoubles(double a, double b) { if( a==b ) return true; if( ( Double.isNaN(a) && !Double.isNaN(b)) || (!Double.isNaN(a) && Double.isNaN(b)) ) return false; if( Double.isInfinite(a) || Double.isInfinite(b) ) return false; return equalsWithinOneSmallUlp(a,b); } public static double lnF(double what) { return (what < 1e-06) ? 0 : what * Math.log(what); } public static String p2d(double d) { return !Double.isNaN(d) ? new DecimalFormat ("0.##" ).format(d) : "nan"; } public static String p5d(double d) { return !Double.isNaN(d) ? new DecimalFormat ("0.#####").format(d) : "nan"; } public static int set4( byte[] buf, int off, int x ) { for( int i=0; i<4; i++ ) buf[i+off] = (byte)(x>>(i<<3)); return 4; } public static int get4( byte[] buf, int off ) { int sum=0; for( int i=0; i<4; i++ ) sum |= (0xff&buf[off+i])<<(i<<3); return sum; } public static int set8d( byte[] buf, int off, double d ) { long x = Double.doubleToLongBits(d); for( int i=0; i<8; i++ ) buf[i+off] = (byte)(x>>(i<<3)); return 8; } public static double get8d( byte[] buf, int off ) { long sum=0; for( int i=0; i<8; i++ ) sum |= ((long)(0xff&buf[off+i]))<<(i<<3); return Double.longBitsToDouble(sum); } public static long sum(final long[] from) { long result = 0; for (long d: from) result += d; return result; } public static int sum(final int[] from) { int result = 0; for (int d: from) result += d; return result; } public static float sum(final float[] from) { float result = 0; for (float d: from) result += d; return result; } public static double sum(final double[] from) { double result = 0; for (double d: from) result += d; return result; } public static float sumSquares(final float[] a) { return sumSquares(a, 0, a.length); } /** * Approximate sumSquares * @param a Array with numbers * @param from starting index (inclusive) * @param to ending index (exclusive) * @return approximate sum of squares based on a sample somewhere in the middle of the array (pos determined by bits of a[0]) */ public static float approxSumSquares(final float[] a, int from, int to) { final int len = to-from; final int samples = Math.max(len / 16, 1); final int offset = from + Math.abs(Float.floatToIntBits(a[0])) % (len-samples); assert(offset+samples <= to); return sumSquares(a, offset, offset + samples) * (float)len / (float)samples; } public static float sumSquares(final float[] a, int from, int to) { float result = 0; final int cols = to-from; final int extra=cols-cols%8; final int multiple = (cols/8)*8-1; float psum1 = 0, psum2 = 0, psum3 = 0, psum4 = 0; float psum5 = 0, psum6 = 0, psum7 = 0, psum8 = 0; for (int c = from; c < from + 
multiple; c += 8) { psum1 += a[c+0]*a[c+0]; psum2 += a[c+1]*a[c+1]; psum3 += a[c+2]*a[c+2]; psum4 += a[c+3]*a[c+3]; psum5 += a[c+4]*a[c+4]; psum6 += a[c+5]*a[c+5]; psum7 += a[c+6]*a[c+6]; psum8 += a[c+7]*a[c+7]; } result += psum1 + psum2 + psum3 + psum4; result += psum5 + psum6 + psum7 + psum8; for (int c = from + extra; c < to; ++c) { result += a[c]*a[c]; } return result; } public static String sampleToString(int[] val, int max) { if (val == null || val.length < max) return Arrays.toString(val); StringBuilder b = new StringBuilder(); b.append('['); max -= 10; int valMax = val.length -1; for (int i = 0; ; i++) { b.append(val[i]); if (i == max) { b.append(", ..."); i = val.length - 10; } if ( i == valMax) { return b.append(']').toString(); } b.append(", "); } } public static String sampleToString(double[] val, int max) { if (val == null || val.length < max) return Arrays.toString(val); StringBuilder b = new StringBuilder(); b.append('['); max -= 10; int valMax = val.length -1; for (int i = 0; ; i++) { b.append(val[i]); if (i == max) { b.append(", ..."); i = val.length - 10; } if ( i == valMax) { return b.append(']').toString(); } b.append(", "); } } /* Always returns a deterministic java.util.Random RNG. * * The determinism is important for re-playing sampling. */ public static Random getDeterRNG(long seed) { return new H2ORandomRNG(seed); } public static void setUsedRNGKind(final RNGKind kind) { switch (kind) { case DETERMINISTIC: setUsedRNGType(RNGType.MersenneTwisterRNG); break; case NON_DETERMINISTIC: setUsedRNGType(RNGType.SecureRNG); break; } } /* Returns the configured random generator */ public static Random getRNG(long... seed) { assert _rngType != null : "Random generator type has to be configured"; switch (_rngType) { case JavaRNG: assert seed.length >= 1; return new H2ORandomRNG(seed[0]); case MersenneTwisterRNG: // do not copy the seeds - use them, and initialize the first two ints by seeds based given argument // the call is locked, and also MersenneTwisterRNG will just copy the seeds into its datastructures assert seed.length == 1; int[] inSeeds = unpackInts(seed); return new MersenneTwisterRNG(inSeeds); case XorShiftRNG: assert seed.length >= 1; return new XorShiftRNG(seed[0]); case SecureRNG: return new SecureRandom(); } throw new IllegalArgumentException("Unknown random generator type: " + _rngType); } private static RNGType _rngType = RNGType.MersenneTwisterRNG; public static void setUsedRNGType(RNGType rngType) { Utils._rngType = rngType; } public static RNGType getUsedRNGType() { return Utils._rngType; } public static RNGKind getUsedRNGKind() { return Utils._rngType.kind(); } /* * Compute entropy value for an array of bytes. * * The returned number represents entropy per bit! * For good long number seed (8bytes seed) it should be in range <2.75,3> (higher is better) * * For large set of bytes (>100) it should be almost 8 (means almost 8 random bits per byte). */ public static float entropy(byte[] f) { int counts[] = new int[256]; float entropy = 0; float total = f.length; for (byte b : f) counts[b+128]++; for (int c : counts) { if (c == 0) continue; float p = c / total; /* Compute entropy per bit in byte. * * To compute entropy per byte compute log with base 256 = log(p)/log(256). */ entropy -= p * Math.log(p)/Math.log(2); } return entropy; } public static int[] unpackInts(long... 
longs) { int len = 2*longs.length; int result[] = new int[len]; int i = 0; for (long l : longs) { result[i++] = (int) (l & 0xffffffffL); result[i++] = (int) (l>>32); } return result; } public static void shuffleArray(long[] a, long seed) { int n = a.length; Random random = getDeterRNG(seed); random.nextInt(); for (int i = 0; i < n; i++) { int change = i + random.nextInt(n - i); swap(a, i, change); } } /** * Extract a shuffled array of integers * @param a input array * @param n number of elements to extract * @param result array to store the results into (will be of size n) * @param seed random number seed * @param startIndex offset into a * @return result */ public static int[] shuffleArray(int[] a, int n, int result[], long seed, int startIndex) { if (n<=0) return result; Random random = getDeterRNG(seed); if (result == null || result.length != n) result = new int[n]; result[0] = a[startIndex]; for (int i = 1; i < n; i++) { int j = random.nextInt(i+1); if (j!=i) result[i] = result[j]; result[j] = a[startIndex+i]; } for (int i = 0; i < n; ++i) assert(Utils.contains(result, a[startIndex+i])); return result; } private static void swap(long[] a, int i, int change) { long helper = a[i]; a[i] = a[change]; a[change] = helper; } private static void swap(int[] a, int i, int change) { int helper = a[i]; a[i] = a[change]; a[change] = helper; } public static void close(Closeable...closeable) { for(Closeable c : closeable) try { if( c != null ) c.close(); } catch( IOException xe ) { } } public static void close(Socket s) { try { if( s != null ) s.close(); } catch( IOException xe ) { } } public static String readConsole() { BufferedReader console = new BufferedReader(new InputStreamReader(System.in)); try { return console.readLine(); } catch( IOException e ) { throw Log.errRTExcept(e); } } public static File writeFile(String content) { try { return writeFile(File.createTempFile("h2o", null), content); } catch( IOException e ) { throw Log.errRTExcept(e); } } public static File writeFile(File file, String content) { FileWriter w = null; try { w = new FileWriter(file); w.write(content); } catch(IOException e) { Log.errRTExcept(e); } finally { close(w); } return file; } public static void writeFileAndClose(File file, InputStream in) { OutputStream out = null; try { out = new FileOutputStream(file); byte[] buffer = new byte[1024]; int len = in.read(buffer); while (len > 0) { out.write(buffer, 0, len); len = in.read(buffer); } } catch(IOException e) { throw Log.errRTExcept(e); } finally { close(in, out); } } public static String readFile(File file) { FileReader r = null; try { r = new FileReader(file); char[] data = new char[(int) file.length()]; r.read(data); return new String(data); } catch(IOException e) { throw Log.errRTExcept(e); } finally { close(r); } } public static void readFile(File file, OutputStream out) { BufferedInputStream in = null; try { in = new BufferedInputStream(new FileInputStream(file)); byte[] buffer = new byte[1024]; while( true ) { int count = in.read(buffer); if( count == -1 ) break; out.write(buffer, 0, count); } } catch(IOException e) { throw Log.errRTExcept(e); } finally { close(in); } } public static String join(char sep, Object[] array) { return join(sep, Arrays.asList(array)); } public static String join(char sep, Iterable it) { String s = ""; for( Object o : it ) s += (s.length() == 0 ? 
"" : sep) + o.toString(); return s; } public static byte[] or(byte[] a, byte[] b) { for(int i = 0; i < a.length; i++ ) a[i] |= b[i]; return a; } public static int[] or(int[] a, int[] b) { for(int i = 0; i < a.length; i++ ) a[i] |= b[i]; return a; } public static byte[] add(byte[] a, byte[] b) { for(int i = 0; i < a.length; i++ ) a[i] += b[i]; return a; } public static byte[][] add(byte[][] a, byte[][] b) { for(int i = 0; i < a.length; i++ ) add(a[i],b[i]); return a; } public static byte[][][] add(byte[][][] a, byte[][][] b) { for(int i = 0; i < a.length; i++ ) add(a[i],b[i]); return a; } public static int[] add(int[] a, int[] b) { for(int i = 0; i < a.length; i++ ) a[i] += b[i]; return a; } public static int[][] add(int[][] a, int[][] b) { for(int i = 0; i < a.length; i++ ) add(a[i],b[i]); return a; } public static int[][][] add(int[][][] a, int[][][] b) { for(int i = 0; i < a.length; i++ ) add(a[i],b[i]); return a; } public static long[] add(long[] a, long[] b) { if( b==null ) return a; for(int i = 0; i < a.length; i++ ) a[i] += b[i]; return a; } public static long[][] add(long[][] a, long[][] b) { for(int i = 0; i < a.length; i++ ) add(a[i],b[i]); return a; } public static long[][][] add(long[][][] a, long[][][] b) { for(int i = 0; i < a.length; i++ ) add(a[i],b[i]); return a; } public static float[] add(float[] a, float[] b) { if( b==null ) return a; for(int i = 0; i < a.length; i++ ) a[i] += b[i]; return a; } public static float[][] add(float[][] a, float[][] b) { for(int i = 0; i < a.length; i++ ) add(a[i],b[i]); return a; } public static float[][][] add(float[][][] a, float[][][] b) { for(int i = 0; i < a.length; i++ ) add(a[i],b[i]); return a; } public static double[] add(double[] a, double[] b) { if( a==null ) return b; for(int i = 0; i < a.length; i++ ) a[i] += b[i]; return a; } public static double[][] add(double[][] a, double[][] b) { for(int i = 0; i < a.length; i++ ) a[i] = add(a[i],b[i]); return a; } public static double[][][] add(double[][][] a, double[][][] b) { for(int i = 0; i < a.length; i++ ) add(a[i],b[i]); return a; } public static double[][] append(double[][] a, double[][] b) { double[][] res = new double[a.length + b.length][]; System.arraycopy(a, 0, res, 0, a.length); System.arraycopy(b, 0, res, a.length, b.length); return res; } public static int[] append(int[] a, int[] b) { int[] res = new int[a.length + b.length]; System.arraycopy(a, 0, res, 0, a.length); System.arraycopy(b, 0, res, a.length, b.length); return res; } public static String[] append(String[] a, String[] b) { String[] res = new String[a.length + b.length]; System.arraycopy(a, 0, res, 0, a.length); System.arraycopy(b, 0, res, a.length, b.length); return res; } public static double[] append(double[] a, double e) { a = Arrays.copyOf(a,a.length+1); a[a.length-1] = e; return a; } public static double[] append(double[] a, double [] e) { double [] res = Arrays.copyOf(a,a.length + e.length); System.arraycopy(e,0,res,a.length,e.length); return res; } public static long[][][] append(long[][][] a, long[][] e) { a = Arrays.copyOf(a,a.length+1); a[a.length-1] = e; return a; } public static <T> T[] append(T[] a, T... 
b) { if( a==null ) return b; T[] tmp = Arrays.copyOf(a,a.length+b.length); System.arraycopy(b,0,tmp,a.length,b.length); return tmp; } public static <T> T[] remove(T[] a, int i) { T[] tmp = Arrays.copyOf(a,a.length-1); System.arraycopy(a,i+1,tmp,i,tmp.length-i); return tmp; } public static int[] remove(int[] a, int i) { int[] tmp = Arrays.copyOf(a,a.length-1); System.arraycopy(a,i+1,tmp,i,tmp.length-i); return tmp; } public static <T> T[] subarray(T[] a, int off, int len) { return Arrays.copyOfRange(a,off,off+len); } public static void clearFolder(String folder) { clearFolder(new File(folder)); } public static void clearFolder(File folder) { if (folder.exists()) { for (File child : folder.listFiles()) { if (child.isDirectory()) clearFolder(child); if (!child.delete()) throw new RuntimeException("Cannot delete " + child); } } } /** * Returns the system temporary folder, e.g. /tmp */ public static File tmp() { try { return File.createTempFile("h2o", null).getParentFile(); } catch( IOException e ) { throw new RuntimeException(e); } } public static byte [] getFirstUnzipedBytes(Key k){ return getFirstUnzipedBytes(DKV.get(k)); } public static byte [] getFirstUnzipedBytes(Value v){ byte [] bits = v.getFirstBytes(); try{ return unzipBytes(bits, guessCompressionMethod(bits)); } catch(Exception e){ throw new RuntimeException(e); } } public static Compression guessCompressionMethod(byte [] bits){ // Look for ZIP magic if( bits.length > ZipFile.LOCHDR && UDP.get4(bits,0) == ZipFile.LOCSIG ) return Compression.ZIP; if( bits.length > 2 && UDP.get2u(bits,0) == GZIPInputStream.GZIP_MAGIC ) return Compression.GZIP; return Compression.NONE; } public static byte [] unzipBytes(byte [] bs, Compression cmp) { InputStream is = null; int off = 0; try { switch(cmp) { case NONE: // No compression return bs; case ZIP: { ZipInputStream zis = new ZipInputStream(new ByteArrayInputStream(bs)); ZipEntry ze = zis.getNextEntry(); // Get the *FIRST* entry // There is at least one entry in zip file and it is not a directory. if( ze != null && !ze.isDirectory() ) { is = zis; break; } zis.close(); return bs; // Don't crash, ignore file if cannot unzip } case GZIP: is = new GZIPInputStream(new ByteArrayInputStream(bs)); break; default: assert false:"cmp = " + cmp; } // If reading from a compressed stream, estimate we can read 2x uncompressed assert( is != null ):"is is NULL, cmp = " + cmp; bs = new byte[bs.length * 2]; // Now read from the (possibly compressed) stream while( off < bs.length ) { int len = is.read(bs, off, bs.length - off); if( len < 0 ) break; off += len; if( off == bs.length ) { // Dataset is uncompressing alot! Need more space... 
if( bs.length >= water.fvec.Vec.CHUNK_SZ ) break; // Already got enough bs = Arrays.copyOf(bs, bs.length * 2); } } } catch( IOException ioe ) { // Stop at any io error Log.err(ioe); } finally { Utils.close(is); } return bs; } public static String formatPct(double pct) { String s = "N/A"; if( !isNaN(pct) ) s = String.format("%5.2f %%", 100 * pct); return s; } public static int maxValue(byte[] from ) { int result = from[0]&0xFF; for (int i = 1; i < from.length; ++i) if ( (from[i]&0xFF) > result) result = from[i]&0xFF; return result; } /** * Simple wrapper around ArrayList with support for H2O serialization * @author tomasnykodym * @param <T> */ public static class IcedArrayList<T extends Iced> extends ArrayList<T> implements Freezable { @Override public AutoBuffer write(AutoBuffer bb) { bb.put4(size()); for(T t:this) bb.put(t); return bb; } @Override public IcedArrayList<T> read(AutoBuffer bb) { int n = bb.get4(); for(int i = 0; i < n; ++i) add(bb.<T>get()); return this; } @Override public <T2 extends Freezable> T2 newInstance() { return (T2)new IcedArrayList<T>(); } private static int _frozen$type; @Override public int frozenType() { return _frozen$type == 0 ? (_frozen$type=water.TypeMap.onIce(IcedArrayList.class.getName())) : _frozen$type; } @Override public AutoBuffer writeJSONFields(AutoBuffer bb) { return bb; } @Override public FieldDoc[] toDocField() { return null; } } public static class IcedInt extends Iced { public final int _val; public IcedInt(int v){_val = v;} @Override public boolean equals( Object o ) { if( !(o instanceof IcedInt) ) return false; return ((IcedInt)o)._val == _val; } @Override public int hashCode() { return _val; } @Override public String toString() { return Integer.toString(_val); } } public static class IcedLong extends Iced { public long _val; public IcedLong(long v){_val = v;} @Override public boolean equals( Object o ) { if( !(o instanceof IcedLong) ) return false; return ((IcedLong)o)._val == _val; } @Override public int hashCode() { return (int)_val; } @Override public String toString() { return Long.toString(_val); } } public static class IcedDouble extends Iced { public final double _val; public IcedDouble(double v){_val = v;} @Override public boolean equals( Object o ) { if( !(o instanceof IcedDouble) ) return false; return ((IcedDouble)o)._val == _val; } @Override public int hashCode() { return (int)Double.doubleToLongBits(_val); } @Override public String toString() { return Double.toString(_val); } } public static class IcedString extends Iced { public final String _val; public IcedString(String v){_val = v;} @Override public boolean equals( Object o ) { if( !(o instanceof IcedString) ) return false; return ((IcedString)o)._val.equals(_val); } @Override public int hashCode() { return _val.hashCode(); } @Override public String toString() { return _val; } } public static class IcedBitSet extends Iced { public final byte[] _val; public final int _nbits; public final int _offset; // Number of bits discarded from beginning (inclusive min) public IcedBitSet(byte[] v, int nbits, int offset) { if(nbits < 0) throw new NegativeArraySizeException("nbits < 0: " + nbits); if(offset < 0) throw new IndexOutOfBoundsException("offset < 0: " + offset); assert (nbits >> 3) <= v.length; _val = v; _nbits = nbits; _offset = offset; } public IcedBitSet(int nbits) { this(nbits, 0); } public IcedBitSet(int nbits, int offset) { if(nbits < 0) throw new NegativeArraySizeException("nbits < 0: " + nbits); if(offset < 0) throw new IndexOutOfBoundsException("offset < 0: " + offset); 
_nbits = nbits; _offset = offset; _val = new byte[((nbits-1) >> 3) + 1]; } public boolean get(int idx) { if(idx < 0 || idx >= _nbits) throw new IndexOutOfBoundsException("Must have 0 <= idx <= " + Integer.toString(_nbits-1) + ": " + idx); return (_val[idx >> 3] & ((byte)1 << (idx % 8))) != 0; } public boolean contains(int idx) { if(idx < 0) throw new IndexOutOfBoundsException("idx < 0: " + idx); if(Double.isNaN(idx) || idx >= _nbits) return false; return get(idx); } public void set(int idx) { if(idx < 0 || idx >= _nbits) throw new IndexOutOfBoundsException("Must have 0 <= idx <= " + Integer.toString(_nbits-1) + ": " + idx); _val[idx >> 3] |= ((byte)1 << (idx % 8)); } public void clear(int idx) { if(idx < 0 || idx >= _nbits) throw new IndexOutOfBoundsException("Must have 0 <= idx <= " + Integer.toString(_nbits-1) + ": " + idx); _val[idx >> 3] &= ~((byte)1 << (idx % 8)); } public int cardinality() { int nbits = 0; for(int i = 0; i < _val.length; i++) nbits += Integer.bitCount(_val[i]); return nbits; } public int nextSetBit(int idx) { if(idx < 0 || idx >= _nbits) throw new IndexOutOfBoundsException("Must have 0 <= idx <= " + Integer.toString(_nbits-1) + ": " + idx); int idx_next = idx >> 3; byte bt_next = (byte)(_val[idx_next] & ((byte)0xff << idx)); while(bt_next == 0) { if(++idx_next >= _val.length) return -1; bt_next = _val[idx_next]; } return (idx_next << 3) + Integer.numberOfTrailingZeros(bt_next); } public int nextClearBit(int idx) { if(idx < 0 || idx >= _nbits) throw new IndexOutOfBoundsException("Must have 0 <= idx <= " + Integer.toString(_nbits-1) + ": " + idx); int idx_next = idx >> 3; byte bt_next = (byte)(~_val[idx_next] & ((byte)0xff << idx)); // Mask out leftmost bits not in use if(idx_next == _val.length-1 && _nbits % 8 > 0) bt_next &= ~((byte)0xff << (_nbits % 8)); while(bt_next == 0) { if(++idx_next >= _val.length) return -1; bt_next = (byte)(~_val[idx_next]); if(idx_next == _val.length-1 && _nbits % 8 > 0) bt_next &= ~((byte)0xff << (_nbits % 8)); } return (idx_next << 3) + Integer.numberOfTrailingZeros(bt_next); } public int size() { return _val.length << 3; } public int numBytes() { return _val.length; }; @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (_offset>0) sb.append("...").append(_offset).append(" 0-bits... "); for(int i = 0; i < _val.length; i++) { if (i>0) sb.append(' '); sb.append(String.format("%8s", Integer.toBinaryString(0xFF & _val[i])).replace(' ', '0')); } sb.append("}"); return sb.toString(); } public String toStrArray() { StringBuilder sb = new StringBuilder(); sb.append("{").append(_val[0]); for(int i = 1; i < _val.length; i++) sb.append(", ").append(_val[i]); sb.append("}"); return sb.toString(); } } /** * Simple wrapper around HashMap with support for H2O serialization * @author tomasnykodym */ public static class IcedHashMap<K extends Iced, V extends Iced> extends HashMap<K,V> implements Freezable { @Override public AutoBuffer write(AutoBuffer bb) { bb.put4(size()); for( Map.Entry<K, V> e : entrySet() ) bb.put(e.getKey()).put(e.getValue()); return bb; } @Override public IcedHashMap<K,V> read(AutoBuffer bb) { int n = bb.get4(); for(int i = 0; i < n; ++i) put(bb.<K>get(),bb.<V>get()); return this; } @Override public IcedHashMap<K,V> newInstance() { return new IcedHashMap<K,V>(); } private static int _frozen$type; @Override public int frozenType() { return _frozen$type == 0 ? 
(_frozen$type=water.TypeMap.onIce(IcedHashMap.class.getName())) : _frozen$type; } @Override public AutoBuffer writeJSONFields(AutoBuffer bb) { return bb; } @Override public FieldDoc[] toDocField() { return null; } } public static final boolean hasNaNsOrInfs(double [] arr){ for(double d:arr) if(Double.isNaN(d) || Double.isInfinite(d))return true; return false; } public static class ExpectedExceptionForDebug extends RuntimeException { } public static String getStackAsString(Throwable t) { Writer result = new StringWriter(); PrintWriter printWriter = new PrintWriter(result); t.printStackTrace(printWriter); return result.toString(); } /** Returns a mapping of given domain to values (0, ... max(dom)). * Unused domain items has mapping to -1. * precondition - dom is sorted dom[0] contains minimal value, dom[dom.length-1] represents max. value. */ public static int[] mapping(int[] dom) { if (dom.length == 0) return new int[] {}; assert dom[0] <= dom[dom.length-1] : "Domain is not sorted"; int min = dom[0]; int max = dom[dom.length-1]; int[] result = new int[(max-min)+1]; for (int i=0; i<result.length; i++) result[i] = -1; // not used fields for (int i=0; i<dom.length; i++) result[dom[i]-min] = i; return result; } public static String[] toString(long[] dom) { String[] result = new String[dom.length]; for (int i=0; i<dom.length; i++) result[i] = String.valueOf(dom[i]); return result; } public static String[] toString(int[] dom) { String[] result = new String[dom.length]; for (int i=0; i<dom.length; i++) result[i] = String.valueOf(dom[i]); return result; } public static String[] toStringMap(int first, int last) { if(first > last) throw new IllegalArgumentException("first must be an integer less than or equal to last"); String[] result = new String[last-first+1]; for(int i = first; i <= last; i++) result[i-first] = String.valueOf(i); return result; } public static int[] compose(int[] first, int[] transf) { for (int i=0; i<first.length; i++) { if (first[i]!=-1) first[i] = transf[first[i]]; } return first; } public static int[][] compose(int[][] first, int[][] second) { int[] firstDom = first[0]; int[] firstRan = first[1]; // flat transformation int[] secondDom = second[0]; int[] secondRan = second[1]; boolean[] filter = new boolean[firstDom.length]; int fcnt = 0; int[] resDom = firstDom.clone(); int[] resRan = firstRan!=null ? firstRan.clone() : new int[firstDom.length]; for (int i=0; i<resDom.length; i++) { int v = firstRan!=null ? firstRan[i] : i; // resulting value int vi = Arrays.binarySearch(secondDom, v); // Do not be too strict in composition assert vi >=0 : "Trying to compose two incompatible transformation: first=" + Arrays.deepToString(first) + ", second=" + Arrays.deepToString(second); if (vi<0) { filter[i] = true; fcnt++; } else resRan[i] = secondRan!=null ? 
secondRan[vi] : vi; } return new int[][] { filter(resDom,filter,fcnt), filter(resRan,filter,fcnt) }; } private static final DecimalFormat default_dformat = new DecimalFormat("0.#####"); public static String pprint(double[][] arr){ return pprint(arr,default_dformat); } // pretty print Matrix(2D array of doubles) public static String pprint(double[][] arr,DecimalFormat dformat) { int colDim = 0; for( double[] line : arr ) colDim = Math.max(colDim, line.length); StringBuilder sb = new StringBuilder(); int max_width = 0; int[] ilengths = new int[colDim]; Arrays.fill(ilengths, -1); for( double[] line : arr ) { for( int c = 0; c < line.length; ++c ) { double d = line[c]; String dStr = dformat.format(d); if( dStr.indexOf('.') == -1 ) dStr += ".0"; ilengths[c] = Math.max(ilengths[c], dStr.indexOf('.')); int prefix = (d >= 0 ? 1 : 2); max_width = Math.max(dStr.length() + prefix, max_width); } } for( double[] line : arr ) { for( int c = 0; c < line.length; ++c ) { double d = line[c]; String dStr = dformat.format(d); if( dStr.indexOf('.') == -1 ) dStr += ".0"; for( int x = dStr.indexOf('.'); x < ilengths[c] + 1; ++x ) sb.append(' '); sb.append(dStr); if( dStr.indexOf('.') == -1 ) sb.append('.'); for( int i = dStr.length() - Math.max(0, dStr.indexOf('.')); i <= 5; ++i ) sb.append('0'); } sb.append("\n"); } return sb.toString(); } static public boolean isEmpty(int[] a) { return a==null || a.length == 0; } static public boolean contains(int[] a, int d) { for(int i=0; i<a.length; i++) if (a[i]==d) return true; return false; } // warning: Non-Symmetric! Returns all elements in a that are not in b (but NOT the other way around) static public int[] difference(int a[], int b[]) { if (a == null) return new int[]{}; if (b == null) return a.clone(); int[] r = new int[a.length]; int cnt = 0; for (int i=0; i<a.length; i++) { if (!contains(b, a[i])) r[cnt++] = a[i]; } return Arrays.copyOf(r, cnt); } /** Generates sequence (start, stop) of integers: (start, start+1, ...., stop-1) */ static public int[] seq(int start, int stop) { assert start<stop; int len = stop-start; int[] res = new int[len]; for(int i=start; i<stop;i++) res[i-start] = i; return res; } public static String className(String path) { return path.replace('\\', '/').replace('/', '.').substring(0, path.length() - 6); } public static double avg(double[] nums) { double sum = 0; for(double n: nums) sum+=n; return sum/nums.length; } public static double avg(long[] nums) { long sum = 0; for(long n: nums) sum+=n; return sum/nums.length; } public static float[] div(float[] nums, int n) { for (int i=0; i<nums.length; i++) nums[i] /= n; return nums; } public static float[] div(float[] nums, float n) { assert !Float.isInfinite(n) : "Trying to divide " + Arrays.toString(nums) + " by " + n; // Almost surely not what you want for (int i=0; i<nums.length; i++) nums[i] /= n; return nums; } public static double[] div(double[] nums, double n) { assert !Double.isInfinite(n) : "Trying to divide " + Arrays.toString(nums) + " by " + n; // Almost surely not what you want for (int i=0; i<nums.length; i++) nums[i] /= n; return nums; } public static float[] mult(float[] nums, float n) { assert !Float.isInfinite(n) : "Trying to multiply " + Arrays.toString(nums) + " by " + n; // Almost surely not what you want for (int i=0; i<nums.length; i++) nums[i] *= n; return nums; } public static double[] mult(double[] nums, double n) { assert !Double.isInfinite(n) : "Trying to multiply " + Arrays.toString(nums) + " by " + n; // Almost surely not what you want for (int i=0; i<nums.length; 
i++) nums[i] *= n; return nums; } /** * Fast approximate sqrt * @param x * @return sqrt(x) with up to 5% relative error */ final public static double approxSqrt(double x) { return Double.longBitsToDouble(((Double.doubleToLongBits(x) >> 32) + 1072632448) << 31); } /** * Fast approximate sqrt * @param x * @return sqrt(x) with up to 5% relative error */ final public static float approxSqrt(float x) { return Float.intBitsToFloat(532483686 + (Float.floatToRawIntBits(x) >> 1)); } /** * Fast approximate 1./sqrt * @param x * @return 1./sqrt(x) with up to 2% relative error */ final public static double approxInvSqrt(double x) { double xhalf = 0.5d*x; x = Double.longBitsToDouble(0x5fe6ec85e7de30daL - (Double.doubleToLongBits(x)>>1)); return x*(1.5d - xhalf*x*x); } /** * Fast approximate 1./sqrt * @param x * @return 1./sqrt(x) with up to 2% relative error */ final public static float approxInvSqrt(float x) { float xhalf = 0.5f*x; x = Float.intBitsToFloat(0x5f3759df - (Float.floatToIntBits(x)>>1)); return x*(1.5f - xhalf*x*x); } /** * Fast approximate exp * @param x * @return exp(x) with up to 5% relative error */ final public static double approxExp(double x) { return Double.longBitsToDouble(((long)(1512775 * x + 1072632447)) << 32); } /** * Fast approximate log for values greater than 1, otherwise exact * @param x * @return log(x) with up to 0.1% relative error */ final public static double approxLog(double x){ if (x > 1) return ((Double.doubleToLongBits(x) >> 32) - 1072632447d) / 1512775d; else return Math.log(x); } /** * Replace given characters in a given string builder. * The number of characters to replace has to match to number of * characters serving as a replacement. * * @param sb string builder containing a string to be modified * @param from characters to replaced * @param to replacement characters * @return original string builder with replaced characters. */ public static StringBuilder replace(StringBuilder sb, CharSequence from, CharSequence to) { assert from.length() == to.length(); for (int i=0; i<sb.length(); i++) for (int j=0; j<from.length(); j++) if (sb.charAt(i)==from.charAt(j)) sb.setCharAt(i, to.charAt(j)); return sb; } /** * Returns true if given string contains at least on of character of * given sequence. 
* @param s string * @param cs a sequence of character * @return true if s contains at least one of character from given sequence, else false */ public static boolean contains(String s, CharSequence cs) { for (int i=0; i<s.length(); i++) for (int j=0; j<cs.length(); j++) if (s.charAt(i) == cs.charAt(j)) return true; return false; } // Atomically-updated float array public static class AtomicFloatArray { private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); private static final int _Fbase = _unsafe.arrayBaseOffset(float[].class); private static final int _Fscale = _unsafe.arrayIndexScale(float[].class); private static long rawIndex(final float[] ary, final int idx) { assert idx >= 0 && idx < ary.length; return _Fbase + idx * _Fscale; } static public void setMin( float fs[], int i, float min ) { float old = fs[i]; while( min < old && !_unsafe.compareAndSwapInt(fs,rawIndex(fs,i), Float.floatToRawIntBits(old), Float.floatToRawIntBits(min) ) ) old = fs[i]; } static public void setMax( float fs[], int i, float max ) { float old = fs[i]; while( max > old && !_unsafe.compareAndSwapInt(fs,rawIndex(fs,i), Float.floatToRawIntBits(old), Float.floatToRawIntBits(max) ) ) old = fs[i]; } static public String toString( float fs[] ) { SB sb = new SB(); sb.p('['); for( float f : fs ) sb.p(f==Float.MAX_VALUE ? "max": (f==-Float.MAX_VALUE ? "min": Float.toString(f))).p(','); return sb.p(']').toString(); } } // Atomically-updated double array public static class AtomicDoubleArray { private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); private static final int _Dbase = _unsafe.arrayBaseOffset(double[].class); private static final int _Dscale = _unsafe.arrayIndexScale(double[].class); private static long rawIndex(final double[] ary, final int idx) { assert idx >= 0 && idx < ary.length; return _Dbase + idx * _Dscale; } static public void add( double ds[], int i, double y ) { long adr = rawIndex(ds,i); double old = ds[i]; while( !_unsafe.compareAndSwapLong(ds,adr, Double.doubleToRawLongBits(old), Double.doubleToRawLongBits(old+y) ) ) old = ds[i]; } } // Atomically-updated long array. Instead of using the similar JDK pieces, // allows the bare array to be exposed for fast readers. public static class AtomicLongArray { private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); private static final int _Lbase = _unsafe.arrayBaseOffset(long[].class); private static final int _Lscale = _unsafe.arrayIndexScale(long[].class); private static long rawIndex(final long[] ary, final int idx) { assert idx >= 0 && idx < ary.length; return _Lbase + idx * _Lscale; } static public void incr( long ls[], int i ) { long adr = rawIndex(ls,i); long old = ls[i]; while( !_unsafe.compareAndSwapLong(ls,adr, old, old+1) ) old = ls[i]; } } // Atomically-updated int array. Instead of using the similar JDK pieces, // allows the bare array to be exposed for fast readers. 
public static class AtomicIntArray { private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); private static final int _Ibase = _unsafe.arrayBaseOffset(int[].class); private static final int _Iscale = _unsafe.arrayIndexScale(int[].class); private static long rawIndex(final int[] ary, final int idx) { assert idx >= 0 && idx < ary.length; return _Ibase + idx * _Iscale; } static public void incr( int is[], int i ) { add(is,i,1); } static public void add( int is[], int i, int x ) { long adr = rawIndex(is,i); int old = is[i]; while( !_unsafe.compareAndSwapInt(is,adr, old, old+x) ) old = is[i]; } } public static boolean contains(String[] names, String name) { for (String n : names) if (n.equals(name)) return true; return false; } /** Java-string illegal characters which need to be escaped */ public static final Pattern[] ILLEGAL_CHARACTERS = new Pattern[] { Pattern.compile("\\",Pattern.LITERAL), Pattern.compile("\"",Pattern.LITERAL) }; public static final String[] REPLACEMENTS = new String [] { "\\\\\\\\", "\\\\\"" }; /** Escape all " and \ characters to provide a proper Java-like string * Does not escape unicode characters. */ public static String escapeJava(String s) { assert ILLEGAL_CHARACTERS.length == REPLACEMENTS.length; for (int i=0; i<ILLEGAL_CHARACTERS.length; i++ ) { Matcher m = ILLEGAL_CHARACTERS[i].matcher(s); s = m.replaceAll(REPLACEMENTS[i]); } return s; } /** Clever union of String arrays. * * For union of numeric arrays (strings represent integers) it is expecting numeric ordering. * For pure string domains it is expecting lexicographical ordering. * For mixed domains it always expects lexicographical ordering since such a domain were produce * by a parser which sort string with Array.sort(). * * PRECONDITION - string domain was sorted by Array.sort(String[]), integer domain by Array.sort(int[]) and switched to Strings !!! * * @param a a set of strings * @param b a set of strings * @return union of arrays */ public static String[] domainUnion(String[] a, String[] b) { int cIinA = numInts(a); int cIinB = numInts(b); // Trivial case - all strings or ints, sorted if (cIinA==0 && cIinB==0 // only strings || cIinA==a.length && cIinB==b.length ) // only integers return union(a, b, cIinA==0); // Be little bit clever here: sort string representing numbers first and append // a,b were sorted by Array.sort() but can contain some numbers. // So sort numbers in numeric way, and then string in lexicographical order int[] ai = toInt(a, 0, cIinA); Arrays.sort(ai); // extract int part but sort it in numeric order int[] bi = toInt(b, 0, cIinB); Arrays.sort(bi); String[] ri = toString(union(ai,bi)); // integer part String[] si = union(a,b,cIinA,a.length-cIinA,cIinB,b.length-cIinB,true); return join(ri, si); } /** Union of given String arrays. * * The method expects ordering of domains in given order (lexicographical, numeric) * * @param a first array * @param b second array * @param lexo - true if domains are sorted in lexicographical order or false for numeric domains * @return union of values in given arrays. * * precondition lexo ? 
a,b are lexicographically sorted : a,b are sorted numerically * precondition a!=null &amp;&amp; b!=null */ public static String[] union(String[] a, String[] b, boolean lexo) { assert a!=null && b!=null : "Union expect non-null input!"; return union(a, b, 0, a.length, 0, b.length, lexo); } public static String[] union(String[] a, String[] b, int aoff, int alen, int boff, int blen, boolean lexo) { assert a!=null && b!=null : "Union expect non-null input!"; String[] r = new String[alen+blen]; int ia = aoff, ib = boff, i = 0; while (ia < aoff+alen && ib < boff+blen) { int c = lexo ? a[ia].compareTo(b[ib]) : Integer.valueOf(a[ia]).compareTo(Integer.valueOf(b[ib])); if ( c < 0) r[i++] = a[ia++]; else if (c == 0) { r[i++] = a[ia++]; ib++; } else r[i++] = b[ib++]; } if (ia < aoff+alen) while (ia<aoff+alen) r[i++] = a[ia++]; if (ib < boff+blen) while (ib<boff+blen) r[i++] = b[ib++]; return Arrays.copyOf(r, i); } /** Returns a union of given sorted arrays. */ public static int[] union(int[] a, int[] b) { assert a!=null && b!=null : "Union expect non-null input!"; int[] r = new int[a.length+b.length]; int ia = 0, ib = 0, i = 0; while (ia < a.length && ib < b.length) { int c = a[ia]-b[ib]; if ( c < 0) r[i++] = a[ia++]; else if (c == 0) { r[i++] = a[ia++]; ib++; } else r[i++] = b[ib++]; } if (ia < a.length) while (ia<a.length) r[i++] = a[ia++]; if (ib < b.length) while (ib<b.length) r[i++] = b[ib++]; return Arrays.copyOf(r, i); } public static <T> T[] join(T[] a, T[] b) { T[] res = Arrays.copyOf(a, a.length+b.length); System.arraycopy(b, 0, res, a.length, b.length); return res; } public static float[] join(float[] a, float[] b) { float[] res = Arrays.copyOf(a, a.length+b.length); System.arraycopy(b, 0, res, a.length, b.length); return res; } public static double[] join(double[] a, double[] b) { double[] res = Arrays.copyOf(a, a.length+b.length); System.arraycopy(b, 0, res, a.length, b.length); return res; } /** Returns number of strings which represents a number. */ public static int numInts(String... a) { int cnt = 0; for(String s : a) if (isInt(s)) cnt++; return cnt; } public static boolean isInt(String s) { int i = s.charAt(0)=='-' ? 1 : 0; for(; i<s.length();i++) if (!Character.isDigit(s.charAt(i))) return false; return true; } public static int[] toInt(String[] a, int off, int len) { int[] res = new int[len]; for(int i=0; i<len; i++) res[i] = Integer.valueOf(a[off+i]); return res; } public static int[] filter(int[] values, boolean[] filter, int fcnt) { assert filter.length == values.length : "Values should have same length as filter!"; assert filter.length - fcnt >= 0 : "Cannot filter more values then legth of filter vector!"; if (fcnt==0) return values; int[] result = new int[filter.length - fcnt]; int c = 0; for (int i=0; i<values.length; i++) { if (!filter[i]) result[c++] = values[i]; } return result; } public static int[][] pack(int[] values, boolean[] usemap) { assert values.length == usemap.length : "Cannot pack the map according given use map!"; int cnt = 0; for (int i=0; i<usemap.length; i++) cnt += usemap[i] ? 
1 : 0; int[] pvals = new int[cnt]; // only used values int[] pindx = new int[cnt]; // indexes of used values int index = 0; for (int i=0; i<usemap.length; i++) { if (usemap[i]) { pvals[index] = values[i]; pindx[index] = i; index++; } } return new int[][] { pvals, pindx }; } /** * Poisson-distributed RNG * @param lambda Lambda parameter * @return Poisson-distributed random number in [0,inf) */ public static int getPoisson(double lambda, Random rng) { double L = Math.exp(-lambda); double p = 1.0; int k = 0; if (rng == null) rng = new Random(); do { k++; p *= rng.nextDouble(); } while (p > L); return k - 1; } /** Create a new sorted array according to given sort order */ public static float[] sortAccording(float[] ary, Integer[] sortOrder) { float[] res = new float[ary.length]; for(int i=0; i<ary.length; i++) res[i] = ary[sortOrder[i]]; return res; } public static String[] sortAccording(String[] ary, Integer[] sortOrder) { String[] res = new String[ary.length]; for(int i=0; i<ary.length; i++) res[i] = ary[sortOrder[i]]; return res; } public static int[] sortAccording(int[] ary, Integer[] sortOrder) { int[] res = new int[ary.length]; for(int i=0; i<ary.length; i++) res[i] = ary[sortOrder[i]]; return res; } /** Sort two arrays - the second one is sorted according the first one. */ public static void sortWith(final int[] ary, int[] ary2) { Integer[] sortOrder = new Integer[ary.length]; for(int i=0; i<sortOrder.length; i++) sortOrder[i] = i; Arrays.sort(sortOrder, new Comparator<Integer>() { @Override public int compare(Integer o1, Integer o2) { return ary[o1]-ary[o2]; } }); sortAccording2(ary, sortOrder); sortAccording2(ary2, sortOrder); } /** Sort given array according given sort order. Sort is implemented in-place. */ public static void sortAccording2(int[] ary, Integer[] sortOrder) { Integer[] so = sortOrder.clone(); // we are modifying sortOrder to preserve exchanges for(int i=0; i<ary.length; i++) { int tmp = ary[i]; int idx = so[i]; ary[i] = ary[idx]; ary[idx] = tmp; for (int j=i; j<so.length; j++) if (so[j]==i) { so[j] = idx; break; } } } /** Sort given array according given sort order. Sort is implemented in-place. 
*/ public static void sortAccording2(boolean[] ary, Integer[] sortOrder) { Integer[] so = sortOrder.clone(); // we are modifying sortOrder to preserve exchanges for(int i=0; i<ary.length; i++) { boolean tmp = ary[i]; int idx = so[i]; ary[i] = ary[idx]; ary[idx] = tmp; for (int j=i; j<so.length; j++) if (so[j]==i) { so[j] = idx; break; } } } public static String[] createConfusionMatrixHeader( long xs[], String ds[] ) { String ss[] = new String[xs.length]; // the same length for( int i=0; i<ds.length; i++ ) if( xs[i] >= 0 || (ds[i] != null && ds[i].length() > 0) && !Integer.toString(i).equals(ds[i]) ) ss[i] = ds[i]; if( ds.length == xs.length-1 && xs[xs.length-1] > 0 ) ss[xs.length-1] = "NA"; return ss; } public static void printConfusionMatrix(StringBuilder sb, long[][] cm, String[] domain, boolean html) { if (cm == null || domain == null) return; for (int i=0; i<cm.length; ++i) assert(cm.length == cm[i].length); if (html) DocGen.HTML.arrayHead(sb); // Sum up predicted & actuals long acts [] = new long[cm .length]; long preds[] = new long[cm[0].length]; for( int a=0; a<cm.length; a++ ) { long sum=0; for( int p=0; p<cm[a].length; p++ ) { sum += cm[a][p]; preds[p] += cm[a][p]; } acts[a] = sum; } String adomain[] = createConfusionMatrixHeader(acts , domain); String pdomain[] = createConfusionMatrixHeader(preds, domain); assert adomain.length == pdomain.length : "The confusion matrix should have the same length for both directions."; String fmt = ""; String fmtS = ""; // Header if (html) { sb.append("<tr class='warning' style='min-width:60px'>"); sb.append("<th>&darr; Actual / Predicted &rarr;</th>"); for( int p=0; p<pdomain.length; p++ ) if( pdomain[p] != null ) sb.append("<th style='min-width:60px'>").append(pdomain[p]).append("</th>"); sb.append("<th>Error</th>"); sb.append("</tr>"); } else { // determine max length of each space-padded field int maxlen = 0; for( String s : pdomain ) if( s != null ) maxlen = Math.max(maxlen, s.length()); long lsum = 0; for( int a=0; a<cm.length; a++ ) { if( adomain[a] == null ) continue; for( int p=0; p<pdomain.length; p++ ) { if( pdomain[p] == null ) continue; lsum += cm[a][p]; } } maxlen = Math.max(8, Math.max(maxlen, String.valueOf(lsum).length()) + 2); fmt = "%" + maxlen + "d"; fmtS = "%" + maxlen + "s"; sb.append(String.format(fmtS, "Act/Prd")); for( String s : pdomain ) if( s != null ) sb.append(String.format(fmtS, s)); sb.append(" " + String.format(fmtS, "Error\n")); } // Main CM Body long terr=0; for( int a=0; a<cm.length; a++ ) { if( adomain[a] == null ) continue; if (html) { sb.append("<tr style='min-width:60px'>"); sb.append("<th style='min-width:60px'>").append(adomain[a]).append("</th>"); } else { sb.append(String.format(fmtS,adomain[a])); } long correct=0; for( int p=0; p<pdomain.length; p++ ) { if( pdomain[p] == null ) continue; boolean onDiag = adomain[a].equals(pdomain[p]); if( onDiag ) correct = cm[a][p]; String id = ""; if (html) { sb.append(onDiag ? 
"<td style='min-width: 60px; background-color:LightGreen' "+id+">":"<td style='min-width: 60px;'"+id+">").append(String.format("%,d", cm[a][p])).append("</td>"); } else { sb.append(String.format(fmt,cm[a][p])); } } long err = acts[a]-correct; terr += err; if (html) { sb.append(String.format("<th style='min-width: 60px;'>%.05f = %,d / %,d</th></tr>", (double)err/acts[a], err, acts[a])); } else { sb.append(" " + String.format("%.05f = %,d / %d\n", (double)err/acts[a], err, acts[a])); } } // Last row of CM if (html) { sb.append("<tr style='min-width:60px'><th>Totals</th>"); } else { sb.append(String.format(fmtS, "Totals")); } for( int p=0; p<pdomain.length; p++ ) { if( pdomain[p] == null ) continue; if (html) { sb.append("<td style='min-width:60px'>").append(String.format("%,d", preds[p])).append("</td>"); } else { sb.append(String.format(fmt, preds[p])); } } long nrows = 0; for (long n : acts) nrows += n; if (html) { sb.append(String.format("<th style='min-width:60px'>%.05f = %,d / %,d</th></tr>", (float)terr/nrows, terr, nrows)); DocGen.HTML.arrayTail(sb); } else { sb.append(" " + String.format("%.05f = %,d / %,d\n", (float)terr/nrows, terr, nrows)); } } /** Divide given size into partitions based on given ratios. * @param len number to be split into partitions * @param ratio split ratio of each partition * @return array of sizes based on given ratios, the size of the last segment is len-sum(ratio)*len. */ public static final int[] partitione(int len, float[] ratio) { int[] r = new int[ratio.length+1]; int sum = 0; int i = 0; float sr = 0; for (i=0; i<ratio.length; i++) { r[i] = (int) (ratio[i]*len); sum += r[i]; sr += ratio[i]; } if (sr<1f) r[i] = len - sum; else r[i-1] += (len-sum); return r; } public static final long[] partitione(long len, float[] ratio) { long[] r = new long[ratio.length+1]; long sum = 0; int i = 0; float sr = 0; for (i=0; i<ratio.length; i++) { r[i] = (int) (ratio[i]*len); sum += r[i]; sr += ratio[i]; } if (sr<1f) r[i] = len - sum; else r[i-1] += (len-sum); return r; } /** Compute start row and length of <code>i</code>-th fold from <code>nfolds</code>. * * @param nrows number of rows * @param nfolds number of folds * @param i fold which is intended to be computed * @return return start row and number of rows for <code>i</code>-th fold. */ public static final long[] nfold(long nrows, int nfolds, int i) { assert i>=0 && i<nfolds; long foldSize = nrows / nfolds; long start = i * foldSize; long size = i!=nfolds-1 ? foldSize : foldSize + (nrows % nfolds); return new long[] {start,size}; } /** Generate given numbers of keys by suffixing key by given numbered suffix. */ public static Key[] generateNumKeys(Key mk, int num) { return generateNumKeys(mk, num, "_part"); } public static Key[] generateNumKeys(Key mk, int num, String delim) { Key[] ks = new Key[num]; String n = mk!=null ? mk.toString() : "noname"; String suffix = ""; if (n.endsWith(".hex")) { n = n.substring(0, n.length()-4); // be nice suffix = ".hex"; } for (int i=0; i<num; i++) ks[i] = Key.make(n+delim+i+suffix); return ks; } public static Key generateShuffledKey(Key mk) { String n = mk!=null ? 
mk.toString() : "noname"; String suffix = ""; if (n.endsWith(".hex")) { n = n.substring(0, n.length()-4); // be nice suffix = ".hex"; } return Key.make(n+"_shuffled"+suffix); } public static boolean isSorted(int [] ids){ for(int i = 1; i < ids.length; ++i) if (ids[i] < ids[i-1])return false; return true; } private static class Vec2ArryTsk extends MRTask2<Vec2ArryTsk> { final int N; public double [] res; public Vec2ArryTsk(int N){this.N = N;} @Override public void setupLocal(){ res = MemoryManager.malloc8d(N); } @Override public void map(Chunk c){ final int off = (int)c._start; for(int i = 0; i < c._len; i = c.nextNZ(i)) res[off+i] = c.at0(i); } @Override public void reduce(Vec2ArryTsk other){ if(res != other.res) { for(int i = 0; i < res.length; ++i) { assert res[i] == 0 || other.res[i] == 0; res[i] += other.res[i]; // assuming only one nonzero } } } } public static double [] asDoubles(Vec v){ if(v.length() > 100000) throw new IllegalArgumentException("Vec is too big to be extracted into array"); return new Vec2ArryTsk((int)v.length()).doAll(v).res; } private static class Vec2IntArryTsk extends MRTask2<Vec2IntArryTsk> { final int N; public int [] res; public Vec2IntArryTsk(int N){this.N = N;} @Override public void setupLocal(){ res = MemoryManager.malloc4(N); } @Override public void map(Chunk c){ final int off = (int)c._start; for(int i = 0; i < c._len; i = c.nextNZ(i)) res[off+i] = (int)c.at80(i); } @Override public void reduce(Vec2IntArryTsk other){ if(res != other.res) { for(int i = 0; i < res.length; ++i) { assert res[i] == 0 || other.res[i] == 0; res[i] += other.res[i]; // assuming only one nonzero } } } } public static int [] asInts(Vec v){ if(v.length() > 100000) throw new IllegalArgumentException("Vec is too big to be extracted into array"); return new Vec2IntArryTsk((int)v.length()).doAll(v).res; } }
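/*
 * Minimal usage sketch (not part of the original Utils.java) exercising a few of the helpers
 * defined above. It assumes water.util.Utils and its dependencies are on the classpath; the
 * class name and the sample values below are illustrative only.
 */
import water.util.Utils;

import java.util.Arrays;
import java.util.Random;

public class UtilsUsageSketch {
    public static void main(String[] args) {
        // maxIndex with a Random breaks ties by reservoir sampling among the tied maxima.
        int[] counts = {3, 7, 7, 2};
        System.out.println("max index: " + Utils.maxIndex(counts, new Random(42)));

        // union expects both inputs to be sorted and merges them without duplicates.
        int[] merged = Utils.union(new int[]{1, 3, 5}, new int[]{2, 3, 6});
        System.out.println("union: " + Arrays.toString(merged)); // [1, 2, 3, 5, 6]

        // approxInvSqrt trades accuracy (up to ~2% relative error) for speed.
        double exact = 1.0 / Math.sqrt(2.0);
        double approx = Utils.approxInvSqrt(2.0);
        System.out.printf("1/sqrt(2): exact=%.6f approx=%.6f%n", exact, approx);

        // partitione splits a length according to the given ratios; the last bucket gets the remainder.
        System.out.println("partitions: " + Arrays.toString(Utils.partitione(10, new float[]{0.7f}))); // [7, 3]
    }
}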
0
java-sources/ai/h2o/h2o-clustering/3.46.0.7/water
java-sources/ai/h2o/h2o-clustering/3.46.0.7/water/clustering/AssistedClusteringEmbeddedConfig.java
package water.clustering;

import water.init.AbstractEmbeddedH2OConfig;

import java.net.InetAddress;

/**
 * Embedded config providing a flatfile assembled by an external service that hints the H2O cluster
 * about the IP addresses of the pods.
 */
public class AssistedClusteringEmbeddedConfig extends AbstractEmbeddedH2OConfig {

    private final String flatfile;

    public AssistedClusteringEmbeddedConfig(final String flatFile) {
        this.flatfile = flatFile;
    }

    @Override
    public void notifyAboutEmbeddedWebServerIpPort(final InetAddress ip, final int port) {
    }

    @Override
    public void notifyAboutCloudSize(final InetAddress ip, final int port, final InetAddress leaderIp, final int leaderPort, final int size) {
    }

    @Override
    public boolean providesFlatfile() {
        return true;
    }

    @Override
    public String fetchFlatfile() {
        return flatfile;
    }

    @Override
    public void exit(int status) {
        System.exit(status);
    }

    @Override
    public void print() {
    }
}
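/*
 * Minimal sketch (not part of the original source) showing that the embedded config simply hands
 * back the flatfile it was constructed with. The class name and the example addresses are
 * illustrative assumptions; it only requires AssistedClusteringEmbeddedConfig on the classpath.
 */
import water.clustering.AssistedClusteringEmbeddedConfig;

public class AssistedClusteringEmbeddedConfigSketch {
    public static void main(String[] args) {
        // One node address per line, as expected by the assisted clustering endpoint.
        String flatfile = "10.0.0.1\n10.0.0.2";
        AssistedClusteringEmbeddedConfig config = new AssistedClusteringEmbeddedConfig(flatfile);

        // providesFlatfile() is hard-wired to true; fetchFlatfile() returns the stored string verbatim.
        System.out.println("provides flatfile: " + config.providesFlatfile());
        System.out.println(config.fetchFlatfile());
    }
}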
0
java-sources/ai/h2o/h2o-clustering/3.46.0.7/water
java-sources/ai/h2o/h2o-clustering/3.46.0.7/water/clustering/AssistedClusteringEmbeddedConfigProvider.java
package water.clustering;

import org.apache.log4j.Logger;
import water.H2O;
import water.clustering.api.AssistedClusteringRestApi;
import water.init.AbstractEmbeddedH2OConfig;
import water.init.EmbeddedConfigProvider;
import water.util.Log;

import java.io.IOException;
import java.util.Optional;
import java.util.concurrent.SynchronousQueue;
import java.util.function.Consumer;

/**
 * Provides {@link AssistedClusteringEmbeddedConfig} as a result of external assist. The resulting
 * embedded config provides a flatfile assembled by an external service, given to H2O.
 * This EmbeddedConfig has no timeout and will wait indefinitely until a flatfile is received.
 */
public class AssistedClusteringEmbeddedConfigProvider implements EmbeddedConfigProvider {

    private static final Logger LOG = Logger.getLogger(AssistedClusteringEmbeddedConfigProvider.class);
    private final SynchronousQueue<String> flatFileQueue = new SynchronousQueue<>();
    private AssistedClusteringRestApi assistedClusteringRestApi;

    @Override
    public void init() {
        final Consumer<String> flatFileCallback = s -> {
            try {
                flatFileQueue.put(s);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        };
        assistedClusteringRestApi = startAssistedClusteringRestApi(flatFileCallback)
                .orElseThrow(() -> new IllegalStateException("Assisted clustering Rest API unable to start."));
    }

    /**
     * Starts the REST API listening for an incoming request with a flatfile.
     */
    private Optional<AssistedClusteringRestApi> startAssistedClusteringRestApi(final Consumer<String> flatFileCallback) {
        Log.info("Starting assisted clustering REST API services");
        try {
            final AssistedClusteringRestApi assistedClusteringRestApi = new AssistedClusteringRestApi(flatFileCallback);
            assistedClusteringRestApi.start();
            Log.info("Assisted clustering REST API services successfully started.");
            return Optional.of(assistedClusteringRestApi);
        } catch (IOException e) {
            Log.err("Unable to start H2O assisted clustering REST API", e);
            H2O.exit(1);
            return Optional.empty();
        }
    }

    @Override
    public boolean isActive() {
        return Boolean.parseBoolean(System.getenv("H2O_ASSISTED_CLUSTERING_REST"));
    }

    @Override
    public AbstractEmbeddedH2OConfig getConfig() {
        try {
            final String flatfile = flatFileQueue.take();
            return new AssistedClusteringEmbeddedConfig(flatfile);
        } catch (InterruptedException e) {
            LOG.error(e.getMessage(), e);
            throw new IllegalStateException("Interruption occurred while waiting for a flatfile.", e);
        }
    }

    void close() {
        AssistedClusteringRestApi api = assistedClusteringRestApi;
        assistedClusteringRestApi = null;
        if (api != null) {
            api.close();
        }
    }
}
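/*
 * Sketch (not in the original source) of the SynchronousQueue hand-off the provider above relies on:
 * the REST callback put()s the flatfile and getConfig() blocks on take() until it arrives. The
 * background thread below stands in for the REST endpoint; class name and addresses are placeholders.
 */
import java.util.concurrent.SynchronousQueue;

public class FlatfileHandoffSketch {
    public static void main(String[] args) throws InterruptedException {
        SynchronousQueue<String> flatFileQueue = new SynchronousQueue<>();

        // Stands in for the assisted-clustering REST endpoint invoking the flatfile callback.
        Thread restCallback = new Thread(() -> {
            try {
                flatFileQueue.put("10.0.0.1\n10.0.0.2");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        restCallback.start();

        // Mirrors getConfig(): blocks with no timeout until the flatfile has been handed over.
        String flatfile = flatFileQueue.take();
        System.out.println("Received flatfile:\n" + flatfile);
    }
}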
0
java-sources/ai/h2o/h2o-clustering/3.46.0.7/water/clustering
java-sources/ai/h2o/h2o-clustering/3.46.0.7/water/clustering/api/AssistedClusteringEndpoint.java
package water.clustering.api;

import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import org.apache.log4j.Logger;
import water.init.NetworkInit;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Consumer;
import java.util.stream.Collectors;

import static java.net.HttpURLConnection.*;
import static water.clustering.api.HttpResponses.*;

/**
 * A REST endpoint waiting for an external assist to POST a flatfile with H2O nodes.
 * Once a flatfile has been successfully submitted, this endpoint no longer accepts any new calls.
 * It is the caller's responsibility to submit a valid flatfile.
 * <p>
 * There is no parsing or validation done on the flatfile, except for basic emptiness checks.
 * The logic for IPv4/IPv6 parsing lives in the {@link NetworkInit} class and is therefore hidden
 * from this class. As this module is intended to be insertable onto the classpath of any H2O, it does
 * not rely on a specific NetworkInit implementation.
 */
public class AssistedClusteringEndpoint implements HttpHandler, AutoCloseable {

    private static final Logger LOG = Logger.getLogger(AssistedClusteringEndpoint.class);

    private final ReadWriteLock lock = new ReentrantReadWriteLock();
    private final AtomicBoolean flatFileReceived;
    private final ExecutorService flatFileConsumerCallbackExecutor = Executors.newSingleThreadExecutor();
    private final Consumer<String> flatFileConsumer;

    public AssistedClusteringEndpoint(Consumer<String> flatFileConsumer) {
        this.flatFileConsumer = flatFileConsumer;
        this.flatFileReceived = new AtomicBoolean(false);
    }

    @Override
    public void handle(HttpExchange httpExchange) throws IOException {
        if (!POST_METHOD.equals(httpExchange.getRequestMethod())) {
            newResponseCodeOnlyResponse(httpExchange, HTTP_BAD_METHOD);
            return; // Anything but POST is rejected outright.
        }
        String postBody;
        try (InputStreamReader isr = new InputStreamReader(httpExchange.getRequestBody(), StandardCharsets.UTF_8);
             BufferedReader br = new BufferedReader(isr)) {
            postBody = br.lines().collect(Collectors.joining("\n"));
            if (postBody.isEmpty()) {
                newFixedLengthResponse(httpExchange, HTTP_BAD_REQUEST, MIME_TYPE_TEXT_PLAIN,
                        "Unable to parse IP addresses in body. Only one IPv4/IPv6 address per line is accepted.");
                return;
            }
        } catch (IOException e) {
            LOG.error("Received incorrect flatfile request.", e);
            newResponseCodeOnlyResponse(httpExchange, HTTP_BAD_REQUEST);
            return;
        }
        final Lock writeLock = lock.writeLock();
        try {
            writeLock.lock();
            if (flatFileReceived.get()) {
                newFixedLengthResponse(httpExchange, HTTP_BAD_REQUEST, MIME_TYPE_TEXT_PLAIN,
                        "Flatfile already provided.");
                return;
            } else {
                // Do not block the response with internal handling
                flatFileConsumerCallbackExecutor.submit(() -> flatFileConsumer.accept(postBody));
                flatFileReceived.set(true); // Do not accept any new requests once the flatfile has been received.
            }
        } finally {
            writeLock.unlock();
        }
        newResponseCodeOnlyResponse(httpExchange, HTTP_OK);
    }

    @Override
    public void close() {
        flatFileConsumerCallbackExecutor.shutdown();
    }
}
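/*
 * Sketch (not in the original source) that mounts the endpoint above on a bare
 * com.sun.net.httpserver.HttpServer so it can be probed in isolation. The port and context path
 * here are assumptions for illustration; the real wiring is done by AssistedClusteringRestApi below.
 * Assumes the h2o-clustering classes are on the classpath.
 */
import com.sun.net.httpserver.HttpServer;
import water.clustering.api.AssistedClusteringEndpoint;

import java.io.IOException;
import java.net.InetSocketAddress;

public class AssistedClusteringEndpointSketch {
    public static void main(String[] args) throws IOException {
        // The callback just prints whatever flatfile the endpoint receives.
        AssistedClusteringEndpoint endpoint =
                new AssistedClusteringEndpoint(flatfile -> System.out.println("Got flatfile:\n" + flatfile));

        HttpServer server = HttpServer.create(new InetSocketAddress(8080), 0);
        server.createContext("/clustering/flatfile", endpoint);
        server.start();
        // POST a newline-separated list of node IPs to /clustering/flatfile;
        // the endpoint accepts exactly one flatfile and rejects later submissions.
    }
}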
0
java-sources/ai/h2o/h2o-clustering/3.46.0.7/water/clustering
java-sources/ai/h2o/h2o-clustering/3.46.0.7/water/clustering/api/AssistedClusteringRestApi.java
package water.clustering.api;

import com.sun.net.httpserver.HttpServer;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Objects;
import java.util.function.Consumer;

/**
 * REST API definition for the assisted clustering function.
 */
public class AssistedClusteringRestApi implements AutoCloseable {

    /**
     * Default port to bind to / listen on.
     */
    private static final int DEFAULT_PORT = 8080;
    public static final String ASSISTED_CLUSTERING_PORT_KEY = "H2O_ASSISTED_CLUSTERING_API_PORT";

    private final AssistedClusteringEndpoint assistedClusteringEndpoint;
    private final HttpServer server;

    /**
     * Creates, but does not start, the assisted clustering REST API. To start the REST API, please use
     * the {@link #start()} method.
     * <p>
     * The REST API is bound to a default port of 8080, unless specified otherwise by the
     * H2O_ASSISTED_CLUSTERING_API_PORT environment variable.
     */
    public AssistedClusteringRestApi(Consumer<String> flatFileConsumer) throws IOException {
        Objects.requireNonNull(flatFileConsumer);
        this.assistedClusteringEndpoint = new AssistedClusteringEndpoint(flatFileConsumer);
        int port = getPort();
        server = HttpServer.create(new InetSocketAddress(port), 0);
        addMappings();
    }

    /**
     * @return Either the user-defined port from the environment variable or the default port to bind the REST API to.
     */
    private static int getPort() {
        final String customPort = System.getenv(ASSISTED_CLUSTERING_PORT_KEY);
        if (customPort == null) {
            return DEFAULT_PORT;
        }
        try {
            return Integer.parseInt(customPort);
        } catch (NumberFormatException e) {
            final String errorMessage = String.format("Unusable port for Assisted clustering REST API to bind to: '%s'", customPort);
            throw new IllegalArgumentException(errorMessage, e);
        }
    }

    private void addMappings() {
        server.createContext("/clustering/flatfile", assistedClusteringEndpoint);
        server.createContext("/cluster/status", new H2OClusterStatusEndpoint());
    }

    /**
     * From AutoCloseable - aids usage inside try-with-resources blocks.
     */
    @Override
    public void close() {
        assistedClusteringEndpoint.close();
        server.stop(0);
    }

    public void start() throws IOException {
        server.start();
    }
}
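/*
 * End-to-end sketch (not in the original source): start the assisted clustering REST API with a
 * simple callback and keep it running while an external orchestrator POSTs the flatfile, e.g.
 *   curl -X POST --data-binary $'10.0.0.1\n10.0.0.2' http://localhost:8080/clustering/flatfile
 * The callback body, the sleep duration, and the addresses are assumptions for illustration.
 */
import water.clustering.api.AssistedClusteringRestApi;

import java.io.IOException;

public class AssistedClusteringRestApiSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        // try-with-resources works because the API implements AutoCloseable.
        try (AssistedClusteringRestApi api =
                     new AssistedClusteringRestApi(flatfile -> System.out.println("Flatfile received:\n" + flatfile))) {
            api.start(); // binds to 8080 unless H2O_ASSISTED_CLUSTERING_API_PORT overrides it
            Thread.sleep(60_000); // keep the server alive long enough for the external POST
        }
    }
}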