index (int64) | repo_id (string) | file_path (string) | content (string)
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/ThreadHelper.java
package water; class ThreadHelper { static void initCommonThreadProperties(Thread t) { initCommonThreadProperties(H2O.ARGS, t); } static void initCommonThreadProperties(H2O.OptArgs args, Thread t) { if (args.embedded) { t.setDaemon(true); } } }
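A minimal usage sketch of the helper above: threads routed through initCommonThreadProperties become daemon threads when H2O runs embedded (args.embedded), so they cannot pin the host JVM at shutdown. The runnable and thread name below are illustrative, not from the H2O sources.

// Hypothetical caller: apply H2O's common thread properties before start.
// In embedded deployments the thread becomes a daemon, so the embedding
// application can exit without waiting on it.
Thread worker = new Thread(() -> System.out.println("working"), "h2o-worker");
ThreadHelper.initCommonThreadProperties(worker); // daemon iff H2O.ARGS.embedded
worker.start();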
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/TimeLine.java
package water; import java.net.InetAddress; import java.net.UnknownHostException; import sun.misc.Unsafe; import water.nbhm.UtilUnsafe; import water.util.Log; /** * Maintain a VERY efficient list of events in the system. This must be VERY * cheap to call, as it will get called a lot. On demand, we can snapshot this * list, gather all other lists from all other (responsive) Nodes, and build a * whole-Cloud timeline for dumping. * * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ public class TimeLine extends UDP { private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); // The TimeLine buffer. // The TimeLine buffer is full of Events; each event has a timestamp and some // event bytes. The buffer is a classic ring buffer; we toss away older // events. We snapshot the buffer by replacing it with a fresh array. The // index of the next free slot is kept in the 1st long of the array, and // there are MAX_EVENTS (a power of 2) more slots. // A TimeLine event is: // - Milliseconds since JVM boot; 4 bytes // - IP4 of send/recv // - Sys.Nano, 8 bytes-3 bits // - Nano low bit is 1 if packet was dropped, next bit is 0 for send, 1 for recv, next bit is 0 for udp, 1 for tcp // - 16 bytes of payload; 1st byte is a udp_type opcode, next 4 bytes are typically task# public static final int MAX_EVENTS=2048; // Power-of-2, please static final int WORDS_PER_EVENT=4; static final long[] TIMELINE = new long[MAX_EVENTS*WORDS_PER_EVENT+1]; static long JVM_BOOT_MSEC = System.currentTimeMillis(); // Snapshot and return the current TIMELINE array private static long[] snapshot() { return TIMELINE.clone(); } // CAS access to the TIMELINE array private static final int _Lbase = _unsafe.arrayBaseOffset(long[].class); private static final int _Lscale = _unsafe.arrayIndexScale(long[].class); private static long rawIndex(long[] ary, int i) { assert i >= 0 && i < ary.length; return _Lbase + i * _Lscale; } private static boolean CAS( long[] A, int idx, long old, long nnn ) { return _unsafe.compareAndSwapLong( A, rawIndex(A,idx), old, nnn ); } // Return the next index into the TIMELINE array private static int next_idx( long [] tl ) { // Spin until we can CAS-acquire a fresh index while( true ) { int oldidx = (int)tl[0]; int newidx = (oldidx+1)&(MAX_EVENTS-1); if( CAS( tl, 0, oldidx, newidx ) ) return oldidx; } } // Record 1 event, the first 16 bytes of this buffer. This is expected to be // a high-volume multi-thread operation so needs to be fast. "sr" is send- // receive and must be either 0 or 1. "drop" is whether or not the UDP // packet is dropped as-if a network drop, and must be either 0 (kept) or 2 // (dropped). private static void record2( H2ONode h2o, long ns, boolean tcp, int sr, int drop, long b0, long b8 ) { final long ms = System.currentTimeMillis(); // Read first, in case we're slow storing values long deltams = ms-JVM_BOOT_MSEC; assert deltams < 0x0FFFFFFFFL; // No daily overflow final long[] tl = TIMELINE; // Read once, in case the whole array shifts out from under us final int idx = next_idx(tl); // Next free index tl[idx*WORDS_PER_EVENT+0+1] = (deltams)<<32 | (h2o.ip4()&0x0FFFFFFFFL); tl[idx*WORDS_PER_EVENT+1+1] = (ns&~7)| (tcp?4:0)|sr|drop; // More complexities: record the *receiver* port in the timeline - but not // in the outgoing UDP packet! The outgoing packet always has the sender's // port (that's us!) - which means the recorded timeline packet normally // never carries the *receiver* port - meaning the sender's timeline does // not record who he sent to!
With this hack the Timeline record always // contains the info about "the other guy": inet+port for the receiver in // the sender's Timeline, and vice-versa for the receiver's Timeline. if( sr==0 ) b0 = (b0 & ~0xFFFF00) | (h2o._key.getInternalPort()<<8); tl[idx*WORDS_PER_EVENT+2+1] = b0; tl[idx*WORDS_PER_EVENT+3+1] = b8; } private static void record1( AutoBuffer b, boolean tcp, int sr, int drop) { try { int lim = b._bb.limit(); int pos = b._bb.position(); b._bb.limit(16); long lo = b.get8(0), hi = b.get8(8); final long ns = System.nanoTime(); record2(b._h2o, ns, tcp, sr, drop, lo, hi); b._bb.limit(lim); b._bb.position(pos); } catch(Throwable t) { Log.err("Timeline record failed, " + t.toString(), t); } } static void record_send( AutoBuffer b, boolean tcp) { record1(b,tcp,0, 0); } static void record_recv( AutoBuffer b, boolean tcp, int drop) { record1(b,tcp,1,drop); } // Record a completed I/O event. The nanosecond time slot is actually nano's-blocked-on-io // static void record_IOclose( AutoBuffer b, int flavor ) { // H2ONode h2o = b._h2o==null ? H2O.SELF : b._h2o; // // First long word going out has sender-port and a 'bad' control packet // long b0 = UDP.udp.i_o.ordinal(); // Special flag to indicate io-record and not an rpc-record // b0 |= H2O.SELF._key.getInternalPort()<<8; // b0 |= flavor<<24; // I/O flavor; one of the Value.persist backends // long iotime = b._time_start_ms > 0 ? (b._time_close_ms - b._time_start_ms) : 0; // b0 |= iotime<<32; // msec from start-to-finish, including non-i/o overheads // long b8 = b._size; // bytes transferred in this I/O // long ns = b._time_io_ns; // nano's blocked doing I/O // record2(h2o,ns,true,b.readMode()?1:0,0,b0,b8); // } /* Record an I/O call without using an AutoBuffer / NIO. * Used by e.g. HDFS & S3 * * @param block_ns - ns of blocking i/o call, * @param io_msg - ms of overall i/o time * @param r_w - 1 for read, 0 for write * @param size - bytes read/written * @param flavor - Value.HDFS or Value.S3 */ // public static void record_IOclose( long start_ns, long start_io_ms, int r_w, long size, int flavor ) { // long block_ns = System.nanoTime() - start_ns; // long io_ms = System.currentTimeMillis() - start_io_ms; // // First long word going out has sender-port and a 'bad' control packet // long b0 = UDP.udp.i_o.ordinal(); // Special flag to indicate io-record and not an rpc-record // b0 |= H2O.SELF._key.getInternalPort()<<8; // b0 |= flavor<<24; // I/O flavor; one of the Value.persist backends // b0 |= io_ms<<32; // msec from start-to-finish, including non-i/o overheads // record2(H2O.SELF,block_ns,true,r_w,0,b0,size); // } // Accessors, for TimeLines that come from all over the system public static int length( ) { return MAX_EVENTS; } // Internal array math so we can keep layout private private static int idx(long[] tl, int i ) { return (((int)tl[0]+i)&(MAX_EVENTS-1))*WORDS_PER_EVENT+1; } // That first long is complex: compressed CTM and IP4 private static long x0( long[] tl, int idx ) { return tl[idx(tl,idx)]; } // ms since boot of JVM public static long ms( long[] tl, int idx ) { return x0(tl,idx)>>>32; } public static InetAddress inet( long[] tl, int idx ) { int adr = (int)x0(tl,idx); byte[] ip4 = new byte[4]; ip4[0] = (byte)(adr ); ip4[1] = (byte)(adr>> 8); ip4[2] = (byte)(adr>>16); ip4[3] = (byte)(adr>>24); try { return InetAddress.getByAddress(ip4); } catch( UnknownHostException e ) { } return null; } // That 2nd long is nanosec, plus the low bit is send/recv & 2nd low is drop public static long ns( long[] tl, int idx ) { return
tl[idx(tl,idx)+1]; } // Returns zero for send, 1 for recv public static int send_recv( long[] tl, int idx ) { return (int)(ns(tl,idx)&1); } // Returns zero for kept, 2 for dropped public static int dropped ( long[] tl, int idx ) { return (int)(ns(tl,idx)&2); } // 16 bytes of payload public static long l0( long[] tl, int idx ) { return tl[idx(tl,idx)+2]; } public static long l8( long[] tl, int idx ) { return tl[idx(tl,idx)+3]; } public static boolean isEmpty( long[] tl, int idx ) { return tl[idx(tl,idx)]==0; } // Take a system-wide snapshot. Return an array, indexed by H2ONode _idx, // containing that Node's snapshot. Try to get all the snapshots as close as // possible to the same point in time. static long[][] SNAPSHOT; static long TIME_LAST_SNAPSHOT = 1; static private H2O CLOUD; // Cloud instance being snapshotted public static H2O getCLOUD(){return CLOUD;} static public long[][] system_snapshot() { // Now spin-wait until we see all snapshots check in. // Be atomic about it. synchronized( TimeLine.class ) { // First see if we have a recent snapshot already. long now = System.currentTimeMillis(); if( now - TIME_LAST_SNAPSHOT < 3*1000 ) return SNAPSHOT; // Use the recent snapshot // A new snapshot is being built? if( TIME_LAST_SNAPSHOT != 0 ) { TIME_LAST_SNAPSHOT = 0; // Only fire off the UDP packet once; flag it // Make a new empty snapshot CLOUD = H2O.CLOUD; SNAPSHOT = new long[CLOUD.size()][]; // Broadcast a UDP packet, with the hopes of getting all SnapShots as close // as possible to the same point in time. new AutoBuffer(H2O.SELF,udp.timeline._prior).putUdp(udp.timeline).close(); } // Spin until all snapshots appear while( true ) { boolean done = true; for( int i=0; i<CLOUD._memary.length; i++ ) if( SNAPSHOT[i] == null ) done = false; if( done ) break; try { TimeLine.class.wait(); } catch( InterruptedException e ) {} } TIME_LAST_SNAPSHOT = System.currentTimeMillis(); return SNAPSHOT; } } // Send our most recent timeline to the remote via TCP @Override AutoBuffer call( AutoBuffer ab ) { long[] a = snapshot(); if( ab._h2o == H2O.SELF ) { synchronized(TimeLine.class) { if (CLOUD != null) { for (int i = 0; i < CLOUD._memary.length; i++) if (CLOUD._memary[i] == H2O.SELF) SNAPSHOT[i] = a; } TimeLine.class.notify(); } } else // Send timeline to remote new AutoBuffer(ab._h2o,udp.timeline._prior).putUdp(UDP.udp.timeline).putA8(a).close(); return null; } // Receive a remote timeline static void tcp_call( final AutoBuffer ab ) { ab.getPort(); long[] snap = ab.getA8(); if (CLOUD != null) { int idx = CLOUD.nidx(ab._h2o); if (idx >= 0 && idx < SNAPSHOT.length) SNAPSHOT[idx] = snap; // Ignore out-of-cloud timelines } ab.close(); synchronized (TimeLine.class) { TimeLine.class.notify(); } } String print16( AutoBuffer ab ) { return ""; } // no extra info in a timeline packet /** * Only for debugging. * Prints local timeline to stderr. * * To be used in case of an error when global timeline cannot be relied upon as we might not be able to talk to other nodes.
*/ static void printMyTimeLine(){ long [] s = TimeLine.snapshot(); System.err.println("===================================<TIMELINE>=============================================="); for(int i = 0; i < TimeLine.length(); ++i) { long lo = TimeLine.l0(s, i),hi = TimeLine.l8(s, i); int port = (int)((lo >> 8) & 0xFFFF); String op = TimeLine.send_recv(s,i) == 0?"SEND":"RECV"; if(!TimeLine.isEmpty(s, i) && (lo & 0xFF) == UDP.udp.exec.ordinal()) System.err.println(TimeLine.ms(s, i) + ": " + op + " " + (((TimeLine.ns(s, i) & 4) != 0)?"TCP":"UDP") + TimeLine.inet(s, i) + ":" + port + " | " + UDP.printx16(lo, hi)); } System.err.println("==========================================================================================="); } }
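The slot-claim protocol above — spin on a CAS over word 0 of the TIMELINE array until an index is acquired — can be shown without sun.misc.Unsafe. Below is a minimal, self-contained sketch of the same lock-free ring-buffer idiom using java.util.concurrent.atomic.AtomicLongArray; the class and method names are illustrative substitutions, not H2O's.

import java.util.concurrent.atomic.AtomicLongArray;

// Sketch of TimeLine's slot-claim protocol. Slot 0 holds the next-free index;
// MAX_EVENTS is a power of 2 so wrap-around is a cheap mask, and older events
// are silently overwritten, exactly as in the ring buffer above.
class RingClaim {
  static final int MAX_EVENTS = 2048, WORDS_PER_EVENT = 4;
  final AtomicLongArray tl = new AtomicLongArray(MAX_EVENTS * WORDS_PER_EVENT + 1);

  int nextIdx() {
    while (true) {                       // spin until we CAS-acquire an index
      long old = tl.get(0);
      long nxt = (old + 1) & (MAX_EVENTS - 1);
      if (tl.compareAndSet(0, old, nxt)) return (int) old;
    }
  }

  void record(long w0, long w1, long w2, long w3) {
    int base = nextIdx() * WORDS_PER_EVENT + 1; // +1 skips the index slot
    tl.set(base    , w0);   // e.g. ms-since-boot | ip4
    tl.set(base + 1, w1);   // e.g. nanos with drop/send-recv/tcp flag bits
    tl.set(base + 2, w2);   // payload, first 8 bytes
    tl.set(base + 3, w3);   // payload, last 8 bytes
  }
}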
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/TypeMap.java
package water; import water.api.schemas3.*; import water.nbhm.NonBlockingHashMap; import water.util.*; import java.io.PrintStream; import java.util.Arrays; import java.util.ServiceLoader; /** Internal H2O class used to build and maintain the cloud-wide type mapping. * Only public to expose a few constants to subpackages. No exposed user * calls. */ public class TypeMap { static public final short NULL, PRIM_B, ICED, H2OCC, C1NCHUNK, FRAME, VECGROUP, ESPCGROUP; // This list contains all classes that are needed at cloud initialization time. private static final String[] BUILTIN_BOOTSTRAP_CLASSES = { " BAD", "[B", // 1 - water.Iced.class.getName(), // 2 - Base serialization class water.H2O.H2OCountedCompleter.class.getName(), // 3 - Base serialization class water.HeartBeat.class.getName(), // Used to Paxos up a cloud & leader water.H2ONode.class.getName(), // Needed to write H2ONode target/sources water.FetchClazz.class.getName(), // used to fetch IDs from leader water.FetchId.class.getName(), // used to fetch IDs from leader water.DTask.class.getName(), // Needed for those first Tasks water.fvec.Chunk.class.getName(), // parent of Chunk water.fvec.C1NChunk.class.getName(),// used as constant in parser water.fvec.Frame.class.getName(), // used in TypeaheadKeys & Exec2 water.fvec.Vec.VectorGroup.class.getName(), // Used in TestUtil water.fvec.Vec.ESPC.class.getName(), // Used in TestUtil // Status pages looked at without locking the cloud water.api.Schema.class.getName(), RequestSchemaV3.class.getName(), SchemaV3.Meta.class.getName(), SchemaV3.class.getName(), CloudV3.class.getName(), CloudV3.NodeV3.class.getName(), AboutV3.class.getName(), AboutEntryV3.class.getName(), water.UDPRebooted.ShutdownTsk.class.getName(), // Mistyped hack URLs H2OErrorV3.class.getName(), // Ask for ModelBuilders list RouteV3.class.getName(), ModelBuildersV3.class.getName(), // So Flow can ask about possible Model Builders without locking water.util.IcedSortedHashMap.class.getName(), // Seems wildly not-needed hex.schemas.ModelBuilderSchema.IcedHashMapStringModelBuilderSchema.class.getName(), // Checking for Flow clips NodePersistentStorageV3.class.getName(), NodePersistentStorageV3.NodePersistentStorageEntryV3.class.getName(), // Beginning to hunt for files water.util.IcedLong.class.getName(), water.util.IcedAtomicInt.class.getName(), water.util.IcedDouble.class.getName(), water.util.IcedBitSet.class.getName(), water.util.IcedHashSet.class.getName(), water.util.IcedHashMap.class.getName(), water.util.IcedHashMapBase.class.getName(), water.util.IcedHashMapGeneric.class.getName(), water.util.IcedHashMapGeneric.IcedHashMapStringString.class.getName(), water.util.IcedHashMapGeneric.IcedHashMapStringObject.class.getName(), TypeaheadV3.class.getName(), // Allow typeahead without locking }; // Class name -> ID mapping static private final NonBlockingHashMap<String, Integer> MAP = new NonBlockingHashMap<>(); // ID -> Class name mapping static String[] CLAZZES; // ID -> pre-allocated Golden Instance of Icer static private Icer[] GOLD; // Unique IDs static private int IDS; // Number of bootstrap classes static final int BOOTSTRAP_SIZE; // JUnit helper flag static public volatile boolean _check_no_locking; // ONLY TOUCH IN AAA_PreCloudLock! 
static { CLAZZES = findAllBootstrapClasses(); BOOTSTRAP_SIZE = CLAZZES.length; GOLD = new Icer[BOOTSTRAP_SIZE]; int id=0; // The initial set of Type IDs to boot with for( String s : CLAZZES ) MAP.put(s,id++); IDS = id; assert IDS == BOOTSTRAP_SIZE; // Some statically known names, to make life easier during e.g. bootup & parse NULL = (short) -1; PRIM_B = (short)onIce("[B"); ICED = (short)onIce("water.Iced"); assert ICED ==2; // Matches Iced custom serializer H2OCC = (short)onIce("water.H2O$H2OCountedCompleter"); assert H2OCC==3; // Matches custom serializer C1NCHUNK = (short)onIce("water.fvec.C1NChunk"); // Used in water.fvec.FileVec FRAME = (short)onIce("water.fvec.Frame"); // Used in water.Value VECGROUP = (short)onIce("water.fvec.Vec$VectorGroup"); // Used in TestUtil ESPCGROUP = (short)onIce("water.fvec.Vec$ESPC"); // Used in TestUtil } /** * Collect built-in bootstrap classes and bootstrap classes from all extensions. * @return array of class names, built-in classes are listed first followed by sorted list of classes from extensions */ static synchronized String[] findAllBootstrapClasses() { String[] additionalBootstrapClasses = new String[0]; ServiceLoader<TypeMapExtension> extensionsLoader = ServiceLoader.load(TypeMapExtension.class); for (TypeMapExtension ext : extensionsLoader) { additionalBootstrapClasses = ArrayUtils.append(additionalBootstrapClasses, ext.getBoostrapClasses()); } Arrays.sort(additionalBootstrapClasses); return ArrayUtils.append(BUILTIN_BOOTSTRAP_CLASSES, additionalBootstrapClasses); } /** * Retrieves the collection of bootstrap classes. * @return array of class names */ public static String[] bootstrapClasses() { return Arrays.copyOf(CLAZZES, BOOTSTRAP_SIZE); } // The major complexity of this code is that there are FOUR major data forms // which get converted to one another. At various times the code is // presented with one of the forms, and asked for another form, sometimes // forcing first to the other form. // // (1) Type ID - 2 byte shortcut for an Iced type // (2) String clazz name - the class name for an Iced type // (3) Iced POJO - an instance of Iced, the distributable workhorse object // (4) Icer POJO - an instance of Icer, the serializing delegate for Iced // // Some sample code paths: // <clinit>: convert string -> ID (then set static globals) // new code: fetch remote string->ID mapping // new code: leader sets string->ID mapping // printing: id -> string // deserial: id -> string -> Icer -> Iced (slow path) // deserial: id -> Icer -> Iced (fast path) // lookup : id -> string (on leader) // // returns the ID for an existing className (fails if class doesn't exist/cannot be loaded) public static int getIcedId(String className) { Integer I = MAP.get(className); if (I != null) return I; try { Class.forName(className); } catch (ClassNotFoundException e) { throw new IllegalArgumentException("Class " + className + " is not known to H2O.", e); } return onIce(className); } // During first Icing, get a globally unique class ID for a className static int onIce(Iced ice) { return onIce(ice.getClass().getName()); } static int onIce(Freezable ice) { return onIce(ice.getClass().getName()); } public static int onIce(String className) { Integer I = MAP.get(className); if( I != null ) return I; // Need to install a new cloud-wide type ID for className. assert H2O.CLOUD.size() > 0 : "No cloud when getting type id for "+className; // Am I leader, or not? Lock the cloud to find out Paxos.lockCloud(className); // Leader: pick an ID.
Not-the-Leader: fetch ID from leader. int id = H2O.CLOUD.leader() == H2O.SELF ? -1 : FetchId.fetchId(className); return install(className,id); } // Quick check to see if cached private static Icer goForGold( int id ) { Icer gold[] = GOLD; // Read once, in case resizing // Racily read the GOLD array return id < gold.length ? gold[id] : null; } static String classNameLocal(final int id) { if( id == PRIM_B ) return "[B"; String[] clazs = CLAZZES; // Read once, in case resizing if( id < clazs.length ) { // Might be installed as a className mapping with no Icer (yet) return clazs[id]; // Racily read the CLAZZES array } return null; } // Reverse: convert an ID to a className possibly fetching it from leader. public static String className(final int id) { String s = classNameLocal(id); if (s != null) return s; // Has the className already Paxos.lockCloud("Class Id="+id); // If the leader is already selected, then the cloud is already locked; but we don't know -> lock now s = FetchClazz.fetchClazz(id); // Fetch class name string from leader if (s == null) { // this is bad - we are missing the mapping and cannot get it from anywhere if (H2O.isCI()) { // when running on CI - dump all local TypeMaps to get some idea of what happened new PrintTypeMap().doAllNodes(); } throw new IllegalStateException("Leader has no mapping for id " + id); } install( s, id ); // Install name<->id mapping return s; } // Install the type mapping under lock, and grow all the arrays as needed. // The grow-step is not obviously race-safe: readers of all the arrays will // get either the old or new arrays. However readers are all readers with // smaller type ids, and these will work fine in either old or new arrays. synchronized static private int install( String className, int id ) { assert !_check_no_locking : "Locking cloud to assign typeid to "+className; if( id == -1 ) { // Leader requesting a new ID assert H2O.CLOUD.leader() == H2O.SELF; // Only leaders get to pick new IDs Integer i = MAP.get(className); if( i != null ) return i; // Check again under lock for already having an ID id = IDS++; // Leader gets an ID under lock } else { String localClassName = classNameLocal(id); if (localClassName != null) { if (localClassName.equals(className)) { return id; // Nothing to do - we already got the mapping } else { throw new IllegalStateException( "Inconsistent mapping: id=" + id + " is already mapped to " + localClassName + "; was requested to be mapped to " + className + "!"); } } } MAP.put(className,id); // No race on insert, since under lock // Expand lists to handle new ID, as needed if( id >= CLAZZES.length ) CLAZZES = Arrays.copyOf(CLAZZES,Math.max(CLAZZES.length<<1,id+1)); if( id >= GOLD .length ) GOLD = Arrays.copyOf(GOLD ,Math.max(CLAZZES.length<<1,id+1)); CLAZZES[id] = className; return id; } // Figure out the mapping from a type ID to a Class. Happens many places, // including during deserialization when a Node will be presented with a // fresh new ID with no idea what it stands for. Does NOT resize the GOLD // array, since the id->className mapping has already happened. public static Icer getIcer( Freezable ice ) { return getIcer(onIce(ice),ice.getClass()); } static Icer getIcer( int id, Iced ice ) { return getIcer(id,ice.getClass()); } static Icer getIcer( int id, Freezable ice ) { return getIcer(id,ice.getClass()); } static Icer getIcer( int id, Class ice_clz ) { Icer f = goForGold(id); if( f != null ) return f; // Lock on the Iced class during auto-gen - so we only gen the Icer for // a particular Iced class once.
//noinspection SynchronizationOnLocalVariableOrMethodParameter synchronized( ice_clz ) { f = goForGold(id); // Recheck under lock if( f != null ) return f; // Hard work: make a new delegate class try { f = Weaver.genDelegate(id,ice_clz); } catch( Exception e ) { Log.err("Weaver generally only throws if classfiles are not found, e.g. IDE setups running test code from a remote node that is not in the classpath on this node."); throw Log.throwErr(e); } // Now install under the TypeMap class lock, so the GOLD array is not // resized out from under the installation. synchronized( TypeMap.class ) { assert id < BOOTSTRAP_SIZE || !(f.theFreezable() instanceof BootstrapFreezable) : "Class " + ice_clz + " is not BootstrapFreezable"; return GOLD[id]=f; } } } static Iced newInstance(int id) { return (Iced) newFreezable(id); } static <T extends Freezable> T newFreezable(int id, Class<T> tc) { @SuppressWarnings("unchecked") T iced = (T) newFreezable(id); assert tc == null || tc.isInstance(iced) : tc.getName() + " != " + iced.getClass().getName() + ", id = " + id; return iced; } /** Create a new freezable object based on its unique ID. * * @param id freezable unique id (provided by TypeMap) * @return new instance of Freezable object */ public static Freezable newFreezable(int id) { Freezable iced = theFreezable(id); assert iced != null : "No instance of id "+id+", class="+CLAZZES[id]; return iced.clone(); } /** Create a new freezable object based on its className. * * @param className class name * @return new instance of Freezable object */ public static Freezable newFreezable(String className) { return theFreezable(onIce(className)).clone(); } /** The single golden instance of an Iced, used for cloning and instanceof * tests, do-not-modify since it's The Golden Instance and shared. */ public static Freezable theFreezable(int id) { try { Icer f = goForGold(id); return (f==null ? getIcer(id, Class.forName(className(id))) : f).theFreezable(); } catch( ClassNotFoundException e ) { throw Log.throwErr(e); } } public static Freezable getTheFreezableOrThrow(int id) throws ClassNotFoundException { Icer f = goForGold(id); return (f==null ? getIcer(id, Class.forName(className(id))) : f).theFreezable(); } static void printTypeMap(PrintStream ps) { final String[] clazzes = CLAZZES; for (int i = 0; i < clazzes.length; i++) { final String className = CLAZZES[i]; ps.println(i + " -> " + className + " (map: " + (className != null ? MAP.get(className) : null) + ")"); } } private static class PrintTypeMap extends MRTask<PrintTypeMap> { @Override protected void setupLocal() { System.err.println("TypeMap dump on node " + H2O.SELF); printTypeMap(System.err); } } }
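The caching discipline running through goForGold()/install()/getIcer() above — racy unsynchronized array reads on the fast path, recheck under a lock on the slow path, grow-and-install under that lock — generalizes into a small sketch. All names below are hypothetical; this is the shape of the technique, not H2O's implementation.

// Generic sketch of TypeMap's caching discipline: readers racily index an
// array published through a volatile field; writers re-check under a lock,
// build the expensive value once, and grow/install under that same lock so
// resizes cannot drop a write. A racy reader that misses simply falls into
// the slow path, mirroring goForGold() falling through to getIcer().
class IdCache<V> {
  private volatile Object[] gold = new Object[16];

  @SuppressWarnings("unchecked")
  V get(int id, java.util.function.IntFunction<V> build) {
    Object[] g = gold;                          // read once, in case of resize
    if (id < g.length && g[id] != null) return (V) g[id]; // fast racy hit
    synchronized (this) {                       // slow path: recheck under lock
      if (id < gold.length && gold[id] != null) return (V) gold[id];
      V v = build.apply(id);                    // expensive, e.g. weave an Icer
      if (id >= gold.length)                    // readers see old or new array
        gold = java.util.Arrays.copyOf(gold, Math.max(gold.length << 1, id + 1));
      gold[id] = v;
      return v;
    }
  }
}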
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/TypeMapExtension.java
package water; /** * Allows extensions to dynamically modify the behavior of TypeMap. */ public interface TypeMapExtension { /** * Userspace-defined bootstrap classes. Bootstrap classes have fixed, cluster-wide-known * ids. * * Extensions can leverage this to facilitate the exchange of serialized objects between different * cluster instances. Because the set of bootstrap classes is known, this mitigates the possibility * of a Java deserialization attack. * * This is used e.g. in the XGBoost external cluster. * * @return class names stored in an array */ String[] getBoostrapClasses(); }
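Since TypeMap.findAllBootstrapClasses() discovers implementations with ServiceLoader.load(TypeMapExtension.class), wiring in an extension takes an implementation class plus a provider-configuration file. A sketch with hypothetical package, class, and returned names (note the interface method really is spelled getBoostrapClasses):

package my.ext; // hypothetical package and class names throughout

// Discovered by TypeMap via ServiceLoader; the returned names are sorted
// and appended after the built-in bootstrap list, giving them fixed,
// cluster-wide-known type ids.
public class MyBootstrapClasses implements water.TypeMapExtension {
  @Override public String[] getBoostrapClasses() {
    return new String[] {
      "my.ext.RemoteScoreTask",    // hypothetical Freezables exchanged
      "my.ext.RemoteScoreResult",  // between separate cluster instances
    };
  }
}
// Registered through a provider-configuration file on the classpath:
//   META-INF/services/water.TypeMapExtension
// whose single line is:
//   my.ext.MyBootstrapClasses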
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/UDP.java
package water; import water.util.UnsafeUtils; /** * Do Something with an incoming UDP packet * * Classic Single Abstract Method pattern. * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ public abstract class UDP { /** UDP packet types, and their handlers */ public static enum udp { bad(false,null,(byte)-1), // Do not use the zero packet, too easy to make mistakes // Some health-related packet types. These packets are all stateless, in // that we do not need to send any replies back. heartbeat ( true, new UDPHeartbeat(),H2O.MAX_PRIORITY), rebooted ( true, new UDPRebooted() ,H2O.MAX_PRIORITY), // This node has rebooted recently timeline (false, new TimeLine() ,H2O.MAX_PRIORITY), // Get timeline dumps from across the Cloud // All my *reliable* tasks (below) are sent to remote nodes who then ACK // back an answer. To be reliable, I might send the TASK multiple times. // To get a reliable answer, the remote might send me multiple ACKs with // the same answer every time. When does the remote know it can quit // tracking reply ACKs? When it receives an ACKACK. ackack(false,new UDPAckAck(),H2O.ACK_ACK_PRIORITY), // a generic ACKACK for a UDP async task // In order to unpack an ACK (which contains an arbitrary returned POJO) // the receiver might need to fetch an id/class mapping from the leader - // while inside an ACK-priority thread holding onto lots of resources // (e.g. TCP channel). Allow the fetch to complete on a higher priority // thread. fetchack(false,new UDPFetchAck(),H2O.FETCH_ACK_PRIORITY), // a class/id fetch ACK ack (false,new UDPAck (),H2O.ACK_PRIORITY), // a generic ACK for a UDP async task nack (false,new UDPNack(),H2O.ACK_PRIORITY), // a generic NACK // These packets all imply some sort of request/response handshake. // We'll hang on to these packets; filter out dup sends and auto-reply // identical result ACK packets. exec(false,new RPC.RemoteHandler(),H2O.DESERIAL_PRIORITY), // Remote hi-q execution request i_o (false,new UDP.IO_record(),(byte)-1); // Only used to profile I/O final UDP _udp; // The Callable S.A.M. instance final byte _prior; // Priority final boolean _paxos; // Ignore (or not) packets from outside the Cloud udp( boolean paxos, UDP udp, byte prior ) { _paxos = paxos; _udp = udp; _prior = prior; } static udp[] UDPS = values(); } public static udp getUdp(int id){return udp.UDPS[id];} // Handle an incoming I/O transaction, probably from a UDP packet. The // returned AutoBuffer will be closed(). If the returned buffer is not the // passed-in buffer, the call() method must close its AutoBuffer arg. abstract AutoBuffer call(AutoBuffer ab); // Pretty-print bytes 1-15; byte 0 is the udp_type enum static final char[] cs = new char[32]; static char hex(int x) { x &= 0xf; return (char)(x+((x<10)?'0':('a'-10))); } String print16( AutoBuffer ab ) { for( int i=0; i<16; i++ ) { int b = ab.get1U(); cs[(i<<1) ] = hex(b>>4); cs[(i<<1)+1 ] = hex(b ); } return new String(cs); } // Dispatch on the enum opcode and return a pretty string static private final byte[] pbuf = new byte[16]; static public String printx16( long lo, long hi ) { UnsafeUtils.set8(pbuf, 0, lo); UnsafeUtils.set8(pbuf, 8, hi); return udp.UDPS[(int)(lo&0xFF)]._udp.print16(new AutoBuffer(pbuf)); } private static class IO_record extends UDP { AutoBuffer call(AutoBuffer ab) { throw H2O.fail(); } String print16( AutoBuffer ab ) { int flavor = ab.get1U(3); int iotime = ab.get4 (4); int size = ab.get4 (8); return "I/O "+Value.nameOfPersist(flavor)+" "+iotime+"ms "+size+"b"; } } }
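The enum doubles as a dispatch table: byte 0 of every packet is a udp ordinal, and getUdp(opcode) yields the handler plus its priority and paxos-filter flag. A sketch of a receive loop in that style follows; the loop itself and outsideCloud are hypothetical (real dispatch lives elsewhere in h2o-core), and the direction of the _paxos filter is an assumption.

// Hypothetical receive-side dispatch built on the udp enum above.
static void dispatch(AutoBuffer ab) {
  int opcode = ab.get1U(0);               // byte 0 is the udp_type ordinal
  UDP.udp type = UDP.getUdp(opcode);      // table lookup, no switch needed
  if (type._paxos && outsideCloud(ab))    // health packets police cloud
    return;                               // membership (direction assumed)
  AutoBuffer ret = type._udp.call(ab);    // the S.A.M. handler does the work
  if (ret != null) ret.close();           // contract: returned buffer is closed;
}                                         // callee closed ab if it returned another
static boolean outsideCloud(AutoBuffer ab) { return false; } // placeholder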
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/UDPAck.java
package water; /** * A remote task request has just returned an ACK with an answer * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ class UDPAck extends UDP { // Received an ACK for a remote Task. Ping the task. AutoBuffer call(AutoBuffer ab) { int tnum = ab.getTask(); RPC<?> t = ab._h2o.taskGet(tnum); // Forgotten task, but still must ACKACK if( t == null ) return RPC.ackack(ab,tnum); return t.response(ab); // Do the 2nd half of this task, includes ACKACK } // Pretty-print bytes 1-15; byte 0 is the udp_type enum String print16( AutoBuffer b ) { return "task# "+b.getTask(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/UDPAckAck.java
package water; /** * A task initiator has his response; we can quit sending him ACKs. * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ class UDPAckAck extends UDP { // Received an ACKACK for a remote Task. Drop the task tracking @Override AutoBuffer call(AutoBuffer ab) { ab._h2o.remove_task_tracking(ab.getTask()); return ab; } // Pretty-print bytes 1-15; byte 0 is the udp_type enum @Override String print16( AutoBuffer ab ) { return "task# "+ab.getTask(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/UDPFetchAck.java
package water; /** * A remote task request has just returned an ACK with an answer * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ // Same as a UDPAck, but running at higher priority and only handling class/id mappings class UDPFetchAck extends UDPAck { }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/UDPHeartbeat.java
package water; /** * A UDP Heartbeat packet. * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ class UDPHeartbeat extends UDP { @Override AutoBuffer call(AutoBuffer ab) { if(ab._h2o != H2O.SELF ) { // Do not update self-heartbeat object // The self-heartbeat is the sole holder of racy cloud-consensus hashes // and if we update it here we risk dropping an update. HeartBeat hb = new HeartBeat().read(ab); if (hb._cloud_name_hash != H2O.SELF._heartbeat._cloud_name_hash) { return ab; } assert ab._h2o != null; ab._h2o.setHeartBeat(hb); Paxos.doHeartbeat(ab._h2o); } return ab; } static void build_and_multicast( H2O cloud, HeartBeat hb ) { // Paxos.print_debug("send: heartbeat ",cloud._memset); assert hb._cloud_hash != 0 || hb._client; // Set before send, please H2O.SELF._heartbeat = hb; hb.write(new AutoBuffer(H2O.SELF,udp.heartbeat._prior).putUdp(UDP.udp.heartbeat)).close(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/UDPNack.java
package water; /** * A remote task re-request; NACK indicating "we heard you" * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ class UDPNack extends UDP { // Received a NACK for a remote Task. Flag the task as heard-but-not-done. private static long THEN; AutoBuffer call(AutoBuffer ab) { int tnum = ab.getTask(); RPC<?> t = ab._h2o.taskGet(tnum); if( t != null ) { assert t._tasknum==tnum; t._nack = true; } return ab; } // Pretty-print bytes 1-15; byte 0 is the udp_type enum String print16( AutoBuffer b ) { return "task# "+b.getTask(); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/UDPRebooted.java
package water; import water.util.Log; /** * A UDP Rebooted packet: this node recently rebooted * * @author <a href="mailto:cliffc@h2o.ai"></a> * @version 1.0 */ public class UDPRebooted extends UDP { public static boolean BIG_DEBUG = false; public static byte MAGIC_SAFE_CLUSTER_KILL_BYTE = 42; public static enum T { none, reboot, shutdown, oom, error, locked, mismatch; public void send(H2ONode target) { assert this != none; // Note! To ensure that an H2O version without the PUBDEV-4959 fix does not bring an H2O with this fix into some unwanted // state, we need to first discover whether we are indeed receiving a shutdown packet from an H2O version with this fix. // For this, we overload the first byte, which is sent in both versions and contains the ordinal number of the request type. // If we choose a number different from any possible ordinal number, we can safely discover which version sent the packet. // When we discover that the packet is from a new version, we can check whether // the shutdown request comes from a node in the current cluster; // otherwise we just ignore the request. new AutoBuffer(target,udp.rebooted._prior) .putUdp(udp.rebooted) .put1(MAGIC_SAFE_CLUSTER_KILL_BYTE) .put1(ordinal()) .putInt(H2O.SELF._heartbeat._cloud_name_hash) .close(); } void broadcast() { send(H2O.SELF); } } static void checkForSuicide(int first_byte, AutoBuffer ab) { if( first_byte != UDP.udp.rebooted.ordinal() ) return; int shutdownPacketType = ab.get1(); if(shutdownPacketType == MAGIC_SAFE_CLUSTER_KILL_BYTE) { // we are running on a version with the PUBDEV-4959 fix shutdownPacketType = ab.get1(); // read the real type int cloud_name_hash_origin = ab.getInt(); if (cloud_name_hash_origin == H2O.SELF._heartbeat._cloud_name_hash) { suicide(T.values()[shutdownPacketType], ab._h2o); }else { ListenerService.getInstance().report("shutdown_fail", cloud_name_hash_origin); } }else{ ListenerService.getInstance().report("shutdown_ignored"); Log.warn("Received shutdownPacketType=" + shutdownPacketType + " request from H2O with older version than 3.14.0.4. This request" + " will be ignored"); } // if we receive a request from H2O with a wrong version, just ignore the request } public static class ShutdownTsk extends DTask<ShutdownTsk> { final H2ONode _killer; final int _timeout; final transient boolean [] _confirmations; final int _nodeId; final int _exitCode; public ShutdownTsk(H2ONode killer, int nodeId, int timeout, boolean [] confirmations, int exitCode){ super(H2O.GUI_PRIORITY); _nodeId = nodeId; _killer = killer; _timeout = timeout; _confirmations = confirmations; _exitCode = exitCode; } transient boolean _didShutDown; private synchronized void doShutdown(int exitCode, String msg){ if(_didShutDown)return; Log.info(msg); H2O.closeAll(); H2O.exit(exitCode); } @Override public void compute2() { Log.info("Orderly shutdown from " + _killer); // start a separate thread which will force termination after timeout expires (in case we don't get ackack in time) new Thread(){ @Override public void run(){ try {Thread.sleep(_timeout);} catch (InterruptedException e) {} doShutdown(_exitCode,"Orderly shutdown may not have been acknowledged to " + _killer + " (no ackack), exiting with exit code " + _exitCode + "."); } }.start(); tryComplete(); } @Override public void onAck(){ _confirmations[_nodeId] = true; } @Override public void onAckAck(){ doShutdown(_exitCode,"Orderly shutdown acknowledged to " + _killer + ", exiting with exit code " + _exitCode + "."); } } static void suicide( T cause, final H2ONode killer ) { String m; switch( cause ) { case none: return; case reboot: return; case shutdown: Log.warn("Orderly shutdown should be handled via ShutdownTsk. Message is from outside of the cloud? Ignoring it."); return; case oom: m = "Out of Memory, Heap Space exceeded, increase Heap Size,"; break; case error: if (BIG_DEBUG) Thread.dumpStack(); m = "Error leading to a cloud kill"; break; case locked: m = "Attempting to join an H2O cloud that is no longer accepting new H2O nodes"; break; case mismatch: m = "Attempting to join an H2O cloud with a different H2O version (is H2O already running?)"; break; default: m = "Received kill " + cause; break; } H2O.closeAll(); Log.err(m+" from "+killer); H2O.die("Exiting."); } @Override AutoBuffer call(AutoBuffer ab) { checkForSuicide(udp.rebooted.ordinal(),ab); if( ab._h2o != null ) ab._h2o.rebooted(); return ab; } // Pretty-print bytes 1-15; byte 0 is the udp_type enum @Override String print16(AutoBuffer ab) { ab.getPort(); int value = ab.get1(); if (value == MAGIC_SAFE_CLUSTER_KILL_BYTE) { value = ab.get1(); } return T.values()[value].toString(); } }
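The layout trick above is easiest to see over a raw byte[]: an old sender's first payload byte is a T ordinal (0..6), while a fixed sender leads with 42, which no T ordinal can equal. A simplified parse sketch, with the AutoBuffer handling elided and big-endian hash encoding assumed:

// Two wire layouts checkForSuicide must tell apart:
//   pre-3.14.0.4 : [type]                      -- type is a T ordinal
//   with the fix : [42][type][cloud_name_hash] -- hash is 4 bytes
static void parseShutdown(byte[] p, int myCloudNameHash) {
  int b0 = p[0] & 0xFF;
  if (b0 == 42) {                       // new format: verify cluster identity
    int type = p[1] & 0xFF;
    int hash = ((p[2] & 0xFF) << 24) | ((p[3] & 0xFF) << 16)
             | ((p[4] & 0xFF) << 8)  |  (p[5] & 0xFF);
    if (hash == myCloudNameHash) System.out.println("shutdown, type " + type);
    else System.out.println("ignored: shutdown from a foreign cluster");
  } else {                              // old format: no hash, cannot verify
    System.out.println("ignored: pre-fix sender, type " + b0);
  }
}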
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/Value.java
package water; import java.io.IOException; import java.util.Arrays; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import jsr166y.ForkJoinPool; import water.fvec.Frame; import water.fvec.Vec; import water.util.Log; import water.util.StringUtils; /** The core Value stored in the distributed K/V store, used to cache Plain Old * Java Objects, and maintain coherency around the cluster. It contains an * underlying byte[] (the {@link Iced} serialized version of the POJO) which * may be spilled to disk and freed by the {@link MemoryManager}, and a cached * copy of the POJO itself. * <p> * Requests to extract the POJO from the Value object first try to return the * cached POJO. If that is missing, then they will re-inflate the POJO from * the {@link Iced} byte[]. If that is missing it is only because the byte[] * was swapped to disk by the {@link Cleaner}. It will be reloaded from disk * and then inflated as normal. * <p> * The H2O {@link DKV} supports the full <em>Java Memory Model</em> coherency * but only with Gets and Puts. Normal Java updates to the cached POJO are * local-node visible (due to X86 and Java coherency rules) but NOT cluster-wide * visible until a Put completes after the update. * <p> * By the same token, updates to the POJO are not reflected in the serialized * form nor the disk-spill copy unless a Put is triggered. As long as a local * thread keeps a pointer to the POJO, they can update it at will. If they * wish to recover the POJO from the DKV at a later time with all updates * intact, they <em>must</em> do a final Put after all updates. * <p> * Value objects maintain the needed coherency state, as well as any cached * copies, plus a bunch of utility and convenience functions. */ public final class Value extends Iced implements ForkJoinPool.ManagedBlocker { /** The Key part of a Key/Value store. Transient, because the Value is * typically found via its Key, and so the Key is available before we get * the Value and does not need to be passed around the wire. Not final, * because Keys are interned slowly (for faster compares) and periodically a * Value's Key will be updated to an interned but equivalent Key. * <p> * Should not be set by any user code. */ public transient Key _key; // --- // Type-id of serialized object; see TypeMap for the list. // Might be a primitive array type, or an Iced POJO private short _type; public int type() { return _type; } /** Class name of the embedded POJO, without needing an actual POJO. */ public String className() { return TypeMap.className(_type); } // Max size of Values before we start asserting. // Sizes around this big, or larger are probably true errors. // In any case, they will cause issues with both GC (giant pause times on // many collectors) and I/O (long term blocking of TCP I/O channels to // service a single request, causing starvation of other requests). public static final int MAX = Integer.MAX_VALUE; //DeepWater models can contain a single byte[] state as large as 3GB /** Size of the serialized wad of bits. Values are wads of bits; known small * enough to 'chunk' politely on disk, or fit in a Java heap (larger Vecs * are built via Chunks) but (much) larger than a UDP packet. Values can * point to either the disk or ram version or both. There's no compression * smarts (done by the big data Chunks) nor de-dup smarts (done by the * nature of a K/V). This is just a local placeholder for some user bits * being held at this local Node.
*/ public int _max; // --- // An array of this Value when cached in DRAM, or NULL if not cached. The // contents of _mem are immutable (Key/Value mappings can be changed by an // explicit PUT action). Cleared to null asynchronously by the memory // manager (but only if persisted to some disk or in a POJO). Can be filled // in by reloading from disk, or by serializing a POJO. private volatile byte[] _mem; final byte[] rawMem() { return _mem; } // --- // A POJO version of the _mem array, or null if the _mem has not been // serialized or if _mem is primitive data and not a POJO. Cleared to null // asynchronously by the memory manager (but only if persisted to some disk, // or in the _mem array). Can be filled in by deserializing the _mem array. // NOTE THAT IF YOU MODIFY any fields of a POJO that is part of a Value, // - this is NOT the recommended programming style, // - those changes are visible to all CPUs on the writing node, // - but not to other nodes, and // - the POJO might be dropped by the MemoryManager and reconstituted from // disk and/or the byte array back to its original form, losing your changes. private volatile Freezable _pojo; Freezable rawPOJO() { return _pojo; } /** Invalidate byte[] cache. Only used to eagerly free memory, for data * which is expected to be read-once. */ public final void freeMem() { assert isPersisted() || _pojo != null || _key.isChunkKey(); _mem = null; } /** Invalidate POJO cache. Only used to eagerly free memory, for data * which is expected to be read-once. */ public final void freePOJO() { assert isPersisted() || _mem != null; _pojo = null; } public final boolean isConsistent() { byte[] mem = _mem; // Read once! if (mem == null) return true; Freezable<?> pojo = _pojo; // Read once! if (pojo == null) return true; if (pojo instanceof Keyed) { Freezable<?> reloaded = TypeMap.newInstance(_type); reloaded = reloaded.reloadFromBytes(mem); return reloaded instanceof Keyed && ((Keyed<?>) reloaded).checksum(true) == ((Keyed<?>) pojo).checksum(true); } else { byte[] pojoBytes = pojo.asBytes(); return Arrays.equals(pojoBytes, mem); } } /** The FAST path get-byte-array - final method for speed. Will (re)build * the mem array from either the POJO or disk. Never returns NULL. * @return byte[] holding the serialized POJO */ public final byte[] memOrLoad() { byte[] mem = _mem; // Read once! if( mem != null ) return mem; Freezable pojo = _pojo; // Read once! if( pojo != null ) // Has the POJO, make raw bytes return _mem = pojo.asBytes(); if( _max == 0 ) return (_mem = new byte[0]); return (_mem = loadPersist()); } // Just an empty shell of a Value, no local data but the Value is "real". // Any attempt to look at the Value will require a remote fetch. final boolean isEmpty() { return _max > 0 && _mem==null && _pojo == null && !isPersisted(); } /** The FAST path get-POJO as an {@link Iced} subclass - final method for * speed. Will (re)build the POJO from the _mem array. Never returns NULL. * @return The POJO, probably the cached instance. */ public final <T extends Iced> T get() { touch(); Iced pojo = (Iced)_pojo; // Read once! if( pojo != null ) return (T)pojo; pojo = TypeMap.newInstance(_type); return (T)(_pojo = pojo.reloadFromBytes(memOrLoad())); } /** The FAST path get-POJO as a {@link Freezable} - final method for speed. * Will (re)build the POJO from the _mem array. Never returns NULL. This * version has more type-checking. * @return The POJO, probably the cached instance.
*/ public final <T extends Freezable> T get(Class<T> fc) { T pojo = getFreezable(); assert fc.isAssignableFrom(pojo.getClass()); return pojo; } /** The FAST path get-POJO as a {@link Freezable} - final method for speed. * Will (re)build the POJO from the _mem array. Never returns NULL. * @return The POJO, probably the cached instance. */ public final <T extends Freezable> T getFreezable() { touch(); Freezable pojo = _pojo; // Read once! if( pojo != null ) return (T)pojo; pojo = TypeMap.newFreezable(_type); pojo.reloadFromBytes(memOrLoad()); return (T)(_pojo = pojo); } // --- // Time of last access to this value. transient long _lastAccessedTime = System.currentTimeMillis(); private void touch() {_lastAccessedTime = System.currentTimeMillis();} // Exposed and used for testing only; used to trigger premature cleaning/disk-swapping void touchAt(long time) {_lastAccessedTime = time;} // --- // Backend persistence info. 3 bits are reserved for 8 different flavors of // backend storage. 1 bit for whether or not the latest _mem field is // entirely persisted on the backend storage. The low 3 bits are final. The // other bit monotonically changes from 0->1. The deleted bit ALSO // monotonically changes 0->1. These two bits cannot be combined without the // use of atomic operations. private volatile byte _persist; // 1 bit of disk/notdisk; 3 bits of backend flavor public final static byte ICE = 1<<0; // ICE: distributed local disks public final static byte HDFS= 2<<0; // HDFS: backed by Hadoop cluster public final static byte S3 = 3<<0; // Amazon S3 public final static byte NFS = 4<<0; // NFS: Standard file system public final static byte GCS = 5<<0; // Google Cloud Storage public final static byte HTTP= 6<<0; // HTTP/HTTPS data source (that accepts byte ranges, "Accept-Ranges: bytes") public final static byte TCP = 7<<0; // TCP: For profile purposes, not a storage system private final static byte BACKEND_MASK = (8-1); final byte backend() { return (byte)(_persist&BACKEND_MASK); } boolean onICE (){ return (backend()) == ICE; } private boolean onHDFS(){ return (backend()) == HDFS; } private boolean onNFS (){ return (backend()) == NFS; } private boolean onS3 (){ return (backend()) == S3; } private boolean onGCS (){ return (backend()) == GCS; } // Manipulate the on-disk bit private final static byte NOTdsk = 0<<3; // latest _mem is persisted or not private final static byte ON_dsk = 1<<3; /** Check if the backing byte[] has been saved-to-disk */ public final boolean isPersisted() { return (_persist&ON_dsk)!=0; } public final void setDsk() { _persist |= ON_dsk; } // note: not atomic, but only monotonically set bit private volatile byte _deleted; // 1 bit of deleted public final boolean isDeleted() { return _deleted != 0; } public final void setDel() { _deleted=1; } // note: not atomic, but only monotonically set bit /** Best-effort store complete Values to disk. 
*/ void storePersist() throws java.io.IOException { // 00 then start writing // 01 delete requested; do not write // 10 already written; do nothing // 11 already written & deleted; do nothing if( isDeleted() ) return; // 01 and 11 cases if( isPersisted() ) return; // 10 case H2O.getPM().store(backend(), this); // Write to disk // 00 -> 10 expected, set write bit // 10 assert; only Cleaner writes // 01 delete-during-write; delete again // 11 assert; only Cleaner writes assert !isPersisted(); // Only Cleaner writes setDsk(); // Not locked, not atomic, so can only be called by one thread: Cleaner if( isDeleted() ) // Check del bit AFTER setting persist bit; close race with deleting user thread H2O.getPM().delete(backend(), this); // Possibly nothing to delete (race with writer) } /** Remove dead Values from disk */ public void removePersist() { // do not yank memory, as we could have a racing get hold on to this // free_mem(); // 00 -> 01 try to delete (racing, probably nothing to delete) // 01 double delete; do nothing // 10 -> 11 delete // 11 double delete; do nothing if( !onICE() ) return; // Wrong filestore? if( isDeleted() ) return; // Already deleted? setDel(); // Set del bit BEFORE testing isPersist if( !isPersisted() ) return;// Nothing there H2O.getPM().delete(backend(), this); // Possibly nothing to delete (race with writer) } /** Load some or all of completely persisted Values */ byte[] loadPersist() { // 00 assert: not written yet // 01 assert: load-after-delete // 10 expected; read // 11 assert: load-after-delete assert isPersisted(); try { byte[] res = H2O.getPM().load(backend(), this); assert !isDeleted(); // Race in user-land: load-after-delete return res; } catch( IOException ioe ) { throw Log.throwErr(ioe); } } String nameOfPersist() { return nameOfPersist(backend()); } /** One of ICE, HDFS, S3, GCS, NFS or TCP, according to where this Value is persisted. * @return Short String of the persistence name */ public static String nameOfPersist(int x) { switch( x ) { case ICE : return "ICE"; case HDFS: return "HDFS"; case S3 : return "S3"; case NFS : return "NFS"; case TCP : return "TCP"; case GCS : return "GCS"; default : return null; } } /** Check if the Value's POJO is a subtype of given type integer. Does not require the POJO. * @return True if the Value's POJO is a subtype. */ public static boolean isSubclassOf(int type, Class clz) { return type != TypeMap.PRIM_B && clz.isAssignableFrom(TypeMap.theFreezable(type).getClass()); } /** Check if the Value's POJO is a {@link Key} subtype. Does not require the POJO. * @return True if the Value's POJO is a {@link Key} subtype. */ public boolean isKey() { return _type != TypeMap.PRIM_B && TypeMap.theFreezable(_type) instanceof Key; } /** Check if the Value's POJO is a {@link Frame} subtype. Does not require the POJO. * @return True if the Value's POJO is a {@link Frame} subtype. */ public boolean isFrame() { return _type != TypeMap.PRIM_B && TypeMap.theFreezable(_type) instanceof Frame; } /** Check if the Value's POJO is a {@link water.fvec.Vec.VectorGroup} subtype. Does not require the POJO. * @return True if the Value's POJO is a {@link water.fvec.Vec.VectorGroup} subtype. */ public boolean isVecGroup() { return _type == TypeMap.VECGROUP; } /** Check if the Value's POJO is a {@link water.fvec.Vec.ESPC} subtype. Does not require the POJO. * @return True if the Value's POJO is a {@link water.fvec.Vec.ESPC} subtype.
*/ public boolean isESPCGroup() { return _type == TypeMap.ESPCGROUP; } /** Check if the Value's POJO is a {@link Lockable} subtype. Does not require the POJO. * @return True if the Value's POJO is a {@link Lockable} subtype. */ public boolean isLockable() { return _type != TypeMap.PRIM_B && TypeMap.theFreezable(_type) instanceof Lockable; } /** Check if the Value's POJO is a {@link Vec} subtype. Does not require the POJO. * @return True if the Value's POJO is a {@link Vec} subtype. */ public boolean isVec() { return _type != TypeMap.PRIM_B && TypeMap.theFreezable(_type) instanceof Vec; } /** Check if the Value's POJO is a {@link hex.Model} subtype. Does not require the POJO. * @return True if the Value's POJO is a {@link hex.Model} subtype. */ public boolean isModel() { return _type != TypeMap.PRIM_B && TypeMap.theFreezable(_type) instanceof hex.Model; } /** Check if the Value's POJO is a {@link Job} subtype. Does not require the POJO. * @return True if the Value's POJO is a {@link Job} subtype. */ public boolean isJob() { return _type != TypeMap.PRIM_B && TypeMap.theFreezable(_type) instanceof Job; } public Class<? extends Freezable> theFreezableClass() { return TypeMap.theFreezable(this._type).getClass(); } // -------------------------------------------------------------------------- /** Construct a Value from all parts; not needed for most uses. This special * constructor is used by {@link water.fvec} to build Value objects over * already-existing Files, so that the File contents will be lazily * swapped-in as the Values are first used. */ public Value(Key k, int max, byte[] mem, short type, byte be ) { assert mem==null || mem.length==max; assert max < MAX : "Value size=0x"+Integer.toHexString(max); _key = k; _max = max; _mem = mem; _type = type; _pojo = null; // For the ICE backend, assume new values are not-yet-written. // For HDFS & NFS backends, assume we come from global data and preserve the // passed-in persist bits byte p = (byte)(be&BACKEND_MASK); _persist = (p==ICE) ? p : be; _rwlock = new AtomicInteger(1); _replicas = null; } // --- public Value(Key k, byte[] mem ) { this(k, mem.length, mem, TypeMap.PRIM_B, ICE); } // --- Value(Key k, String s ) { this(k, StringUtils.bytesOf(s)); } Value(Key k, Iced pojo ) { this(k,pojo,ICE); } Value(Key k, Iced pojo, byte be ) { _key = k; _pojo = pojo; _type = (short)pojo.frozenType(); _mem = pojo.asBytes(); _max = _mem.length; assert _max < MAX : "Value size = " + _max + " (0x"+Integer.toHexString(_max) + ") >= (MAX=" + MAX + ")."; // For the ICE backend, assume new values are not-yet-written. // For HDFS & NFS backends, assume we come from global data and preserve the // passed-in persist bits byte p = (byte)(be&BACKEND_MASK); _persist = (p==ICE) ? p : be; _rwlock = new AtomicInteger(1); _replicas = null; } public Value(Key k, Freezable pojo, int pojoByteSz, byte be) { _key = k; _pojo = pojo; _type = (short)pojo.frozenType(); _mem = null; _max = pojoByteSz; byte p = (byte)(be&BACKEND_MASK); _persist = (p==ICE) ? p : be; _rwlock = new AtomicInteger(1); _replicas = null; } /** Standard constructor to build a Value from a POJO and a Key. */ public Value(Key k, Freezable pojo) { this(k,pojo,ICE); } Value(Key k, Freezable pojo, byte be) { _key = k; _pojo = pojo; _type = (short)pojo.frozenType(); _mem = pojo.asBytes(); _max = _mem.length; byte p = (byte)(be&BACKEND_MASK); _persist = (p==ICE) ?
p : be; _rwlock = new AtomicInteger(1); _replicas = null; } // Custom serializers: the _mem field is racily cleared by the MemoryManager // and the normal serializer then might ship over a null instead of the // intended byte[]. Also, the value is NOT on the deserializing machine's disk. public final AutoBuffer write_impl( AutoBuffer ab ) { return ab.put1(_persist).put2(_type).putA1(memOrLoad()); } // Custom serializer: set _max from _mem length; set replicas & timestamp. public final Value read_impl(AutoBuffer bb) { assert _key == null; // Not set yet // Set persistence backend but... strip off saved-to-disk bit _persist = (byte)(bb.get1()&BACKEND_MASK); _type = (short) bb.get2(); _mem = bb.getA1(); _max = _mem.length; assert _max < MAX : "Value size=0x"+Integer.toHexString(_max)+" during read is larger than "+Integer.toHexString(MAX)+", type: "+TypeMap.className(_type); _pojo = null; // On remote nodes _rwlock is initialized to 1 (signaling a remote PUT is // in progress), flips to -1 when the remote PUT is done, or +2 if a notify // needs to happen. _rwlock = new AtomicInteger(-1); // Set as 'remote put is done' _replicas = null; touch(); return this; } // --------------------- // Ordering of K/V's! This field tracks a bunch of things used in ordering // updates to the same Key. Ordering Rules: // - Program Order. You see your own writes. All writes in a single thread // strongly ordered (writes never roll back). In particular can: // PUT(v1), GET, PUT(null) and The Right Thing happens. // - Unrelated writes can race (unless fencing). // - Writes are not atomic: some people can see a write ahead of others. // - Last-write-wins: if we do a zillion writes to the same Key then wait "a // long time", then do reads, all reads will see the same last value. // - Blocking on a PUT stalls until the PUT is cloud-wide visible // // For comparison to H2O get/put MM // IA Memory Ordering, 8 principles from Rich Hudson, Intel // 1. Loads are not reordered with other loads // 2. Stores are not reordered with other stores // 3. Stores are not reordered with older loads // 4. Loads may be reordered with older stores to different locations but not // with older stores to the same location // 5. In a multiprocessor system, memory ordering obeys causality (memory // ordering respects transitive visibility). // 6. In a multiprocessor system, stores to the same location have a total order // 7. In a multiprocessor system, locked instructions have a total order // 8. Loads and stores are not reordered with locked instructions. // // My (KN, CNC) interpretation of H2O MM from today: // 1. Gets are not reordered with other Gets // 2. Puts may be reordered with Puts to different Keys. // 3. Puts may be reordered with older Gets to different Keys, but not with // older Gets to the same Key. // 4. Gets may be reordered with older Puts to different Keys but not with // older Puts to the same Key. // 5. Get/Put amongst threads doesn't obey causality // 6. Puts to the same Key have a total order. // 7. No such thing, although an RMW operation exists with Put-like constraints. // 8. Gets and Puts may be reordered with RMW operations // 9. A write barrier exists that creates Sequential Consistency. Same-key // ordering (3-4) can't be used to create the effect. // // A Reader/Writer lock for the home node to control racing Gets and Puts. // - 0 for unlocked // - +N for locked by N concurrent GETs-in-flight // - -1 for write-locked // // An ACKACK from the client GET lowers the reader lock count.
// // Home node PUTs alter which Value is mapped to a Key, then they block until // there are no active GETs, then atomically set the write-lock, then send // out invalidates to all the replicas. PUTs return when all invalidates // have reported back. // // An initial remote PUT will default the value to 1. A 2nd PUT attempt will // block until the 1st one completes (multiple writes to the same Key from // the same JVM block, so there is at most 1 outstanding write to the same // Key from the same JVM). The 2nd PUT will CAS the value to 2, indicating // the need for the finishing 1st PUT to call notify(). // // Note that this sequence involves a lot of blocking on repeated writes with // cached readers, but not the readers - i.e., writes are slow to complete. private transient AtomicInteger _rwlock; private boolean RW_CAS( int old, int nnn, String msg ) { if( !_rwlock.compareAndSet(old,nnn) ) return false; //System.out.println(_key+", "+old+" -> "+nnn+", "+msg); return true; } // List of who is replicated where private volatile byte[] _replicas; private static final AtomicReferenceFieldUpdater<Value,byte[]> REPLICAS_UPDATER = AtomicReferenceFieldUpdater.newUpdater(Value.class,byte[].class, "_replicas"); // Fills in the _replicas field atomically, on first set of a replica. private byte[] replicas( ) { byte[] r = _replicas; if( r != null ) return r; byte[] nr = makeReplicaIndicatorSpace(); if( REPLICAS_UPDATER.compareAndSet(this,null,nr) ) return nr; r = _replicas/*read again, since CAS failed must be set now*/; assert r!= null; return r; } private byte[] makeReplicaIndicatorSpace() { int size = H2ONode.IDX.length + 1 /*1-based numbering*/ + 10 /*buffer for 10 clients, if we exceed the buffer we just invalidate regardless of whether they have a copy or not*/; return new byte[size]; } // Bump the read lock, once per pending-GET or pending-Invalidate boolean read_lock() { while( true ) { // Repeat, in case racing GETs are bumping the counter int old = _rwlock.get(); if( old == -1 ) return false; // Write-locked; no new replications. Read fails to read *this* value assert old >= 0; // Not negative if( RW_CAS(old,old+1,"rlock+") ) return true; } } /** Atomically insert h2o into the replica list; reports false if the Value * is flagged against future replication with a -1. Also bumps the active * Get count, which remains until the Get completes (we receive an ACKACK). */ boolean setReplica( H2ONode h2o ) { assert _key.home(); // Only the HOME node for a key tracks replicas assert h2o != H2O.SELF; // Do not track self as a replica if( !read_lock() ) return false; // Write-locked; no new replications. Read fails to read *this* value // Narrow non-race here. Here is a time window where the rwlock count went // up, but the replica list does not account for the new replica. However, // the rwlock cannot go down until an ACKACK is received, and the ACK // (hence ACKACK) doesn't go out until after this function returns. markHotReplica(h2o); // Both rwlock taken, and replica count is up now.
return true; } private void markHotReplica(H2ONode n) { n.markLocalDKVAccess(); byte[] r = replicas(); if (n._unique_idx < r.length) r[n._unique_idx] = 1; } /** Atomically lower active GET and Invalidate count */ void lowerActiveGetCount( H2ONode h2o ) { assert _key.home(); // Only the HOME node for a key tracks replicas assert h2o != H2O.SELF;// Do not track self as a replica while( true ) { // Repeat, in case racing GETs are bumping the counter int old = _rwlock.get(); // Read the lock-word assert old > 0; // Since lowering, must be at least 1 assert old != -1; // Not write-locked, because we are an active reader assert (h2o==null) || (_replicas!=null && (h2o._unique_idx >= _replicas.length || _replicas[h2o._unique_idx]==1)); // Self-bit is set if( RW_CAS(old,old-1,"rlock-") ) { if( old-1 == 0 ) // GET count fell to zero? synchronized( this ) { notifyAll(); } // Notify any pending blocked PUTs return; // Repeat until count is lowered } } } /** This value was atomically extracted from the local STORE by a successful * TaskPutKey attempt (only 1 thread can ever extract and thus call here). * No future lookups will find this Value, but there may be existing uses. * Atomically set the rwlock count to -1 locking it from further GETs and * ship out invalidates to caching replicas. May need to block on active * GETs. Updates a set of Future invalidates that can be blocked against. */ Futures lockAndInvalidate( H2ONode sender, Value newval, Futures fs ) { assert _key.home(); // Only the HOME node for a key tracks replicas assert newval._rwlock.get() >= 1; // starts read-locked // Write-Lock against further GETs while( true ) { // Repeat, in case racing GETs are bumping the counter int old = _rwlock.get(); assert old >= 0 : _key+", rwlock="+old; // Count does not go negative assert old != -1; // Only the thread doing a PUT ever locks if( old !=0 ) { // has readers? // Active readers: need to block until the GETs (of this very Value!) // all complete, before we can invalidate this Value - lest a racing // Invalidate bypass a GET. try { ForkJoinPool.managedBlock(this); } catch( InterruptedException ignore ) { } } else if( RW_CAS(0,-1,"wlock") ) break; // Got the write-lock! } // We have the set of Nodes with replicas now. Ship out invalidates. 
// Bump the newval read-lock by 1 for each pending invalidate byte[] r = _replicas; if( r!=null ) { // Null means no replicas, nothing to invalidate final int max = r.length; for( int i=0; i<max; i++ ) if( r[i]==1 && H2ONode.IDX[i] != sender ) TaskInvalidateKey.invalidate(H2ONode.IDX[i],_key,newval,fs); // Speculatively invalidate replicas also on nodes that were not known when the cluster was formed (clients) final int unseenMax = H2ONode.IDX.length; for (int i=max; i<unseenMax; i++) { final H2ONode node = H2ONode.IDX[i]; if (node != null && // can happen when the IDX array is being expanded node != sender && // ignore myself !node.isRemovedFromCloud() && // ignore nodes that are not active anymore node.accessedLocalDKV()) // ignore nodes that appear active but didn't actually read DKV ever TaskInvalidateKey.invalidate(node, _key, newval, fs); } } newval.lowerActiveGetCount(null); // Remove initial read-lock, accounting for pending inv counts return fs; } void blockTillNoReaders( ) { assert _key.home(); // Only the HOME node for a key tracks replicas // Write-Lock against further GETs while( true ) { // Repeat, in case racing GETs are bumping the counter int old = _rwlock.get(); if( old <= 0) return; // No readers, or this Value already replaced with a later value // Active readers: need to block until the GETs (of this very Value!) all complete try { ForkJoinPool.managedBlock(this); } catch( InterruptedException ignore ) { } } } /** Initialize the _replicas field for a PUT. On the Home node (for remote * PUTs), it is initialized to the one replica we know about, and not * read-locked. Used on a new Value about to be PUT on the Home node. */ void initReplicaHome( H2ONode h2o, Key key ) { assert key.home(); assert _key == null; // This is THE initializing key write for serialized Values assert h2o != H2O.SELF; // Do not track self as a replica _key = key; // Set the replica bit for the one node we know about, and leave the // rest clear. markHotReplica(h2o); _rwlock.set(1); // An initial read-lock, so a fast PUT cannot wipe this one out before invalidates have a chance of being counted } /** Block this thread until all prior remote PUTs complete - to force * remote-PUT ordering on the home node. */ void startRemotePut() { assert !_key.home(); int x; // assert I am waiting on threads with higher priority? while( (x=_rwlock.get()) != -1 ) // Spin until rwlock==-1 if( x == 2 || RW_CAS(1,2,"remote_need_notify") ) try { ForkJoinPool.managedBlock(this); } catch( InterruptedException ignore ) { } } /** The PUT for this Value has completed. Wake up any blocked later PUTs. */ void completeRemotePut() { assert !_key.home(); // Make an eager blind attempt, assuming no blocked pending notifies if( RW_CAS(1, -1,"remote_complete") ) return; synchronized(this) { boolean res = RW_CAS(2, -1,"remote_do_notify"); assert res; // Must succeed notifyAll(); // Wake up pending blocked PUTs } } // Construct a Value which behaves like a "null" or "deleted" Value, but // allows for counting pending invalidates on the delete operation... and can // thus stall future Puts overriding the deletion until the delete completes. static Value makeNull( Key key ) { assert key.home(); return new Value(key,0,null,(short)0,TCP); } boolean isNull() { assert _type != 0 || _key.home(); return _type == 0; } // Get from the local STORE. If we fetch out a special Null Value, and it is // unlocked (it will never be write-locked, but may be read-locked if there // are pending invalidates on it), upgrade it in-place to a true null.
// Return the not-Null value, or the true null. public static Value STORE_get( Key key ) { Value val = H2O.STORE.get(key); if( val == null ) return null; // A true null if( !val.isNull() ) return val; // Not a special Null // One-shot throwaway attempt at upgrading the special Null to a true null if( val._rwlock.get()==0 ) H2O.putIfMatch(key,null,val); return null; // Special null, but missing from the caller's point of view } /** Return true if blocking is unnecessary. * Alas, used in TWO places and the blocking API forces them to share here. */ @Override public boolean isReleasable() { int r = _rwlock.get(); if( _key.home() ) { // Called from lock_and_invalidate // Home-key blocking: wait for active-GET count to fall to zero, or blocking on deleted object return r <= 0; } else { // Called from start_put // Remote-key blocking: wait for active-PUT lock to hit -1 assert r == 2 || r == -1; // Either waiting (2) or done (-1) but not started (1) return r == -1; // done! } } /** Possibly blocks the current thread. Returns true if isReleasable would * return true. Used by the FJ Pool management to spawn threads to prevent * deadlock if otherwise all threads would block on waits. */ @Override public synchronized boolean block() { while( !isReleasable() ) { try { wait(); } catch( InterruptedException ignore ) { } } return true; } }
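Editor's note: the read/write-lock protocol documented above (0 = unlocked, +N = N concurrent GETs in flight, -1 = write-locked) is easiest to see in isolation. The following is a minimal, self-contained sketch of that counting scheme; it is not H2O's implementation, and all names below are hypothetical.

    import java.util.concurrent.atomic.AtomicInteger;

    // Hypothetical sketch of the lock-word scheme described in Value:
    // 0 = unlocked, +N = N readers in flight, -1 = write-locked.
    class RWCounterSketch {
      private final AtomicInteger lock = new AtomicInteger(0);

      // Bump the reader count, unless a writer already holds the lock.
      boolean readLock() {
        while (true) {                          // repeat; racing readers bump the counter
          int old = lock.get();
          if (old == -1) return false;          // write-locked; this read must fail
          if (lock.compareAndSet(old, old + 1)) return true;
        }
      }

      // Drop one reader; wake a pending writer once the count hits zero.
      synchronized void readUnlock() {
        if (lock.decrementAndGet() == 0) notifyAll();
      }

      // Block until no readers remain, then atomically take the write lock.
      synchronized void writeLock() throws InterruptedException {
        while (!lock.compareAndSet(0, -1)) wait();
      }
    }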
0
java-sources/ai/h2o/h2o-core/3.46.0.7
java-sources/ai/h2o/h2o-core/3.46.0.7/water/Weaver.java
package water; import javassist.*; import sun.misc.Unsafe; import water.api.API; import water.nbhm.UtilUnsafe; import java.lang.reflect.Field; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Modifier; import java.util.ArrayList; /** Class to auto-gen serializer delegate classes. */ public class Weaver { /** Get all woven fields in this class, including superclass fields, up to the * normal {@link Iced} serialization classes, skipping static and transient * fields, and the required _ice_id field. * @return Array of {@link Field} holding the list of woven fields. */ public static Field[] getWovenFields( Class<?> clz ) { ArrayList<Field> flds = new ArrayList<>(); while( Iced.class.isAssignableFrom(clz) || Freezable.class.isAssignableFrom(clz) || H2O.H2OCountedCompleter.class.isAssignableFrom(clz) ) { for( Field f : clz.getDeclaredFields() ) { int mods = f.getModifiers(); if( Modifier.isTransient(mods) || Modifier.isStatic(mods) ) continue; if( "_ice_id".equals(f.getName()) ) continue; // Strip the required typeid field flds.add(f); } clz = clz.getSuperclass(); } return flds.toArray(new Field[0]); } private static final ClassPool _pool; private static final CtClass _dtask, _enum, _serialize; private static final Unsafe _unsafe = UtilUnsafe.getUnsafe(); static { try { _pool = ClassPool.getDefault(); _pool.insertClassPath(new ClassClassPath(Weaver.class)); _dtask= _pool.get("water.DTask"); // these also need copyOver _enum = _pool.get("java.lang.Enum"); // Special serialization _serialize = _pool.get("java.io.Serializable"); // Base of serialization } catch( NotFoundException nfe ) { throw new RuntimeException(nfe); } } @SuppressWarnings("unchecked") public static <T extends Freezable<T>> Icer<T> genDelegate( int id, Class<T> clazz ) { try { T ice = Modifier.isAbstract(clazz.getModifiers()) ? null : (T)_unsafe.allocateInstance(clazz); Class<?> icer_clz = javassistLoadClass(id,clazz); return (Icer<T>)icer_clz.getDeclaredConstructors()[0].newInstance(ice); } catch( InvocationTargetException | InstantiationException | IllegalAccessException | NotFoundException | CannotCompileException | NoSuchFieldException | ClassNotFoundException e) { throw new RuntimeException(e); } } // The name conversion from an Iced subclass to an Icer subclass. private static String implClazzName( String name ) { return name + "$Icer"; } // See if javassist can find this class, already generated private static Class<?> javassistLoadClass(int id, Class<?> iced_clazz) throws CannotCompileException, NotFoundException, InstantiationException, IllegalAccessException, NoSuchFieldException, ClassNotFoundException, InvocationTargetException { // End the super class lookup chain at "water.Iced", // returning the known delegate class "water.Icer". String iced_name = iced_clazz.getName(); assert !iced_name.startsWith("scala.runtime.AbstractFunction"); // Now look for a pre-cooked Icer. No locking, 'cause we're just looking String icer_name = implClazzName(iced_name); CtClass icer_cc = _pool.getOrNull(icer_name); // Full Name Lookup of Icer if( icer_cc != null ) { synchronized( iced_clazz ) { if( !icer_cc.isFrozen() ) icer_cc.toClass(iced_clazz.getClassLoader(), null); // Load class (but does not link & init) return Class.forName(icer_name,true,iced_clazz.getClassLoader()); // Found a pre-cooked Icer implementation } } // Serialize parent. No locking; occasionally we'll "onIce" from the // remote leader more than once.
Class<?> super_clazz = iced_clazz.getSuperclass(); Class<?> super_icer_clazz; int super_id; if(Freezable.class.isAssignableFrom(super_clazz)) { super_id = TypeMap.onIce(super_clazz.getName()); super_icer_clazz = javassistLoadClass(super_id, super_clazz); } else { super_icer_clazz = Icer.class; super_id = -1; } CtClass super_icer_cc = _pool.get(super_icer_clazz.getName()); CtClass iced_cc = _pool.get(iced_name); // Lookup the base Iced class // Lock on the Iced class (prevent multiple class-gens of the SAME Iced // class, but also to allow parallel class-gens of unrelated Iced). //noinspection SynchronizationOnLocalVariableOrMethodParameter synchronized( iced_clazz ) { icer_cc = _pool.getOrNull(icer_name); // Retry under lock if( icer_cc != null ) return Class.forName(icer_name); // Found a pre-cooked Icer implementation icer_cc = genIcerClass(id,iced_cc,iced_clazz,icer_name,super_id,super_icer_cc); icer_cc.toClass(iced_clazz.getClassLoader(), null); // Load class (but does not link & init) return Class.forName(icer_name,true, iced_clazz.getClassLoader()); // Initialize class now, before subclasses } } // Generate the Icer class private static CtClass genIcerClass(int id, CtClass iced_cc, Class<?> iced_clazz, String icer_name, int super_id, CtClass super_icer) throws CannotCompileException, NotFoundException, NoSuchFieldException { // Generate the Icer class String iced_name = iced_cc.getName(); CtClass icer_cc = _pool.makeClass(icer_name); icer_cc.setSuperclass(super_icer); icer_cc.setModifiers(javassist.Modifier.PUBLIC); // Overall debug printing? if (false) { System.out.println("Iced class " + icer_cc.getName() + " is number: " + id); } // Detailed debug printing? boolean debug_print=false; CtField[] ctfs = iced_cc.getDeclaredFields(); for( CtField ctf : ctfs ) debug_print |= ctf.getName().equals("DEBUG_WEAVER"); if( debug_print ) System.out.println("class "+icer_cc.getName()+" extends "+super_icer.getName()+" {"); // Make a copy of the enum array, for later deser for( CtField ctf : ctfs ) { CtClass ctft = ctf.getType(); String name = ctf.getName(); int mods = ctf.getModifiers(); if( javassist.Modifier.isTransient(mods) || javassist.Modifier.isStatic(mods) ) continue; // Only serialize not-transient instance fields (not static) // Check for enum CtClass base = ctft; while( base.isArray() ) base = base.getComponentType(); if( base.subtypeOf(_enum) ) { // either an enum or an array of enum // Insert in the Icer, a copy of the enum values() array from Iced // e.g.
private final myEnum[] _fld = myEnum.values(); String src = " private final "+base.getName().replace('$', '.')+"[] "+name+" = "+base.getName().replace('$', '.')+".values();\n"; if( debug_print ) System.out.println(src); CtField ctfr = CtField.make(src,icer_cc); icer_cc.addField(ctfr); } } // The write call String debug = make_body(icer_cc, iced_cc, iced_clazz, "write", null, null, " protected final water.AutoBuffer write"+id+"(water.AutoBuffer ab, "+iced_name+" ice) {\n", super_id == -1?"":" write"+super_id+"(ab,ice);\n", " ab.put%z(ice.%s);\n" , " ab.put%z((%C)_unsafe.get%u(ice,%dL)); // %s\n", " ab.put%z(ice.%s);\n" , " ab.put%z((%C)_unsafe.get%u(ice,%dL)); // %s\n", " ab.put%z(ice.%s);\n" , " ab.put%z((%C)_unsafe.get%u(ice,%dL)); // %s\n", " return ab;\n" + " }"); if( debug_print ) System.out.println(debug); String debugJ= make_body(icer_cc, iced_cc, iced_clazz, "writeJSON", "(supers?ab.put1(','):ab).", " ab.put1(',').", " protected final water.AutoBuffer writeJSON"+id+"(water.AutoBuffer ab, "+iced_name+" ice) {\n", super_id == -1?"":" writeJSON"+super_id+"(ab,ice);\n", "putJSON%z(\"%s\",ice.%s);\n" , "putJSON%z(\"%s\",(%C)_unsafe.get%u(ice,%dL)); // %s\n", "putJSON%z(\"%s\",ice.%s);\n" , "putJSON%z(\"%s\",(%C)_unsafe.get%u(ice,%dL)); // %s\n", "putJSON%z(\"%s\",ice.%s);\n" , "putJSON%z(\"%s\",(%C)_unsafe.get%u(ice,%dL)); // %s\n" , " return ab;\n" + " }"); if( debug_print ) System.out.println(debugJ); // The generic override method. Called virtually at the start of a // serialization call. Only calls thru to the named static method. String wbody = " protected water.AutoBuffer write(water.AutoBuffer ab, water.Freezable ice) {\n"+ " return write"+id+"(ab,("+iced_name+")ice);\n"+ " }"; if( debug_print ) System.out.println(wbody); addMethod(wbody,icer_cc); String wbodyJ= " protected water.AutoBuffer writeJSON(water.AutoBuffer ab, water.Freezable ice) {\n"+ " return writeJSON"+id+"(ab.put1('{'),("+iced_name+")ice).put1('}');\n"+ " }"; if( debug_print ) System.out.println(wbodyJ); addMethod(wbodyJ,icer_cc); // The read call String rbody_impl = make_body(icer_cc, iced_cc, iced_clazz, "read", null, null, " protected final "+iced_name+" read"+id+"(water.AutoBuffer ab, "+iced_name+" ice) {\n", super_id == -1?"":" read"+super_id+"(ab,ice);\n", " ice.%s = ab.get%z();\n", " _unsafe.put%u(ice,%dL,ab.get%z()); //%s\n", " ice.%s = (%C)ab.get%z(%s);\n", " _unsafe.put%u(ice,%dL,ab.get%z(%s));\n", " ice.%s = (%C)ab.get%z(%c.class);\n"," _unsafe.put%u(ice,%dL,(%C)ab.get%z(%c.class)); //%s\n", " return ice;\n" + " }"); if( debug_print ) System.out.println(rbody_impl); String rbodyJ_impl = make_body(icer_cc, iced_cc, iced_clazz, "readJSON", null, null, " protected final "+iced_name+" readJSON"+id+"(water.AutoBuffer ab, "+iced_name+" ice) {\n", super_id == -1?"":" readJSON"+super_id+"(ab,ice);\n", " ice.%s = ab.get%z();\n", " _unsafe.put%u(ice,%dL,ab.get%z()); //%s\n", " ice.%s = (%C)ab.get%z(%s);\n", " _unsafe.put%u(ice,%dL,ab.get%z(%s));\n", " ice.%s = (%C)ab.get%z(%c.class);\n"," _unsafe.put%u(ice,%dL,(%C)ab.get%z(%c.class)); //%s\n", " return ice;\n" + " }"); if( debug_print ) System.out.println(rbodyJ_impl); // The generic override method. Called virtually at the start of a // serialization call. Only calls thru to the named static method. 
String rbody = " protected water.Freezable read(water.AutoBuffer ab, water.Freezable ice) {\n"+ " return read"+id+"(ab,("+iced_name+")ice);\n"+ " }"; if( debug_print ) System.out.println(rbody); addMethod(rbody,icer_cc); String rbodyJ= " protected water.Freezable readJSON(water.AutoBuffer ab, water.Freezable ice) {\n"+ " return readJSON"+id+"(ab,("+iced_name+")ice);\n"+ " }"; if( debug_print ) System.out.println(rbodyJ); addMethod(rbodyJ,icer_cc); String cnbody = " protected java.lang.String className() { return \""+iced_name+"\"; }"; if( debug_print ) System.out.println(cnbody); addMethod(cnbody,icer_cc); String ftbody = " protected int frozenType() { return "+id+"; }"; if( debug_print ) System.out.println(ftbody); addMethod(ftbody,icer_cc); String cmp2 = " protected void compute1( water.H2O.H2OCountedCompleter dt ) { dt.compute1(); }"; if( debug_print ) System.out.println(cmp2); addMethod(cmp2,icer_cc); // DTasks need to be able to copy all their (non transient) fields from one // DTask instance over another, to match the MRTask API. if( iced_cc.subclassOf(_dtask) ) { String cpbody_impl = make_body(icer_cc, iced_cc, iced_clazz, "copyOver", null, null, " protected void copyOver(water.Freezable fdst, water.Freezable fsrc) {\n", " super.copyOver(fdst,fsrc);\n"+ " "+iced_name+" dst = ("+iced_name+")fdst;\n"+ " "+iced_name+" src = ("+iced_name+")fsrc;\n", " dst.%s = src.%s;\n"," _unsafe.put%u(dst,%dL,_unsafe.get%u(src,%dL)); //%s\n", " dst.%s = src.%s;\n"," _unsafe.put%u(dst,%dL,_unsafe.get%u(src,%dL)); //%s\n", " dst.%s = src.%s;\n"," _unsafe.put%u(dst,%dL,_unsafe.get%u(src,%dL)); //%s\n", " }"); if( debug_print ) System.out.println(cpbody_impl); } String cstrbody = " public "+icer_cc.getSimpleName()+"( "+iced_name+" iced) { super(iced); }"; if( debug_print ) System.out.println(cstrbody); try { icer_cc.addConstructor(CtNewConstructor.make(cstrbody,icer_cc)); } catch( CannotCompileException ce ) { System.err.println("--- Compilation failure while compiling "+icer_cc.getName()+"\n"+cstrbody+"\n------\n"+ce); throw ce; } if( debug_print ) System.out.println("}"); return icer_cc; } // Generate a method body string private static String make_body(CtClass icer_cc, CtClass iced_cc, Class<?> iced_clazz, String impl, String field_sep1, String field_sep2, String header, String supers, String prims, String prims_unsafe, String enums, String enums_unsafe, String iced, String iced_unsafe, String trailer ) throws CannotCompileException, NotFoundException, NoSuchFieldException { StringBuilder sb = new StringBuilder(); sb.append(header); if(impl.equals("writeJSON")) { if (supers.isEmpty()) { sb.append(" boolean supers = false;"); } else { sb.append(" int position = ab.position();\n"); sb.append(supers); sb.append(" boolean supers = ab.position() != position;\n"); } } else sb.append(supers); // Customer serializer? String mimpl = impl+"_impl"; for( CtMethod mth : iced_cc.getDeclaredMethods() ) if( mth.getName().equals(mimpl) ) { // Found custom serializer? int mods = mth.getModifiers(); String ice_handle; String ice_args; if(javassist.Modifier.isStatic(mods)) { ice_handle = iced_clazz.getName() + "."; ice_args = "(ice,ab)"; } else if(javassist.Modifier.isFinal(mods)) { ice_handle = "ice."; ice_args = "(ab)"; }else if(javassist.Modifier.isAbstract(mods)){ ice_handle = null; ice_args = null; } else throw barf(iced_cc," Custom serialization methods must be declared either static or final. 
Failed for method " + mimpl); // If the custom serializer is actually abstract, then do nothing - it // must be (re)implemented in all child classes which will Do The Right Thing. if( javassist.Modifier.isAbstract(mods) || javassist.Modifier.isVolatile(mods) ) sb.append(impl.startsWith("write") ? " return ab;\n }" : " return ice;\n }"); else { if (!supers.isEmpty() && impl.equals("writeJSON")) { sb.append(" if(supers) {\n"); sb.append(" ab.put1(',');\n"); sb.append(" int pos = ab.position();\n"); sb.append(" ").append(ice_handle).append(mimpl).append(ice_args).append(";\n"); sb.append(" if(ab.position() == pos) ab.position(pos-1);\n"); // empty json serialization, drop the comma sb.append(" return ab;\n } \n"); } sb.append(" return ").append(ice_handle).append(mimpl).append(ice_args).append(";\n }"); } mimpl = null; // flag it break; } // For all fields... CtField[] ctfs = iced_cc.getDeclaredFields(); for( CtField ctf : ctfs ) { if( mimpl == null ) break; // Custom serializer, do not dump fields int mods = ctf.getModifiers(); if( javassist.Modifier.isTransient(mods) || javassist.Modifier.isStatic(mods) ) continue; // Only serialize not-transient instance fields (not static) if (ctf.hasAnnotation(API.class)) if(!((API)ctf.getAvailableAnnotations()[0]).json()) continue; if( field_sep1 != null ) { sb.append(field_sep1); field_sep1 = null; } else if( field_sep2 != null ) sb.append(field_sep2); CtClass ctft = ctf.getType(); CtClass base = ctft; while( base.isArray() ) base = base.getComponentType(); // Can the generated code access the field? If not - use Unsafe. If so, // use the fieldname (ldX bytecode) directly. Genned code is in the same // package, so public,protected and package-private all have sufficient // access, only private is a problem. boolean can_access = !javassist.Modifier.isPrivate(mods); if( (impl.equals("read") || impl.equals("copyOver")) && javassist.Modifier.isFinal(mods) ) can_access = false; long off = _unsafe.objectFieldOffset(iced_clazz.getDeclaredField(ctf.getName())); int ftype = ftype(iced_cc, ctf.getSignature() ); // Field type encoding if( ftype%20 == 9 || ftype%20 == 11 ) { // Iced/Objects sb.append(can_access ? iced : iced_unsafe); } else if( ftype%20 == 10 ) { // Enums sb.append(can_access ? enums : enums_unsafe); } else { // Primitives sb.append(can_access ? prims : prims_unsafe); } String z = FLDSZ1[ftype % 20]; for(int i = 0; i < ftype / 20; ++i ) z = 'A'+z; subsub(sb, "%z", z); // %z ==> short type name subsub(sb, "%s", ctf.getName()); // %s ==> field name subsub(sb, "%c", dollarsub(base.getName())); // %c ==> base class name subsub(sb, "%C", dollarsub(ctft.getName())); // %C ==> full class name subsub(sb, "%d", ""+off); // %d ==> field offset, only for Unsafe subsub(sb, "%u", utype(ctf.getSignature())); // %u ==> unsafe type name } if( mimpl != null ) // default auto-gen serializer? sb.append(trailer); String body = sb.toString(); addMethod(body,icer_cc); return body; } // Add a gen'd method. Politely print if there's an error during generation. 
private static void addMethod( String body, CtClass icer_cc ) throws CannotCompileException { try { icer_cc.addMethod(CtNewMethod.make(body,icer_cc)); } catch( CannotCompileException ce ) { System.err.println("--- Compilation failure while compiling "+icer_cc.getName()+"\n"+body+"\n------\n"+ce); throw ce; } catch ( RuntimeException re ) { // NotFoundException is wrapped in RE System.err.println("--- Failure while compiling "+icer_cc.getName()+"\n"+body+"\n------\n"+re); throw re; } } static private final String[] FLDSZ1 = { "Z","1","2","2","4","4f","8","8d", // Primitives "Str","","Enum", // String, Freezable, Enum "Ser" // java.lang.Serializable }; // Field types: // 0-7: primitives // 8,9, 10: String, Freezable, Enum // 11: Java serialized object (implements Serializable) // 20-27: array-of-prim // 28,29, 30: array-of-String, Freezable, Enum // Barfs on all others (eg Values or array-of-Frob, etc) private static int ftype( CtClass ct, String sig ) throws NotFoundException { switch( sig.charAt(0) ) { case 'Z': return 0; // Booleans: I could compress these more case 'B': return 1; // Primitives case 'C': return 2; case 'S': return 3; case 'I': return 4; case 'F': return 5; case 'J': return 6; case 'D': return 7; case 'L': // Handled classes if( sig.equals("Ljava/lang/String;") ) return 8; String clz = sig.substring(1,sig.length()-1).replace('/', '.'); CtClass argClass = _pool.get(clz); if( argClass.subtypeOf(_pool.get("water.Freezable")) ) return 9; if( argClass.subtypeOf(_enum) ) return 10; if( argClass.subtypeOf(_serialize) ) return 11; // Uses Java Serialization break; case '[': // Arrays return ftype(ct, sig.substring(1))+20; // Same as prims, plus 20 } throw barf(ct, sig); } // Unsafe field access private static String utype( String sig ) { switch( sig.charAt(0) ) { case 'Z': return "Boolean"; case 'B': return "Byte"; case 'C': case 'S': return "Char"; case 'I': return "Int"; case 'F': return "Float"; case 'J': return "Long"; case 'D': return "Double"; case 'L': case '[': return "Object"; } throw new RuntimeException("unsafe access to type "+sig); } // Replace the 1st '$' with '.' static private String dollarsub( String s ) { int idx = s.indexOf('$'); return idx == -1 ? s : (s.substring(0,idx)+"."+s.substring(idx+1)); } // Replace 2-byte strings like "%s" with s2. static private void subsub( StringBuilder sb, String s1, String s2 ) { int idx; while( (idx=sb.indexOf(s1)) != -1 ) sb.replace(idx,idx+2,s2); } private static RuntimeException barf( CtClass ct, String sig ) { return new RuntimeException(ct.getSimpleName()+"."+sig+": Serialization not implemented"); } }
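Editor's note: for readers unfamiliar with javassist, the generate-compile-load cycle Weaver performs boils down to the pattern below. This is a minimal sketch with invented class and method names, not H2O's generated Icer code; the only javassist calls used are ones that also appear in Weaver itself (makeClass, CtNewMethod.make, toClass).

    import javassist.ClassPool;
    import javassist.CtClass;
    import javassist.CtNewMethod;

    public class JavassistSketch {
      public static void main(String[] args) throws Exception {
        ClassPool pool = ClassPool.getDefault();
        CtClass cc = pool.makeClass("demo.Answer");          // brand-new class, no .java source
        cc.addMethod(CtNewMethod.make(
            "public int value() { return 42; }", cc));       // compile a method body from a String
        Class<?> clz = cc.toClass(                           // load it into the running JVM
            JavassistSketch.class.getClassLoader(), null);
        Object o = clz.getDeclaredConstructor().newInstance();
        System.out.println(clz.getMethod("value").invoke(o)); // prints 42
      }
    }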
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/API.java
package water.api; import java.lang.annotation.*; /** API Annotation * * API annotations are used to document field behaviors for the external REST API. Each * field is described by a matching Java field, plus these annotations. */ @Retention(RetentionPolicy.RUNTIME) @Target({ElementType.FIELD}) @Documented public @interface API { /** Is a given field an input, an output, or both? */ enum Direction {INPUT, OUTPUT, INOUT} /** How important is it to specify a given field to get a useful result? */ enum Level {critical, secondary, expert} /** * A short help description to appear alongside the field in a UI. */ String help(); /** * The label that should be displayed for the field if the name is insufficient. */ String label() default ""; /** * Is this field required, or is the default value generally sufficient? */ boolean required() default false; /** * How important is this field? The web UI uses the level to do a slow reveal of the parameters. */ Level level() default Level.critical; /** * Is this field an input, output or inout? */ Direction direction() default Direction.INPUT; // The following are markers for *input* fields. /** * For enum-type fields the allowed values are specified using the values annotation. * This is used in UIs to tell the user the allowed values, and for validation. */ String[] values() default {}; /** Provide values for enum-like types if they cannot be provided as a constant in the annotation. */ Class<? extends ValuesProvider> valuesProvider() default ValuesProvider.class; /** * Should this field be rendered in the JSON representation? */ boolean json() default true; /** * For Vec-type fields this is the set of Frame-type fields which must contain the named column. * For example, for a SupervisedModel the response_column must be in both the training_frame * and (if it's set) the validation_frame. */ String[] is_member_of_frames() default {}; /** * For Vec-type fields this is the set of other Vec-type fields which must contain * mutually exclusive values. For example, for a SupervisedModel the response_column * must be mutually exclusive with the weights_column. */ String[] is_mutually_exclusive_with() default {}; /** * Identify grid-able parameter. */ boolean gridable() default false; }
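Editor's note: a hypothetical schema field showing how the annotation is typically applied. Every element used below (help, required, level, direction, gridable) is declared in the annotation above; the class and field names are invented for illustration.

    import water.api.API;

    public class MyParamsV3 {  // hypothetical schema class, for illustration only
      @API(help = "Number of trees to build.",
           required = true,
           level = API.Level.critical,
           direction = API.Direction.INPUT,
           gridable = true)
      public int ntrees;
    }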
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/APIException.java
package water.api; /** * The exception to report various errors while * handling API requests. */ abstract public class APIException extends RuntimeException { public APIException(String s, Throwable t) { super(s,t); } public APIException(String s) { super(s); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/AboutHandler.java
package water.api; import water.H2O; import water.H2OSecurityManager; import water.api.schemas3.AboutEntryV3; import water.api.schemas3.AboutV3; import water.util.PrettyPrint; import java.util.ArrayList; import java.util.Date; public class AboutHandler extends Handler { @SuppressWarnings("unused") // called through reflection by RequestServer public AboutV3 get(int version, AboutV3 s) { ArrayList<AboutEntryV3> entries = new ArrayList<>(); entries.add(new AboutEntryV3("Build git branch", H2O.ABV.branchName())); entries.add(new AboutEntryV3("Build git hash", H2O.ABV.lastCommitHash())); entries.add(new AboutEntryV3("Build git describe", H2O.ABV.describe())); entries.add(new AboutEntryV3("Build project version", H2O.ABV.projectVersion())); entries.add(new AboutEntryV3("Build age", PrettyPrint.toAge(H2O.ABV.compiledOnDate(), new Date()))); entries.add(new AboutEntryV3("Built by", H2O.ABV.compiledBy())); entries.add(new AboutEntryV3("Built on", H2O.ABV.compiledOn())); entries.add(new AboutEntryV3("Internal Security", H2OSecurityManager.instance().securityEnabled ? "Enabled": "Disabled")); if (H2O.ABV.isTooOld()) { entries.add(new AboutEntryV3("Version warning", "Your H2O version is over 100 days old. Please download the latest version from: https://h2o-release.s3.amazonaws.com/h2o/latest_stable.html")); } for (H2O.AboutEntry ae : H2O.getAboutEntries()) { entries.add(new AboutEntryV3(ae.getName(), ae.getValue())); } s.entries = entries.toArray(new AboutEntryV3[entries.size()]); return s; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/AbstractRegister.java
package water.api; import java.util.Collections; import java.util.List; import java.util.ServiceLoader; public abstract class AbstractRegister implements RestApiExtension { @Override public void registerSchemas(RestApiContext context) { assert context != null : "Context needs to be passed!"; ServiceLoader<Schema> schemaLoader = ServiceLoader.load(Schema.class); for (Schema schema : schemaLoader) { context.registerSchema(schema); } } @Override public String getName() { return this.getClass().getName(); } @Override public List<String> getRequiredCoreExtensions() { return Collections.emptyList(); } }
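Editor's note: registerSchemas above leans on the standard java.util.ServiceLoader discovery mechanism. Below is a minimal sketch with a hypothetical service interface; each public type would normally live in its own file, and the provider must have a public no-arg constructor.

    import java.util.ServiceLoader;

    public interface Greeter { String greet(); }   // hypothetical service interface

    // Published by listing the implementation's fully-qualified name in
    // META-INF/services/<interface-name>, one provider per line.
    public class EnglishGreeter implements Greeter {
      public String greet() { return "hello"; }
    }

    public class DiscoveryDemo {
      public static void main(String[] args) {
        for (Greeter g : ServiceLoader.load(Greeter.class)) // iterates all registered providers
          System.out.println(g.greet());
      }
    }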
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/AlgoAbstractRegister.java
package water.api; import hex.ModelBuilder; import water.H2O; /** * Abstract base class for registering the REST API of algorithms */ public abstract class AlgoAbstractRegister extends AbstractRegister { /** * Register the common REST interface of an algorithm. * * @param mbProto prototype instance of the algorithm's model builder * @param version registration version */ protected final void registerModelBuilder(RestApiContext context, ModelBuilder mbProto, int version) { if (H2O.ARGS.features_level.compareTo(mbProto.builderVisibility()) > 0) { return; // Skip endpoint registration } String base = mbProto.getClass().getSimpleName(); String lbase = mbProto.getName(); Class<? extends water.api.Handler> handlerClass = water.api.ModelBuilderHandler.class; Class<? extends water.api.Handler> segmentModelsBuilderHandlerClass = SegmentModelsBuilderHandler.class; // This is the common model builder handler context.registerEndpoint( "train_" + lbase, "POST /" + version + "/ModelBuilders/" + lbase, handlerClass, "train", "Train a " + base + " model." ); context.registerEndpoint( "segment_train_" + lbase, "POST /" + version + "/SegmentModelsBuilders/" + lbase, segmentModelsBuilderHandlerClass, "segment_train", "Train a segmented " + base + " model." ); context.registerEndpoint( "validate_" + lbase, "POST /" + version + "/ModelBuilders/" + lbase + "/parameters", handlerClass, "validate_parameters", "Validate a set of " + base + " model builder parameters." ); context.registerEndpoint( "grid_search_" + lbase, "POST /99/Grid/" + lbase, GridSearchHandler.class, "train", "Run grid search for " + base + " model." ); context.registerEndpoint( "grid_search_" + lbase + "_resume", "POST /99/Grid/" + lbase + "/resume", GridSearchHandler.class, "resume", "Resume grid search for " + base + " model." ); } }
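Editor's note: a hedged sketch of how an algorithm extension might use the helper above. "MyAlgo" is an invented ModelBuilder subclass (so this will not compile as-is), and the registerEndPoints override assumes the RestApiExtension contract used elsewhere in this package.

    import hex.ModelBuilder;
    import water.api.AlgoAbstractRegister;
    import water.api.RestApiContext;

    // Hypothetical registration for an invented "MyAlgo" model builder.
    public class MyAlgoRegister extends AlgoAbstractRegister {
      @Override
      public void registerEndPoints(RestApiContext context) {   // assumed RestApiExtension hook
        ModelBuilder mbProto = new MyAlgo();                    // hypothetical builder prototype
        registerModelBuilder(context, mbProto, 3);              // train/validate/grid endpoints under /3
      }
    }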
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/AssemblyHandler.java
package water.api; import water.DKV; import water.Key; import water.api.schemas3.KeyV3; import water.api.schemas99.AssemblyV99; import water.rapids.Assembly; import water.rapids.transforms.Transform; import water.fvec.Frame; import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; public class AssemblyHandler extends Handler { public AssemblyV99 fit(int version, AssemblyV99 ass) throws ClassNotFoundException, NoSuchMethodException, IllegalAccessException, InvocationTargetException, InstantiationException { if( ass==null ) return null; if( ass.steps == null ) return ass; // process assembly: // of the form [name__class__ast__inplace__names, name__class__ast__inplace__names, ...] // s[0] : stepName // s[1] : transform class // s[2] : ast (can be noop) // s[3] : inplace // s[4] : names ArrayList<Transform> steps = new ArrayList<>(); for(String step: ass.steps) { String[] s = step.split("__"); Class transformClass = Class.forName("water.rapids.transforms."+s[1]); Class[] constructorTypes = new Class[]{String.class /*name*/, String.class /*ast*/, boolean.class /*inplace*/, String[].class /*newNames*/}; Object[] constructorArgs = new Object[]{s[0], s[2], Boolean.valueOf(s[3]), s[4].equals("|")?null:s[4].split("\\|")}; steps.add((Transform) transformClass.getConstructor(constructorTypes).newInstance(constructorArgs)); } Assembly assembly = new Assembly(Key.make("assembly_"+Key.make().toString()), steps.toArray(new Transform[steps.size()])); ass.result = new KeyV3.FrameKeyV3(assembly.fit((Frame)DKV.getGet(ass.frame.key()))._key); ass.assembly = new KeyV3.AssemblyKeyV3(assembly._key); DKV.put(assembly); return ass; } public AssemblyV99 toJava(int version, AssemblyV99 ass) { return ass; } }
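Editor's note: the step strings parsed in fit() above pack five fields behind double-underscore separators. The sketch below replays the same split logic in isolation; the field values are invented for illustration.

    public class StepParseSketch {
      public static void main(String[] args) {
        // name__class__ast__inplace__names, per the comment in AssemblyHandler.fit
        String step = "step0__H2OColOp__(cos (cols dummy [0]))__true__a|b";
        String[] s = step.split("__");
        String name = s[0];                        // "step0"
        String transformClass = s[1];              // resolved under water.rapids.transforms
        String ast = s[2];                         // the rapids expression (may be a noop)
        boolean inplace = Boolean.valueOf(s[3]);   // true
        String[] newNames = s[4].equals("|") ? null : s[4].split("\\|"); // {"a","b"}
        System.out.println(name + " uses " + transformClass + ", inplace=" + inplace
            + ", newNames=" + java.util.Arrays.toString(newNames));
      }
    }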
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/CapabilitiesHandler.java
package water.api; import water.AbstractH2OExtension; import water.ExtensionManager; import water.api.schemas3.CapabilitiesV3; import water.api.schemas3.CapabilityEntryV3; import java.util.ArrayList; public class CapabilitiesHandler extends Handler{ private ArrayList<CapabilityEntryV3> getCoreExtensionEntries(){ ArrayList<CapabilityEntryV3> entries = new ArrayList<>(); for(AbstractH2OExtension ext: ExtensionManager.getInstance().getCoreExtensions()){ entries.add(new CapabilityEntryV3(ext.getExtensionName())); } return entries; } private ArrayList<CapabilityEntryV3> getRestAPIExtensionEntries(){ ArrayList<CapabilityEntryV3> entries = new ArrayList<>(); for(RestApiExtension ext: ExtensionManager.getInstance().getRestApiExtensions()){ entries.add(new CapabilityEntryV3(ext.getName())); } return entries; } @SuppressWarnings("unused") // called through reflection by RequestServer public CapabilitiesV3 listAll(int version, CapabilitiesV3 s) { ArrayList<CapabilityEntryV3> entries = new ArrayList<>(); entries.addAll(getCoreExtensionEntries()); entries.addAll(getRestAPIExtensionEntries()); s.capabilities = entries.toArray(new CapabilityEntryV3[entries.size()]); return s; } @SuppressWarnings("unused") // called through reflection by RequestServer public CapabilitiesV3 listCore(int version, CapabilitiesV3 s) { ArrayList<CapabilityEntryV3> entries = new ArrayList<>(); entries.addAll(getCoreExtensionEntries()); s.capabilities = entries.toArray(new CapabilityEntryV3[entries.size()]); return s; } @SuppressWarnings("unused") // called through reflection by RequestServer public CapabilitiesV3 listRest(int version, CapabilitiesV3 s) { ArrayList<CapabilityEntryV3> entries = new ArrayList<>(); entries.addAll(getRestAPIExtensionEntries()); s.capabilities = entries.toArray(new CapabilityEntryV3[entries.size()]); return s; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/CloudHandler.java
package water.api; import org.joda.time.DateTimeZone; import water.H2O; import water.H2ONode; import water.H2OSecurityManager; import water.Paxos; import water.api.schemas3.CloudV3; import water.parser.ParseTime; import water.util.PrettyPrint; import java.util.Date; class CloudHandler extends Handler { @SuppressWarnings("unused") // called through reflection by RequestServer public CloudV3 head(int version, CloudV3 cloud) { return cloud; } @SuppressWarnings("unused") // called through reflection by RequestServer public CloudV3 status(int version, CloudV3 cloud) { // TODO: this really ought to be in the water package cloud.version = H2O.ABV.projectVersion(); cloud.branch_name = H2O.ABV.branchName(); cloud.last_commit_hash = H2O.ABV.lastCommitHash(); cloud.describe = H2O.ABV.describe(); cloud.compiled_by = H2O.ABV.compiledBy(); cloud.compiled_on = H2O.ABV.compiledOn(); cloud.build_number = H2O.ABV.buildNumber(); cloud.build_age = PrettyPrint.toAge(H2O.ABV.compiledOnDate(), new Date()); cloud.build_too_old = H2O.ABV.isTooOld(); cloud.node_idx = H2O.SELF.index(); cloud.cloud_name = H2O.ARGS.name; cloud.is_client = H2O.ARGS.client; cloud.cloud_size = H2O.CLOUD.size(); cloud.cloud_uptime_millis = System.currentTimeMillis() - H2O.START_TIME_MILLIS.get(); cloud.cloud_internal_timezone = DateTimeZone.getDefault().toString(); cloud.datafile_parser_timezone = ParseTime.getTimezone().toString(); cloud.consensus = Paxos._commonKnowledge; cloud.locked = Paxos._cloudLocked; cloud.internal_security_enabled = H2OSecurityManager.instance().securityEnabled; cloud.web_ip = H2O.ARGS.web_ip; // set leader H2ONode leader = H2O.CLOUD.leaderOrNull(); // leader might be null in client mode if clouding didn't finish yet cloud.leader_idx = leader == null ? -1 : leader.index(); // set list of members (might be empty) H2ONode[] members = H2O.CLOUD.members(); cloud.bad_nodes = 0; cloud.cloud_healthy = true; cloud.nodes = new CloudV3.NodeV3[members.length]; for (int i = 0; i < members.length; i++) { cloud.nodes[i] = new CloudV3.NodeV3(members[i], cloud.skip_ticks); if (! cloud.nodes[i].healthy) { cloud.cloud_healthy = false; cloud.bad_nodes++; } } return cloud; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/CloudLockHandler.java
package water.api; import water.Paxos; import water.api.schemas3.CloudLockV3; class CloudLockHandler extends Handler { @SuppressWarnings("unused") // called through reflection by RequestServer public CloudLockV3 lock(int version, CloudLockV3 cloudLock) { StringBuilder builder = new StringBuilder("requested via REST api."); if (cloudLock.reason != null) { builder.append(" Reason: ").append(cloudLock.reason); } Paxos.lockCloud(builder.toString()); return cloudLock; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/CreateFrameHandler.java
package water.api; import hex.CreateFrame; import hex.createframe.recipes.SimpleCreateFrameRecipe; import water.Job; import water.Key; import water.api.schemas3.CreateFrameV3; import water.api.schemas3.JobV3; import water.api.schemas3.KeyV3; import water.api.schemas4.input.CreateFrameSimpleIV4; import water.api.schemas4.output.JobV4; import water.fvec.Frame; public class CreateFrameHandler extends Handler { public JobV3 run(int version, CreateFrameV3 cf) { if (cf.dest == null) { cf.dest = new KeyV3.FrameKeyV3(); cf.dest.name = Key.rand(); } CreateFrame cfr = new CreateFrame(cf.dest.key()); cf.fillImpl(cfr); return new JobV3(cfr.execImpl()); } public static class CreateSimpleFrame extends RestApiHandler<CreateFrameSimpleIV4, JobV4> { @Override public String name() { return "createSimpleFrame"; } @Override public String help() { return "Create a frame with random (uniformly distributed) data. You can specify " + "how many columns of each type to make, and the desired value range for " + "each column type."; } @Override public JobV4 exec(int ignored, CreateFrameSimpleIV4 input) { SimpleCreateFrameRecipe cf = input.createAndFillImpl(); Job<Frame> job = cf.exec(); return new JobV4().fillFromImpl(job); } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/DCTTransformerHandler.java
package water.api; import water.api.schemas3.DCTTransformerV3; import water.api.schemas3.JobV3; import water.util.DCTTransformer; public class DCTTransformerHandler extends Handler { public JobV3 run(int version, DCTTransformerV3 sf) { DCTTransformer fft = sf.createAndFillImpl(); return new JobV3(fft.exec()); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/DatasetServlet.java
package water.api; import water.DKV; import water.fvec.Frame; import water.server.ServletUtils; import water.util.FileUtils; import water.util.Log; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.io.InputStream; import java.io.OutputStream; import java.nio.charset.Charset; /** */ public class DatasetServlet extends HttpServlet { @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) { String uri = ServletUtils.getDecodedUri(request); try { String f_name = request.getParameter("frame_id"); String hex_string = request.getParameter("hex_string"); String escape_quotes_string = request.getParameter("escape_quotes"); if (f_name == null) { throw new RuntimeException("Cannot find value for parameter 'frame_id'"); } Frame dataset = DKV.getGet(f_name); Frame.CSVStreamParams parms = new Frame.CSVStreamParams(); if (hex_string != null) parms.setHexString(Boolean.parseBoolean(hex_string)); if (escape_quotes_string != null) parms.setEscapeQuotes(Boolean.parseBoolean(escape_quotes_string)); InputStream is = dataset.toCSV(parms); response.setContentType("application/octet-stream"); response.setCharacterEncoding(Charset.defaultCharset().name()); // Clean up the file name int x = f_name.length() - 1; boolean dot = false; for (; x >= 0; x--) if (!Character.isLetterOrDigit(f_name.charAt(x)) && f_name.charAt(x) != '_') if (f_name.charAt(x) == '.' && !dot) dot = true; else break; String suggested_fname = f_name.substring(x + 1).replace(".hex", ".csv"); if (!suggested_fname.endsWith(".csv")) suggested_fname = suggested_fname + ".csv"; f_name = suggested_fname; response.addHeader("Content-Disposition", "attachment; filename=" + f_name); ServletUtils.setResponseStatus(response, HttpServletResponse.SC_OK); OutputStream os = null; try { os = response.getOutputStream(); FileUtils.copyStream(is, os, 2048); } finally { if (os != null) { try { os.close(); } catch (Exception e) { Log.err(e); } } } } catch (Exception e) { ServletUtils.sendErrorResponse(response, e, uri); } finally { ServletUtils.logRequest("GET", request, response); } } }
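Editor's note: the servlet above maps the hex_string and escape_quotes request parameters onto Frame.CSVStreamParams. Isolated, the same setup looks like the sketch below; the helper name is invented, and the frame id is assumed to already exist in the DKV. All calls used (DKV.getGet, setHexString, setEscapeQuotes, toCSV) appear in the servlet itself.

    import water.DKV;
    import water.fvec.Frame;
    import java.io.InputStream;

    class CsvStreamSketch {
      // Hypothetical helper: stream an existing frame as CSV.
      static InputStream openCsv(String frameId) {
        Frame dataset = DKV.getGet(frameId);   // assumes the frame is already in the DKV
        Frame.CSVStreamParams parms = new Frame.CSVStreamParams();
        parms.setHexString(false);             // numbers as decimals, not hex bit patterns
        parms.setEscapeQuotes(true);           // escape quotes embedded in string cells
        return dataset.toCSV(parms);
      }
    }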
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/DecryptionSetupHandler.java
package water.api; import water.api.schemas3.DecryptionSetupV3; import water.parser.DecryptionTool; public class DecryptionSetupHandler extends Handler { @SuppressWarnings("unused") // called through reflection by RequestServer public DecryptionSetupV3 setupDecryption(int version, DecryptionSetupV3 dsV3) { DecryptionTool.DecryptionSetup ds = dsV3.fillImpl(new DecryptionTool.DecryptionSetup()); DecryptionTool tool = DecryptionTool.make(ds); ds._decrypt_tool_id = tool._key; return new DecryptionSetupV3().fillFromImpl(ds); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/DelegatingStreamWriter.java
package water.api; import water.util.ArrayUtils; import java.io.OutputStream; public class DelegatingStreamWriter implements StreamWriter { StreamWriter _streamWriter; StreamWriteOption[] _options; private DelegatingStreamWriter(StreamWriter streamWriter, StreamWriteOption[] options) { _streamWriter = streamWriter; _options = options; } @Override public void writeTo(OutputStream os, StreamWriteOption... options) { _streamWriter.writeTo(os, ArrayUtils.append(_options, options)); } public static StreamWriter wrapWithOptions(StreamWriter streamWriter, StreamWriteOption[] options) { if (options == null || options.length == 0) return streamWriter; else return new DelegatingStreamWriter(streamWriter, options); } }
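Editor's note: a usage sketch for the decorator above. Default options are baked in once via wrapWithOptions, and any per-call options are appended after them by ArrayUtils.append; the writer and option array are assumed to come from the caller.

    import water.api.DelegatingStreamWriter;
    import water.api.StreamWriteOption;
    import water.api.StreamWriter;
    import java.io.OutputStream;

    class WrapSketch {
      // Hypothetical: 'base' and 'defaults' are provided by the caller.
      static void writeWithDefaults(StreamWriter base, StreamWriteOption[] defaults, OutputStream os) {
        StreamWriter w = DelegatingStreamWriter.wrapWithOptions(base, defaults);
        w.writeTo(os); // the (empty) per-call varargs land after 'defaults'
      }
    }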
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/DownloadDataHandler.java
package water.api; import water.api.schemas3.DownloadDataV3; @SuppressWarnings("unused") public class DownloadDataHandler extends Handler { public DownloadDataV3 fetch(int version, DownloadDataV3 server) { throw new RuntimeException("Function fetch should never be called."); // This should never happen, since DownloadDataset is handled in JettyHTTPD. } public DownloadDataV3 fetchStreaming(int version, DownloadDataV3 server) { throw new RuntimeException("Function fetchStreaming should never be called."); // This should never happen, since DownloadDataset is handled in JettyHTTPD. } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/EnumValuesProvider.java
package water.api; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.function.Predicate; public class EnumValuesProvider<E extends Enum<E>> implements ValuesProvider { private String[] _values; public EnumValuesProvider(Class<E> clazz) { this(clazz, e -> true); } public EnumValuesProvider(Class<E> clazz, Predicate<E> filter) { _values = getValuesOf(clazz, filter); } public EnumValuesProvider(Class<E> clazz, E[] excluded) { final List<E> exclusions = Arrays.asList(excluded); _values = getValuesOf(clazz, e -> !exclusions.contains(e)); } @Override public String[] values() { return _values; } private String[] getValuesOf(Class<E> clazz, Predicate<E> filter) { E[] values = clazz.getEnumConstants(); List<String> names = new ArrayList<>(values.length); for (E val : values) { if (filter.test(val)) { names.add(val.name()); } } return names.toArray(new String[0]); } }
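Editor's note: a small usage sketch of the provider above against a JDK enum; the exclusion constructor shown in the class filters the named constants out of values(). Class and variable names below are invented.

    import water.api.EnumValuesProvider;
    import java.time.DayOfWeek;

    class ValuesProviderSketch {
      public static void main(String[] args) {
        // Exclude the weekend constants; values() keeps enum declaration order.
        EnumValuesProvider<DayOfWeek> weekdays = new EnumValuesProvider<>(
            DayOfWeek.class, new DayOfWeek[]{DayOfWeek.SATURDAY, DayOfWeek.SUNDAY});
        for (String name : weekdays.values())
          System.out.println(name); // MONDAY through FRIDAY
      }
    }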
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/FSIOException.java
package water.api; public class FSIOException extends APIException { public FSIOException(String path, Throwable t) { super( "FS IO Failure: \n" + " accessed path : " + path + " caused by: " + (t != null ? t.getMessage() : "NA"), t); } public FSIOException(String path, String msg) { super( "FS IO Failure: \n" + " accessed path : " + path + " msg: " + msg); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/FindHandler.java
package water.api; import water.H2O; import water.MRTask; import water.api.schemas3.FindV3; import water.api.schemas3.FrameV3; import water.exceptions.H2OColumnNotFoundArgumentException; import water.exceptions.H2OCategoricalLevelNotFoundArgumentException; import water.exceptions.H2OIllegalArgumentException; import water.fvec.Chunk; import water.fvec.Frame; import water.fvec.Vec; import water.util.ArrayUtils; import water.util.IcedHashMap; import water.util.IcedHashMapGeneric; class FindHandler extends Handler { @SuppressWarnings("unused") // called through reflection by RequestServer public FindV3 find(int version, FindV3 find) { Frame frame = find.key._fr; // Peel out an optional column; restrict to this column if( find.column != null ) { Vec vec = frame.vec(find.column); if( vec==null ) throw new H2OColumnNotFoundArgumentException("column", frame, find.column); find.key = new FrameV3(new Frame(new String[]{find.column}, new Vec[]{vec})); // need to update frame variable frame = find.key._fr; } // Convert the search string into a column-specific flavor Vec[] vecs = frame.vecs(); double ds[] = new double[vecs.length]; for( int i=0; i<vecs.length; i++ ) { if( vecs[i].isCategorical() ) { int idx = ArrayUtils.find(vecs[i].domain(),find.match); if( idx==-1 && vecs.length==1 ) throw new H2OCategoricalLevelNotFoundArgumentException("match", find.match, frame._key.toString(), frame.name(i)); ds[i] = idx; } else if( vecs[i].isUUID() ) { throw H2O.unimpl(); } else if( vecs[i].isString() ) { throw H2O.unimpl(); } else if( vecs[i].isTime() ) { throw H2O.unimpl(); } else { try { ds[i] = find.match==null ? Double.NaN : Double.parseDouble(find.match); } catch( NumberFormatException e ) { if( vecs.length==1 ) { // There's only one Vec and it's a numeric Vec and our search string isn't a number IcedHashMapGeneric.IcedHashMapStringObject values = new IcedHashMapGeneric.IcedHashMapStringObject(); String msg = "Frame: " + frame._key.toString() + " has only one column, it is numeric, and the find pattern is not numeric: " + find.match; values.put("frame_name", frame._key.toString()); values.put("column_name", frame.name(i)); values.put("pattern", find.match); throw new H2OIllegalArgumentException(msg, msg, values); } ds[i] = Double.longBitsToDouble(0xcafebabe); // Do not match } } } Find f = new Find(find.row,ds).doAll(frame); find.prev = f._prev; find.next = f._next==Long.MAX_VALUE ? -1 : f._next; return find; } private static class Find extends MRTask<Find> { final long _row; final double[] _ds; long _prev, _next; Find( long row, double[] ds ) { super((byte)(H2O.GUI_PRIORITY - 2)); _row = row; _ds = ds; _prev = -1; _next = Long.MAX_VALUE; } @Override public void map( Chunk cs[] ) { for( int col = 0; col<cs.length; col++ ) { Chunk C = cs[col]; for( int row=0; row<C._len; row++ ) { if( C.atd(row) == _ds[col] || (C.isNA(row) && Double.isNaN(_ds[col])) ) { long r = C.start()+row; if( r < _row ) { if( r > _prev ) _prev = r; } else if( r > _row ) { if( r < _next ) _next = r; } } } } } @Override public void reduce( Find f ) { if( _prev < f._prev ) _prev = f._prev; if( _next > f._next ) _next = f._next; } } }
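Editor's note: the map/reduce pair in Find keeps, per chunk, the nearest match before and after the anchor row, so merging two partial results is just a max of prevs and a min of nexts. A standalone sketch of that merge rule follows (names hypothetical, mirroring Find.reduce above).

    class NearestMatchSketch {
      long prev = -1;             // nearest match strictly before the anchor row
      long next = Long.MAX_VALUE; // nearest match strictly after it

      // Record one matching row relative to the anchor.
      void observe(long row, long anchor) {
        if (row < anchor) { if (row > prev) prev = row; }
        else if (row > anchor) { if (row < next) next = row; }
      }

      // Combine two partial scans, exactly as Find.reduce does above.
      void merge(NearestMatchSketch other) {
        if (other.prev > prev) prev = other.prev;
        if (other.next < next) next = other.next;
      }
    }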
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/FrameChunksHandler.java
package water.api; import water.api.schemas3.FrameChunksV3; import water.fvec.Frame; public class FrameChunksHandler extends Handler { public FrameChunksV3 fetch(int version, FrameChunksV3 chunks) { Frame frame = FramesHandler.getFromDKV("key", chunks.frame_id.key()); chunks.fillFromFrame(frame); return chunks; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/FramesHandler.java
package water.api; import hex.Model; import water.*; import water.api.schemas3.*; import water.exceptions.*; import water.fvec.Frame; import water.fvec.Vec; import water.fvec.persist.FramePersist; import water.util.ExportFileFormat; import water.util.Log; import java.util.*; /* * FramesHandler deals with all REST API endpoints that start with /Frames. * <p> * GET /3/Frames/(?<frameid>.*)/export/(?<path>.*)/overwrite/(?<force>.*) * <p> export(): Export a Frame to the given path with optional overwrite. * <p> * GET /3/Frames/(?<frameid>.*)/columns/(?<column>.*)/summary * <p> columnSummary(): Return the summary metrics for a column, e.g. mins, maxes, mean, sigma, percentiles, etc. * <p> * GET /3/Frames/(?<frameid>.*)/columns/(?<column>.*)/domain * <p> columnDomain(): Return the domains for the specified column. \"null\" if the column is not a categorical. * <p> * GET /3/Frames/(?<frameid>.*)/columns/(?<column>.*) * <p> column(): Return the specified column from a Frame. * <p> * TODO: deleteme? * GET /3/Frames/(?<frameid>.*)/columns * <p> columns(): Return all the columns from a Frame. * <p> * GET /3/Frames/(?<frameid>.*)/summary * <p> summary(): Return a Frame, including the histograms, after forcing computation of rollups. * <p> * GET /3/Frames/(?<frameid>.*) * <p> fetch(): Return the specified Frame. * <p> * GET /3/Frames * <p> list(): Return all Frames in the H2O distributed K/V store. * <p> * DELETE /3/Frames/(?<frameid>.*) * <p> delete(): Delete the specified Frame from the H2O distributed K/V store. * <p> * DELETE /3/Frames * <p> deleteAll(): Delete all Frames from the H2O distributed K/V store. * <p> */ public class FramesHandler<I extends FramesHandler.Frames, S extends SchemaV3<I,S>> extends Handler { /** Class which contains the internal representation of the frames list and params. */ public static final class Frames extends Iced { public Key<Frame> frame_id; public long row_offset; public int row_count; public Frame[] frames; public String column; public boolean find_compatible_models = false; /** * Fetch all the Models so we can see if they are compatible with our Frame(s). */ static protected Map<Model, Set<String>> fetchModelCols(Model[] all_models) { Map<Model, Set<String>> all_models_cols = new HashMap<>(); for (Model m : all_models) all_models_cols.put(m, new HashSet<>(Arrays.asList(m._output._names))); return all_models_cols; } /** * For a given frame return an array of the compatible models. * * @param frame The frame for which we should fetch the compatible models. * @param all_models An array of all the Models in the DKV. * @return An array of compatible models */ private static Model[] findCompatibleModels(Frame frame, Model[] all_models) { Map<Model, Set<String>> all_models_cols = Frames.fetchModelCols(all_models); List<Model> compatible_models = new ArrayList<>(); HashSet<String> frame_column_names = new HashSet<>(Arrays.asList(frame._names)); for (Map.Entry<Model, Set<String>> entry : all_models_cols.entrySet()) { Model model = entry.getKey(); Set<String> model_cols = entry.getValue(); if (frame_column_names.containsAll(model_cols)) { // See if adapt throws an exception or not. try { if( model.adaptTestForTrain(new Frame(frame), false, false).length == 0 ) compatible_models.add(model); } catch( IllegalArgumentException e ) { // skip } } } return compatible_models.toArray(new Model[compatible_models.size()]); } } /** * Return all the frames. The Frames list will be instances of FrameSynopsisV3, * which only contain a few fields, for performance reasons.
* @see FrameSynopsisV3 */ @SuppressWarnings("unused") // called through reflection by RequestServer public FramesListV3 list(int version, FramesListV3 s) { Frames f = s.createAndFillImpl(); f.frames = Frame.fetchAll(); s.fillFromImplWithSynopsis(f); return s; } // TODO: in /4 return a schema with just a list of column names. @SuppressWarnings("unused") // called through reflection by RequestServer public FramesV3 columns(int version, FramesV3 s) { // TODO: return *only* the columns. . . This may be a different schema. return fetch(version, s); } // TODO: almost identical to ModelsHandler; refactor public static Frame getFromDKV(String param_name, String key_str) { return getFromDKV(param_name, Key.make(key_str)); } // TODO: almost identical to ModelsHandler; refactor public static Frame getFromDKV(String param_name, Key key) { if (null == key) throw new H2OIllegalArgumentException(param_name, "Frames.getFromDKV()", null); Value v = DKV.get(key); if (null == v) throw new H2OKeyNotFoundArgumentException(param_name, key.toString()); Iced ice = v.get(); if( ice instanceof Vec ) return new Frame((Vec)ice); if (! (ice instanceof Frame)) throw new H2OKeyWrongTypeArgumentException(param_name, key.toString(), Frame.class, ice.getClass()); return (Frame)ice; } // TODO: return VecV4 /** Return a single column from the frame. */ @SuppressWarnings("unused") // called through reflection by RequestServer public FramesV3 column(int version, FramesV3 s) { // TODO: should return a Vec schema Frame frame = getFromDKV("key", s.frame_id.key()); Vec vec = frame.vec(s.column); if (null == vec) throw new H2OColumnNotFoundArgumentException("column", s.frame_id.toString(), s.column); Vec[] vecs = { vec }; String[] names = { s.column }; Frame new_frame = new Frame(names, vecs); s.frames = new FrameV3[1]; s.frames[0] = new FrameV3(new_frame); ((FrameV3)s.frames[0]).clearBinsField(); return s; } // TODO: return VecDomainV4 @SuppressWarnings("unused") // called through reflection by RequestServer public FramesV3 columnDomain(int version, FramesV3 s) { Frame frame = getFromDKV("key", s.frame_id.key()); Vec vec = frame.vec(s.column); if (vec == null) throw new H2OColumnNotFoundArgumentException("column", s.frame_id.toString(), s.column); s.domain = new String[1][]; s.domain[0] = vec.domain(); return s; } // TODO: return VecSummaryV4 @SuppressWarnings("unused") // called through reflection by RequestServer public FramesV3 columnSummary(int version, FramesV3 s) { Frame frame = getFromDKV("key", s.frame_id.key()); // safe Vec vec = frame.vec(s.column); if (null == vec) throw new H2OColumnNotFoundArgumentException("column", s.frame_id.toString(), s.column); // Compute second pass of rollups: the histograms. vec.bins(); // Cons up our result s.frames = new FrameV3[1]; s.frames[0] = new FrameV3(new Frame(new String[]{s.column}, new Vec[]{vec}), s.row_offset, s.row_count, s.column_offset, s.column_count); return s; } // TODO: return everything but the second level of rollups (histograms); currently mins and maxes are missing /** Return a single frame. */ @SuppressWarnings("unused") // called through reflection by RequestServer public FramesV3 fetch(int version, FramesV3 s) { FramesV3 frames = doFetch(version, s); // Summary data is big, and not always there: null it out here. You have to call columnSummary // to force computation of the summary data. 
for (FrameBaseV3 a_frame: frames.frames) { ((FrameV3)a_frame).clearBinsField(); } return frames; } public FramesV3 fetchLight(int version, FramesV3 s) { FramesV3 frames = doFetch(version, s, false); for (FrameBaseV3 a_frame: frames.frames) { ((FrameV3)a_frame).clearBinsField(); } return frames; } private FramesV3 doFetch(int version, FramesV3 s) { return doFetch(version, s, true); } private FramesV3 doFetch(int version, FramesV3 s, boolean expensive) { s.createAndFillImpl(); Frame frame = getFromDKV("key", s.frame_id.key()); // safe s.frames = new FrameV3[1]; s.frames[0] = new FrameV3(frame, s.row_offset, s.row_count, s.column_offset, s.column_count, s.full_column_count, expensive); if (s.find_compatible_models) { Model[] compatible = Frames.findCompatibleModels(frame, Model.fetchAll()); s.compatible_models = new ModelSchemaV3[compatible.length]; ((FrameV3)s.frames[0]).compatible_models = new String[compatible.length]; int i = 0; for (Model m : compatible) { s.compatible_models[i] = (ModelSchemaV3)SchemaServer.schema(version, m).fillFromImpl(m); ((FrameV3)s.frames[0]).compatible_models[i] = m._key.toString(); i++; } } return s; } /** Export a single frame to the specified path. */ public FramesV3 export(int version, FramesV3 s) { Frame fr = getFromDKV("key", s.frame_id.key()); Log.info("ExportFiles processing (" + s.path + ")"); if (ExportFileFormat.parquet.equals(s.format)) { // format is optional (can be null, eg. from Flow) Log.warn("Format is 'parquet', csv parameter values: separator, header, quote_header will be ignored!"); Log.warn("Format is 'parquet', H2O itself determines the optimal number of files (1 file per chunk). Parts parameter value will be ignored!"); if (s.parallel) { Log.warn("Parallel export to a single file is not supported for parquet format! Export will continue with a parquet-specific setup."); } s.job = new JobV3(Frame.exportParquet(fr, s.path, s.force, s.compression, s.write_checksum, s.tz_adjust_from_local)); } else { Frame.CSVStreamParams csvParms = new Frame.CSVStreamParams() .setSeparator(s.separator) .setHeaders(s.header) .setQuoteColumnNames(s.quote_header); s.job = new JobV3(Frame.export(fr, s.path, s.frame_id.key().toString(), s.force, s.num_parts, s.parallel, s.compression, csvParms)); } return s; } public FrameSaveV3 save(int version, FrameSaveV3 req) { Frame fr = getFromDKV("frame_id", req.frame_id.key()); FramePersist persist = new FramePersist(fr); req.job = new JobV3(persist.saveTo(req.dir, req.force)); return req; } public FrameLoadV3 load(int version, FrameLoadV3 req) { Value v = DKV.get(req.frame_id.key()); if (v != null) { if (req.force) { ((Frame) v.get()).remove(); } else { throw new IllegalArgumentException("Frame " + req.frame_id + " already exists."); } } req.job = new JobV3(FramePersist.loadFrom(req.frame_id.key(), req.dir)); return req; } @SuppressWarnings("unused") // called through reflection by RequestServer // TODO: return list of FrameSummaryV3 that has histograms et al. public FramesV3 summary(int version, FramesV3 s) { Frame frame = getFromDKV("key", s.frame_id.key()); // safe if( null != frame) { Futures fs = new Futures(); int i = 0; for( Vec v : frame.vecs() ) { if (null == DKV.get(v._key)) Log.warn("For Frame: " + frame._key + ", Vec number: " + i + " (" + frame.name(i)+ ") is missing; not returning it."); else v.startRollupStats(fs, Vec.DO_HISTOGRAMS); i++; } fs.blockForPending(); } return doFetch(version, s); } /** Remove an unlocked frame. Fails if frame is in-use. 
*/ @SuppressWarnings("unused") // called through reflection by RequestServer public FramesV3 delete(int version, FramesV3 frames) { Frame frame = getFromDKV("key", frames.frame_id.key()); // safe frame.delete(); // lock & remove return frames; } /** * Remove ALL unlocked frames. Throws an IAE for all deletes that failed * (perhaps because the Frames were locked & in-use). */ @SuppressWarnings("unused") // called through reflection by RequestServer public FramesV3 deleteAll(int version, FramesV3 frames) { final Key[] keys = KeySnapshot.globalKeysOfClass(Frame.class); ArrayList<String> missing = new ArrayList<>(); Futures fs = new Futures(); for (Key key : keys) { try { getFromDKV("(none)", key).delete(null, fs, true); } catch (IllegalArgumentException iae) { missing.add(key.toString()); } } fs.blockForPending(); if( missing.size() != 0 ) throw new H2OKeysNotFoundArgumentException("(none)", missing.toArray(new String[missing.size()])); return frames; } }
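The compatibility test in findCompatibleModels() above boils down to a column-name superset check followed by an adaptTestForTrain() dry run. A minimal, plain-JDK sketch of the superset half, with hypothetical column names, just to make the containsAll() semantics concrete:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class CompatibilityCheckSketch {
  public static void main(String[] args) {
    // Hypothetical frame and model column names.
    Set<String> frameCols = new HashSet<>(Arrays.asList("sepal_len", "petal_len", "species", "extra_col"));
    Set<String> modelCols = new HashSet<>(Arrays.asList("sepal_len", "petal_len", "species"));
    // A model is a candidate when the frame carries ALL of the model's columns;
    // the real handler then confirms types via adaptTestForTrain().
    System.out.println("candidate: " + frameCols.containsAll(modelCols)); // true
  }
}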
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/GarbageCollectHandler.java
package water.api; import water.H2O; import water.api.schemas3.GarbageCollectV3; public class GarbageCollectHandler extends Handler { @SuppressWarnings("unused") // called through reflection by RequestServer public GarbageCollectV3 gc(int version, GarbageCollectV3 s) { H2O.gc(); return s; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/GridImportExportHandler.java
package water.api; import hex.faulttolerance.Recovery; import hex.Model; import hex.ModelExportOption; import hex.grid.Grid; import water.*; import water.api.schemas3.GridExportV3; import water.api.schemas3.GridImportV3; import water.api.schemas3.KeyV3; import water.fvec.persist.PersistUtils; import water.persist.Persist; import water.util.FileUtils; import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.util.Objects; public class GridImportExportHandler extends Handler { /** * Loads a grid from a folder. The path to the folder and the grid id (considered to be the grid's filename) are required. * After a grid is loaded, an attempt is made to find all its related models in the very same folder. * All models must be found in order to successfully import a grid. The grid's version must be the same as the version of * H2O it is imported into. * * @param version API version * @param gridImportV3 Import arguments * @return Key to the imported Grid. The grid's key is the same as the one serialized in the given binary file. * @throws IOException Error reading the grid or related models. */ @SuppressWarnings("unused") public KeyV3.GridKeyV3 importGrid(final int version, final GridImportV3 gridImportV3) throws IOException { Objects.requireNonNull(gridImportV3); validateGridImportParameters(gridImportV3); Grid grid = Grid.importBinary(gridImportV3.grid_path, gridImportV3.load_params_references); return new KeyV3.GridKeyV3(grid._key); } @SuppressWarnings("unused") public KeyV3.GridKeyV3 exportGrid(final int version, final GridExportV3 gridExportV3) throws IOException { validateGridExportParameters(gridExportV3); if(DKV.get(gridExportV3.grid_id) == null){ throw new IllegalArgumentException(String.format("Grid with id '%s' has not been found.", gridExportV3.grid_id)); } final Iced possibleGrid = DKV.get(gridExportV3.grid_id).get(); if(!(possibleGrid instanceof Grid)){ throw new IllegalArgumentException(String.format("Given Grid Key '%s' is not a valid Grid.", gridExportV3.grid_id)); } final Grid serializedGrid = (Grid) possibleGrid; ModelExportOption[] options = gridExportV3.getModelExportOptions(); serializedGrid.exportBinary(gridExportV3.grid_directory, true, options); if (gridExportV3.save_params_references) { new Recovery<Grid>(gridExportV3.grid_directory).exportReferences(serializedGrid); } return new KeyV3.GridKeyV3(serializedGrid._key); } /** * Basic sanity check for the given Grid export parameters * * @param input An instance of {@link GridExportV3}, may not be null. */ private void validateGridExportParameters(final GridExportV3 input) { Objects.requireNonNull(input); if (input.grid_directory == null || input.grid_directory.isEmpty()) { throw new IllegalArgumentException(String.format("Given grid directory '%s' is not a valid directory.", input.grid_directory)); } if (input.grid_id == null || input.grid_id.isEmpty()) { throw new IllegalArgumentException(String.format("Invalid Grid id '%s'.", input.grid_id)); } } /** * Basic sanity check for the given Grid import parameters * * @param input An instance of {@link GridImportV3}, may not be null. */ private void validateGridImportParameters(final GridImportV3 input) { Objects.requireNonNull(input); if (input.grid_path == null || input.grid_path.isEmpty()) { throw new IllegalArgumentException(String.format("Given grid directory '%s' is not a valid path.", input.grid_path)); } } }
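A hedged round-trip sketch using only the Grid calls visible above (exportBinary / importBinary). The directory, the assumption that the grid file is named after its key, and the empty options array are all hypothetical; it assumes a running H2O node with the grid already in the DKV.

import java.io.IOException;
import hex.ModelExportOption;
import hex.grid.Grid;

public class GridRoundTripSketch {
  static void roundTrip(Grid<?> grid) throws IOException {
    String dir = "/tmp/grids"; // hypothetical export directory
    // Export the grid plus its models (same call shape as exportGrid() above).
    grid.exportBinary(dir, true, new ModelExportOption[0]);
    // Import later: per the javadoc above, the grid file and ALL of its model
    // files must sit in the same folder, the grid file being named by its key.
    Grid<?> restored = Grid.importBinary(dir + "/" + grid._key, false);
    assert restored._key.equals(grid._key); // the key survives serialization
  }
}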
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/GridSearchHandler.java
package water.api; import hex.Model; import hex.ModelBuilder; import hex.ModelParametersBuilderFactory; import hex.faulttolerance.Recovery; import hex.grid.Grid; import hex.grid.GridSearch; import hex.grid.HyperSpaceSearchCriteria; import static hex.grid.HyperSpaceWalker.BaseWalker.SUBSPACES; import hex.schemas.*; import water.H2O; import water.Job; import water.Key; import water.TypeMap; import water.api.schemas3.JobV3; import water.api.schemas3.KeyV3; import water.api.schemas3.ModelParametersSchemaV3; import water.exceptions.H2OIllegalArgumentException; import water.util.IcedHashMap; import water.util.PojoUtils; import java.lang.reflect.Field; import java.util.*; /** * A generic grid search handler implementing the launch of a grid search. * * <p>Model-specific grid search handlers should inherit from this class and implement the corresponding * methods. * * FIXME: how to get rid of P, since it is already enforced by S * * @param <G> Implementation output of grid search * @param <MP> Type of model parameters * @param <P> Type of schema representing model parameters * @param <S> Schema representing structure of grid search end-point */ public class GridSearchHandler<G extends Grid<MP>, S extends GridSearchSchema<G, S, MP, P>, MP extends Model.Parameters, P extends ModelParametersSchemaV3> extends Handler { // Invoke the handler with parameters. Can throw any exception the called handler can throw. // TODO: why does this do its own params filling? // TODO: why does this do its own sub-dispatch? @Override public S handle(int version, water.api.Route route, Properties parms, String postBody) throws Exception { final String methodName = route._handler_method.getName(); String ss[] = route._url.split("/"); String algoURLName = ss[3]; // {}/{99}/{Grid}/{gbm}/ if ("train".equals(methodName)) { return trainGrid(algoURLName, parms); } else if ("resume".equals(methodName)) { return resumeGrid(algoURLName, parms); } else { throw water.H2O.unimpl(); } } private S resumeGrid(String algoURLName, Properties parms) { if (!parms.containsKey("grid_id")) { throw new IllegalArgumentException("grid_id is missing"); } S gss = buildGridSearchSchema(algoURLName, parms); Grid<MP> grid = gss.grid_id.key().get(); Key<Job> jobKey = gss.job_id != null ? gss.job_id.key() : null; Recovery<Grid> recovery = getRecovery(gss); Job<Grid> gsJob = GridSearch.resumeGridSearch( jobKey, grid, new DefaultModelParametersBuilderFactory<MP, P>(), recovery ); gss.hyper_parameters = null; gss.job = new JobV3(gsJob); return gss; } private S buildGridSearchSchema(String algoURLName, Properties parms) { // Peek out the desired algo from the URL String algoName = ModelBuilder.algoName(algoURLName); // gbm -> GBM; deeplearning -> DeepLearning String schemaDir = ModelBuilder.schemaDirectory(algoURLName); // Get the latest version of this algo: /99/Grid/gbm ==> GBMV3 // String algoSchemaName = SchemaServer.schemaClass(version, algoName).getSimpleName(); // GBMV3 // int algoVersion = Integer.valueOf(algoSchemaName.substring(algoSchemaName.lastIndexOf("V")+1)); // '3' // OK, I'm replacing one hack with another hack here, because SchemaServer.schema*() calls are getting eliminated. // There probably shouldn't be any reference to algoVersion here at all...
TODO: unhack all of this int algoVersion = 3; if (algoName.equals("SVD") || algoName.equals("Aggregator") || algoName.equals("StackedEnsemble")) algoVersion = 99; // TODO: this is a horrible hack which is going to cause maintenance problems: String paramSchemaName = schemaDir+algoName+"V"+algoVersion+"$"+ModelBuilder.paramName(algoURLName)+"V"+algoVersion; S gss = (S) new GridSearchSchema(); gss.init_meta(); gss.parameters = (P)TypeMap.newFreezable(paramSchemaName); gss.parameters.init_meta(); gss.hyper_parameters = new IcedHashMap<>(); // Get default parameters, then overlay the passed-in values ModelBuilder builder = ModelBuilder.make(algoURLName,null,null); // Default parameter settings gss.parameters.fillFromImpl(builder._parms); // Defaults for this builder into schema gss.fillFromParms(parms); // Override defaults from user parms return gss; } private S trainGrid(String algoURLName, Properties parms) { S gss = buildGridSearchSchema(algoURLName, parms); // Verify list of hyper parameters // Right now only names, no types // note: still use _validation_frame and _training_frame at this point. // Do not change those names yet. validateHyperParams(gss.parameters, gss.hyper_parameters); // Get actual parameters MP params = (MP) gss.parameters.createAndFillImpl(); Map<String,Object[]> sortedMap = new TreeMap<>(gss.hyper_parameters); // Need to change validation_frame to valid now. HyperSpaceWalker will complain // if it encounters an illegal parameter name. From now on, validation_frame, // training_frame are no longer valid names. if (sortedMap.containsKey("validation_frame")) { sortedMap.put("valid", sortedMap.get("validation_frame")); sortedMap.remove("validation_frame"); } // Get/create a grid for given frame Key<Grid> destKey = gss.grid_id != null ? gss.grid_id.key() : null; // Prepare recovery if requested Recovery<Grid> recovery = getRecovery(gss); Key<Job> jobKey = gss.job_id != null ? gss.job_id.key() : null; // Create target grid search object (keep it private for now) // Start grid search and return the schema back with job key Job<Grid> gsJob = GridSearch.startGridSearch( jobKey, destKey, params, sortedMap, new DefaultModelParametersBuilderFactory<MP, P>(), (HyperSpaceSearchCriteria) gss.search_criteria.createAndFillImpl(), recovery, GridSearch.getParallelismLevel(gss.parallelism) ); // Fill schema with job parameters // FIXME: right now we have to remove grid parameters which we sent back gss.hyper_parameters = null; gss.total_models = gsJob._result.get().getModelCount(); // TODO: looks like it's currently always 0 gss.job = new JobV3(gsJob); return gss; } @SuppressWarnings("unused") // called through reflection by RequestServer public S train(int version, S gridSearchSchema) { throw H2O.fail(); } @SuppressWarnings("unused") // called through reflection by RequestServer public S resume(int version, S gridSearchSchema) { throw H2O.fail(); } /** * Validate the given hyper parameters with respect to type parameter P.
* * It verifies that the given parameters are annotated in P with the @API annotation * * @param params regular model build parameters * @param hyperParams map of hyper parameters */ protected void validateHyperParams(P params, Map<String, Object[]> hyperParams) { List<SchemaMetadata.FieldMetadata> fsMeta = SchemaMetadata.getFieldMetadata(params); Set<String> allKeys = new HashSet<>(hyperParams.keySet()); allKeys.remove(SUBSPACES); for (String hparam : allKeys) { SchemaMetadata.FieldMetadata fieldMetadata = null; // Found corresponding metadata about the field for (SchemaMetadata.FieldMetadata fm : fsMeta) { if (fm.name.equals(hparam)) { fieldMetadata = fm; break; } } if (fieldMetadata == null) { throw new H2OIllegalArgumentException(hparam, "grid", "Unknown hyper parameter for grid search!"); } if (!fieldMetadata.is_gridable) { throw new H2OIllegalArgumentException(hparam, "grid", "Illegal hyper parameter for grid search! The parameter '" + fieldMetadata.name + "' is not gridable!"); } } if(hyperParams.get(SUBSPACES) != null) { Arrays.stream(hyperParams.get(SUBSPACES)).forEach(subspace -> validateHyperParams(params, (Map<String, Object[]>) subspace)); } } private Recovery<Grid> getRecovery(GridSearchSchema gss) { if (gss.recovery_dir != null) { return new Recovery<>(gss.recovery_dir); } else if (H2O.ARGS.auto_recovery_dir != null) { return new Recovery<>(H2O.ARGS.auto_recovery_dir); } else { return null; } } public static class DefaultModelParametersBuilderFactory<MP extends Model.Parameters, PS extends ModelParametersSchemaV3> implements ModelParametersBuilderFactory<MP> { @Override public ModelParametersBuilder<MP> get(MP initialParams) { return new ModelParametersFromSchemaBuilder<MP, PS>(initialParams); } @Override public PojoUtils.FieldNaming getFieldNamingStrategy() { return PojoUtils.FieldNaming.DEST_HAS_UNDERSCORES; } } /** * Model parameters factory building model parameters with respect to its schema. <p> A user calls * the {@link #set(String, Object)} method with names of parameters as they are defined in the Schema. * The builder transfers the given values from the Schema to the corresponding model parameters object.
* </p> * * @param <MP> type of model parameters * @param <PS> type of schema representing model parameters */ public static class ModelParametersFromSchemaBuilder<MP extends Model.Parameters, PS extends ModelParametersSchemaV3> implements ModelParametersBuilderFactory.ModelParametersBuilder<MP> { final private MP params; final private PS paramsSchema; final private ArrayList<String> fields; public ModelParametersFromSchemaBuilder(MP initialParams) { params = initialParams; paramsSchema = (PS) SchemaServer.schema(-1, params.getClass()); fields = new ArrayList<>(7); } public ModelParametersFromSchemaBuilder<MP, PS> set(String name, Object value) { try { Field f = paramsSchema.getClass().getField(name); API api = (API) f.getAnnotations()[0]; Schema.setField(paramsSchema, f, name, value.toString(), api.required(), paramsSchema.getClass()); fields.add(name); } catch (NoSuchFieldException e) { throw new IllegalArgumentException("Cannot find field '" + name + "' to set to value " + value, e); } catch (RuntimeException | IllegalAccessException e) { throw new IllegalArgumentException("Cannot set field '" + name + "'" + " to value " + value, e); } return this; } public MP build() { PojoUtils .copyProperties(params, paramsSchema, PojoUtils.FieldNaming.DEST_HAS_UNDERSCORES, null, fields.toArray(new String[fields.size()])); // FIXME: handle these train/valid fields in different way // See: ModelParametersSchemaV3#fillImpl if (params._valid == null && paramsSchema.validation_frame != null) { params._valid = Key.make(paramsSchema.validation_frame.name); } if (params._train == null && paramsSchema.training_frame != null) { params._train = Key.make(paramsSchema.training_frame.name); } return params; } } }
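One subtle step in trainGrid() above is the rename of the "validation_frame" hyper-parameter to "valid" before the walker sees it. A minimal, plain-JDK sketch with hypothetical values:

import java.util.Map;
import java.util.TreeMap;

public class HyperParamRenameSketch {
  public static void main(String[] args) {
    Map<String, Object[]> hyper = new TreeMap<>();
    hyper.put("ntrees", new Object[]{10, 50, 100});
    hyper.put("validation_frame", new Object[]{"my_valid_frame"}); // user-facing name
    // The walker only accepts "valid", so move the entry under the new name.
    if (hyper.containsKey("validation_frame")) {
      hyper.put("valid", hyper.remove("validation_frame")); // remove() hands back the old value
    }
    System.out.println(hyper.keySet()); // [ntrees, valid]
  }
}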
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/Grids.java
package water.api; import water.Iced; public class Grids extends Iced { }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/GridsHandler.java
package water.api; import hex.grid.Grid; import hex.schemas.GridSchemaV99; import water.Key; import water.KeySnapshot; import water.Value; import water.api.schemas99.GridsV99; /** * /Grids/ end-point handler. */ public class GridsHandler extends Handler { /** * Return all the grids. */ @SuppressWarnings("unused") // called through reflection by RequestServer public GridsV99 list(int version, GridsV99 s) { final Key[] gridKeys = KeySnapshot.globalSnapshot().filter(new KeySnapshot.KVFilter() { @Override public boolean filter(KeySnapshot.KeyInfo k) { return Value.isSubclassOf(k._type, Grid.class); } }).keys(); s.grids = new GridSchemaV99[gridKeys.length]; for (int i = 0; i < gridKeys.length; i++) { s.grids[i] = new GridSchemaV99(); s.grids[i].fillFromImpl(getFromDKV("(none)", gridKeys[i], Grid.class)); } return s; } /** * Return a specified grid. */ @SuppressWarnings("unused") // called through reflection by RequestServer public GridSchemaV99 fetch(int version, GridSchemaV99 s) { return s.fillFromImpl(getFromDKV("grid_id", s.grid_id.key(), Grid.class)); } }
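The KVFilter pattern in list() generalizes to any DKV value type. A hedged sketch applying the exact same calls to Frames instead of Grids (assumes a running H2O node):

import water.Key;
import water.KeySnapshot;
import water.Value;
import water.fvec.Frame;

public class KeyFilterSketch {
  static Key[] allFrameKeys() {
    return KeySnapshot.globalSnapshot().filter(new KeySnapshot.KVFilter() {
      @Override
      public boolean filter(KeySnapshot.KeyInfo k) {
        // Type test on the snapshot metadata, without fetching the values themselves.
        return Value.isSubclassOf(k._type, Frame.class);
      }
    }).keys();
  }
}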
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/H2oRestGsonHelper.java
package water.api; import com.google.gson.*; import water.api.schemas3.FrameV3; import water.api.schemas3.KeyV3; import java.lang.reflect.Type; /** * Custom Gson serialization for our REST API, which does things like turn a String of a Key into * a Key object automagically. */ public class H2oRestGsonHelper { /** * Create a Gson JSON serializer / deserializer that has custom handling for certain H2O classes for * which our REST API does automagic type conversions. * <p> * TODO: this method is copy-pasted from H2oApi.java in a more limited form; refactor. * See the comments there. */ public static Gson createH2oCompatibleGson() { return new GsonBuilder() // .registerTypeAdapterFactory(new ModelV3TypeAdapter()) .registerTypeAdapter(KeyV3.class, new KeySerializer()) .registerTypeAdapter(FrameV3.ColSpecifierV3.class, new ColSerializer()) // .registerTypeAdapter(ModelBuilderSchema.class, new ModelDeserializer()) // .registerTypeAdapter(ModelSchemaBaseV3.class, new ModelSchemaDeserializer()) .create(); } /** * Keys get sent as Strings and returned as objects also containing the type and URL, * so they need a custom GSON serializer. */ private static class KeySerializer implements JsonSerializer<KeyV3>, JsonDeserializer<KeyV3> { @Override public JsonElement serialize(KeyV3 key, Type typeOfKey, JsonSerializationContext context) { return new JsonPrimitive(key.name); } @Override public KeyV3 deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) { if (json.isJsonNull()) return null; JsonObject jobj = json.getAsJsonObject(); String type = jobj.get("type").getAsString(); switch (type) { // TODO: dynamically generate all possible cases case "Key<Model>": return context.deserialize(jobj, KeyV3.ModelKeyV3.class); case "Key<Job>": return context.deserialize(jobj, KeyV3.JobKeyV3.class); case "Key<Grid>": return context.deserialize(jobj, KeyV3.GridKeyV3.class); case "Key<Frame>": return context.deserialize(jobj, KeyV3.FrameKeyV3.class); default: throw new JsonParseException("Unable to deserialize key of type " + type); } } } private static class ColSerializer implements JsonSerializer<FrameV3.ColSpecifierV3>, JsonDeserializer<FrameV3.ColSpecifierV3> { @Override public JsonElement serialize(FrameV3.ColSpecifierV3 col, Type typeOfCol, JsonSerializationContext context) { return new JsonPrimitive(col.column_name); // UGH: external-facing, generated POJO uses camelCase. . . } @Override public FrameV3.ColSpecifierV3 deserialize(JsonElement json, Type typeOfT, JsonDeserializationContext context) { if (json.isJsonNull()) return null; return new FrameV3.ColSpecifierV3(json.getAsString()); } } }
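A hedged usage sketch of the helper above: keys serialize to a bare string and deserialize from an object carrying a "type" discriminator. The JSON literal and key name are hypothetical.

import com.google.gson.Gson;
import water.api.H2oRestGsonHelper;
import water.api.schemas3.KeyV3;

public class GsonKeySketch {
  public static void main(String[] args) {
    Gson gson = H2oRestGsonHelper.createH2oCompatibleGson();
    // The "type" field routes deserialization to the right KeyV3 subclass.
    String json = "{\"type\":\"Key<Frame>\",\"name\":\"my_frame\"}";
    KeyV3 key = gson.fromJson(json, KeyV3.class); // a KeyV3.FrameKeyV3 instance
    // Serialization collapses the key back to just its name.
    System.out.println(gson.toJson(key, KeyV3.class)); // prints "my_frame"
  }
}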
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/HDFSIOException.java
package water.api; public class HDFSIOException extends APIException { public HDFSIOException(String hdfsURI, String hdfsConf, Exception e) { super( "HDFS IO Failure: \n" + " accessed URI : " + hdfsURI + "\n" + " configuration: " + hdfsConf + "\n" + " " + e); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/Handler.java
package water.api; import water.*; import water.H2O.H2OCountedCompleter; import water.exceptions.H2OIllegalArgumentException; import water.exceptions.H2OKeyNotFoundArgumentException; import water.exceptions.H2OKeyWrongTypeArgumentException; import water.util.Log; import water.util.PojoUtils; import water.util.ReflectionUtils; import water.util.annotations.IgnoreJRERequirement; import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.Properties; public class Handler extends H2OCountedCompleter<Handler> { public static Class<? extends Schema> getHandlerMethodInputSchema(Method method) { return (Class<? extends Schema>)ReflectionUtils.findMethodParameterClass(method, 1); } public static Class<? extends Schema> getHandlerMethodOutputSchema(Method method) { return (Class<? extends Schema>)ReflectionUtils.findMethodOutputClass(method); } // Invoke the handler with parameters. Can throw any exception the called handler can throw. public Schema handle(int version, Route route, Properties parms, String post_body) throws Exception { Class<? extends Schema> handler_schema_class = getHandlerMethodInputSchema(route._handler_method); Schema schema = Schema.newInstance(handler_schema_class); // If the schema has a real backing class fill from it to get the default field values: Class<? extends Iced> iced_class = schema.getImplClass(); if (iced_class != Iced.class) { Iced defaults = schema.createImpl(); schema.fillFromImpl(defaults); } boolean has_body = (null != post_body); // Fill from http request params: schema = schema.fillFromParms(parms, !has_body); if (schema == null) throw H2O.fail("fillFromParms returned a null schema for version: " + version + " in: " + this.getClass() + " with params: " + parms); // Fill from JSON body, if there is one. NOTE: there should *either* be a JSON body *or* parms, // with the exception of control-type query parameters. if (has_body) { schema = schema.fillFromBody(post_body); } // NOTE! The handler method is free to modify the input schema and hand it back. Schema result = null; try { route._handler_method.setAccessible(true); result = (Schema)route._handler_method.invoke(this, version, schema); } // Exceptions thrown out of the invoked method turn into InvocationTargetException // rather uselessly. Peel out the original exception & throw it.
catch( InvocationTargetException ite ) { Throwable t = ite.getCause(); if( t instanceof RuntimeException ) throw (RuntimeException)t; if( t instanceof Error ) throw (Error)t; throw new RuntimeException(t); } // Version-specific unwind from the Iced back into the Schema return result; } protected StringBuffer markdown(Handler handler, int version, StringBuffer docs, String filename) { // TODO: version handling StringBuffer sb = new StringBuffer(); Path path = Paths.get(filename); try { sb.append(Files.readAllBytes(path)); } catch (IOException e) { Log.warn("Caught IOException trying to read doc file: ", path); } if (docs != null) docs.append(sb); return sb; } public static <T extends Keyed> T getFromDKV(String param_name, String key, Class<T> klazz) { return getFromDKV(param_name, Key.make(key), klazz); } public static <T extends Keyed> T getFromDKV(String param_name, Key key, Class<T> klazz) { if (key == null) throw new H2OIllegalArgumentException(param_name, "Handler.getFromDKV()", "null"); Value v = DKV.get(key); if (v == null) throw new H2OKeyNotFoundArgumentException(param_name, key.toString()); try { return klazz.cast(v.get()); } catch (ClassCastException e) { throw new H2OKeyWrongTypeArgumentException(param_name, key.toString(), klazz, v.get().getClass()); } } }
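A hedged sketch of the typed DKV lookup helper defined above; the parameter name and key are hypothetical, and it assumes a running H2O node.

import water.api.Handler;
import water.fvec.Frame;

public class DkvLookupSketch {
  static Frame lookupFrame() {
    // Throws H2OKeyNotFoundArgumentException when the key is absent, and
    // H2OKeyWrongTypeArgumentException when the key holds a non-Frame value.
    return Handler.getFromDKV("frame_id", "my_frame", Frame.class);
  }
}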
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/HandlerFactory.java
package water.api; /** * Handler factory supports different strategies to * create an instance of handler class for given registered route. */ public interface HandlerFactory { /** Shared default factory to create handler by using no-arg ctor * and reflection. */ HandlerFactory DEFAULT = new HandlerFactory() { @Override public Handler create(Class<? extends Handler> handlerClz) throws Exception { return handlerClz.newInstance(); } }; Handler create(Class<? extends Handler> handler) throws Exception; }
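The factory interface exists so that callers can swap in strategies other than the reflective DEFAULT, e.g. for instrumentation or dependency injection. A purely illustrative sketch:

import water.api.Handler;
import water.api.HandlerFactory;

public class LoggingHandlerFactory implements HandlerFactory {
  @Override
  public Handler create(Class<? extends Handler> handlerClz) throws Exception {
    // Same no-arg-ctor strategy as DEFAULT, with a trace of every instantiation.
    System.out.println("Creating handler: " + handlerClz.getSimpleName());
    return handlerClz.newInstance();
  }
}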
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ImportFilesHandler.java
package water.api; import water.H2O; import water.api.schemas3.ImportFilesMultiV3; import water.api.schemas3.ImportFilesV3; import java.util.ArrayList; /** * The handler provides import capabilities. * * <p> * Currently import from local filesystem, hdfs and s3 is supported. * </p> */ public class ImportFilesHandler extends Handler { @SuppressWarnings("unused") // called through reflection by RequestServer public ImportFilesV3 importFiles(int version, ImportFilesV3 importFiles) { ArrayList<String> files = new ArrayList<>(); ArrayList<String> keys = new ArrayList<>(); ArrayList<String> fails = new ArrayList<>(); ArrayList<String> dels = new ArrayList<>(); H2O.getPM().importFiles(importFiles.path, importFiles.pattern, files, keys, fails, dels); importFiles.files = files.toArray(new String[files.size()]); importFiles.destination_frames = keys.toArray(new String[keys.size()]); importFiles.fails = fails.toArray(new String[fails.size()]); importFiles.dels = dels.toArray(new String[dels.size()]); return importFiles; } @SuppressWarnings("unused") // called through reflection by RequestServer public ImportFilesMultiV3 importFilesMulti(int version, ImportFilesMultiV3 importFiles) { ArrayList<String> files = new ArrayList<>(); ArrayList<String> keys = new ArrayList<>(); ArrayList<String> fails = new ArrayList<>(); ArrayList<String> dels = new ArrayList<>(); H2O.getPM().importFiles(importFiles.paths, importFiles.pattern, files, keys, fails, dels); importFiles.files = files.toArray(new String[files.size()]); importFiles.destination_frames = keys.toArray(new String[keys.size()]); importFiles.fails = fails.toArray(new String[fails.size()]); importFiles.dels = dels.toArray(new String[dels.size()]); return importFiles; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ImportHiveTableHandler.java
package water.api; import water.ExtensionManager; import water.Job; import water.api.schemas3.ImportHiveTableV3; import water.api.schemas3.JobV3; import water.fvec.Frame; public class ImportHiveTableHandler extends Handler { public interface HiveTableImporter { String DEFAULT_DATABASE = "default"; String NAME = "HiveTableImporter"; Job<Frame> loadHiveTable(String database, String tableName, String[][] partitions, boolean allowMultiFormat) throws Exception; } private HiveTableImporter getImporter() { return (HiveTableImporter) ExtensionManager.getInstance().getCoreExtension(HiveTableImporter.NAME); } @SuppressWarnings("unused") // called via reflection public JobV3 importHiveTable(int version, ImportHiveTableV3 request) throws Exception { HiveTableImporter importer = getImporter(); if (importer != null) { try { Job<Frame> job = importer.loadHiveTable(request.database, request.table, request.partitions, request.allow_multi_format); return new JobV3(job); } catch (NoClassDefFoundError e) { throw new IllegalStateException("Hive Metastore client classes not available on classpath, try specifying the database as JDBC URL.", e); } } else { throw new IllegalStateException("HiveTableImporter extension not enabled."); } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ImportSQLTableHandler.java
package water.api; import water.Job; import water.api.schemas3.ImportSQLTableV99; import water.api.schemas3.JobV3; import water.jdbc.SQLManager; import water.jdbc.SqlFetchMode; import water.util.EnumUtils; /** * Import Sql Table into H2OFrame */ public class ImportSQLTableHandler extends Handler { @SuppressWarnings("unused") // called through reflection by RequestServer public JobV3 importSQLTable(int version, final ImportSQLTableV99 importSqlTable) { final SqlFetchMode sqlFetchMode; if (importSqlTable.fetch_mode == null) { sqlFetchMode = SqlFetchMode.DISTRIBUTED; } else { sqlFetchMode = EnumUtils.valueOfIgnoreCase(SqlFetchMode.class, importSqlTable.fetch_mode) .orElseThrow(() -> new IllegalArgumentException("Unrecognized SQL Fetch mode: " + importSqlTable.fetch_mode)); } Boolean useTempTable = null; if (importSqlTable.use_temp_table != null) { useTempTable = Boolean.parseBoolean(importSqlTable.use_temp_table); } Job j = SQLManager.importSqlTable( importSqlTable.connection_url, importSqlTable.table, importSqlTable.select_query, importSqlTable.username, importSqlTable.password, importSqlTable.columns, useTempTable, importSqlTable.temp_table_name, sqlFetchMode, importSqlTable.num_chunks_hint != null ? Integer.valueOf(importSqlTable.num_chunks_hint) : null ); return new JobV3().fillFromImpl(j); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/InteractionHandler.java
package water.api; import hex.Interaction; import water.api.schemas3.InteractionV3; import water.api.schemas3.JobV3; public class InteractionHandler extends Handler { public JobV3 run(int version, InteractionV3 cf) { Interaction cfr = new Interaction(); cf.fillImpl(cfr); return new JobV3(cfr.execImpl(cf.dest==null? null : cf.dest.key())); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/JStackHandler.java
package water.api; import water.api.schemas3.JStackV3; import water.util.JStack; public class JStackHandler extends Handler { @SuppressWarnings("unused") // called through reflection by RequestServer public JStackV3 fetch(int version, JStackV3 js) { return js.fillFromImpl(new JStack().execImpl()); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/JobsHandler.java
package water.api; import water.*; import water.api.schemas3.JobV3; import water.api.schemas3.JobsV3; import water.api.schemas4.input.JobIV4; import water.api.schemas4.output.JobV4; import water.exceptions.H2ONotFoundArgumentException; import water.server.ServletUtils; import water.util.Log; public class JobsHandler extends Handler { /** Impl class for a collection of jobs; only used in the API to make it easier to cons up the jobs array via the magic of PojoUtils.copyProperties. */ @SuppressWarnings("unused") // called through reflection by RequestServer public JobsV3 list(int version, JobsV3 s) { Job[] jobs = Job.jobs(); // Jobs j = new Jobs(); // j._jobs = Job.jobs(); // PojoUtils.copyProperties(s, j, PojoUtils.FieldNaming.ORIGIN_HAS_UNDERSCORES); s.jobs = new JobV3[jobs.length]; int i = 0; for (Job j : jobs) { try { s.jobs[i] = (JobV3) SchemaServer.schema(version, j).fillFromImpl(j); } // no special schema for this job subclass, so fall back to JobV3 catch (H2ONotFoundArgumentException e) { s.jobs[i] = new JobV3().fillFromImpl(j); } i++; // Java does the increment before the function call which throws?! } return s; } @SuppressWarnings("unused") // called through reflection by RequestServer public JobsV3 fetch(int version, JobsV3 s) { Key<Job> key = s.job_id.key(); long waitingStartedAt = System.currentTimeMillis(); long waitMs = fetchJobTimeoutMs(); Job<?> j = Job.tryGetDoneJob(key, waitMs); long waitingEndedAt = System.currentTimeMillis(); if (Log.isLoggingFor(Log.TRACE)) { Log.trace("Waited for job result for " + (waitingEndedAt - waitingStartedAt) + "ms."); } JobV3 job; try { job = (JobV3) SchemaServer.schema(version, j); } catch (H2ONotFoundArgumentException e) { // no special schema for this job subclass, so fall back to JobV3 job = new JobV3().fillFromImpl(j); } job.fillFromImpl(j); s.jobs = new JobV3[1]; s.jobs[0] = job; return s; } static long fetchJobTimeoutMs() { String timeoutSpec = ServletUtils.getSessionProperty("job.fetch_timeout_ms", null); if (timeoutSpec == null) { return -1; } try { return Long.parseLong(timeoutSpec); } catch (Exception e) { Log.trace(e); return -1; } } public JobsV3 cancel(int version, JobsV3 c) { Job j = DKV.getGet(c.job_id.key()); if (j == null) { throw new IllegalArgumentException("No job with key " + c.job_id.key()); } j.stop(); // Request Job stop long start = System.currentTimeMillis(); Log.info("Waiting for job " + c.job_id.key() + " to finish execution."); try { j.get(); // Wait for Job to complete } catch (Exception e) { if (! Job.isCancelledException(e)) { Log.warn("Job was cancelled with exception", e); } } long took = System.currentTimeMillis() - start; Log.info("Job " + c.job_id.key() + " cancelled (waiting took=" + took + "ms)."); return c; } public static class FetchJob extends RestApiHandler<JobIV4, JobV4> { @Override public String name() { return "getJob4"; } @Override public String help() { return "Retrieve information about the current state of a job."; } @Override public JobV4 exec(int ignored, JobIV4 input) { Key<Job> key = Key.make(input.job_id); Value val = DKV.get(key); if (val == null) throw new IllegalArgumentException("Job " + input.job_id + " is missing"); Iced iced = val.get(); if (!(iced instanceof Job)) throw new IllegalArgumentException("Id " + input.job_id + " references a " + iced.getClass() + " not a Job"); Job job = (Job) iced; JobV4 out = new JobV4(); out.fillFromImpl(job); return out; } } }
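A plain-JDK sketch of the defensive parse in fetchJobTimeoutMs() above: any absent or malformed "job.fetch_timeout_ms" value degrades to -1, which fetch() treats as "don't wait". The inputs are hypothetical.

public class TimeoutParseSketch {
  static long parseTimeoutMs(String spec) {
    if (spec == null) return -1;
    try {
      return Long.parseLong(spec);
    } catch (NumberFormatException e) {
      return -1; // the real handler also logs the failure at TRACE level
    }
  }

  public static void main(String[] args) {
    System.out.println(parseTimeoutMs("2500")); // 2500
    System.out.println(parseTimeoutMs("soon")); // -1
    System.out.println(parseTimeoutMs(null));   // -1
  }
}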
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/KillMinus3Handler.java
package water.api; import water.H2O; import water.MRTask; import water.api.schemas3.KillMinus3V3; import water.exceptions.H2OIllegalArgumentException; import water.util.Log; public class KillMinus3Handler extends Handler { @SuppressWarnings("unused") // called through reflection by RequestServer private static String getProcessId() throws Exception { // Note: may fail in some JVM implementations // therefore fallback has to be provided // something like '<pid>@<hostname>', at least in SUN / Oracle JVMs final String jvmName = java.lang.management.ManagementFactory.getRuntimeMXBean().getName(); final int index = jvmName.indexOf('@'); if (index < 1) { // part before '@' empty (index = 0) / '@' not found (index = -1) throw new Exception ("Can't get process Id"); } return Long.toString(Long.parseLong(jvmName.substring(0, index))); } public KillMinus3V3 killm3(int version, KillMinus3V3 u) { new MRTask((byte)(H2O.MIN_HI_PRIORITY - 1)) { @Override public void setupLocal() { try { String cmd = "/bin/kill -3 " + getProcessId(); java.lang.Runtime.getRuntime().exec(cmd); } catch( java.io.IOException ioe ) { // Silently ignore if, e.g. /bin/kill does not exist on windows } catch (Exception xe) { Log.err(xe); throw new H2OIllegalArgumentException(xe.getMessage()); } } }.doAllNodes(); return u; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/LogAndEchoHandler.java
package water.api; import water.H2O; import water.MRTask; import water.api.schemas3.LogAndEchoV3; import water.util.Log; public class LogAndEchoHandler extends Handler { @SuppressWarnings("unused") // called through reflection by RequestServer public LogAndEchoV3 echo(int version, final LogAndEchoV3 u) { new MRTask(H2O.MIN_HI_PRIORITY) { @Override public void setupLocal() { Log.info(u.message); } }.doAllNodes(); return u; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/LogsHandler.java
package water.api; import water.*; import water.api.schemas3.LogsV3; import water.util.*; import java.io.*; import java.net.URI; import java.text.SimpleDateFormat; import java.util.Date; import static water.api.RequestServer.HTTP_OK; public class LogsHandler extends Handler { private static class GetLogTask extends DTask<GetLogTask> { public String name; public String log; public boolean success = false; public GetLogTask() { super(H2O.GUI_PRIORITY); log = null; } public void doIt() { String logPathFilename = "/undefined"; // Satisfy IDEA inspection. try { if (name == null || name.equals("default")) { name = "debug"; } switch (name) { case "stdout": case "stderr": LinuxProcFileReader lpfr = new LinuxProcFileReader(); lpfr.read(); if (!lpfr.valid()) { log = "This option only works for Linux hosts"; } else { String pid = lpfr.getProcessID(); String fdFileName = "/proc/" + pid + "/fd/" + (name.equals("stdout") ? "1" : "2"); File f = new File(fdFileName); logPathFilename = f.getCanonicalPath(); if (logPathFilename.startsWith("/dev")) { log = "Unsupported when writing to console"; } if (logPathFilename.startsWith("socket")) { log = "Unsupported when writing to a socket"; } if (logPathFilename.startsWith("pipe")) { log = "Unsupported when writing to a pipe"; } if (logPathFilename.equals(fdFileName)) { log = "Unsupported when writing to a pipe"; } Log.trace("LogPathFilename calculation: " + logPathFilename); } break; case "trace": case "debug": case "info": case "warn": case "error": case "fatal": if (!Log.isLoggingFor(name)) { log = "Logging for " + name.toUpperCase() + " is not enabled as the log level is set to " + Log.LVLS[Log.getLogLevel()] + "."; } else { try { logPathFilename = Log.getLogFilePath(name); } catch (Exception e) { log = "H2O logging not configured."; } } break; case "httpd": try { logPathFilename = Log.getLogFilePath(name); } catch (Exception e) { log = "H2O logging not configured."; } break; default: throw new IllegalArgumentException("Illegal log file name requested (try 'default')"); } if (log == null) { File f = new File(logPathFilename); if (!f.exists()) { throw new IllegalArgumentException("File " + f + " does not exist"); } if (!f.canRead()) { throw new IllegalArgumentException("File " + f + " is not readable"); } BufferedReader reader = new BufferedReader(new FileReader(f)); String line; StringBuilder sb = new StringBuilder(); line = reader.readLine(); while (line != null) { sb.append(line); sb.append("\n"); line = reader.readLine(); } reader.close(); log = sb.toString(); } success = true; } catch (Exception e) { throw new RuntimeException(e); } } @Override public void compute2() { doIt(); tryComplete(); } } private static H2ONode getH2ONode(String nodeIdx) { try { int numNodeIdx = Integer.parseInt(nodeIdx); if ((numNodeIdx < -1) || (numNodeIdx >= H2O.CLOUD.size())) { throw new IllegalArgumentException("H2O node with the specified index does not exist!"); } else if (numNodeIdx == -1) { return H2O.SELF; } else { return H2O.CLOUD._memary[numNodeIdx]; } } catch (NumberFormatException nfe) { // not a number, try to parse for ipPort if (nodeIdx.equals("self")) { return H2O.SELF; } else { H2ONode node = H2O.CLOUD.getNodeByIpPort(nodeIdx); if (node != null) { return node; } else { // it still can be client H2ONode client = H2O.getClientByIPPort(nodeIdx); if (client != null) { return client; } else { // the ipport does not represent any existing h2o cloud member or client throw new IllegalArgumentException("The address " + nodeIdx + " matches no cloud member or client, so the requested H2O node does not
exist!"); } } } } } @SuppressWarnings("unused") // called through reflection by RequestServer public LogsV3 fetch(int version, LogsV3 s) { H2ONode node = getH2ONode(s.nodeidx); String filename = s.name; if (filename != null) { if (filename.contains(File.separator)) { throw new IllegalArgumentException("Filename may not contain File.separator character."); } } GetLogTask t = new GetLogTask(); t.name = filename; if (H2O.SELF.equals(node)) { // Local node. try { t.doIt(); } catch (Exception e) { Log.err(e); } } else { // Remote node. Log.trace("GetLogTask starting to node " + node._key + " ..."); new RPC<>(node, t).call().get(); Log.trace("GetLogTask completed to node " + node._key); } if (!t.success) { throw new RuntimeException("GetLogTask failed"); } s.log = t.log; return s; } public static URI downloadLogs(String destinationDir, LogArchiveContainer logContainer) { String outputFileStem = getOutputLogStem(); String outputFileName = outputFileStem + "." + logContainer.getFileExtension(); byte[] logBytes = downloadLogs(logContainer, outputFileStem); File destination = new File(destinationDir, outputFileName); try (FileOutputStream fileOutputStream = new FileOutputStream(destination)) { fileOutputStream.write(logBytes); } catch (IOException e) { Log.err("Can't write logs to " + destinationDir + ", reason: \n" + e); } return destination.toURI(); } static NanoResponse downloadLogsViaRestAPI(LogArchiveContainer logContainer) { String outputFileStem = getOutputLogStem(); byte[] finalArchiveByteArray = downloadLogs(logContainer, outputFileStem); NanoResponse res = new NanoResponse(HTTP_OK, logContainer.getMimeType(), new ByteArrayInputStream(finalArchiveByteArray)); res.addHeader("Content-Length", Long.toString(finalArchiveByteArray.length)); res.addHeader("Content-Disposition", "attachment; filename=" + outputFileStem + "." + logContainer.getFileExtension()); return res; } private static byte[] downloadLogs(LogArchiveContainer logContainer, String outputFileStem) { Log.info("\nCollecting logs."); byte[][] workersLogs = getWorkersLogs(logContainer); byte[] clientLogs = getClientLogs(logContainer); try { return archiveLogs(logContainer, new Date(), workersLogs, clientLogs, outputFileStem); } catch (Exception e) { return StringUtils.toBytes(e); } } private static String getOutputLogStem() { String pattern = "yyyyMMdd_hhmmss"; SimpleDateFormat formatter = new SimpleDateFormat(pattern); String now = formatter.format(new Date()); return "h2ologs_" + now; } private static byte[][] getWorkersLogs(LogArchiveContainer logContainer) { H2ONode[] members = H2O.CLOUD.members(); byte[][] perNodeArchive = new byte[members.length][]; for (int i = 0; i < members.length; i++) { try { // Skip nodes that aren't healthy, since they are likely to cause the entire process to hang. 
if (members[i].isHealthy()) { GetLogsFromNode g = new GetLogsFromNode(i, logContainer); g.doIt(); perNodeArchive[i] = g.bytes; } else { perNodeArchive[i] = StringUtils.bytesOf("Node not healthy"); } } catch (Exception e) { perNodeArchive[i] = StringUtils.toBytes(e); } } return perNodeArchive; } private static byte[] getClientLogs(LogArchiveContainer logContainer) { if (H2O.ARGS.client) { try { GetLogsFromNode g = new GetLogsFromNode(-1, logContainer); g.doIt(); return g.bytes; } catch (Exception e) { return StringUtils.toBytes(e); } } return null; } private static byte[] archiveLogs(LogArchiveContainer container, Date now, byte[][] results, byte[] clientResult, String topDir) throws IOException { int l = 0; assert H2O.CLOUD._memary.length == results.length : "Unexpected change in the cloud!"; for (byte[] result : results) l += result.length; ByteArrayOutputStream baos = new ByteArrayOutputStream(l); try (LogArchiveWriter archive = container.createLogArchiveWriter(baos)) { // Add top-level directory. LogArchiveWriter.ArchiveEntry entry = new LogArchiveWriter.ArchiveEntry(topDir + File.separator, now); archive.putNextEntry(entry); // Archive directory from each cloud member. for (int i = 0; i < results.length; i++) { String filename = topDir + File.separator + "node" + i + "_" + H2O.CLOUD._memary[i].getIpPortString().replace(':', '_').replace('/', '_') + "." + container.getFileExtension(); LogArchiveWriter.ArchiveEntry ze = new LogArchiveWriter.ArchiveEntry(filename, now); archive.putNextEntry(ze); archive.write(results[i]); archive.closeEntry(); } // Archive directory from the client node. Name it 'driver' since that's what Sparkling Water users see. if (clientResult != null) { String filename = topDir + File.separator + "driver." + container.getFileExtension(); LogArchiveWriter.ArchiveEntry ze = new LogArchiveWriter.ArchiveEntry(filename, now); archive.putNextEntry(ze); archive.write(clientResult); archive.closeEntry(); } // Close the top-level directory. archive.closeEntry(); } return baos.toByteArray(); } }
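A plain-JDK sketch of the per-node entry naming used by archiveLogs() above; the address, extension, and timestamp are hypothetical, and the output shown assumes a Unix-like File.separator.

import java.io.File;

public class LogEntryNameSketch {
  public static void main(String[] args) {
    String topDir = "h2ologs_20240101_120000"; // shaped like getOutputLogStem()
    String ipPort = "10.0.0.5:54321";
    // Colons and slashes would break archive paths, so they become underscores.
    String entry = topDir + File.separator + "node0_"
        + ipPort.replace(':', '_').replace('/', '_') + ".zip";
    System.out.println(entry); // h2ologs_20240101_120000/node0_10.0.0.5_54321.zip
  }
}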
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/MetadataHandler.java
package water.api; import hex.ModelBuilder; import water.Iced; import water.TypeMap; import water.api.schemas3.MetadataV3; import water.api.schemas3.RouteV3; import water.api.schemas3.SchemaMetadataV3; import water.api.schemas4.EndpointV4; import water.util.MarkdownBuilder; import water.api.schemas4.EndpointsListV4; import water.api.schemas4.ListRequestV4; import java.net.MalformedURLException; import java.util.Map; /** * Docs REST API handler, which provides endpoint handlers for the autogeneration of * Markdown (and in the future perhaps HTML and PDF) documentation for REST API endpoints * and payload entities (aka Schemas). */ public class MetadataHandler extends Handler { /** Return a list of all REST API Routes and a Markdown Table of Contents. */ @SuppressWarnings("unused") // called through reflection by RequestServer public MetadataV3 listRoutes(int version, MetadataV3 docs) { MarkdownBuilder builder = new MarkdownBuilder(); builder.comment("Preview with http://jbt.github.io/markdown-editor"); builder.heading1("REST API Routes Table of Contents"); builder.hline(); builder.tableHeader("HTTP method", "URI pattern", "Input schema", "Output schema", "Summary"); docs.routes = new RouteV3[RequestServer.numRoutes()]; int i = 0; for (Route route : RequestServer.routes()) { RouteV3 schema = new RouteV3(route); docs.routes[i] = schema; // ModelBuilder input / output schema hackery MetadataV3 look = new MetadataV3(); look.routes = new RouteV3[1]; look.routes[0] = schema; look.path = route._url; look.http_method = route._http_method; fetchRoute(version, look); schema.input_schema = look.routes[0].input_schema; schema.output_schema = look.routes[0].output_schema; builder.tableRow( route._http_method, route._url, Handler.getHandlerMethodInputSchema(route._handler_method).getSimpleName(), Handler.getHandlerMethodOutputSchema(route._handler_method).getSimpleName(), route._summary); i++; } docs.markdown = builder.toString(); return docs; } public EndpointsListV4 listRoutes4(int version, ListRequestV4 inp) { EndpointsListV4 res = new EndpointsListV4(); res.endpoints = new EndpointV4[RequestServer.numRoutes(4)]; int i = 0; for (Route route : RequestServer.routes()) { if (route.getVersion() != version) continue; EndpointV4 routeSchema = Schema.newInstance(EndpointV4.class).fillFromImpl(route); res.endpoints[i++] = routeSchema; } return res; } /** Return the metadata for a REST API Route, specified either by number or path. */ // Also called through reflection by RequestServer public MetadataV3 fetchRoute(int version, MetadataV3 docs) { Route route = null; if (docs.path != null && docs.http_method != null) { try { route = RequestServer.lookupRoute(new RequestUri(docs.http_method, docs.path)); } catch (MalformedURLException e) { route = null; } } else { // Linear scan for the route, plus each route is asked for in-order // during doc-gen leading to an O(n^2) execution cost. if (docs.path != null) try { docs.num = Integer.parseInt(docs.path); } catch (NumberFormatException e) { /* path is not a number, it's ok */ } if (docs.num >= 0 && docs.num < RequestServer.numRoutes()) route = RequestServer.routes().get(docs.num); // Crash-n-burn if the route is not found (the old code threw an AIOOBE), so we // do something similarly bad.
docs.routes = new RouteV3[]{new RouteV3(route)}; } if (route == null) return null; Schema sinput, soutput; if( route._handler_class.equals(water.api.ModelBuilderHandler.class) || route._handler_class.equals(water.api.GridSearchHandler.class)) { // GridSearchHandler uses the same logic as ModelBuilderHandler because there are no separate // ${ALGO}GridSearchParametersV3 classes, instead each field in ${ALGO}ParametersV3 is marked as either gridable // or not. String ss[] = route._url.split("/"); String algoURLName = ss[3]; // {}/{3}/{ModelBuilders}/{gbm}/{parameters} String algoName = ModelBuilder.algoName(algoURLName); // gbm -> GBM; deeplearning -> DeepLearning String schemaDir = ModelBuilder.schemaDirectory(algoURLName); int version2 = Integer.valueOf(ss[1]); try { String inputSchemaName = schemaDir + algoName + "V" + version2; // hex.schemas.GBMV3 sinput = (Schema) TypeMap.getTheFreezableOrThrow(TypeMap.onIce(inputSchemaName)); } catch (java.lang.ClassNotFoundException e) { // Not very pretty, but for some routes such as /99/Grid/glm we want to map to GLMV3 (because GLMV99 does not // exist), yet for others such as /99/Grid/svd we map to SVDV99 (because SVDV3 does not exist). sinput = (Schema) TypeMap.theFreezable(TypeMap.onIce(schemaDir + algoName + "V3")); } sinput.init_meta(); soutput = sinput; } else { sinput = Schema.newInstance(Handler.getHandlerMethodInputSchema (route._handler_method)); soutput = Schema.newInstance(Handler.getHandlerMethodOutputSchema(route._handler_method)); } docs.routes[0].input_schema = sinput.getClass().getSimpleName(); docs.routes[0].output_schema = soutput.getClass().getSimpleName(); docs.routes[0].markdown = route.markdown(sinput,soutput).toString(); return docs; } /** Fetch the metadata for a Schema by its full internal classname, e.g. "hex.schemas.DeepLearningV2.DeepLearningParametersV2". TODO: Do we still need this? */ @Deprecated @SuppressWarnings("unused") // called through reflection by RequestServer public MetadataV3 fetchSchemaMetadataByClass(int version, MetadataV3 docs) { docs.schemas = new SchemaMetadataV3[1]; // NOTE: this will throw an exception if the classname isn't found: SchemaMetadataV3 meta = new SchemaMetadataV3(SchemaMetadata.createSchemaMetadata(docs.classname)); docs.schemas[0] = meta; return docs; } /** Fetch the metadata for a Schema by its simple Schema name (e.g., "DeepLearningParametersV2"). */ @SuppressWarnings("unused") // called through reflection by RequestServer public MetadataV3 fetchSchemaMetadata(int version, MetadataV3 docs) { if ("void".equals(docs.schemaname)) { docs.schemas = new SchemaMetadataV3[0]; return docs; } docs.schemas = new SchemaMetadataV3[1]; // NOTE: this will throw an exception if the classname isn't found: Schema schema = Schema.newInstance(docs.schemaname); // get defaults try { Iced impl = (Iced) schema.getImplClass().newInstance(); schema.fillFromImpl(impl); } catch (Exception e) { // ignore if create fails; this can happen for abstract classes } SchemaMetadataV3 meta = new SchemaMetadataV3(new SchemaMetadata(schema)); docs.schemas[0] = meta; return docs; } /** Fetch the metadata for all the Schemas. */ @SuppressWarnings("unused") // called through reflection by RequestServer public MetadataV3 listSchemas(int version, MetadataV3 docs) { Map<String, Class<? extends Schema>> ss = SchemaServer.schemas(); docs.schemas = new SchemaMetadataV3[ss.size()]; // NOTE: this will throw an exception if the classname isn't found: int i = 0; for (Class<? 
extends Schema> schema_class : ss.values()) { // No hardwired version! YAY! FINALLY! Schema schema = Schema.newInstance(schema_class); // get defaults try { Iced impl = (Iced) schema.getImplClass().newInstance(); schema.fillFromImpl(impl); } catch (Exception e) { // ignore if create fails; this can happen for abstract classes } docs.schemas[i++] = new SchemaMetadataV3(new SchemaMetadata(schema)); } return docs; } }
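// --- Example (not part of the H2O sources): exercising MetadataHandler over REST.
// A minimal sketch of fetching the route listing that listRoutes() backs. The port
// (54321) and the /3/Metadata/endpoints path are assumptions about a default local
// H2O node; adjust both for a real cluster.
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class MetadataClientSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:54321/3/Metadata/endpoints");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    try (BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null)
        System.out.println(line); // JSON body: the routes plus the generated Markdown TOC
    }
  }
}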
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/MissingInserterHandler.java
package water.api;

import water.api.schemas3.JobV3;
import water.api.schemas3.MissingInserterV3;
import water.util.FrameUtils;

public class MissingInserterHandler extends Handler {

  public JobV3 run(int version, MissingInserterV3 mis) {
    FrameUtils.MissingInserter mi = mis.createAndFillImpl();
    return new JobV3(mi.execImpl());
  }
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ModelBuilderHandler.java
package water.api; import hex.Model; import hex.ModelBuilder; import hex.schemas.ModelBuilderSchema; import water.H2O; import water.Job; import water.Key; import water.api.schemas3.ModelParametersSchemaV3; import water.util.HttpResponseStatus; import water.util.Log; import water.util.PojoUtils; import java.util.Properties; public class ModelBuilderHandler<B extends ModelBuilder, S extends ModelBuilderSchema<B,S,P>, P extends ModelParametersSchemaV3> extends Handler { // Invoke the handler with parameters. Can throw any exception the called handler can throw. @Override public S handle(int version, Route route, Properties parms, String postBody) throws Exception { // Only here for train or validate-parms String handlerName = route._handler_method.getName(); boolean doTrain = handlerName.equals("train"); assert doTrain || handlerName.equals("validate_parameters"); // User specified key, or make a default? String model_id = parms.getProperty("model_id"); String warningStr = null; if ((model_id != null) && (model_id.contains("/"))) { // found / in model_id, replace with _ and set warning String tempName = model_id; model_id = model_id.replaceAll("/", "_"); warningStr = "Bad model_id: slash (/) found and replaced with _. " + "Original model_id "+tempName + " is now "+model_id+"."; Log.warn("model_id", warningStr); } String algoURLName = ModelBuilderHandlerUtils.parseAlgoURLName(route); String algoName = ModelBuilder.algoName(algoURLName); // Default Job for just this training Key<Model> key = doTrain ? (model_id==null ? ModelBuilder.defaultKey(algoName) : Key.make(model_id)) : null; Job job = doTrain ? (warningStr!=null ? new Job<>(key, ModelBuilder.javaName(algoURLName), algoName, warningStr) : new Job<>(key, ModelBuilder.javaName(algoURLName),algoName)) : null; // ModelBuilder B builder = ModelBuilder.make(algoURLName,job,key); S schema = ModelBuilderHandlerUtils.makeBuilderSchema(version, algoURLName, parms, builder); builder.init(false); // Validate parameters schema.fillFromImpl(builder); // Fill in the result Schema with the Job at least, plus any extra trainModel errors PojoUtils.copyProperties(schema.parameters, builder._parms, PojoUtils.FieldNaming.ORIGIN_HAS_UNDERSCORES, null, new String[] { "error_count", "messages" }); schema.setHttpStatus(HttpResponseStatus.OK.getCode()); if( doTrain ) schema.job.fillFromImpl(builder.trainModelOnH2ONode()); return schema; } @SuppressWarnings("unused") // called through reflection by RequestServer public S train(int version, S schema) { throw H2O.fail(); } @SuppressWarnings("unused") // called through reflection by RequestServer public S validate_parameters(int version, S schema) { throw H2O.fail(); } }
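// --- Example (not part of the H2O sources): the model_id sanitization rule from
// ModelBuilderHandler.handle() in isolation. A hedged sketch: any '/' in a
// user-supplied model_id is replaced with '_' and a warning is recorded.
public class ModelIdSanitizeSketch {
  static String sanitize(String modelId) {
    return modelId == null ? null : modelId.replaceAll("/", "_");
  }
  public static void main(String[] args) {
    System.out.println(sanitize("prostate/gbm/1")); // -> prostate_gbm_1
  }
}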
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ModelBuilderHandlerUtils.java
package water.api; import hex.ModelBuilder; import hex.schemas.ModelBuilderSchema; import water.TypeMap; import water.api.schemas3.ModelParametersSchemaV3; import java.util.Properties; public class ModelBuilderHandlerUtils { @SuppressWarnings("unchecked") static <B extends ModelBuilder, S extends ModelBuilderSchema<B,S,P>, P extends ModelParametersSchemaV3> S makeBuilderSchema( int version, String algoURLName, Properties parms, B builder ) { String algoName = ModelBuilder.algoName(algoURLName); // gbm -> GBM; deeplearning -> DeepLearning String schemaDir = ModelBuilder.schemaDirectory(algoURLName); // Build a Model Schema and a ModelParameters Schema String schemaName = schemaDir + algoName + "V" + version; S schema = (S) TypeMap.newFreezable(schemaName); schema.init_meta(); String parmName = schemaDir + algoName + "V" + version + "$" + algoName + "ParametersV" + version; schema.parameters = (P) TypeMap.newFreezable(parmName); schema.parameters.fillFromImpl(builder._parms); // Defaults for this builder into schema schema.parameters.fillFromParms(parms); // Overwrite with user parms schema.parameters.fillImpl(builder._parms); // Merged parms back over Model.Parameter object schema.parameters.fillImpl(builder._input_parms); return schema; } static <B extends ModelBuilder> B makeBuilder(int version, String algoURLName, Properties parms) { B builder = ModelBuilder.make(algoURLName, null, null); // used for the side effect of populating Parameter object in Builder ModelBuilderHandlerUtils.makeBuilderSchema(version, algoURLName, parms, builder); return builder; } static String parseAlgoURLName(Route route) { // Peek out the desired algo from the URL String ss[] = route._url.split("/"); return ss[3]; // {}/{3}/{ModelBuilders}/{gbm}/{parameters} } }
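// --- Example (not part of the H2O sources): the schema-naming convention that
// makeBuilderSchema() assembles, shown for a hypothetical GBM/v3 request. The '$'
// reflects that the parameters schema is a nested class of the builder schema.
public class SchemaNameSketch {
  public static void main(String[] args) {
    String schemaDir = "hex.schemas."; // what ModelBuilder.schemaDirectory() conventionally returns
    String algoName = "GBM";
    int version = 3;
    String schemaName = schemaDir + algoName + "V" + version;
    String parmName = schemaName + "$" + algoName + "ParametersV" + version;
    System.out.println(schemaName); // hex.schemas.GBMV3
    System.out.println(parmName);   // hex.schemas.GBMV3$GBMParametersV3
  }
}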
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ModelBuildersHandler.java
package water.api; import hex.Model; import hex.ModelBuilder; import hex.ModelMojoWriter; import hex.schemas.ModelBuilderSchema; import water.H2O; import water.Iced; import water.api.schemas3.ModelBuildersV3; import water.api.schemas3.SchemaV3; import water.api.schemas4.ListRequestV4; import water.api.schemas4.ModelInfoV4; import water.api.schemas4.ModelsInfoV4; import water.util.ReflectionUtils; import java.lang.reflect.Method; class ModelBuildersHandler extends Handler { /** Return all the modelbuilders. */ @SuppressWarnings("unused") // called through reflection by RequestServer public ModelBuildersV3 list(int version, ModelBuildersV3 m) { m.model_builders = new ModelBuilderSchema.IcedHashMapStringModelBuilderSchema(); for( String algo : ModelBuilder.algos() ) { ModelBuilder builder = ModelBuilder.make(algo, null, null); m.model_builders.put(algo.toLowerCase(), (ModelBuilderSchema)SchemaServer.schema(version, builder).fillFromImpl(builder)); } return m; } /** Return a single modelbuilder. */ @SuppressWarnings("unused") // called through reflection by RequestServer public ModelBuildersV3 fetch(int version, ModelBuildersV3 m) { m.model_builders = new ModelBuilderSchema.IcedHashMapStringModelBuilderSchema(); ModelBuilder builder = ModelBuilder.make(m.algo, null, null); m.model_builders.put(m.algo.toLowerCase(), (ModelBuilderSchema)SchemaServer.schema(version, builder).fillFromImpl(builder)); return m; } public static class ModelIdV3 extends SchemaV3<Iced, ModelIdV3> { @API(help="Model ID", direction = API.Direction.OUTPUT) String model_id; } /** Calculate next unique model_id. */ @SuppressWarnings("unused") // called through reflection by RequestServer public ModelIdV3 calcModelId(int version, ModelBuildersV3 m) { m.model_builders = new ModelBuilderSchema.IcedHashMapStringModelBuilderSchema(); String model_id = H2O.calcNextUniqueModelId(m.algo); ModelIdV3 mm = new ModelIdV3(); mm.model_id = model_id; return mm; } @SuppressWarnings("unused") // called through reflection by RequestServer public ModelsInfoV4 modelsInfo(int version, ListRequestV4 m) { String[] algos = ModelBuilder.algos(); ModelInfoV4[] infos = new ModelInfoV4[algos.length]; ModelsInfoV4 res = new ModelsInfoV4(); for (int i = 0; i < algos.length; i++) { ModelBuilder builder = ModelBuilder.make(algos[i], null, null); infos[i] = new ModelInfoV4(); infos[i].algo = algos[i]; infos[i].maturity = builder.builderVisibility() == ModelBuilder.BuilderVisibility.Stable? "stable" : builder.builderVisibility() == ModelBuilder.BuilderVisibility.Beta? "beta" : "alpha"; infos[i].have_mojo = builder.haveMojo(); infos[i].have_pojo = builder.havePojo(); infos[i].mojo_version = infos[i].have_mojo? detectMojoVersion(builder) : null; } res.models = infos; return res; } private String detectMojoVersion(ModelBuilder builder) { Class<? extends Model> modelClass = ReflectionUtils.findActualClassParameter(builder.getClass(), 0); try { Method getMojoMethod = modelClass.getDeclaredMethod("getMojo"); Class<?> retClass = getMojoMethod.getReturnType(); if (retClass == ModelMojoWriter.class || !ModelMojoWriter.class.isAssignableFrom(retClass)) throw new RuntimeException("Method getMojo() in " + modelClass + " must return the concrete implementation " + "of the ModelMojoWriter class. 
The return type is declared as " + retClass); try { ModelMojoWriter mmw = (ModelMojoWriter) retClass.newInstance(); return mmw.mojoVersion(); } catch (InstantiationException e) { throw getMissingCtorException(retClass, e); } catch (IllegalAccessException e) { throw getMissingCtorException(retClass, e); } } catch (NoSuchMethodException e) { throw new RuntimeException("Model class " + modelClass + " is expected to have method getMojo();"); } } private RuntimeException getMissingCtorException(Class<?> retClass, Exception e) { return new RuntimeException("MojoWriter class " + retClass + " must define a no-arg constructor.\n" + e); } }
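// --- Example (not part of the H2O sources): the reflection pattern behind
// detectMojoVersion(), reduced to plain JDK types. All names here (Writer,
// CsvWriter, Owner, openWriter) are hypothetical stand-ins: look up a method,
// reject a non-covariant return type, then instantiate via the no-arg constructor.
import java.lang.reflect.Method;

public class CovariantReturnSketch {
  public static abstract class Writer { public abstract String version(); }
  public static class CsvWriter extends Writer { public String version() { return "1.0"; } }
  public static class Owner { public CsvWriter openWriter() { return new CsvWriter(); } }

  public static void main(String[] args) throws Exception {
    Method m = Owner.class.getDeclaredMethod("openWriter");
    Class<?> ret = m.getReturnType();
    if (ret == Writer.class || !Writer.class.isAssignableFrom(ret))
      throw new RuntimeException("openWriter() must declare a concrete Writer subtype");
    Writer w = (Writer) ret.newInstance(); // same requirement as the MojoWriter no-arg ctor
    System.out.println(w.version()); // -> 1.0
  }
}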
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ModelCacheManager.java
package water.api;

import hex.Model;

public class ModelCacheManager {

  public static <M extends Model, P extends Model.Parameters> M get(P parms) {
    Model[] models = Model.fetchAll();
    long checksum = parms.checksum();
    for (Model model : models) {
      if (model._parms != null && model._parms.checksum() == checksum)
        return (M) model;
    }
    return null;
  }
}
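// --- Example (not part of the H2O sources): hedged usage of ModelCacheManager.
// Since Model.Parameters.checksum() covers the training configuration, a checksum
// hit means an identically-parameterized model already exists and retraining can
// be skipped. trainNew is a hypothetical callback supplied by the caller.
import hex.Model;
import java.util.function.Supplier;

public class ModelCacheSketch {
  static <M extends Model, P extends Model.Parameters> M getOrTrain(P parms, Supplier<M> trainNew) {
    M cached = water.api.ModelCacheManager.get(parms); // null when no checksum match
    return cached != null ? cached : trainNew.get();
  }
}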
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ModelExportAware.java
package water.api;

import hex.ModelExportOption;

public interface ModelExportAware {

  boolean isExportCVPredictionsEnabled();

  default ModelExportOption[] getModelExportOptions() {
    if (isExportCVPredictionsEnabled())
      return new ModelExportOption[]{ModelExportOption.INCLUDE_CV_PREDICTIONS};
    else
      return null;
  }
}
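// --- Example (not part of the H2O sources): implementing ModelExportAware. A
// schema opts into exporting CV predictions by overriding the single abstract
// method; getModelExportOptions() is inherited from the default implementation.
class CvAwareExportRequest implements water.api.ModelExportAware {
  @Override public boolean isExportCVPredictionsEnabled() { return true; }
  // getModelExportOptions() now yields { ModelExportOption.INCLUDE_CV_PREDICTIONS }
}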
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ModelMetricsHandler.java
package water.api; import hex.*; import hex.genmodel.utils.DistributionFamily; import org.apache.commons.lang.ArrayUtils; import water.*; import water.api.schemas3.*; import water.exceptions.H2OIllegalArgumentException; import water.exceptions.H2OKeyNotFoundArgumentException; import water.fvec.Frame; import water.fvec.Vec; import water.udf.CFuncRef; import water.util.Log; class ModelMetricsHandler extends Handler { /** Class which contains the internal representation of the ModelMetrics list and params. */ public static final class ModelMetricsList extends Iced { public Model _model; public Frame _frame; public ModelMetrics[] _model_metrics; public String _predictions_name; public String _deviances_name; public boolean _deviances; public boolean _reconstruction_error; public boolean _reconstruction_error_per_feature; public int _deep_features_hidden_layer = -1; public String _deep_features_hidden_layer_name = null; public boolean _reconstruct_train; public boolean _project_archetypes; public boolean _reverse_transform; public boolean _leaf_node_assignment; public int _exemplar_index = -1; public String _custom_metric_func; public String _auc_type; public int _top_n; public int _bottom_n; public boolean _compare_abs; public String _auuc_type; public int _auuc_nbins; public double[] _custom_auuc_thresholds; public Frame _background_frame; // Used for B-SHAP public boolean _output_space; // If true transform SHAP so that they sum to the f(x)-f(b) in the output space (i.e. after applying linkInv func) public boolean _output_per_reference; // Fetch all metrics that match model and/or frame ModelMetricsList fetch() { final Key[] modelMetricsKeys = KeySnapshot.globalSnapshot().filter(new KeySnapshot.KVFilter() { @Override public boolean filter(KeySnapshot.KeyInfo k) { try { if( !Value.isSubclassOf(k._type, ModelMetrics.class) ) return false; // Fast-path cutout ModelMetrics mm = DKV.getGet(k._key); // If we're filtering by model filter by Model. :-) if( _model != null && !mm.isForModel((Model)DKV.getGet(_model._key)) ) return false; // If we're filtering by frame filter by Frame. :-) if( _frame != null && !mm.isForFrame((Frame)DKV.getGet(_frame._key)) ) return false; } catch( NullPointerException | ClassCastException ex ) { return false; // Handle all kinds of broken racey key updates } return true; } }).keys(); _model_metrics = new ModelMetrics[modelMetricsKeys.length]; for (int i = 0; i < modelMetricsKeys.length; i++) _model_metrics[i] = DKV.getGet(modelMetricsKeys[i]); return this; // Flow coding } // Delete the metrics that match model and/or frame ModelMetricsList delete() { ModelMetricsList matches = fetch(); for (ModelMetrics mm : matches._model_metrics) DKV.remove(mm._key); return matches; } /** Return all the models matching the model&frame filters */ public Schema list(int version, ModelMetricsList m) { return this.schema(version).fillFromImpl(m.fetch()); } protected ModelMetricsListSchemaV3 schema(int version) { switch (version) { case 3: return new ModelMetricsListSchemaV3(); default: throw H2O.fail("Bad version for ModelMetrics schema: " + version); } } } // class ModelMetricsList /** Schema for a list of ModelMetricsBaseV3. * This should be common across all versions of ModelMetrics schemas, so it lives here. 
* TODO: move to water.api.schemas3 * */ public static final class ModelMetricsListSchemaV3 extends RequestSchemaV3<ModelMetricsList, ModelMetricsListSchemaV3> { // Input fields @API(help = "Key of Model of interest (optional)") public KeyV3.ModelKeyV3<Model> model; @API(help = "Key of Frame of interest (optional)") public KeyV3.FrameKeyV3 frame; @API(help = "Key of predictions frame, if predictions are requested (optional)", direction = API.Direction.INOUT) public KeyV3.FrameKeyV3 predictions_frame; @API(help = "Key for the frame containing per-observation deviances (optional)", direction = API.Direction.INOUT) public KeyV3.FrameKeyV3 deviances_frame; @API(help = "Compute reconstruction error (optional, only for Deep Learning AutoEncoder models)", json = false) public boolean reconstruction_error; @API(help = "Compute reconstruction error per feature (optional, only for Deep Learning AutoEncoder models)", json = false) public boolean reconstruction_error_per_feature; @API(help = "Extract Deep Features for given hidden layer (optional, only for Deep Learning models)", json = false) public int deep_features_hidden_layer; @API(help = "Extract Deep Features for given hidden layer by name (optional, only for Deep Water models)", json = false) public String deep_features_hidden_layer_name; @API(help = "Reconstruct original training frame (optional, only for GLRM models)", json = false) public boolean reconstruct_train; @API(help = "Project GLRM archetypes back into original feature space (optional, only for GLRM models)", json = false) public boolean project_archetypes; @API(help = "Reverse transformation applied during training to model output (optional, only for GLRM models)", json = false) public boolean reverse_transform; @API(help = "Return the leaf node assignment (optional, only for DRF/GBM models)", json = false) public boolean leaf_node_assignment; @API(help = "Type of the leaf node assignment (optional, only for DRF/GBM models)", values = {"Path", "Node_ID"}, json = false) public Model.LeafNodeAssignment.LeafNodeAssignmentType leaf_node_assignment_type; @API(help = "Predict the class probabilities at each stage (optional, only for GBM models)", json = false) public boolean predict_staged_proba; @API(help = "Predict the feature contributions - Shapley values (optional, only for DRF, GBM and XGBoost models)", json = false) public boolean predict_contributions; @API(help = "Return which row is used in which tree (optional, only for GBM models)", json = false) public boolean row_to_tree_assignment; @API(help = "Specify how to output feature contributions in XGBoost - XGBoost by default outputs contributions for 1-hot encoded features, " + "specifying a Compact output format will produce a per-feature contribution", values = {"Original", "Compact"}, json = false) public Model.Contributions.ContributionsOutputFormat predict_contributions_output_format; @API(help = "Only for predict_contributions function - sort Shapley values and return top_n highest (optional)", json = false) public int top_n; @API(help = "Only for predict_contributions function - sort Shapley values and return bottom_n lowest (optional)", json = false) public int bottom_n; @API(help = "Only for predict_contributions function - sort absolute Shapley values (optional)", json = false) public boolean compare_abs; @API(help = "Retrieve the feature frequencies on paths in trees in tree-based models (optional, only for GBM, DRF and Isolation Forest)", json = false) public boolean feature_frequencies; @API(help = "Retrieve all 
members for a given exemplar (optional, only for Aggregator models)", json = false) public int exemplar_index; @API(help = "Compute the deviances per row (optional, only for classification or regression models)", json = false) public boolean deviances; @API(help = "Reference to custom evaluation function, format: `language:keyName=funcName`", json=false) public String custom_metric_func; @API(help = "Set default multinomial AUC type. Must be one of: \"AUTO\", \"NONE\", \"MACRO_OVR\", \"WEIGHTED_OVR\", \"MACRO_OVO\", \"WEIGHTED_OVO\". Default is \"NONE\" (optional, only for multinomial classification).", json=false, direction = API.Direction.INPUT) public String auc_type; @API(help = "Set default AUUC type for uplift binomial classification. Must be one of: \"AUTO\", \"qini\", \"lift\", \"gain\". Default is \"AUTO\" (optional, only for uplift binomial classification).", json=false, direction = API.Direction.INPUT) public String auuc_type; @API(help = "Custom AUUC thresholds (for uplift binomial classification).", level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true) public double[] custom_auuc_thresholds; @API(help = "Set number of bins to calculate AUUC. Must be -1 or higher than 0. Default is -1 which means 1000 (optional, only for uplift binomial classification).", json=false, direction = API.Direction.INPUT) public int auuc_nbins; @API(help = "Specify background frame used as a reference for calculating SHAP.", json = false) public KeyV3.FrameKeyV3 background_frame; @API(help = "If true, transform contributions so that they sum up to the difference in the output space (applicable iff contributions are in link space). Note that this transformation is an approximation and the contributions won't be exact SHAP values.", json = false) public boolean output_space; @API(help = "If true, return contributions against each background sample (aka reference), i.e. phi(feature, x, bg), otherwise return contributions averaged over the background sample (phi(feature, x) = E_{bg} phi(feature, x, bg))") public boolean output_per_reference; // Output fields @API(help = "ModelMetrics", direction = API.Direction.OUTPUT) public ModelMetricsBaseV3[] model_metrics; @Override public ModelMetricsHandler.ModelMetricsList fillImpl(ModelMetricsList mml) { // TODO: check for type! mml._model = (this.model == null || this.model.key() == null ? null : this.model.key().get()); mml._frame = (this.frame == null || this.frame.key() == null ? null : this.frame.key().get()); mml._predictions_name = (null == this.predictions_frame || null == this.predictions_frame.key() ? null : this.predictions_frame.key().toString()); mml._background_frame = (this.background_frame == null || this.background_frame.key() == null ? 
null : this.background_frame.key().get()); mml._reconstruction_error = this.reconstruction_error; mml._reconstruction_error_per_feature = this.reconstruction_error_per_feature; mml._deep_features_hidden_layer = this.deep_features_hidden_layer; mml._deep_features_hidden_layer_name = this.deep_features_hidden_layer_name; mml._reconstruct_train = this.reconstruct_train; mml._project_archetypes = this.project_archetypes; mml._reverse_transform = this.reverse_transform; mml._leaf_node_assignment = this.leaf_node_assignment; mml._exemplar_index = this.exemplar_index; mml._deviances = this.deviances; mml._auc_type = this.auc_type; mml._top_n = this.top_n; mml._bottom_n = this.bottom_n; mml._compare_abs = this.compare_abs; mml._auuc_type = this.auuc_type; mml._auuc_nbins = this.auuc_nbins; mml._custom_metric_func = this.custom_metric_func; mml._custom_auuc_thresholds = this.custom_auuc_thresholds; mml._output_space = this.output_space; mml._output_per_reference = output_per_reference; if (model_metrics != null) { mml._model_metrics = new ModelMetrics[model_metrics.length]; for( int i=0; i<model_metrics.length; i++ ) mml._model_metrics[i] = (ModelMetrics)model_metrics[i].createImpl(); } return mml; } @Override public ModelMetricsListSchemaV3 fillFromImpl(ModelMetricsList mml) { // TODO: this is failing in PojoUtils with an IllegalAccessException. Why? Different class loaders? // PojoUtils.copyProperties(this, m, PojoUtils.FieldNaming.CONSISTENT); // Shouldn't need to do this manually. . . this.model = (mml._model == null ? null : new KeyV3.ModelKeyV3(mml._model._key)); this.frame = (mml._frame == null ? null : new KeyV3.FrameKeyV3(mml._frame._key)); this.predictions_frame = (mml._predictions_name == null ? null : new KeyV3.FrameKeyV3(Key.<Frame>make(mml._predictions_name))); this.deviances_frame = (mml._deviances_name == null ? null : new KeyV3.FrameKeyV3(Key.<Frame>make(mml._deviances_name))); this.background_frame = (mml._background_frame == null ?
null: new KeyV3.FrameKeyV3(mml._background_frame._key)); this.reconstruction_error = mml._reconstruction_error; this.reconstruction_error_per_feature = mml._reconstruction_error_per_feature; this.deep_features_hidden_layer = mml._deep_features_hidden_layer; this.deep_features_hidden_layer_name = mml._deep_features_hidden_layer_name; this.reconstruct_train = mml._reconstruct_train; this.project_archetypes = mml._project_archetypes; this.reverse_transform = mml._reverse_transform; this.leaf_node_assignment = mml._leaf_node_assignment; this.exemplar_index = mml._exemplar_index; this.deviances = mml._deviances; this.auc_type = mml._auc_type; this.top_n = mml._top_n; this.bottom_n = mml._bottom_n; this.compare_abs = mml._compare_abs; this.auuc_type = mml._auuc_type; this.auuc_nbins = mml._auuc_nbins; this.custom_auuc_thresholds = mml._custom_auuc_thresholds; this.output_space = mml._output_space; this.output_per_reference = mml._output_per_reference; if (null != mml._model_metrics) { this.model_metrics = new ModelMetricsBaseV3[mml._model_metrics.length]; for( int i=0; i<model_metrics.length; i++ ) { ModelMetrics mm = mml._model_metrics[i]; this.model_metrics[i] = (ModelMetricsBaseV3) SchemaServer.schema(3, mm.getClass()).fillFromImpl(mm); } } else { this.model_metrics = new ModelMetricsBaseV3[0]; } return this; } } // ModelMetricsListSchemaV3 // TODO: almost identical to ModelsHandler; refactor public static ModelMetrics getFromDKV(Key key) { if (null == key) throw new IllegalArgumentException("Got null key."); Value v = DKV.get(key); if (null == v) throw new IllegalArgumentException("Did not find key: " + key.toString()); Iced ice = v.get(); if (! (ice instanceof ModelMetrics)) throw new IllegalArgumentException("Expected a ModelMetrics for key: " + key.toString() + "; got a: " + ice.getClass()); return (ModelMetrics)ice; } /** Return a single ModelMetrics. */ @SuppressWarnings("unused") // called through reflection by RequestServer public ModelMetricsListSchemaV3 fetch(int version, ModelMetricsListSchemaV3 s) { ModelMetricsList m = s.createAndFillImpl(); s.fillFromImpl(m.fetch()); return s; } /** Delete one or more ModelMetrics. */ @SuppressWarnings("unused") // called through reflection by RequestServer public ModelMetricsListSchemaV3 delete(int version, ModelMetricsListSchemaV3 s) { ModelMetricsList m = s.createAndFillImpl(); s.fillFromImpl(m.delete()); return s; } /** * Score a frame with the given model and return just the metrics. * <p> * NOTE: ModelMetrics are now always being created by model.score. . . */ @SuppressWarnings("unused") // called through reflection by RequestServer public ModelMetricsListSchemaV3 score(int version, ModelMetricsListSchemaV3 s) { // parameters checking: if (null == s.model) throw new H2OIllegalArgumentException("model", "predict", s.model); if (null == DKV.get(s.model.name)) throw new H2OKeyNotFoundArgumentException("model", "predict", s.model.name); if (null == s.frame) throw new H2OIllegalArgumentException("frame", "predict", s.frame); if (null == DKV.get(s.frame.name)) throw new H2OKeyNotFoundArgumentException("frame", "predict", s.frame.name); ModelMetricsList parms = s.createAndFillImpl(); String customMetricFunc = s.custom_metric_func; if (customMetricFunc == null) { customMetricFunc = parms._model._parms._custom_metric_func; } // set the user-given AUC type, used for scoring testing data, e.g.
from h2o.performance function MultinomialAucType at = parms._model._parms._auc_type; if(s.auc_type != null) { parms._model._parms._auc_type = MultinomialAucType.valueOf(s.auc_type.toUpperCase()); } AUUC.AUUCType auucType = parms._model._parms._auuc_type; if(s.auuc_type != null){ parms._model._parms._auuc_type = AUUC.AUUCType.valueOf(s.auuc_type); } parms._model.score(parms._frame, parms._predictions_name, null, true, CFuncRef.from(customMetricFunc)).remove(); // throw away predictions, keep metrics as a side-effect ModelMetricsListSchemaV3 mm = this.fetch(version, s); // TODO: for now only binary predictors write an MM object. // For the others cons one up here to return the predictions frame. if (null == mm) mm = new ModelMetricsListSchemaV3(); if (null == mm.model_metrics || 0 == mm.model_metrics.length) { Log.warn("Score() did not return a ModelMetrics for model: " + s.model + " on frame: " + s.frame); } // set original auc type back parms._model._parms._auc_type = at; parms._model._parms._auuc_type = auucType; return mm; } public static final class ModelMetricsMaker extends Iced { public String _predictions_frame; public String _actuals_frame; public String[] _domain; public DistributionFamily _distribution; public MultinomialAucType _auc_type; public AUUC.AUUCType _auuc_type; public int _auuc_nbins; public ModelMetrics _model_metrics; } public static final class ModelMetricsMakerSchemaV3 extends SchemaV3<ModelMetricsMaker, ModelMetricsMakerSchemaV3> { @API(help="Predictions Frame.", direction=API.Direction.INOUT) public String predictions_frame; @API(help="Actuals Frame.", direction=API.Direction.INOUT) public String actuals_frame; @API(help="Weights Frame.", direction=API.Direction.INOUT) public String weights_frame; @API(help="Treatment Frame.", direction=API.Direction.INOUT) public String treatment_frame; @API(help="Domain (for classification).", direction=API.Direction.INOUT) public String[] domain; @API(help="Distribution (for regression).", direction=API.Direction.INOUT, values = { "gaussian", "poisson", "gamma", "laplace" }) public DistributionFamily distribution; @API(help = "Default AUC type (for multinomial classification).", valuesProvider = ModelParamsValuesProviders.MultinomialAucTypeSchemeValuesProvider.class, level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true) public MultinomialAucType auc_type; @API(help = "Default AUUC type (for uplift binomial classification).", valuesProvider = ModelParamsValuesProviders.UpliftAuucTypeSchemeValuesProvider.class, level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true) public AUUC.AUUCType auuc_type; @API(help = "Number of bins to calculate AUUC (for uplift binomial classification).", level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true) public int auuc_nbins; @API(help = "Custom AUUC thresholds (for uplift binomial classification).", level = API.Level.secondary, direction = API.Direction.INOUT, gridable = true) public double[] custom_auuc_thresholds; @API(help="Model Metrics.", direction=API.Direction.OUTPUT) public ModelMetricsBaseV3 model_metrics; } /** * Make a model metrics object from actual and predicted values */ @SuppressWarnings("unused") // called through reflection by RequestServer public ModelMetricsMakerSchemaV3 make(int version, ModelMetricsMakerSchemaV3 s) { // parameters checking: if (null == s.predictions_frame) throw new H2OIllegalArgumentException("predictions_frame", "make", s.predictions_frame); Frame pred = 
DKV.getGet(s.predictions_frame); if (null == pred) throw new H2OKeyNotFoundArgumentException("predictions_frame", "make", s.predictions_frame); if (null == s.actuals_frame) throw new H2OIllegalArgumentException("actuals_frame", "make", s.actuals_frame); Frame act = DKV.getGet(s.actuals_frame); if (null == act) throw new H2OKeyNotFoundArgumentException("actuals_frame", "make", s.actuals_frame); Vec weights = null; if (null != s.weights_frame) { Frame weightsFrame = DKV.getGet(s.weights_frame); if (null == weightsFrame) throw new H2OKeyNotFoundArgumentException("weights_frame", "make", s.weights_frame); weights = weightsFrame.anyVec(); } Vec treatment = null; if(null != s.treatment_frame){ Frame treatmentFrame = DKV.getGet(s.treatment_frame); if (null == treatmentFrame) throw new H2OKeyNotFoundArgumentException("treatment_frame", "make", s.treatment_frame); treatment = treatmentFrame.anyVec(); if(s.auuc_type == null) s.auuc_type = AUUC.AUUCType.AUTO; if(s.auuc_nbins < -1 || s.auuc_nbins == 0) throw new H2OIllegalArgumentException("auuc_bins", "make", "The value has to be -1 or higher than 0."); if(s.custom_auuc_thresholds != null) { if (s.custom_auuc_thresholds.length == 0) throw new H2OIllegalArgumentException("custom_auuc_thresholds", "make", "The length of the array has to be higher than 0."); } } if (s.domain ==null) { if (pred.numCols()!=1) { throw new H2OIllegalArgumentException("predictions_frame", "make", "For regression problems (domain=null), the predictions_frame must have exactly 1 column."); } ModelMetricsRegression mm = ModelMetricsRegression.make(pred.anyVec(), act.anyVec(), weights, s.distribution); s.model_metrics = new ModelMetricsRegressionV3().fillFromImpl(mm); } else if (s.domain.length==2) { if (treatment != null) { ModelMetricsBinomialUplift mm = ModelMetricsBinomialUplift.make(pred.anyVec(), act.anyVec(), treatment, s.domain, s.auuc_type, s.auuc_nbins, s.custom_auuc_thresholds); s.model_metrics = new ModelMetricsBinomialUpliftV3().fillFromImpl(mm); } else { if (pred.numCols()!=1) { throw new H2OIllegalArgumentException("predictions_frame", "make", "For domains with 2 class labels, the predictions_frame must have exactly one column containing the class-1 probabilities."); } ModelMetricsBinomial mm = ModelMetricsBinomial.make(pred.anyVec(), act.anyVec(), weights, s.domain); s.model_metrics = new ModelMetricsBinomialV3().fillFromImpl(mm); } } else if (s.domain.length>2){ if (pred.numCols()!=s.domain.length) { throw new H2OIllegalArgumentException("predictions_frame", "make", "For domains with " + s.domain.length + " class labels, the predictions_frame must have exactly " + s.domain.length + " columns containing the class-probabilities."); } if (s.distribution == DistributionFamily.ordinal) { ModelMetricsOrdinal mm = ModelMetricsOrdinal.make(pred, act.anyVec(), s.domain); s.model_metrics = new ModelMetricsOrdinalV3().fillFromImpl(mm); } else { ModelMetricsMultinomial mm = ModelMetricsMultinomial.make(pred, act.anyVec(), weights, s.domain, s.auc_type); s.model_metrics = new ModelMetricsMultinomialV3().fillFromImpl(mm); } } else { throw H2O.unimpl(); } return s; } private Model.Contributions getModelContributionsObject(ModelMetricsList params) { Model model = params._model; if (! 
(model instanceof Model.Contributions)) { String errorMessage = "Model type " + model._parms.algoName() + " doesn't support calculating Feature Contributions."; throw new H2OIllegalArgumentException(errorMessage); } return (Model.Contributions) model; } private Model.RowToTreeAssignment getModelRowToTreeAssignmentObject(ModelMetricsList params) { Model model = params._model; if (! (model instanceof Model.RowToTreeAssignment)) { String errorMessage = "Model type " + model._parms.algoName() + " doesn't support calculating row to tree assignment."; throw new H2OIllegalArgumentException(errorMessage); } return (Model.RowToTreeAssignment) model; } /** * Score a frame with the given model and return a Job that output a frame with predictions. * Do *not* calculate ModelMetrics. */ @SuppressWarnings("unused") // called through reflection by RequestServer public JobV3 predictAsync(int version, final ModelMetricsListSchemaV3 s) { // parameters checking: if (null == s.model) throw new H2OIllegalArgumentException("model", "predict", s.model); if (null == DKV.get(s.model.name)) throw new H2OKeyNotFoundArgumentException("model", "predict", s.model.name); if (null == s.frame) throw new H2OIllegalArgumentException("frame", "predict", s.frame); if (null == DKV.get(s.frame.name)) throw new H2OKeyNotFoundArgumentException("frame", "predict", s.frame.name); if (s.deviances || null != s.deviances_frame) throw new H2OIllegalArgumentException("deviances", "not supported for async", s.deviances_frame); final ModelMetricsList parms = s.createAndFillImpl(); long workAmount = parms._frame.anyVec().nChunks(); if (s.predict_contributions) { workAmount = parms._frame.anyVec().length(); if (null != parms._background_frame) { workAmount = ((Model.Contributions)parms._model).scoreContributionsWorkEstimate(parms._frame, parms._background_frame, s.output_per_reference); } if (null == parms._predictions_name) parms._predictions_name = "contributions_" + Key.make().toString().substring(0, 5) + "_" + parms._model._key.toString() + "_on_" + parms._frame._key.toString(); } else if (s.row_to_tree_assignment) { workAmount = parms._frame.anyVec().length(); if (null == parms._predictions_name) parms._predictions_name = "row_to_tree_assignment_" + Key.make().toString().substring(0, 5) + "_" + parms._model._key.toString() + "_on_" + parms._frame._key.toString(); } else if (s.deep_features_hidden_layer > 0 || s.deep_features_hidden_layer_name != null) { if (null == parms._predictions_name) parms._predictions_name = "deep_features" + Key.make().toString().substring(0, 5) + "_" + parms._model._key.toString() + "_on_" + parms._frame._key.toString(); } else if (null == parms._predictions_name) { parms._predictions_name = "transformation" + Key.make().toString().substring(0, 5) + "_" + parms._model._key.toString() + "_on_" + parms._frame._key.toString(); } final Job<Frame> j = new Job<>(Key.make(parms._predictions_name), Frame.class.getName(), "transformation"); H2O.H2OCountedCompleter work = new H2O.H2OCountedCompleter() { @Override public void compute2() { if (s.predict_contributions) { Model.Contributions mc = getModelContributionsObject(parms); Model.Contributions.ContributionsOutputFormat outputFormat = null == s.predict_contributions_output_format ? 
Model.Contributions.ContributionsOutputFormat.Original : s.predict_contributions_output_format; Model.Contributions.ContributionsOptions options = new Model.Contributions.ContributionsOptions(); options.setOutputFormat(outputFormat) .setTopN(parms._top_n) .setBottomN(parms._bottom_n) .setCompareAbs(parms._compare_abs) .setOutputSpace(parms._output_space) .setOutputPerReference(parms._output_per_reference); mc.scoreContributions(parms._frame, Key.make(parms._predictions_name), j, options, parms._background_frame); } else if (s.row_to_tree_assignment) { Model.RowToTreeAssignment mc = getModelRowToTreeAssignmentObject(parms); mc.rowToTreeAssignment(parms._frame, Key.make(parms._predictions_name), j); } else if (s.deep_features_hidden_layer < 0 && s.deep_features_hidden_layer_name == null) { parms._model.score(parms._frame, parms._predictions_name, j, false, CFuncRef.from(s.custom_metric_func)); } else if (s.deep_features_hidden_layer_name != null){ Frame predictions; try { predictions = ((Model.DeepFeatures) parms._model).scoreDeepFeatures(parms._frame, s.deep_features_hidden_layer_name, j); } catch(IllegalArgumentException e) { Log.warn(e.getMessage()); throw e; } if (predictions!=null) { predictions = new Frame(Key.make(parms._predictions_name), predictions.names(), predictions.vecs()); DKV.put(predictions._key, predictions); } } else { Frame predictions = ((Model.DeepFeatures) parms._model).scoreDeepFeatures(parms._frame, s.deep_features_hidden_layer, j); predictions = new Frame(Key.make(parms._predictions_name), predictions.names(), predictions.vecs()); DKV.put(predictions._key, predictions); } if ((parms._model._warningsP != null) && (parms._model._warningsP.length > 0)) { // add prediction warning here only String[] allWarnings = (String[]) ArrayUtils.addAll(j.warns(), parms._model._warningsP); // copy both over j.setWarnings(allWarnings); } tryComplete(); } }; j.start(work, workAmount); return new JobV3().fillFromImpl(j); } /** * Score a frame with the given model and return the metrics AND the prediction frame. 
*/ @SuppressWarnings("unused") // called through reflection by RequestServer public ModelMetricsListSchemaV3 predict(int version, ModelMetricsListSchemaV3 s) { // parameters checking: if (s.model == null) throw new H2OIllegalArgumentException("model", "predict", null); if (DKV.get(s.model.name) == null) throw new H2OKeyNotFoundArgumentException("model", "predict", s.model.name); // Aggregator doesn't need a Frame to 'predict' if (s.exemplar_index < 0) { if (s.frame == null) throw new H2OIllegalArgumentException("frame", "predict", null); if (DKV.get(s.frame.name) == null) throw new H2OKeyNotFoundArgumentException("frame", "predict", s.frame.name); } ModelMetricsList parms = s.createAndFillImpl(); Frame predictions; Frame deviances = null; if (!s.reconstruction_error && !s.reconstruction_error_per_feature && s.deep_features_hidden_layer < 0 && !s.project_archetypes && !s.reconstruct_train && !s.leaf_node_assignment && !s.predict_staged_proba && !s.predict_contributions && !s.row_to_tree_assignment && !s.feature_frequencies && s.exemplar_index < 0) { if (null == parms._predictions_name) parms._predictions_name = "predictions" + Key.make().toString().substring(0,5) + "_" + parms._model._key.toString() + "_on_" + parms._frame._key.toString(); String customMetricFunc = s.custom_metric_func; if (customMetricFunc == null) { customMetricFunc = parms._model._parms._custom_metric_func; } predictions = parms._model.score(parms._frame, parms._predictions_name, null, true, CFuncRef.from(customMetricFunc)); if (s.deviances) { if (!parms._model.isSupervised()) throw new H2OIllegalArgumentException("Deviances can only be computed for supervised models."); if (null == parms._deviances_name) parms._deviances_name = "deviances" + Key.make().toString().substring(0, 5) + "_" + parms._model._key.toString() + "_on_" + parms._frame._key.toString(); deviances = parms._model.computeDeviances(parms._frame, predictions, parms._deviances_name); } } else { if (s.deviances) throw new H2OIllegalArgumentException("Cannot compute deviances in combination with other special predictions."); if (Model.DeepFeatures.class.isAssignableFrom(parms._model.getClass())) { if (s.reconstruction_error || s.reconstruction_error_per_feature) { if (s.deep_features_hidden_layer >= 0) throw new H2OIllegalArgumentException("Can only compute either reconstruction error OR deep features.", ""); if (null == parms._predictions_name) parms._predictions_name = "reconstruction_error" + Key.make().toString().substring(0,5) + "_" + parms._model._key.toString() + "_on_" + parms._frame._key.toString(); predictions = ((Model.DeepFeatures) parms._model).scoreAutoEncoder(parms._frame, Key.make(parms._predictions_name), parms._reconstruction_error_per_feature); } else { if (s.deep_features_hidden_layer < 0) throw new H2OIllegalArgumentException("Deep features hidden layer index must be >= 0.", ""); if (null == parms._predictions_name) parms._predictions_name = "deep_features" + Key.make().toString().substring(0,5) + "_" + parms._model._key.toString() + "_on_" + parms._frame._key.toString(); predictions = ((Model.DeepFeatures) parms._model).scoreDeepFeatures(parms._frame, s.deep_features_hidden_layer); } predictions = new Frame(Key.<Frame>make(parms._predictions_name), predictions.names(), predictions.vecs()); DKV.put(predictions._key, predictions); } else if(Model.GLRMArchetypes.class.isAssignableFrom(parms._model.getClass())) { if(s.project_archetypes) { if (parms._predictions_name == null) parms._predictions_name = "reconstructed_archetypes_" + 
Key.make().toString().substring(0, 5) + "_" + parms._model._key.toString() + "_of_" + parms._frame._key.toString(); predictions = ((Model.GLRMArchetypes) parms._model).scoreArchetypes(parms._frame, Key.<Frame>make(parms._predictions_name), s.reverse_transform); } else { assert s.reconstruct_train; if (parms._predictions_name == null) parms._predictions_name = "reconstruction_" + Key.make().toString().substring(0, 5) + "_" + parms._model._key.toString() + "_of_" + parms._frame._key.toString(); predictions = ((Model.GLRMArchetypes) parms._model).scoreReconstruction(parms._frame, Key.<Frame>make(parms._predictions_name), s.reverse_transform); } } else if(s.leaf_node_assignment) { assert(Model.LeafNodeAssignment.class.isAssignableFrom(parms._model.getClass())); if (null == parms._predictions_name) parms._predictions_name = "leaf_node_assignment" + Key.make().toString().substring(0, 5) + "_" + parms._model._key.toString() + "_on_" + parms._frame._key.toString(); Model.LeafNodeAssignment.LeafNodeAssignmentType type = null == s.leaf_node_assignment_type ? Model.LeafNodeAssignment.LeafNodeAssignmentType.Path : s.leaf_node_assignment_type; predictions = ((Model.LeafNodeAssignment) parms._model).scoreLeafNodeAssignment(parms._frame, type, Key.<Frame>make(parms._predictions_name)); } else if(s.feature_frequencies) { assert(Model.FeatureFrequencies.class.isAssignableFrom(parms._model.getClass())); if (null == parms._predictions_name) parms._predictions_name = "feature_frequencies" + Key.make().toString().substring(0, 5) + "_" + parms._model._key.toString() + "_on_" + parms._frame._key.toString(); predictions = ((Model.FeatureFrequencies) parms._model).scoreFeatureFrequencies(parms._frame, Key.<Frame>make(parms._predictions_name)); } else if(s.predict_staged_proba) { if (! (parms._model instanceof Model.StagedPredictions)) { throw new H2OIllegalArgumentException("Model type " + parms._model._parms.algoName() + " doesn't support Staged Predictions."); } if (null == parms._predictions_name) parms._predictions_name = "staged_proba_" + Key.make().toString().substring(0, 5) + "_" + parms._model._key.toString() + "_on_" + parms._frame._key.toString(); predictions = ((Model.StagedPredictions) parms._model).scoreStagedPredictions(parms._frame, Key.<Frame>make(parms._predictions_name)); } else if(s.predict_contributions) { Model.Contributions mc = getModelContributionsObject(parms); if (null == parms._predictions_name) parms._predictions_name = "contributions_" + Key.make().toString().substring(0, 5) + "_" + parms._model._key.toString() + "_on_" + parms._frame._key.toString(); Model.Contributions.ContributionsOutputFormat outputFormat = null == s.predict_contributions_output_format ? 
Model.Contributions.ContributionsOutputFormat.Original : s.predict_contributions_output_format; Model.Contributions.ContributionsOptions options = new Model.Contributions.ContributionsOptions() .setOutputFormat(outputFormat) .setOutputSpace(parms._output_space) .setOutputPerReference(parms._output_per_reference); predictions = mc.scoreContributions(parms._frame, Key.make(parms._predictions_name), null, options); } else if(s.row_to_tree_assignment) { Model.RowToTreeAssignment mc = getModelRowToTreeAssignmentObject(parms); if (null == parms._predictions_name) parms._predictions_name = "row_to_tree_assignment" + Key.make().toString().substring(0, 5) + "_" + parms._model._key.toString() + "_on_" + parms._frame._key.toString(); predictions = mc.rowToTreeAssignment(parms._frame, Key.make(parms._predictions_name), null); } else if(s.exemplar_index >= 0) { assert(Model.ExemplarMembers.class.isAssignableFrom(parms._model.getClass())); if (null == parms._predictions_name) parms._predictions_name = "members_" + parms._model._key.toString() + "_for_exemplar_" + parms._exemplar_index; predictions = ((Model.ExemplarMembers) parms._model).scoreExemplarMembers(Key.<Frame>make(parms._predictions_name), parms._exemplar_index); } else throw new H2OIllegalArgumentException("Requires a Deep Learning, GLRM, DRF or GBM model.", "Model must implement specific methods."); } ModelMetricsListSchemaV3 mm = this.fetch(version, s); // TODO: for now only binary predictors write an MM object. // For the others cons one up here to return the predictions frame. if (null == mm) mm = new ModelMetricsListSchemaV3(); mm.predictions_frame = new KeyV3.FrameKeyV3(predictions._key); if (parms._leaf_node_assignment) // don't show metrics when leaf node assignments are made mm.model_metrics = null; if (deviances !=null) mm.deviances_frame = new KeyV3.FrameKeyV3(deviances._key); if (null == mm.model_metrics || 0 == mm.model_metrics.length) { // There was no response in the test set -> cannot make a model_metrics object } else { mm.model_metrics[0].predictions = new FrameV3(predictions, 0, 100); // TODO: Should call schema(version) } return mm; } }
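// --- Example (not part of the H2O sources): the dispatch rule that
// ModelMetricsHandler.make() applies when turning a (predictions, actuals) pair
// into a metrics object, summarized as a standalone sketch.
public class MetricsKindSketch {
  static String metricsKind(String[] domain) {
    if (domain == null) return "regression: predictions must have exactly 1 column";
    if (domain.length == 2) return "binomial: 1 column of class-1 probabilities (uplift if a treatment frame is given)";
    if (domain.length > 2) return "multinomial/ordinal: " + domain.length + " probability columns expected";
    throw new IllegalArgumentException("a classification domain needs at least 2 labels");
  }
  public static void main(String[] args) {
    System.out.println(metricsKind(null));
    System.out.println(metricsKind(new String[]{"no", "yes"}));
    System.out.println(metricsKind(new String[]{"a", "b", "c"}));
  }
}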
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ModelsHandler.java
package water.api; import hex.*; import water.*; import water.api.schemas3.*; import water.exceptions.H2OIllegalArgumentException; import water.exceptions.H2OKeyNotFoundArgumentException; import water.exceptions.H2OKeyWrongTypeArgumentException; import water.exceptions.H2OKeysNotFoundArgumentException; import water.fvec.Frame; import water.persist.Persist; import water.util.FileUtils; import water.util.JCodeGen; import water.util.TwoDimTable; import java.io.File; import java.io.IOException; import java.io.OutputStream; import java.net.URI; import java.util.*; public class ModelsHandler<I extends ModelsHandler.Models, S extends SchemaV3<I,S>> extends Handler { /** Class which contains the internal representation of the models list and params. */ public static final class Models extends Iced<Models> { public Key model_id; public Model[] models; public boolean find_compatible_frames = false; /** * Fetch all the Frames so we can see if they are compatible with our Model(s). */ protected Map<Frame, Set<String>> fetchFrameCols() { if (!find_compatible_frames) return null; // caches for this request Frame[] all_frames = Frame.fetchAll(); Map<Frame, Set<String>> all_frames_cols = new HashMap<>(); for (Frame f : all_frames) all_frames_cols.put(f, new HashSet<>(Arrays.asList(f._names))); return all_frames_cols; } /** * For a given model return an array of the compatible frames. * * @param model The model to fetch the compatible frames for. * @param all_frames_cols A Map of Frame to a Set of its column names. * @return all frames compatible with a given model */ private static Frame[] findCompatibleFrames(Model<?, ?, ?> model, Map<Frame, Set<String>> all_frames_cols) { List<Frame> compatible_frames = new ArrayList<>(); Set<String> model_column_names = new HashSet<>(Arrays.asList(model._output._names)); for (Map.Entry<Frame, Set<String>> entry : all_frames_cols.entrySet()) { Frame frame = entry.getKey(); Set<String> frame_cols = entry.getValue(); if (frame_cols.containsAll(model_column_names)) { // See if adapt throws an exception or not. try { if( model.adaptTestForTrain(new Frame(frame), false, false).length == 0 ) compatible_frames.add(frame); } catch( IllegalArgumentException e ) { // skip } } } return compatible_frames.toArray(new Frame[0]); } } /** Return all the models. */ @SuppressWarnings("unused") // called through reflection by RequestServer public ModelsV3 list(int version, ModelsV3 s) { Models m = s.createAndFillImpl(); m.models = Model.fetchAll(); return s.fillFromImplWithSynopsis(m); } // TODO: almost identical to ModelsHandler; refactor public static Model getFromDKV(String param_name, String key_str) { return getFromDKV(param_name, Key.make(key_str)); } // TODO: almost identical to ModelsHandler; refactor public static Model getFromDKV(String param_name, Key key) { if (key == null) throw new H2OIllegalArgumentException(param_name, "Models.getFromDKV()", null); Value v = DKV.get(key); if (v == null) throw new H2OKeyNotFoundArgumentException(param_name, key.toString()); Iced ice = v.get(); if (! (ice instanceof Model)) throw new H2OKeyWrongTypeArgumentException(param_name, key.toString(), Model.class, ice.getClass()); return (Model)ice; } /** Return a single model. */ @SuppressWarnings("unused") // called through reflection by RequestServer public StreamingSchema fetchPreview(int version, ModelsV3 s) { s.preview = true; return fetchJavaCode(version, s); } /** Return a single model. 
*/ @SuppressWarnings("unused") // called through reflection by RequestServer public ModelsV3 fetch(int version, ModelsV3 s) { Model model = getFromDKV("key", s.model_id.key()); s.models = new ModelSchemaV3[1]; s.models[0] = (ModelSchemaV3)SchemaServer.schema(version, model).fillFromImpl(model); if (s.find_compatible_frames) { // TODO: refactor fetchFrameCols so we don't need this Models object Models m = new Models(); m.models = new Model[1]; m.models[0] = model; m.find_compatible_frames = true; Frame[] compatible = Models.findCompatibleFrames(model, m.fetchFrameCols()); s.compatible_frames = new FrameV3[compatible.length]; // TODO: FrameBaseV3 ((ModelSchemaV3)s.models[0]).compatible_frames = new String[compatible.length]; int i = 0; for (Frame f : compatible) { s.compatible_frames[i] = new FrameV3(f); ((ModelSchemaV3)s.models[0]).compatible_frames[i] = f._key.toString(); i++; } } return s; } public StreamingSchema fetchJavaCode(int version, ModelsV3 s) { final Model model = getFromDKV("key", s.model_id.key()); if (!model.havePojo()) { throw H2O.unimpl(String.format("%s does not support export to POJO", model._parms.fullName())); } final String filename = JCodeGen.toJavaId(s.model_id.key().toString()) + ".java"; // Return stream writer for given model return new StreamingSchema(model.new JavaModelStreamWriter(s.preview), filename); } @SuppressWarnings("unused") // called from the RequestServer through reflection public StreamingSchema fetchMojo(int version, ModelsV3 s) { Model model = getFromDKV("key", s.model_id.key()); if (!model.haveMojo()) { throw H2O.unimpl(String.format("%s does not support export to MOJO", model._parms.fullName())); } String filename = JCodeGen.toJavaId(s.model_id.key().toString()) + ".zip"; return new StreamingSchema(model.getMojo(), filename); } @SuppressWarnings("unused") // called from the RequestServer through reflection public StreamingSchema fetchBinaryModel(int version, ModelsV3 s) { Model<?, ?, ?> model = getFromDKV("key", s.model_id.key()); String filename = JCodeGen.toJavaId(s.model_id.key().toString()); StreamWriteOption[] options = s.getModelExportOptions(); StreamWriter sw = DelegatingStreamWriter.wrapWithOptions(model, options); return new StreamingSchema(sw, filename); } @SuppressWarnings("unused") // called from the RequestServer through reflection public JobV3 makePartialDependence(int version, PartialDependenceV3 s) { PartialDependence partialDependence; if (s.destination_key != null) partialDependence = new PartialDependence(s.destination_key.key()); else partialDependence = new PartialDependence(Key.<PartialDependence>make()); s.fillImpl(partialDependence); //fill frame_id/model_id/nbins/etc. 
return new JobV3(partialDependence.execImpl()); } @SuppressWarnings("unused") public FeatureInteractionV3 makeFeatureInteraction(int version, FeatureInteractionV3 s) { Model model = getFromDKV("key", s.model_id.key()); if (model instanceof FeatureInteractionsCollector) { TwoDimTable[][] featureInteractions = ((FeatureInteractionsCollector) model).getFeatureInteractionsTable(s.max_interaction_depth, s.max_tree_depth, s.max_deepening); if(featureInteractions == null){ return s; } s.feature_interaction = new TwoDimTableV3[featureInteractions[0].length + featureInteractions[2].length + 1]; for (int i = 0; i < featureInteractions[0].length; i++) { s.feature_interaction[i] = new TwoDimTableV3().fillFromImpl(featureInteractions[0][i]); } s.feature_interaction[featureInteractions[0].length] = new TwoDimTableV3().fillFromImpl(featureInteractions[1][0]); for (int i = 0; i < featureInteractions[2].length; i++) { s.feature_interaction[i + featureInteractions[0].length + 1] = new TwoDimTableV3().fillFromImpl(featureInteractions[2][i]); } return s; } else { throw H2O.unimpl(String.format("%s does not support feature interactions calculation", model._parms.fullName())); } } @SuppressWarnings("unused") public FriedmanPopescusHV3 makeFriedmansPopescusH(int version, FriedmanPopescusHV3 s) { Model model = getFromDKV("key", s.model_id.key()); if (model instanceof FriedmanPopescusHCollector) { s.h = ((FriedmanPopescusHCollector) model).getFriedmanPopescusH(s.frame._fr, s.variables); return s; } else { throw H2O.unimpl(String.format("%s does not support Friedman Popescus H calculation", model._parms.fullName())); } } @SuppressWarnings("unused") public SignificantRulesV3 makeSignificantRulesTable(int version, SignificantRulesV3 s) { Model model = getFromDKV("key", s.model_id.key()); if (model instanceof SignificantRulesCollector) { s.significant_rules_table = new TwoDimTableV3(((SignificantRulesCollector) model).getRuleImportanceTable()); return s; } else { throw H2O.unimpl(String.format("%s does not support significant rules collection", model._parms.fullName())); } } @SuppressWarnings("unused") // called from the RequestServer through reflection public PartialDependenceV3 fetchPartialDependence(int version, KeyV3.PartialDependenceKeyV3 s) { PartialDependence partialDependence = DKV.getGet(s.key()); return new PartialDependenceV3().fillFromImpl(partialDependence); } /** Remove an unlocked model. Fails if model is in-use. */ @SuppressWarnings("unused") // called through reflection by RequestServer public ModelsV3 delete(int version, ModelsV3 s) { Model model = getFromDKV("key", s.model_id.key()); model.delete(); // lock & remove return s; } /** * Remove ALL unlocked models. Throws IAE for all deletes that failed * (perhaps because the Models were locked & in-use).
*/ @SuppressWarnings("unused") // called through reflection by RequestServer public ModelsV3 deleteAll(int version, ModelsV3 models) { final Key[] keys = KeySnapshot.globalKeysOfClass(Model.class); ArrayList<String> missing = new ArrayList<>(); Futures fs = new Futures(); for (Key key : keys) { try { getFromDKV("(none)", key).delete(null, fs, true); } catch (IllegalArgumentException iae) { missing.add(key.toString()); } } fs.blockForPending(); if( missing.size() != 0 ) throw new H2OKeysNotFoundArgumentException("(none)", missing.toArray(new String[missing.size()])); return models; } @SuppressWarnings("unused") // called through reflection by RequestServer public ModelsV3 importModel(int version, ModelImportV3 mimport) { ModelsV3 s = Schema.newInstance(ModelsV3.class); try { Model<?, ?, ?> model = Model.importBinaryModel(mimport.dir); s.models = new ModelSchemaV3[]{(ModelSchemaV3) SchemaServer.schema(version, model).fillFromImpl(model)}; } catch (IOException | FSIOException e) { throw new H2OIllegalArgumentException("dir", "importModel", e); } return s; } @SuppressWarnings("unused") // called through reflection by RequestServer public ModelsV3 uploadModel(int version, ModelImportV3 mimport) { ModelsV3 s = Schema.newInstance(ModelsV3.class); try { Model<?, ?, ?> model = Model.uploadBinaryModel(mimport.dir); s.models = new ModelSchemaV3[]{(ModelSchemaV3) SchemaServer.schema(version, model).fillFromImpl(model)}; } catch (IOException | FSIOException e) { throw new H2OIllegalArgumentException("dir", "importModel", e); } return s; } @SuppressWarnings("unused") // called through reflection by RequestServer public ModelExportV3 exportModel(int version, ModelExportV3 mexport) { Model model = getFromDKV("model_id", mexport.model_id.key()); try { ModelExportOption[] options = mexport.getModelExportOptions(); URI targetUri = model.exportBinaryModel(mexport.dir, mexport.force, options); // mexport.dir: Really file, not dir // Send back mexport.dir = "file".equals(targetUri.getScheme()) ? new File(targetUri).getCanonicalPath() : targetUri.toString(); } catch (IOException | FSIOException e) { throw new H2OIllegalArgumentException("dir", "exportModel", e); } return mexport; } @SuppressWarnings("unused") // called through reflection by RequestServer public ModelExportV3 exportMojo(int version, ModelExportV3 mexport) { Model model = getFromDKV("model_id", mexport.model_id.key()); try { URI targetUri = model.exportMojo(mexport.dir, mexport.force); // mexport.dir: Really file, not dir // Send back mexport.dir = "file".equals(targetUri.getScheme()) ? new File(targetUri).getCanonicalPath() : targetUri.toString(); } catch (IOException e) { throw new H2OIllegalArgumentException("dir", "exportModel", e); } return mexport; } @SuppressWarnings("unused") // called through reflection by RequestServer public ModelExportV3 exportModelDetails(int version, ModelExportV3 mexport){ Model model = getFromDKV("model_id", mexport.model_id.key()); try { URI targetUri = FileUtils.getURI(mexport.dir); // Really file, not dir Persist p = H2O.getPM().getPersistForURI(targetUri); //Make model schema before exporting ModelSchemaV3 modelSchema = (ModelSchemaV3)SchemaServer.schema(version, model).fillFromImpl(model); //Output model details to JSON OutputStream os = p.create(targetUri.toString(),mexport.force); os.write(modelSchema.writeJSON(new AutoBuffer()).buf()); // Send back mexport.dir = "file".equals(targetUri.getScheme()) ? 
new File(targetUri).getCanonicalPath() : targetUri.toString(); } catch (IOException e) { throw new H2OIllegalArgumentException("dir", "exportModelDetails", e); } return mexport; } }
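// For context, a hypothetical round trip through the binary export/import endpoints these
// handlers back (the routes come from RegisterV3Api later in this package; the model id,
// target path, and query-string parameter passing are assumptions, not from the source):
//   export a binary model to a location visible to the cluster:
//     curl "http://localhost:54321/99/Models.bin/my_model?dir=/tmp/my_model.bin&force=true"
//   later, import it back:
//     curl -X POST "http://localhost:54321/99/Models.bin/my_model?dir=/tmp/my_model.bin"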
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/NanoResponse.java
package water.api;

import water.util.FileUtils;
import water.util.Log;
import water.util.StringUtils;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Properties;

/**
 * HTTP response.
 * Return one of these from serve().
 */
public class NanoResponse {

  /**
   * Default constructor: response = HTTP_OK, data = mime = 'null'
   */
  public NanoResponse() {
    this.status = RequestServer.HTTP_OK;
  }

  /**
   * Basic constructor.
   */
  public NanoResponse(String status, String mimeType, InputStream data) {
    this.status = status;
    this.mimeType = mimeType;
    this.data = data;
  }

  /**
   * Convenience method that makes an InputStream out of given text.
   */
  public NanoResponse(String status, String mimeType, String txt) {
    this(status, mimeType, StringUtils.bytesOf(txt));
  }

  public NanoResponse(String status, String mimeType, byte[] data) {
    this.status = status;
    this.mimeType = mimeType;
    this.data = new ByteArrayInputStream(data);
  }

  public void writeTo(OutputStream os) {
    FileUtils.copyStream(data, os, 1024);
  }

  /**
   * Adds given line to the header.
   */
  public void addHeader(String name, String value) {
    header.put(name, value);
  }

  /**
   * HTTP status code after processing, e.g. "200 OK", HTTP_OK
   */
  public String status;

  /**
   * MIME type of content, e.g. "text/html"
   */
  public String mimeType;

  /**
   * Data of the response, may be null.
   */
  public InputStream data;

  /**
   * Headers for the HTTP response. Use addHeader()
   * to add lines.
   */
  public Properties header = new Properties();
}

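// A minimal, hypothetical usage sketch (not part of the original file): the status string,
// payload, and output stream are made up; only the NanoResponse API above is assumed.
class NanoResponseDemo {
  static void demo(java.io.OutputStream os) {
    NanoResponse r = new NanoResponse("200 OK", "application/json", "{\"status\":\"ok\"}\n");
    r.addHeader("Cache-Control", "no-cache"); // extra header lines go through addHeader()
    r.writeTo(os);                            // copies the payload stream in 1 KB chunks
  }
}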
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/NanoStreamResponse.java
package water.api;

import java.io.OutputStream;

/**
 * A NanoResponse whose payload is produced on the fly by a StreamWriter
 * instead of being read from an InputStream.
 */
public class NanoStreamResponse extends NanoResponse {

  public NanoStreamResponse(String status, String mimeType, StreamWriter streamWriter) {
    this.status = status;
    this.mimeType = mimeType;
    this.streamWriter = streamWriter;
  }

  @Override
  public void writeTo(OutputStream os) {
    streamWriter.writeTo(os);
  }

  public StreamWriter streamWriter;
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/NetworkTestHandler.java
package water.api;

import water.api.schemas3.NetworkBenchV3;
import water.api.schemas3.NetworkTestV3;
import water.init.NetworkBench;
import water.init.NetworkTest;

public class NetworkTestHandler extends Handler {

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public NetworkTestV3 fetch(int version, NetworkTestV3 js) {
    return js.fillFromImpl(new NetworkTest().execImpl());
  }

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public NetworkBenchV3 runBench(int version, NetworkBenchV3 nb) {
    return nb.fillFromImpl(new NetworkBench().doTest());
  }
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/NodePersistentStorageHandler.java
package water.api;

import water.H2O;
import water.api.schemas3.NodePersistentStorageV3;
import water.init.NodePersistentStorage;
import water.init.NodePersistentStorage.NodePersistentStorageEntry;

import java.util.UUID;

public class NodePersistentStorageHandler extends Handler {

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public NodePersistentStorageV3 configured(int version, NodePersistentStorageV3 s) {
    NodePersistentStorage nps = H2O.getNPS();
    s.configured = nps.configured();
    return s;
  }

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public NodePersistentStorageV3 exists(int version, NodePersistentStorageV3 s) {
    NodePersistentStorage nps = H2O.getNPS();
    if (s.name != null) {
      s.exists = nps.exists(s.category, s.name);
    } else {
      s.exists = nps.exists(s.category);
    }
    return s;
  }

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public NodePersistentStorageV3 put(int version, NodePersistentStorageV3 s) {
    NodePersistentStorage nps = H2O.getNPS();
    UUID uuid = UUID.randomUUID();
    s.name = uuid.toString();
    nps.put(s.category, s.name, s.value);
    return s;
  }

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public NodePersistentStorageV3 put_with_name(int version, NodePersistentStorageV3 s) {
    NodePersistentStorage nps = H2O.getNPS();
    nps.put(s.category, s.name, s.value);
    return s;
  }

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public NodePersistentStorageV3 get_as_string(int version, NodePersistentStorageV3 s) {
    NodePersistentStorage nps = H2O.getNPS();
    s.value = nps.get_as_string(s.category, s.name);
    return s;
  }

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public NodePersistentStorageV3 list(int version, NodePersistentStorageV3 s) {
    NodePersistentStorage nps = H2O.getNPS();
    NodePersistentStorageEntry[] entries = nps.list(s.category);
    s.entries = new NodePersistentStorageV3.NodePersistentStorageEntryV3[entries.length];
    int i = 0;
    for (NodePersistentStorageEntry entry : entries) {
      NodePersistentStorageV3.NodePersistentStorageEntryV3 e = new NodePersistentStorageV3.NodePersistentStorageEntryV3();
      e.category = entry._category;
      e.name = entry._name;
      e.size = entry._size;
      e.timestamp_millis = entry._timestamp_millis;
      s.entries[i] = e;
      i++;
    }
    return s;
  }

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public NodePersistentStorageV3 delete(int version, NodePersistentStorageV3 s) {
    NodePersistentStorage nps = H2O.getNPS();
    nps.delete(s.category, s.name);
    return s;
  }
}
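// For reference, the REST routes wired to this handler in RegisterV3Api (later in this
// package) can be exercised roughly like this. The category/name/value below are
// placeholders, and passing `value` as a query parameter is an assumption:
//   store a named value:  curl -X POST "http://localhost:54321/3/NodePersistentStorage/notebooks/flow1?value=..."
//   read it back:         curl "http://localhost:54321/3/NodePersistentStorage/notebooks/flow1"
//   list a category:      curl "http://localhost:54321/3/NodePersistentStorage/notebooks"
//   delete it:            curl -X DELETE "http://localhost:54321/3/NodePersistentStorage/notebooks/flow1"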
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/NpsBinServlet.java
package water.api;

import water.H2O;
import water.init.NodePersistentStorage;
import water.server.ServletUtils;
import water.util.FileUtils;
import water.util.Log;

import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.concurrent.atomic.AtomicLong;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Servlet that reads and writes raw binary values stored in NodePersistentStorage.
 */
public class NpsBinServlet extends HttpServlet {

  private static final Pattern URL_PATTERN = Pattern.compile(".*/NodePersistentStorage.bin/([^/]+)/([^/]+)");

  @Override
  protected void doGet(HttpServletRequest request, HttpServletResponse response) {
    String uri = ServletUtils.getDecodedUri(request);
    try {
      String[] params = ServletUtils.parseUriParams(uri, response, URL_PATTERN, 2);
      if (params == null) {
        return;
      }
      String categoryName = params[0];
      String keyName = params[1];

      NodePersistentStorage nps = H2O.getNPS();
      AtomicLong length = new AtomicLong();
      InputStream is = nps.get(categoryName, keyName, length);
      if (length.get() > (long) Integer.MAX_VALUE) {
        throw new Exception("NPS value size exceeds Integer.MAX_VALUE");
      }
      response.setContentType("application/octet-stream");
      response.setContentLength((int) length.get());
      response.addHeader("Content-Disposition", "attachment; filename=" + keyName + ".flow");
      ServletUtils.setResponseStatus(response, HttpServletResponse.SC_OK);
      OutputStream os = null;
      try {
        os = response.getOutputStream();
        FileUtils.copyStream(is, os, 2048);
      } finally {
        if (os != null) {
          try {
            os.close();
          } catch (Exception e) {
            Log.err(e);
          }
        }
      }
    } catch (Exception e) {
      ServletUtils.sendErrorResponse(response, e, uri);
    } finally {
      ServletUtils.logRequest("GET", request, response);
    }
  }

  @Override
  protected void doPost(HttpServletRequest request, HttpServletResponse response) {
    String uri = ServletUtils.getDecodedUri(request);
    try {
      String[] params = ServletUtils.parseUriParams(uri, response, URL_PATTERN, 2);
      if (params == null) {
        return;
      }
      String categoryName = params[0];
      String keyName = params[1];

      try (InputStream is = ServletUtils.extractInputStream(request, response)) {
        if (is == null) {
          return;
        }
        H2O.getNPS().put(categoryName, keyName, is);
        long length = H2O.getNPS().get_length(categoryName, keyName);
        String responsePayload = "{ " +
                "\"category\" : " + "\"" + categoryName + "\", " +
                "\"name\" : " + "\"" + keyName + "\", " +
                "\"total_bytes\" : " + length + " " +
                "}\n";
        response.setContentType("application/json");
        response.getWriter().write(responsePayload);
      }
    } catch (Exception e) {
      ServletUtils.sendErrorResponse(response, e, uri);
    } finally {
      ServletUtils.logRequest("POST", request, response);
    }
  }
}
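// A hypothetical invocation of this servlet. URL_PATTERN above only fixes the path suffix;
// the "/3" prefix, category, and name below are assumptions for illustration:
//   upload a binary blob:  curl -X POST -F "file=@notebook.flow" "http://localhost:54321/3/NodePersistentStorage.bin/notebooks/flow1"
//   download it again:     curl -o flow1.flow "http://localhost:54321/3/NodePersistentStorage.bin/notebooks/flow1"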
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ParseHandler.java
package water.api;

import water.DKV;
import water.Key;
import water.api.schemas3.JobV3;
import water.api.schemas3.ParseSVMLightV3;
import water.api.schemas3.ParseV3;
import water.exceptions.H2OIllegalArgumentException;
import water.fvec.Frame;
import water.fvec.Vec;
import water.parser.*;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

class ParseHandler extends Handler {

  // Entry point for parsing.
  @SuppressWarnings("unused") // called through reflection by RequestServer
  public ParseV3 parse(int version, ParseV3 parse) {
    ParserInfo parserInfo = ParserService.INSTANCE.getByName(parse.parse_type).info();
    ParseSetup setup = new ParseSetup(parserInfo, parse.separator, parse.single_quotes, parse.check_header,
            parse.number_columns, delNulls(parse.column_names), ParseSetup.strToColumnTypes(parse.column_types),
            parse.domains, parse.na_strings, null, new ParseWriter.ParseErr[0], parse.chunk_size,
            parse.decrypt_tool != null ? parse.decrypt_tool.key() : null, parse.skipped_columns,
            parse.custom_non_data_line_markers != null ? parse.custom_non_data_line_markers.getBytes() : null,
            parse.escapechar, parse.force_col_types, parse.tz_adjust_to_local);

    if (parse.source_frames == null)
      throw new H2OIllegalArgumentException("Data for Frame '" + parse.destination_frame.name + "' is not available. " +
              "Please check that the path is valid (for all H2O nodes).");
    Key[] srcs = new Key[parse.source_frames.length];
    for (int i = 0; i < parse.source_frames.length; i++) {
      srcs[i] = parse.source_frames[i].key();
    }

    if (parse.partition_by != null) {
      final String[][] partitionValues = syntheticColumValuesFromPartitions(parse.partition_by, srcs);
      setup.setSyntheticColumns(parse.partition_by, partitionValues, Vec.T_CAT);
    }

    if ((setup.getParseType().name().toLowerCase().equals("svmlight")
            || (setup.getParseType().name().toLowerCase().equals("avro")))
            && ((setup.getSkippedColumns() != null) && (setup.getSkippedColumns().length > 0)))
      throw new H2OIllegalArgumentException("Parser: skipped_columns are not supported for SVMlight or Avro parsers.");

    if (setup.getSkippedColumns() != null &&
            ((setup.get_parse_columns_indices() == null) || (setup.get_parse_columns_indices().length == 0)))
      throw new H2OIllegalArgumentException("Parser: all columns in the file are skipped and no H2OFrame" +
              " can be returned."); // Need this to send error message to R

    if (parse.force_col_types && parse.column_types != null)
      setup.setOrigColumnTypes(parse.column_types);

    parse.job = new JobV3(ParseDataset.parse(
            parse.destination_frame.key(), srcs, parse.delete_on_done, setup, parse.blocking
    )._job);
    if (parse.blocking) {
      Frame fr = DKV.getGet(parse.destination_frame.key());
      parse.rows = fr.numRows();
    }

    return parse;
  }

  /**
   * Extracts synthetic column values from the keys of parsed files, as the keys contain the path to the file and a
   * partitioned file path contains the values necessary for all the columns the dataset is partitioned by.
   *
   * @param partitionColumnNames Names of the columns the dataset is partitioned by. Those are expected to be
   *                             validated in the ParseSetup phase.
   * @param fileKeys Keys of all the files parsed, with key IDs containing the file path.
   * @return A two-dimensional {@link String} array where the first dimension is the file index and the second
   *         dimension holds the categorical values the dataset has been partitioned by, in the order of
   *         partitionColumnNames.
   * @throws IllegalArgumentException If one of the partitioned columns is not found in any of the paths provided.
   */
  private static String[][] syntheticColumValuesFromPartitions(final String[] partitionColumnNames, final Key[] fileKeys)
          throws IllegalArgumentException {
    final String[][] values = new String[fileKeys.length][partitionColumnNames.length];
    for (int fileIndex = 0; fileIndex < fileKeys.length; fileIndex++) {
      for (int partitionIndex = 0; partitionIndex < partitionColumnNames.length; partitionIndex++) {
        final Matcher matcher = Pattern.compile(partitionColumnNames[partitionIndex] + "=([^\\/\\\\]+)")
                .matcher(fileKeys[fileIndex].toString());
        if (!matcher.find()) {
          throw new IllegalArgumentException(String.format("Unable to find partition column '%s' in file key '%s'",
                  partitionColumnNames[partitionIndex], fileKeys[fileIndex].toString()));
        }
        final String partitionValue = matcher.group(1);
        values[fileIndex][partitionIndex] = partitionValue;
      }
    }
    return values;
  }

  private static String[] delNulls(String[] names) {
    if (names == null) return null;
    for (int i = 0; i < names.length; i++)
      if (names[i].equals("null")) names[i] = null;
    return names;
  }

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public JobV3 parseSVMLight(int version, ParseSVMLightV3 parse) {
    Key[] fkeys = new Key[parse.source_frames.length];
    for (int i = 0; i < fkeys.length; ++i)
      fkeys[i] = parse.source_frames[i].key();
    Key<Frame> destKey = parse.destination_frame == null ? null : parse.destination_frame.key();
    if (destKey == null)
      destKey = Key.make(ParseSetup.createHexName(parse.source_frames[0].toString()));
    ParseSetup setup = ParseSetup.guessSetup(fkeys, ParseSetup.makeSVMLightSetup());
    return new JobV3().fillFromImpl(ParseDataset.forkParseSVMLight(destKey, fkeys, setup));
  }
}
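// A self-contained, hypothetical illustration of the partition-value extraction above
// (not part of the original file): the file key is made up; the regex is the same one
// syntheticColumValuesFromPartitions builds for each partition column.
class PartitionValueExtractionDemo {
  public static void main(String[] args) {
    String fileKey = "hdfs://bucket/sales/year=2024/month=05/part-0.csv"; // hypothetical key
    java.util.regex.Matcher m =
        java.util.regex.Pattern.compile("year" + "=([^\\/\\\\]+)").matcher(fileKey);
    if (m.find()) {
      System.out.println(m.group(1)); // prints "2024"
    }
  }
}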
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ParseSetupHandler.java
package water.api;

import water.DKV;
import water.Key;
import water.api.schemas3.KeyV3;
import water.api.schemas3.ParseSetupV3;
import water.exceptions.H2OIllegalArgumentException;
import water.parser.ParseDataset;
import water.parser.ParseSetup;
import water.util.DistributedException;
import water.util.PojoUtils;

import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static water.parser.DefaultParserProviders.GUESS_INFO;

/** A class holding parser-setup flags: kind of parser, field separator, column
 *  header labels, whether or not to allow single-quotes to quote, number of
 *  columns discovered. */
public class ParseSetupHandler extends Handler {

  public ParseSetupV3 guessSetup(int version, ParseSetupV3 p) {
    if (p.source_frames == null || p.source_frames.length == 0)
      throw new H2OIllegalArgumentException("No file names given for parsing.");
    Key[] fkeys = new Key[p.source_frames.length];
    for (int i = 0; i < p.source_frames.length; i++) {
      fkeys[i] = p.source_frames[i].key();
      if (DKV.get(fkeys[i]) == null) throw new IllegalArgumentException("Key not loaded: " + p.source_frames[i]);
    }

    // corrects for json putting in empty strings in the place of empty sub-arrays
    if (p.na_strings != null) {
      for (int i = 0; i < p.na_strings.length; i++) {
        if (p.na_strings[i] != null && p.na_strings[i].length == 0) p.na_strings[i] = null;
      }
    }

    checkPartitionByColumnPresence(p.source_frames, p.partition_by);

    ParseSetup ps;
    try {
      ps = new ParseSetup(p);
      ps = ParseSetup.guessSetup(fkeys, ps);
    } catch (Throwable ex) {
      Throwable ex2 = ex;
      if (ex instanceof DistributedException) ex2 = ex.getCause();
      if (ex2 instanceof ParseDataset.H2OParseException) throw new H2OIllegalArgumentException(ex2.getMessage());
      throw ex;
    }
    ps.setSkippedColumns(p.skipped_columns); // setup the skipped_columns here
    ps.setParseColumnIndices(ps.getNumberColumns(), ps.getSkippedColumns());

    if (ps.errs() != null && ps.errs().length > 0) {
      p.warnings = new String[ps.errs().length];
      for (int i = 0; i < ps.errs().length; ++i)
        p.warnings[i] = ps.errs()[i].toString();
    }
    // TODO: ParseSetup throws away the srcs list. . .
    if ((null == p.column_name_filter || "".equals(p.column_name_filter)) && (0 == p.column_offset) && (0 == p.column_count)) {
      // return the entire data preview
      PojoUtils.copyProperties(p, ps, PojoUtils.FieldNaming.ORIGIN_HAS_UNDERSCORES,
              new String[]{"destination_key", "source_keys", "column_types", "parse_type"});
      p.total_filtered_column_count = p.number_columns;
    } else {
      // have to manually copy the desired parts of p.data to apply either column_name_filter or column pagination or both
      PojoUtils.copyProperties(p, ps, PojoUtils.FieldNaming.ORIGIN_HAS_UNDERSCORES,
              new String[]{"destination_key", "source_keys", "column_types", "data", "parse_type"});

      String[] all_col_names = ps.getColumnNames();
      String[][] data = ps.getData();

      ArrayList<Integer> keep_indexes = new ArrayList<>();
      if (null != p.column_name_filter && !"".equals(p.column_name_filter)) {
        // filter and then paginate columns
        Pattern pattern = Pattern.compile(p.column_name_filter);
        Matcher m = pattern.matcher("dummy");

        for (int column = 0; column < all_col_names.length; column++) {
          m.reset(all_col_names[column]);
          if (m.matches()) keep_indexes.add(column);
        }
      } else {
        // paginate all columns
        // note: we do a little extra work below by treating this like the filter case, but the code is simpler
        for (int column = 0; column < all_col_names.length; column++) {
          keep_indexes.add(column);
        }
      }

      int width_to_return = Math.max(0, keep_indexes.size() - p.column_offset);
      if (p.column_count > 0) width_to_return = Math.min(width_to_return, p.column_count);
      String[][] filtered_data = new String[data.length][width_to_return];
      for (int row = 0; row < data.length; row++) {
        int output_column = 0;
        for (int input_column_index = p.column_offset; input_column_index < p.column_offset + width_to_return; input_column_index++) {
          // indirect through keep_indexes
          filtered_data[row][output_column++] = data[row][keep_indexes.get(input_column_index)];
        }
      }
      p.data = filtered_data;
      p.total_filtered_column_count = keep_indexes.size();
    }
    p.destination_frame = ParseSetup.createHexName(p.source_frames[0].toString());
    if (p.check_header == ParseSetup.HAS_HEADER && p.data != null && Arrays.equals(p.column_names, p.data[0]))
      p.data = Arrays.copyOfRange(p.data, 1, p.data.length);

    // Fill in data type names for each column.
    p.column_types = ps.getColumnTypeStrings();
    p.parse_type = ps.getParseType() != null ? ps.getParseType().name() : GUESS_INFO.name();
    return p;
  }

  /**
   * @param sourceFrames       Source frames provided by the user to parse
   * @param partitionByColumns partitionByColumn specified by the user
   */
  private void checkPartitionByColumnPresence(final KeyV3.FrameKeyV3[] sourceFrames, final String[] partitionByColumns) {
    if (partitionByColumns == null || partitionByColumns.length == 0) return;
    final Map<String, String> nonMatchingKeys = new HashMap<>();

    for (final String partitionColumn : partitionByColumns) {
      final Pattern pattern = Pattern.compile(".*" + partitionColumn + "=([^\\/\\\\]+).*");
      for (int i = 0; i < sourceFrames.length; i++) {
        final String framePath = sourceFrames[i].key().toString();
        final Matcher matcher = pattern.matcher(framePath);
        if (!matcher.matches()) {
          nonMatchingKeys.put(framePath, partitionColumn);
        }
      }
    }

    if (nonMatchingKeys.size() == 0) return;

    final StringBuilder errMsgBuilder = new StringBuilder("The following files do not contain required partitionBy columns on their path: ");
    nonMatchingKeys.entrySet().forEach(nonMatching -> {
      errMsgBuilder.append('\n');
      errMsgBuilder.append("File: ");
      errMsgBuilder.append(nonMatching.getKey());
      errMsgBuilder.append(" | Missing column: ");
      errMsgBuilder.append(nonMatching.getValue());
    });
    throw new IllegalArgumentException(errMsgBuilder.toString());
  }
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ParseTypeValuesProvider.java
package water.api;

import water.parser.ParserService;

/**
 * Provides the names of all registered parser types.
 */
public class ParseTypeValuesProvider implements ValuesProvider {
  @Override
  public String[] values() {
    return ParserService.INSTANCE.getAllProviderNames(true);
  }
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/PingHandler.java
package water.api;

import water.H2O;
import water.H2ONode;
import water.api.schemas3.PingV3;

public class PingHandler extends Handler {

  // Timestamp (msec since the epoch) of the last access to the '3/Ping' endpoint on this node
  public static long lastAccessed;

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public PingV3 ping(int version, PingV3 ping) {
    ping.cloud_uptime_millis = System.currentTimeMillis() - H2O.START_TIME_MILLIS.get();
    ping.cloud_healthy = true;

    H2ONode[] members = H2O.CLOUD.members();
    if (null != members) {
      ping.nodes = new PingV3.NodeMemoryInfoV3[members.length];
      for (int i = 0; i < members.length; i++) {
        H2ONode n = members[i];
        ping.nodes[i] = new PingV3.NodeMemoryInfoV3(n.getIpPortString(), n._heartbeat.get_free_mem());
      }
    }

    lastAccessed = System.currentTimeMillis();
    return ping;
  }
}
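// The matching route is registered in RegisterV3Api (later in this package) as "GET /3/Ping",
// so a keep-alive probe from an external service is simply (host/port are hypothetical):
//   curl "http://localhost:54321/3/Ping"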
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/PostFileServlet.java
package water.api;

import water.Key;
import water.fvec.UploadFileVec;
import water.server.ServletUtils;

import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.InputStream;

/**
 * Servlet that accepts a raw file upload and stores it under a Frame key.
 */
public class PostFileServlet extends HttpServlet {

  @Override
  protected void doPost(HttpServletRequest request, HttpServletResponse response) {
    String uri = ServletUtils.getDecodedUri(request);
    try {
      String destination_frame = request.getParameter("destination_frame");
      if (destination_frame == null) {
        destination_frame = "upload" + Key.rand();
      }
      //
      // Here is an example of how to upload a file from the command line.
      //
      // curl -v -F "file=@allyears2k_headers.zip" "http://localhost:54321/3/PostFile.bin?destination_frame=a.zip"
      //
      // JSON Payload returned is:
      //     { "destination_frame": "key_name", "total_bytes": nnn }
      //
      try (InputStream is = ServletUtils.extractInputStream(request, response)) {
        if (is == null) {
          return;
        }
        UploadFileVec.ReadPutStats stats = new UploadFileVec.ReadPutStats();
        UploadFileVec.readPut(destination_frame, is, stats);
        String responsePayload = "{ " +
                "\"destination_frame\": \"" + destination_frame + "\", " +
                "\"total_bytes\": " + stats.total_bytes + " " +
                "}\n";
        response.setContentType("application/json");
        response.getWriter().write(responsePayload);
      }
    } catch (Exception e) {
      ServletUtils.sendErrorResponse(response, e, uri);
    } finally {
      ServletUtils.logRequest("POST", request, response);
    }
  }
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ProfilerHandler.java
package water.api;

import water.api.schemas3.ProfilerNodeV3;
import water.api.schemas3.ProfilerV3;
import water.util.JProfile;

public class ProfilerHandler extends Handler {

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public ProfilerV3 fetch(int version, ProfilerV3 p) {
    if (p.depth < 1) throw new IllegalArgumentException("depth must be >= 1.");

    JProfile profile = new JProfile(p.depth).execImpl(true);
    p.nodes = new ProfilerNodeV3[profile.nodes.length];
    int i = 0;
    for (JProfile.ProfileSummary s : profile.nodes) {
      ProfilerNodeV3 n = new ProfilerNodeV3();
      n.node_name = s.profile.node_name;
      n.timestamp = s.profile.timestamp;
      n.entries = new ProfilerNodeV3.ProfilerNodeEntryV3[s.profile.stacktraces.length];
      for (int j = 0; j < s.profile.stacktraces.length; j++) {
        ProfilerNodeV3.ProfilerNodeEntryV3 e = new ProfilerNodeV3.ProfilerNodeEntryV3();
        e.stacktrace = s.profile.stacktraces[j];
        e.count = s.profile.counts[j];
        n.entries[j] = e;
      }
      p.nodes[i] = n;
      i++;
    }
    return p;
  }
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/PutKeyServlet.java
package water.api;

import org.apache.commons.io.IOUtils;
import water.DKV;
import water.Key;
import water.Value;
import water.server.ServletUtils;

import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.InputStream;

/**
 * Upload any value to K/V store.
 *
 * Example
 * ```
 * curl -v -F "file=@my.jar" "http://localhost:54321/3/PutKey.bin?destination_key=my.jar"
 * ```
 */
public class PutKeyServlet extends HttpServlet {

  @Override
  protected void doPost(HttpServletRequest request, HttpServletResponse response) {
    String uri = ServletUtils.getDecodedUri(request);
    try {
      String destKey = paramDestinationKey(request, response);
      Boolean overwrite = paramOverwrite(request, response, true);
      if (!validate(destKey, overwrite, response)) {
        return;
      }
      try (InputStream is = ServletUtils.extractInputStream(request, response)) {
        if (is == null) {
          return;
        }
        //
        // Note: this is necessary since we are saving data into local K/V.
        //
        Key key = Key.make(destKey);
        int bytesStored = -1;
        if (DKV.get(key) == null || overwrite) {
          byte[] ba = IOUtils.toByteArray(is);
          // Save the binary data into K/V
          DKV.put(key, new Value(key, ba));
          bytesStored = ba.length;
        }
        String responsePayload = "{ " +
                "\"destination_key\": \"" + destKey + "\", " +
                "\"total_bytes\": " + bytesStored + " " +
                "}\n";
        response.setContentType("application/json");
        response.getWriter().write(responsePayload);
      }
    } catch (Exception e) {
      ServletUtils.sendErrorResponse(response, e, uri);
    } finally {
      ServletUtils.logRequest("POST", request, response);
    }
  }

  private String paramDestinationKey(HttpServletRequest request, HttpServletResponse response) {
    String keyName = request.getParameter("destination_key");
    return keyName != null ? keyName : "func_" + Key.rand();
  }

  private Boolean paramOverwrite(HttpServletRequest request, HttpServletResponse response, boolean defaultValue) {
    String val = request.getParameter("overwrite");
    return val != null ? Boolean.valueOf(val) : defaultValue;
  }

  private boolean validate(String destKey, Boolean overwrite, HttpServletResponse response) throws IOException {
    if (destKey == null) {
      ServletUtils.sendResponseError(response, HttpServletResponse.SC_BAD_REQUEST,
              "The field 'destination_key' is compulsory!");
      return false;
    }
    return true;
  }
}
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/RapidsHandler.java
package water.api;

import water.H2O;
import water.Key;
import water.api.schemas3.*;
import water.api.schemas3.RapidsHelpV3.RapidsExpressionV3;
import water.api.schemas4.InputSchemaV4;
import water.api.schemas4.SessionIdV4;
import water.exceptions.H2OIllegalArgumentException;
import water.rapids.Rapids;
import water.rapids.Session;
import water.rapids.Val;
import water.rapids.ast.AstRoot;
import water.util.Log;
import water.util.StringUtils;

import java.util.*;

public class RapidsHandler extends Handler {

  public RapidsSchemaV3 exec(int version, RapidsSchemaV3 rapids) {
    if (rapids == null) return null;
    if (!StringUtils.isNullOrEmpty(rapids.id))
      throw new H2OIllegalArgumentException("Field RapidsSchemaV3.id is deprecated and should not be set " + rapids.id);
    if (StringUtils.isNullOrEmpty(rapids.ast)) return rapids;
    if (StringUtils.isNullOrEmpty(rapids.session_id)) rapids.session_id = "_specialSess";

    final Session ses = getOrCreateSession(rapids.session_id);

    Val val;
    try {
      // This call is synchronized on the session instance
      val = Rapids.exec(rapids.ast, ses);
    } catch (IllegalArgumentException e) {
      throw e;
    } catch (Throwable t) {
      throw Log.throwErr(t);
    }
    switch (val.type()) {
      case Val.NUM:  return new RapidsNumberV3(val.getNum());
      case Val.NUMS: return new RapidsNumbersV3(val.getNums());
      case Val.ROW:  return new RapidsNumbersV3(val.getRow());
      case Val.STR:  return new RapidsStringV3(val.getStr());
      case Val.STRS: return new RapidsStringsV3(val.getStrs());
      case Val.FRM:  return new RapidsFrameV3(val.getFrame());
      case Val.MFRM: return new RapidsMapFrameV3(val.getMapFrame());
      case Val.FUN:  return new RapidsFunctionV3(val.getFun().toString());
      default:       throw H2O.fail();
    }
  }

  public static Session getSession(String sessionId) {
    return RapidsHandler.SESSIONS.get(sessionId);
  }

  Session getOrCreateSession(String sessionId) {
    Session ses = getSession(sessionId);
    if (ses == null) {
      ses = new Session(sessionId);
      RapidsHandler.SESSIONS.put(sessionId, ses);
    }
    return ses;
  }

  public RapidsHelpV3 genHelp(int version, SchemaV3 noschema) {
    Iterator<AstRoot> iterator = ServiceLoader.load(AstRoot.class).iterator();
    List<AstRoot> rapids = new ArrayList<>();
    while (iterator.hasNext()) {
      rapids.add(iterator.next());
    }
    ArrayList<RapidsExpressionV3> expressions = new ArrayList<>();
    for (AstRoot expr : rapids) {
      expressions.add(processAstClass(expr));
    }
    RapidsHelpV3 res = new RapidsHelpV3();
    res.expressions = expressions.toArray(new RapidsExpressionV3[expressions.size()]);
    return res;
  }

  private RapidsExpressionV3 processAstClass(AstRoot expr) {
    RapidsExpressionV3 target = new RapidsExpressionV3();
    target.name = expr.getClass().getSimpleName();
    target.pattern = expr.example();
    target.description = expr.description();
    return target;
  }

  /** Map of session-id (sent by the client) to the actual session instance. */
  public static HashMap<String, Session> SESSIONS = new HashMap<>();

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public InitIDV3 startSession(int version, InitIDV3 p) {
    p.session_key = "_sid" + Key.make().toString().substring(0, 5);
    p.session_properties_allowed = sessionPropertiesAllowed();
    return p;
  }

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public InitIDV3 endSession(int version, InitIDV3 p) {
    if (SESSIONS.get(p.session_key) != null) {
      try {
        SESSIONS.get(p.session_key).end(null);
        SESSIONS.remove(p.session_key);
      } catch (Throwable ex) {
        throw SESSIONS.get(p.session_key).endQuietly(ex);
      }
    }
    p.session_properties_allowed = sessionPropertiesAllowed();
    return p;
  }

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public SessionPropertyV3 setSessionProperty(int version, SessionPropertyV3 p) {
    if (!sessionPropertiesAllowed()) {
      throw new IllegalStateException("Using session properties is disabled by the admin.");
    }
    Session session = getOrCreateSession(p.session_key);
    session.setProperty(p.key, p.value);
    return p;
  }

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public SessionPropertyV3 getSessionProperty(int version, SessionPropertyV3 p) {
    p.value = null;
    if (!sessionPropertiesAllowed()) {
      return p;
    }
    Session session = getSession(p.session_key);
    if (session == null) {
      return p;
    }
    p.value = session.getProperty(p.key, null);
    return p;
  }

  boolean sessionPropertiesAllowed() {
    return H2O.getSysBoolProperty("session.allow_properties", true);
  }

  public static class StartSession4 extends RestApiHandler<InputSchemaV4, SessionIdV4> {
    @Override public String name() { return "newSession4"; }
    @Override public String help() { return "Start a new Rapids session, and return the session id."; }

    @Override
    public SessionIdV4 exec(int ignored, InputSchemaV4 input) {
      SessionIdV4 out = new SessionIdV4();
      out.session_key = "_sid" + Key.make().toString().substring(0, 5);
      return out;
    }
  }
}
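// The session lifecycle above maps onto the routes registered in RegisterV3Api/RegisterV4Api
// (GET and DELETE /3/InitID for v3; POST and DELETE /4/sessions/{session_key} for v4).
// A hypothetical v3 exchange — how session_key is passed back on DELETE is an assumption:
//   open a session:  curl "http://localhost:54321/3/InitID"   -> {"session_key": "_sidXXXXX", ...}
//   close it again:  curl -X DELETE "http://localhost:54321/3/InitID?session_key=_sidXXXXX"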
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/RecoveryHandler.java
package water.api;

import hex.faulttolerance.Recovery;
import water.H2O;
import water.Iced;
import water.api.schemas3.SchemaV3;

import java.util.Optional;

import static water.api.API.Direction.INPUT;

public class RecoveryHandler extends Handler {

  public static class ResumeV3 extends SchemaV3<Iced, ResumeV3> {
    @API(help = "Full path to the directory with recovery data", direction = INPUT)
    public String recovery_dir;
  }

  public ResumeV3 resume(final int version, final ResumeV3 params) {
    String recoveryDir;
    if (params.recovery_dir != null && params.recovery_dir.length() > 0) {
      recoveryDir = params.recovery_dir;
    } else {
      recoveryDir = H2O.ARGS.auto_recovery_dir;
    }
    Recovery.autoRecover(Optional.ofNullable(recoveryDir));
    return params;
  }
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/RegisterV3Api.java
package water.api;

/**
 * Registers the core V3 REST API endpoints.
 */
public class RegisterV3Api extends AbstractRegister {

  @Override
  public void registerEndPoints(RestApiContext context) {
    // Data
    context.registerEndpoint("createFrame", "POST /3/CreateFrame", CreateFrameHandler.class, "run",
        "Create a synthetic H2O Frame with random data. You can specify the number of rows/columns, as well as column" +
        " types: integer, real, boolean, time, string, categorical. The frame may also have a dedicated \"response\" " +
        "column, and some of the entries in the dataset may be created as missing.");

    context.registerEndpoint("splitFrame", "POST /3/SplitFrame", SplitFrameHandler.class, "run",
        "Split an H2O Frame.");

    context.registerEndpoint("generateInteractions", "POST /3/Interaction", InteractionHandler.class, "run",
        "Create interactions between categorical columns.");

    context.registerEndpoint("_missingInserter_run", "POST /3/MissingInserter", MissingInserterHandler.class, "run",
        "Insert missing values.");

    context.registerEndpoint("_dctTransformer_run", "POST /99/DCTTransformer", DCTTransformerHandler.class, "run",
        "Row-by-row discrete cosine transforms in 1D, 2D and 3D.");

    context.registerEndpoint("_tabulate_run", "POST /99/Tabulate", TabulateHandler.class, "run",
        "Tabulate one column vs another.");

    context.registerEndpoint("importFiles_deprecated", "GET /3/ImportFiles", ImportFilesHandler.class, "importFiles",
        "[DEPRECATED] Import raw data files into a single-column H2O Frame.");

    context.registerEndpoint("importFiles", "POST /3/ImportFiles", ImportFilesHandler.class, "importFiles",
        "Import raw data files into a single-column H2O Frame.");

    context.registerEndpoint("importFilesMulti", "POST /3/ImportFilesMulti", ImportFilesHandler.class, "importFilesMulti",
        "Import raw data files from multiple directories (or different data sources) into a single-column H2O Frame.");

    context.registerEndpoint("importSqlTable", "POST /99/ImportSQLTable", ImportSQLTableHandler.class, "importSQLTable",
        "Import SQL table into an H2O Frame.");

    context.registerEndpoint("importHiveTable", "POST /3/ImportHiveTable", ImportHiveTableHandler.class, "importHiveTable",
        "Import Hive table into an H2O Frame.");

    context.registerEndpoint("saveToHiveTable", "POST /3/SaveToHiveTable", SaveToHiveTableHandler.class, "saveToHiveTable",
        "Save an H2O Frame's contents into a Hive table.");

    context.registerEndpoint("guessParseSetup", "POST /3/ParseSetup", ParseSetupHandler.class, "guessSetup",
        "Guess the parameters for parsing raw byte-oriented data into an H2O Frame.");

    context.registerEndpoint("parse", "POST /3/Parse", ParseHandler.class, "parse",
        "Parse a raw byte-oriented Frame into a useful columnar data Frame."); // NOTE: prefer POST due to higher content limits

    context.registerEndpoint("setupDecryption", "POST /3/DecryptionSetup", DecryptionSetupHandler.class, "setupDecryption",
        "Install a decryption tool for parsing of encrypted data.");

    context.registerEndpoint("parseSvmLight", "POST /3/ParseSVMLight", ParseHandler.class, "parseSVMLight",
        "Parse a raw byte-oriented Frame into a useful columnar data Frame."); // NOTE: prefer POST due to higher content limits

    context.registerEndpoint("ping", "GET /3/Ping", PingHandler.class, "ping",
        "The endpoint used to let H2O know from external services that it should keep running.");

    // Admin
    context.registerEndpoint("cloudStatus", "GET /3/Cloud", CloudHandler.class, "status",
        "Determine the status of the nodes in the H2O cloud.");

    context.registerEndpoint("cloudStatusMinimal", "HEAD /3/Cloud", CloudHandler.class, "head",
        "Determine the status of the nodes in the H2O cloud.");

    context.registerEndpoint("cloudLock", "POST /3/CloudLock", CloudLockHandler.class, "lock",
        "Lock the cloud.");

    context.registerEndpoint("jobs", "GET /3/Jobs", JobsHandler.class, "list",
        "Get a list of all the H2O Jobs (long-running actions).");

    context.registerEndpoint("timeline", "GET /3/Timeline", TimelineHandler.class, "fetch",
        "Debugging tool that provides information on current communication between nodes.");

    context.registerEndpoint("profiler", "GET /3/Profiler", ProfilerHandler.class, "fetch",
        "Report real-time profiling information for all nodes (sorted, aggregated stack traces).");

    context.registerEndpoint("stacktraces", "GET /3/JStack", JStackHandler.class, "fetch",
        "Report stack traces for all threads on all nodes.");

    context.registerEndpoint("testNetwork", "GET /3/NetworkTest", NetworkTestHandler.class, "fetch",
        "Run a network test to measure the performance of the cluster interconnect.");

    context.registerEndpoint("unlockAllKeys", "POST /3/UnlockKeys", UnlockKeysHandler.class, "unlock",
        "Unlock all keys in the H2O distributed K/V store, to attempt to recover from a crash.");

    context.registerEndpoint("shutdownCluster", "POST /3/Shutdown", ShutdownHandler.class, "shutdown",
        "Shut down the cluster.");

    // REST only, no html:
    context.registerEndpoint("about", "GET /3/About", AboutHandler.class, "get",
        "Return information about this H2O cluster.");

    context.registerEndpoint("endpoints", "GET /3/Metadata/endpoints", MetadataHandler.class, "listRoutes",
        "Return the list of (almost) all REST API endpoints.");

    context.registerEndpoint("endpoint", "GET /3/Metadata/endpoints/{path}", MetadataHandler.class, "fetchRoute",
        "Return the REST API endpoint metadata, including documentation, for the endpoint specified by path or index.");

    context.registerEndpoint("schemaForClass", "GET /3/Metadata/schemaclasses/{classname}", MetadataHandler.class, "fetchSchemaMetadataByClass",
        "Return the REST API schema metadata for specified schema class.");

    context.registerEndpoint("schema", "GET /3/Metadata/schemas/{schemaname}", MetadataHandler.class, "fetchSchemaMetadata",
        "Return the REST API schema metadata for specified schema.");

    context.registerEndpoint("schemas", "GET /3/Metadata/schemas", MetadataHandler.class, "listSchemas",
        "Return list of all REST API schemas.");

    context.registerEndpoint("typeaheadFileSuggestions", "GET /3/Typeahead/files", TypeaheadHandler.class, "files",
        "Typeahead handler for filename completion.");

    context.registerEndpoint("job", "GET /3/Jobs/{job_id}", JobsHandler.class, "fetch",
        "Get the status of the given H2O Job (long-running action).");

    context.registerEndpoint("cancelJob", "POST /3/Jobs/{job_id}/cancel", JobsHandler.class, "cancel",
        "Cancel a running job.");

    context.registerEndpoint("findInFrame", "GET /3/Find", FindHandler.class, "find",
        "Find a value within a Frame.");

    context.registerEndpoint("exportFrame_deprecated", "GET /3/Frames/{frame_id}/export/{path}/overwrite/{force}", FramesHandler.class, "export",
        "[DEPRECATED] Export a Frame to the given path with optional overwrite.");

    context.registerEndpoint("exportFrame", "POST /3/Frames/{frame_id}/export", FramesHandler.class, "export",
        "Export a Frame to the given path with optional overwrite.");

    context.registerEndpoint("saveFrame", "POST /3/Frames/{frame_id}/save", FramesHandler.class, "save",
        "Save frame data to the given path.");

    context.registerEndpoint("loadFrame", "POST /3/Frames/load", FramesHandler.class, "load",
        "Load a frame from data on given path.");

    context.registerEndpoint("frameColumnSummary", "GET /3/Frames/{frame_id}/columns/{column}/summary", FramesHandler.class, "columnSummary",
        "Return the summary metrics for a column, e.g. min, max, mean, sigma, percentiles, etc.");

    context.registerEndpoint("frameColumnDomain", "GET /3/Frames/{frame_id}/columns/{column}/domain", FramesHandler.class, "columnDomain",
        "Return the domains for the specified categorical column (\"null\" if the column is not a categorical).");

    context.registerEndpoint("frameColumn", "GET /3/Frames/{frame_id}/columns/{column}", FramesHandler.class, "column",
        "Return the specified column from a Frame.");

    context.registerEndpoint("frameColumns", "GET /3/Frames/{frame_id}/columns", FramesHandler.class, "columns",
        "Return all the columns from a Frame.");

    context.registerEndpoint("frameSummary", "GET /3/Frames/{frame_id}/summary", FramesHandler.class, "summary",
        "Return a Frame, including the histograms, after forcing computation of rollups.");

    context.registerEndpoint("lightFrame", "GET /3/Frames/{frame_id}/light", FramesHandler.class, "fetchLight",
        "Return basic information about a Frame to fill the client Rapids expression cache.");

    context.registerEndpoint("frame", "GET /3/Frames/{frame_id}", FramesHandler.class, "fetch",
        "Return the specified Frame.");

    context.registerEndpoint("frames", "GET /3/Frames", FramesHandler.class, "list",
        "Return all Frames in the H2O distributed K/V store.");

    context.registerEndpoint("deleteFrame", "DELETE /3/Frames/{frame_id}", FramesHandler.class, "delete",
        "Delete the specified Frame from the H2O distributed K/V store.");

    context.registerEndpoint("deleteAllFrames", "DELETE /3/Frames", FramesHandler.class, "deleteAll",
        "Delete all Frames from the H2O distributed K/V store.");

    context.registerEndpoint("frameChunks", "GET /3/FrameChunks/{frame_id}", FrameChunksHandler.class, "fetch",
        "Return information about chunks for a given frame.");

    // Handle models
    context.registerEndpoint("model", "GET /3/Models/{model_id}", ModelsHandler.class, "fetch",
        "Return the specified Model from the H2O distributed K/V store, optionally with the list of compatible Frames.");

    context.registerEndpoint("models", "GET /3/Models", ModelsHandler.class, "list",
        "Return all Models from the H2O distributed K/V store.");

    context.registerEndpoint("deleteModel", "DELETE /3/Models/{model_id}", ModelsHandler.class, "delete",
        "Delete the specified Model from the H2O distributed K/V store.");

    context.registerEndpoint("deleteAllModels", "DELETE /3/Models", ModelsHandler.class, "deleteAll",
        "Delete all Models from the H2O distributed K/V store.");

    // Get java code for models as
    context.registerEndpoint("modelPreview", "GET /3/Models.java/{model_id}/preview", ModelsHandler.class, "fetchPreview",
        "Return potentially abridged model suitable for viewing in a browser (currently only used for java model code).");

    // Register resource also with .java suffix since we do not want to break API
    context.registerEndpoint("modelJavaCode", "GET /3/Models.java/{model_id}", ModelsHandler.class, "fetchJavaCode",
        "[DEPRECATED] Return the stream containing model implementation in Java code.");

    context.registerEndpoint("modelMojo", "GET /3/Models/{model_id}/mojo", ModelsHandler.class, "fetchMojo",
        "Return the model in the MOJO format. This format can then be interpreted by gen_model.jar in order to " +
        "perform prediction / scoring. Currently works for GBM and DRF algos only.");

    context.registerEndpoint("modelBinary", "GET /3/Models.fetch.bin/{model_id}", ModelsHandler.class, "fetchBinaryModel",
        "Return the model in the binary format.");

    context.registerEndpoint("makePDP", "POST /3/PartialDependence/", ModelsHandler.class, "makePartialDependence",
        "Create data for partial dependence plot(s) for the specified model and frame.");

    context.registerEndpoint("makeFI", "POST /3/FeatureInteraction", ModelsHandler.class, "makeFeatureInteraction",
        "Fetch feature interaction data.");

    context.registerEndpoint("makeH", "POST /3/FriedmansPopescusH", ModelsHandler.class, "makeFriedmansPopescusH",
        "Fetch Friedman Popescus H.");

    context.registerEndpoint("makeRules", "POST /3/SignificantRules", ModelsHandler.class, "makeSignificantRulesTable",
        "Fetch significant rules table.");

    context.registerEndpoint("fetchPDP", "GET /3/PartialDependence/{name}", ModelsHandler.class, "fetchPartialDependence",
        "Fetch partial dependence data.");

    // Model serialization - import/export calls
    context.registerEndpoint("importModel", "POST /99/Models.bin/{model_id}", ModelsHandler.class, "importModel",
        "Import given binary model into H2O.");

    context.registerEndpoint("exportModel", "GET /99/Models.bin/{model_id}", ModelsHandler.class, "exportModel",
        "Export given model.");

    context.registerEndpoint("uploadModel", "POST /99/Models.upload.bin/{model_id}", ModelsHandler.class, "uploadModel",
        "Upload given binary model into H2O.");

    context.registerEndpoint("exportMojo", "GET /99/Models.mojo/{model_id}", ModelsHandler.class, "exportMojo",
        "Export given model as Mojo.");

    context.registerEndpoint("exportModelDetails", "GET /99/Models/{model_id}/json", ModelsHandler.class, "exportModelDetails",
        "Export given model details in json format.");

    context.registerEndpoint("grid", "GET /99/Grids/{grid_id}", GridsHandler.class, "fetch",
        "Return the specified grid search result.");

    context.registerEndpoint("grids", "GET /99/Grids", GridsHandler.class, "list",
        "Return all grids from H2O distributed K/V store.");

    context.registerEndpoint("newModelId", "POST /3/ModelBuilders/{algo}/model_id", ModelBuildersHandler.class, "calcModelId",
        "Return a new unique model_id for the specified algorithm.");

    context.registerEndpoint("modelBuilder", "GET /3/ModelBuilders/{algo}", ModelBuildersHandler.class, "fetch",
        "Return the Model Builder metadata for the specified algorithm.");

    context.registerEndpoint("modelBuilders", "GET /3/ModelBuilders", ModelBuildersHandler.class, "list",
        "Return the Model Builder metadata for all available algorithms.");

    // TODO: filtering isn't working for these first four; we get all results:
    context.registerEndpoint("_mmFetch1", "GET /3/ModelMetrics/models/{model}/frames/{frame}", ModelMetricsHandler.class, "fetch",
        "Return the saved scoring metrics for the specified Model and Frame.");

    context.registerEndpoint("_mmDelete1", "DELETE /3/ModelMetrics/models/{model}/frames/{frame}", ModelMetricsHandler.class, "delete",
        "Return the saved scoring metrics for the specified Model and Frame.");

    context.registerEndpoint("_mmFetch2", "GET /3/ModelMetrics/models/{model}", ModelMetricsHandler.class, "fetch",
        "Return the saved scoring metrics for the specified Model.");

    context.registerEndpoint("_mmFetch3", "GET /3/ModelMetrics/frames/{frame}/models/{model}", ModelMetricsHandler.class, "fetch",
        "Return the saved scoring metrics for the specified Model and Frame.");

    context.registerEndpoint("_mmDelete2", "DELETE /3/ModelMetrics/frames/{frame}/models/{model}", ModelMetricsHandler.class, "delete",
        "Return the saved scoring metrics for the specified Model and Frame.");

    context.registerEndpoint("_mmFetch4", "GET /3/ModelMetrics/frames/{frame}", ModelMetricsHandler.class, "fetch",
        "Return the saved scoring metrics for the specified Frame.");

    context.registerEndpoint("_mmFetch5", "GET /3/ModelMetrics", ModelMetricsHandler.class, "fetch",
        "Return all the saved scoring metrics.");

    context.registerEndpoint("score", "POST /3/ModelMetrics/models/{model}/frames/{frame}", ModelMetricsHandler.class, "score",
        "Return the scoring metrics for the specified Frame with the specified Model. If the Frame has already been " +
        "scored with the Model then cached results will be returned; otherwise predictions for all rows in the Frame " +
        "will be generated and the metrics will be returned.");

    context.registerEndpoint("predict", "POST /3/Predictions/models/{model}/frames/{frame}", ModelMetricsHandler.class, "predict",
        "Score (generate predictions) for the specified Frame with the specified Model. Both the Frame of " +
        "predictions and the metrics will be returned.");

    context.registerEndpoint("predict_async", "POST /4/Predictions/models/{model}/frames/{frame}", ModelMetricsHandler.class, "predictAsync",
        "Score (generate predictions) for the specified Frame with the specified Model. Both the Frame of " +
        "predictions and the metrics will be returned.");

    context.registerEndpoint("makeMetrics", "POST /3/ModelMetrics/predictions_frame/{predictions_frame}/actuals_frame/{actuals_frame}", ModelMetricsHandler.class, "make",
        "Create a ModelMetrics object from the predicted and actual values, and a domain for classification problems " +
        "or a distribution family for regression problems.");

    context.registerEndpoint("waterMeterCpuTicks", "GET /3/WaterMeterCpuTicks/{nodeidx}", WaterMeterCpuTicksHandler.class, "fetch",
        "Return a CPU usage snapshot of all cores of all nodes in the H2O cluster.");

    context.registerEndpoint("waterMeterIoForNode", "GET /3/WaterMeterIo/{nodeidx}", WaterMeterIoHandler.class, "fetch",
        "Return IO usage snapshot of all nodes in the H2O cluster.");

    context.registerEndpoint("waterMeterIoForCluster", "GET /3/WaterMeterIo", WaterMeterIoHandler.class, "fetch_all",
        "Return IO usage snapshot of all nodes in the H2O cluster.");

    // Node persistent storage
    context.registerEndpoint("npsContains", "GET /3/NodePersistentStorage/categories/{category}/names/{name}/exists", NodePersistentStorageHandler.class, "exists",
        "Return true or false.");

    context.registerEndpoint("npsExistsCategory", "GET /3/NodePersistentStorage/categories/{category}/exists", NodePersistentStorageHandler.class, "exists",
        "Return true or false.");

    context.registerEndpoint("npsEnabled", "GET /3/NodePersistentStorage/configured", NodePersistentStorageHandler.class, "configured",
        "Return true or false.");

    context.registerEndpoint("npsPut", "POST /3/NodePersistentStorage/{category}/{name}", NodePersistentStorageHandler.class, "put_with_name",
        "Store a named value.");

    context.registerEndpoint("npsGet", "GET /3/NodePersistentStorage/{category}/{name}", NodePersistentStorageHandler.class, "get_as_string",
        "Return value for a given name.");

    context.registerEndpoint("npsRemove", "DELETE /3/NodePersistentStorage/{category}/{name}", NodePersistentStorageHandler.class, "delete",
        "Delete a key.");

    context.registerEndpoint("npsCreateCategory", "POST /3/NodePersistentStorage/{category}", NodePersistentStorageHandler.class, "put",
        "Store a value.");

    context.registerEndpoint("npsKeys", "GET /3/NodePersistentStorage/{category}", NodePersistentStorageHandler.class, "list",
        "Return all keys stored for a given category.");

    // TODO: context.registerEndpoint("DELETE /3/ModelMetrics/models/{model}/frames/{frame}", ModelMetricsHandler.class, "delete");
    // TODO: context.registerEndpoint("DELETE /3/ModelMetrics/frames/{frame}/models/{model}", ModelMetricsHandler.class, "delete");
    // TODO: context.registerEndpoint("DELETE /3/ModelMetrics/frames/{frame}", ModelMetricsHandler.class, "delete");
    // TODO: context.registerEndpoint("DELETE /3/ModelMetrics/models/{model}", ModelMetricsHandler.class, "delete");
    // TODO: context.registerEndpoint("DELETE /3/ModelMetrics", ModelMetricsHandler.class, "delete");
    // TODO: context.registerEndpoint("POST /3/Predictions/models/{model}/frames/{frame}", ModelMetricsHandler.class, "predict");

    // Log file management.
    // Note: Hacky pre-route cutout of "/3/Logs/download" is done above in a non-json way.
    context.registerEndpoint("logs", "GET /3/Logs/nodes/{nodeidx}/files/{name}", LogsHandler.class, "fetch",
        "Get named log file for a node.");

    // ModelBuilder Handler registration must be done for each algo in the application class
    // (e.g., H2OApp), because the Handler class is parameterized by the associated Schema,
    // and this is different for each ModelBuilder in order to handle its parameters in a
    // typesafe way:
    //
    //   context.registerEndpoint("POST /3/ModelBuilders/{algo}", ModelBuildersHandler.class, "train", "Train {algo}");
    //
    context.registerEndpoint("logThreadDump", "GET /3/KillMinus3", KillMinus3Handler.class, "killm3",
        "Kill minus 3 on *this* node");

    context.registerEndpoint("rapidsExec", "POST /99/Rapids", RapidsHandler.class, "exec",
        "Execute a Rapids AstRoot.");

    context.registerEndpoint("_assembly_toJava", "GET /99/Assembly.java/{assembly_id}/{file_name}", AssemblyHandler.class, "toJava",
        "Generate a Java POJO from the Assembly");

    context.registerEndpoint("_assembly_fit", "POST /99/Assembly", AssemblyHandler.class, "fit",
        "Fit an assembly to an input frame");

    context.registerEndpoint("_downloadDataset_fetch", "GET /3/DownloadDataset", DownloadDataHandler.class, "fetch",
        "Download dataset as a CSV.");

    context.registerEndpoint("_downloadDataset_fetchStreaming", "GET /3/DownloadDataset.bin", DownloadDataHandler.class, "fetchStreaming",
        "Download dataset as a CSV.");

    context.registerEndpoint("deleteKey", "DELETE /3/DKV/{key}", RemoveHandler.class, "remove",
        "Remove an arbitrary key from the H2O distributed K/V store.");

    context.registerEndpoint("deleteAllKeys", "DELETE /3/DKV", RemoveAllHandler.class, "remove",
        "Remove all keys from the H2O distributed K/V store.");

    context.registerEndpoint("logAndEcho", "POST /3/LogAndEcho", LogAndEchoHandler.class, "echo",
        "Save a message to the H2O logfile.");

    context.registerEndpoint("newSession", "GET /3/InitID", RapidsHandler.class, "startSession",
        "Issue a new session ID.");

    context.registerEndpoint("endSession", "DELETE /3/InitID", RapidsHandler.class, "endSession",
        "End a session.");

    context.registerEndpoint("setSessionProperty", "POST /3/SessionProperties", RapidsHandler.class, "setSessionProperty",
        "Set session property.");

    context.registerEndpoint("getSessionProperty", "GET /3/SessionProperties", RapidsHandler.class, "getSessionProperty",
        "Get session property.");

    context.registerEndpoint("garbageCollect", "POST /3/GarbageCollect", GarbageCollectHandler.class, "gc",
        "Explicitly call System.gc().");

    context.registerEndpoint("_sample_status", "GET /99/Sample", CloudHandler.class, "status",
        "Example of an experimental endpoint. Call via /EXPERIMENTAL/Sample. Experimental endpoints can change at " +
        "any moment.");

    context.registerEndpoint("rapids_help", "GET /99/Rapids/help", RapidsHandler.class, "genHelp",
        "Produce help for Rapids AstRoot language.");

    // Endpoints for Steam to manage H2O.
    context.registerEndpoint("steamMetrics", "GET /3/SteamMetrics", SteamMetricsHandler.class, "fetch",
        "Get metrics for Steam from H2O.");

    context.registerEndpoint("list_all_capabilities", "GET /3/Capabilities", CapabilitiesHandler.class, "listAll",
        "List of all registered capabilities");

    context.registerEndpoint("list_core_capabilities", "GET /3/Capabilities/Core", CapabilitiesHandler.class, "listCore",
        "List registered core capabilities");

    context.registerEndpoint("list_rest_capabilities", "GET /3/Capabilities/API", CapabilitiesHandler.class, "listRest",
        "List of all registered Rest API capabilities");

    context.registerEndpoint("import_grid", "POST /3/Grid.bin/import", GridImportExportHandler.class, "importGrid",
        "Import a previously saved grid model");

    context.registerEndpoint("export_grid", "POST /3/Grid.bin/{grid_id}/export", GridImportExportHandler.class, "exportGrid",
        "Export a Grid and its models.");

    context.registerEndpoint("recovery_resume", "POST /3/Recovery/resume", RecoveryHandler.class, "resume",
        "Recover stored state and resume interrupted job.");
  }

  @Override
  public String getName() {
    return "Core V3";
  }
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/RegisterV4Api.java
package water.api;

/**
 * Master-class for v4 REST APIs
 */
public class RegisterV4Api extends AbstractRegister {

  @Override
  public void registerEndPoints(RestApiContext context) {
    //------------ Metadata: endpoints and schemas -------------------------------------------------------------------
    context.registerEndpoint("endpoints4",
        "GET /4/endpoints", MetadataHandler.class, "listRoutes4",
        "Returns the list of all REST API (v4) endpoints.");

    //------------ Rapids --------------------------------------------------------------------------------------------
    context.registerEndpoint("POST /4/sessions", RapidsHandler.StartSession4.class);

    context.registerEndpoint("endSession4",
        "DELETE /4/sessions/{session_key}", RapidsHandler.class, "endSession",
        "Close the Rapids session.");

    //------------ Models --------------------------------------------------------------------------------------------
    context.registerEndpoint("modelsInfo",
        "GET /4/modelsinfo", ModelBuildersHandler.class, "modelsInfo",
        "Return basic information about all models available to train.");

    //------------ Frames --------------------------------------------------------------------------------------------
    context.registerEndpoint("POST /4/Frames/$simple", CreateFrameHandler.CreateSimpleFrame.class);

    //------------ Jobs ----------------------------------------------------------------------------------------------
    context.registerEndpoint("GET /4/jobs/{job_id}", JobsHandler.FetchJob.class);
  }

  @Override
  public String getName() {
    return "Core V4";
  }
}
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/RemoveAllHandler.java
package water.api;

import water.*;
import water.api.schemas3.KeyV3;
import water.api.schemas3.RemoveAllV3;
import water.util.Log;

// Best-effort cluster brain-wipe and reset.
// Useful between unrelated tests.
public class RemoveAllHandler extends Handler {

  @SuppressWarnings("unused") // called through reflection by RequestServer
  public RemoveAllV3 remove(int version, RemoveAllV3 u) {
    Futures fs = new Futures();
    // Cancel and remove leftover running jobs
    for (Job j : Job.jobs()) {
      j.stop_requested();
      j.remove(fs);
    }
    // Wipe out any and all session info
    if (RapidsHandler.SESSIONS != null) {
      for (String k : RapidsHandler.SESSIONS.keySet())
        (RapidsHandler.SESSIONS.get(k)).endQuietly(null);
      RapidsHandler.SESSIONS.clear();
    }
    fs.blockForPending();
    if (u.retained_keys != null && u.retained_keys.length != 0) {
      retainKeys(u.retained_keys);
    } else {
      clearAll();
    }
    Log.info("Finished removing objects");
    return u;
  }

  private void clearAll() {
    Log.info("Removing all objects");
    DKVManager.clear();
  }

  private void retainKeys(final KeyV3[] retained_keys) {
    Log.info(String.format("Removing all objects, except for %d provided key(s)", retained_keys.length));
    final Key[] retainedKeys;
    if (retained_keys == null) {
      retainedKeys = new Key[0];
    } else {
      retainedKeys = new Key[retained_keys.length];
      for (int i = 0; i < retainedKeys.length; i++) {
        if (retained_keys[i] == null)
          throw new IllegalArgumentException("An attempt to retain a 'null' key detected. Cleaning operation aborted.");
        retainedKeys[i] = retained_keys[i].key();
      }
    }
    DKVManager.retain(retainedKeys);
  }
}
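// This handler backs the "DELETE /3/DKV" route registered in RegisterV3Api above. A
// hypothetical invocation — passing retained_keys as a query parameter is an assumption:
//   wipe everything:                curl -X DELETE "http://localhost:54321/3/DKV"
//   wipe everything except one key: curl -X DELETE "http://localhost:54321/3/DKV?retained_keys=my_frame"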
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/RemoveHandler.java
package water.api; import water.*; import water.api.schemas3.RemoveV3; public class RemoveHandler extends Handler { @SuppressWarnings("unused") // called through reflection by RequestServer public RemoveV3 remove(int version, RemoveV3 u) { Keyed val = DKV.getGet(u.key.key()); if (val != null) { if (val instanceof Lockable) ((Lockable) val).delete(u.cascade); // Fails if object already locked else val.remove(u.cascade); // Unconditional delete } H2O.updateNotIdle(); return u; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/RequestServer.java
package water.api; import org.apache.commons.lang.exception.ExceptionUtils; import water.*; import water.api.schemas3.H2OErrorV3; import water.api.schemas3.H2OModelBuilderErrorV3; import water.api.schemas99.AssemblyV99; import water.exceptions.*; import water.init.NodePersistentStorage; import water.nbhm.NonBlockingHashMap; import water.rapids.Assembly; import water.server.ServletUtils; import water.util.*; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.io.*; import java.net.MalformedURLException; import java.util.*; import java.util.concurrent.atomic.AtomicLong; /** * This is a simple web server which accepts HTTP requests and routes them * to methods in Handler classes for processing. Schema classes are used to * provide a more stable external JSON interface while allowing the implementation * to evolve rapidly. As part of request handling the framework translates * back and forth between the stable external representation of objects (Schema) * and the less stable internal classes. * <p> * Request <i>routing</i> is done by searching a list of registered * handlers, in order of registration, for a handler whose path regex matches * the request URI and whose HTTP method (GET, POST, DELETE...) matches the * request's method. If none is found an HTTP 404 is returned. * <p> * A Handler class is parametrized by the kind of Schema that it accepts * for request handling, as well as the internal implementation class (Iced * class) that the Schema translates from and to. Handler methods are allowed to * return other Schema types than in the type parameter if that makes * sense for a given request. For example, a prediction (scoring) call on * a Model might return a Frame schema. * <p> * When an HTTP request is accepted the framework does the following steps: * <ol> * <li>searches the registered handler methods for a matching URL and HTTP method</li> * <li>collects any parameters which are captured from the URI and adds them to the map of HTTP query parameters</li> * <li>creates an instance of the correct Handler class and calls handle() on it, passing the version, route and params</li> * <li>Handler.handle() creates the correct Schema object given the version and calls fillFromParms(params) on it</li> * <li>calls schema.createImpl() to create a schema-independent "back end" object</li> * <li>dispatches to the handler method, passing in the schema-independent impl object and returning the result Schema object</li> * </ol> * * @see water.api.Handler * @see water.api.RegisterV3Api */ public class RequestServer extends HttpServlet { // TODO: merge doGeneric() and serve() // Originally we had RequestServer based on NanoHTTPD. At some point we switched to JettyHTTPD, but there are // still some leftovers from the Nano times. // TODO: invoke DatasetServlet, PostFileServlet and NpsBinServlet using standard Routes // Right now those 3 servlets are handling 5 "special" api endpoints from JettyHTTPD, and we also have several // "special" endpoints in maybeServeSpecial(). We don't want them to be special. The Route class should be // made flexible enough to generate responses of various kinds, and then all of those "special" cases would // become regular API calls. // TODO: Move JettyHTTPD.sendErrorResponse here, and combine with other error-handling functions // That method is only called from 3 servlets mentioned above, and we want to standardize the way how errors // are handled in different responses. 
// // Returned in REST API responses as X-h2o-rest-api-version-max // Do not bump to 4 until the API v4 is fully ready for release. public static final int H2O_REST_API_VERSION = 3; private static RouteTree routesTree = new RouteTree(""); private static ArrayList<Route> routesList = new ArrayList<>(150); public static int numRoutes() { return routesList.size(); } public static ArrayList<Route> routes() { return routesList; } public static Route lookupRoute(RequestUri uri) { return routesTree.lookup(uri, null); } private static HttpLogFilter[] _filters=new HttpLogFilter[]{defaultFilter()}; public static void setFilters(HttpLogFilter... filters) { _filters=filters; } /** * Some HTTP response status codes */ public static final String HTTP_OK = "200 OK", HTTP_CREATED = "201 Created", HTTP_ACCEPTED = "202 Accepted", HTTP_NO_CONTENT = "204 No Content", HTTP_PARTIAL_CONTENT = "206 Partial Content", HTTP_REDIRECT = "301 Moved Permanently", HTTP_NOT_MODIFIED = "304 Not Modified", HTTP_BAD_REQUEST = "400 Bad Request", HTTP_UNAUTHORIZED = "401 Unauthorized", HTTP_FORBIDDEN = "403 Forbidden", HTTP_NOT_FOUND = "404 Not Found", HTTP_BAD_METHOD = "405 Method Not Allowed", HTTP_PRECONDITION_FAILED = "412 Precondition Failed", HTTP_TOO_LONG_REQUEST = "414 Request-URI Too Long", HTTP_RANGE_NOT_SATISFIABLE = "416 Requested Range Not Satisfiable", HTTP_TEAPOT = "418 I'm a Teapot", HTTP_THROTTLE = "429 Too Many Requests", HTTP_INTERNAL_ERROR = "500 Internal Server Error", HTTP_NOT_IMPLEMENTED = "501 Not Implemented", HTTP_SERVICE_NOT_AVAILABLE = "503 Service Unavailable"; /** * Common mime types for dynamic content */ public static final String MIME_PLAINTEXT = "text/plain", MIME_HTML = "text/html", MIME_CSS = "text/css", MIME_JSON = "application/json", MIME_JS = "application/javascript", MIME_JPEG = "image/jpeg", MIME_PNG = "image/png", MIME_SVG = "image/svg+xml", MIME_GIF = "image/gif", MIME_WOFF = "application/x-font-woff", MIME_DEFAULT_BINARY = "application/octet-stream", MIME_XML = "text/xml"; /** * Calculates number of routes having the specified version. */ public static int numRoutes(int version) { int count = 0; for (Route route : routesList) if (route.getVersion() == version) count++; return count; } //------ Route Registration ------------------------------------------------------------------------------------------ /** * Register an HTTP request handler method for a given URL pattern, with parameters extracted from the URI. * <p> * URIs which match this pattern will have their parameters collected from the path and from the query params. * * @param api_name suggested method name for this endpoint in the external API library. These names should be * unique. If null, the api_name will be created from the class name and the handler method name. * @param method_uri combined method / url pattern of the request, e.g.: "GET /3/Jobs/{job_id}" * @param handler_class class which contains the handler method * @param handler_method name of the handler method * @param summary help string which explains the functionality of this endpoint * @see Route * @see water.api.RequestServer * @return the Route for this request */ public static Route registerEndpoint( String api_name, String method_uri, Class<?
extends Handler> handler_class, String handler_method, String summary ) { String[] spl = method_uri.split(" "); assert spl.length == 2 : "Unexpected method_uri parameter: " + method_uri; return registerEndpoint(api_name, spl[0], spl[1], handler_class, handler_method, summary, HandlerFactory.DEFAULT); } /** * @param api_name suggested method name for this endpoint in the external API library. These names should be * unique. If null, the api_name will be created from the class name and the handler method name. * @param http_method HTTP verb (GET, POST, DELETE) this handler will accept * @param url url path, possibly containing placeholders in curly braces, e.g: "/3/DKV/{key}" * @param handler_class class which contains the handler method * @param handler_method name of the handler method * @param summary help string which explains the functionality of this endpoint * @param handler_factory factory to create instance of handler (used by Sparkling Water) * @return the Route for this request */ public static Route registerEndpoint( String api_name, String http_method, String url, Class<? extends Handler> handler_class, String handler_method, String summary, HandlerFactory handler_factory ) { assert api_name != null : "api_name should not be null"; try { RequestUri uri = new RequestUri(http_method, url); Route route = new Route(uri, api_name, summary, handler_class, handler_method, handler_factory); routesTree.add(uri, route); routesList.add(route); return route; } catch (MalformedURLException e) { throw H2O.fail(e.getMessage()); } } /** * Register an HTTP request handler for the given URL pattern. * * @param method_uri combined method/url pattern of the endpoint, for * example: {@code "GET /3/Jobs/{job_id}"} * @param handler_clz class of the handler (should inherit from * {@link RestApiHandler}). */ public static Route registerEndpoint(String method_uri, Class<? 
extends RestApiHandler> handler_clz) { try { RestApiHandler handler = handler_clz.newInstance(); return registerEndpoint(handler.name(), method_uri, handler_clz, null, handler.help()); } catch (Exception e) { throw H2O.fail(e.getMessage()); } } //------ Handling Requests ------------------------------------------------------------------------------------------- @Override protected void doTrace(HttpServletRequest req, HttpServletResponse resp) { throw new UnsupportedOperationException("TRACE method is not supported"); // TRACE method is blocked by GateHandler, this makes sure the request doesn't accidentally sneak in } @Override protected void doGet(HttpServletRequest rq, HttpServletResponse rs) { doGeneric("GET", rq, rs); } @Override protected void doPut(HttpServletRequest rq, HttpServletResponse rs) { doGeneric("PUT", rq, rs); } @Override protected void doPost(HttpServletRequest rq, HttpServletResponse rs) { doGeneric("POST", rq, rs); } @Override protected void doHead(HttpServletRequest rq, HttpServletResponse rs) { doGeneric("HEAD", rq, rs); } @Override protected void doDelete(HttpServletRequest rq, HttpServletResponse rs) { doGeneric("DELETE", rq, rs); } @Override protected void doOptions(HttpServletRequest rq, HttpServletResponse rs) { if (System.getProperty(H2O.OptArgs.SYSTEM_DEBUG_CORS) != null) { rs.setHeader("Access-Control-Allow-Origin", "*"); rs.setHeader("Access-Control-Allow-Headers", "Content-Type"); rs.setStatus(HttpServletResponse.SC_OK); } } /** * Top-level dispatch handling */ public void doGeneric(String method, HttpServletRequest request, HttpServletResponse response) { try { String userAgent = request.getHeader("User-Agent"); String sessionKey = request.getHeader("Session-Key"); ServletUtils.startTransaction(userAgent, sessionKey); // Note that getServletPath does an un-escape so that the %24 of job ids are turned into $ characters. String uri = request.getServletPath(); Properties headers = new Properties(); Enumeration<String> en = request.getHeaderNames(); while (en.hasMoreElements()) { String key = en.nextElement(); String value = request.getHeader(key); headers.put(key, value); } final String contentType = request.getContentType(); Properties parms = new Properties(); String postBody = null; if (System.getProperty(H2O.OptArgs.SYSTEM_PROP_PREFIX + "debug.cors") != null) { response.setHeader("Access-Control-Allow-Origin", "*"); response.setHeader("Access-Control-Allow-Headers", "Content-Type"); } if (contentType != null && contentType.startsWith(MIME_JSON)) { StringBuffer jb = new StringBuffer(); String line = null; try { BufferedReader reader = request.getReader(); while ((line = reader.readLine()) != null) jb.append(line); } catch (Exception e) { throw new H2OIllegalArgumentException("Exception reading POST body JSON for URL: " + uri); } postBody = jb.toString(); } else { // application/x-www-form-urlencoded Map<String, String[]> parameterMap; parameterMap = request.getParameterMap(); for (Map.Entry<String, String[]> entry : parameterMap.entrySet()) { String key = entry.getKey(); String[] values = entry.getValue(); if (values.length == 1) { parms.put(key, values[0]); } else if (values.length > 1) { StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (String value : values) { if (!first) sb.append(","); sb.append("\"").append(value).append("\""); first = false; } sb.append("]"); parms.put(key, sb.toString()); } } } // Make serve() call.
NanoResponse resp = serve(uri, method, headers, parms, postBody); // Un-marshal Nano response back to Jetty. String choppedNanoStatus = resp.status.substring(0, 3); assert (choppedNanoStatus.length() == 3); int sc = Integer.parseInt(choppedNanoStatus); ServletUtils.setResponseStatus(response, sc); response.setContentType(resp.mimeType); Properties header = resp.header; Enumeration<Object> en2 = header.keys(); while (en2.hasMoreElements()) { String key = (String) en2.nextElement(); String value = header.getProperty(key); response.setHeader(key, value); } resp.writeTo(response.getOutputStream()); } catch (Error e) { try { // Send the full stack trace as a message to the client directly. Jetty's default error response in ServletHandler // is made inactive by sending the error here, as the response is then marked as committed. response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ExceptionUtils.getFullStackTrace(e)); // After the response is sent, the exception is re-thrown to finish the process as it would without this interception throw e; } catch (IOException ex) { ServletUtils.setResponseStatus(response, 500); Log.err(e); } } catch (IOException e) { ServletUtils.setResponseStatus(response, 500); Log.err(e); // Trying to send an error message or stack trace will produce another IOException... } finally { ServletUtils.logRequest(method, request, response); // Handle shutdown if it was requested. if (H2O.getShutdownRequested()) { (new Thread() { public void run() { boolean [] confirmations = new boolean[H2O.CLOUD.size()]; if (H2O.SELF.index() >= 0) { confirmations[H2O.SELF.index()] = true; } for(H2ONode n:H2O.CLOUD._memary) { if(n != H2O.SELF) new RPC<>(n, new UDPRebooted.ShutdownTsk(H2O.SELF,n.index(), 1000, confirmations, 0)).call(); } try { Thread.sleep(2000); } catch (Exception ignore) {} int failedToShutdown = 0; // shutdown failed for(boolean b:confirmations) if(!b) failedToShutdown++; Log.info("Orderly shutdown: " + (failedToShutdown > 0? failedToShutdown + " nodes failed to shut down! ":"") + " Shutting down now."); H2O.closeAll(); H2O.exit(failedToShutdown); } }).start(); } ServletUtils.endTransaction(); } } /** * Subsequent handling of the dispatch */ public static NanoResponse serve(String url, String method, Properties header, Properties parms, String post_body) { boolean hideParameters = true; try { // Jack priority for user-visible requests Thread.currentThread().setPriority(Thread.MAX_PRIORITY - 1); RequestType type = RequestType.requestType(url); RequestUri uri = new RequestUri(method, url); // Log the request hideParameters = maybeLogRequest(uri, header, parms); // For certain "special" requests that produce non-JSON payloads we require special handling. NanoResponse special = maybeServeSpecial(uri); if (special != null) return special; // Determine the Route corresponding to this request, and also fill in {parms} with the path parameters Route route = routesTree.lookup(uri, parms); //----- DEPRECATED API handling ------------ // These APIs are broken, because they lead users to create invalid URLs.
For example the endpoint // /3/Frames/{frameid}/export/{path}/overwrite/{force} // is invalid, because it leads to URLs like this: // /3/Frames/predictions_9bd5_GLM_model_R_1471148_36_on_RTMP_sid_afec_27/export//tmp/pred.csv/overwrite/TRUE // Here both the {frame_id} and {path} usually contain "/" (making them non-tokens), they may contain other // special characters not valid within URLs (for example if filename is not in ASCII); finally the use of strings // to represent booleans creates ambiguities: should I write "true", "True", "TRUE", or perhaps "1"? // // TODO These should be removed as soon as possible... if (route == null && url.startsWith("/3/Frames/")) { // /3/Frames/{frame_id}/export/{path}/overwrite/{force} if ((url.toLowerCase().endsWith("/overwrite/true") || url.toLowerCase().endsWith("/overwrite/false")) && url.contains("/export/")) { int i = url.indexOf("/export/"); boolean force = url.toLowerCase().endsWith("true"); parms.put("frame_id", url.substring(10, i)); parms.put("path", url.substring(i+8, url.length()-15-(force?0:1))); parms.put("force", force? "true" : "false"); route = findRouteByApiName("exportFrame_deprecated"); } // /3/Frames/{frame_id}/export else if (url.endsWith("/export")) { parms.put("frame_id", url.substring(10, url.length()-7)); route = findRouteByApiName("exportFrame"); } // /3/Frames/{frame_id}/columns/{column}/summary else if (url.endsWith("/summary") && url.contains("/columns/")) { int i = url.indexOf("/columns/"); parms.put("frame_id", url.substring(10, i)); parms.put("column", url.substring(i+9, url.length()-8)); route = findRouteByApiName("frameColumnSummary"); } // /3/Frames/{frame_id}/columns/{column}/domain else if (url.endsWith("/domain") && url.contains("/columns/")) { int i = url.indexOf("/columns/"); parms.put("frame_id", url.substring(10, i)); parms.put("column", url.substring(i+9, url.length()-7)); route = findRouteByApiName("frameColumnDomain"); } // /3/Frames/{frame_id}/columns/{column} else if (url.contains("/columns/")) { int i = url.indexOf("/columns/"); parms.put("frame_id", url.substring(10, i)); parms.put("column", url.substring(i+9)); route = findRouteByApiName("frameColumn"); } // /3/Frames/{frame_id}/summary else if (url.endsWith("/summary")) { parms.put("frame_id", url.substring(10, url.length()-8)); route = findRouteByApiName("frameSummary"); } // /3/Frames/{frame_id}/light else if (url.endsWith("/light")) { parms.put("frame_id", url.substring(10, url.length()-"/light".length())); route = findRouteByApiName("lightFrame"); } // /3/Frames/{frame_id}/columns else if (url.endsWith("/columns")) { parms.put("frame_id", url.substring(10, url.length()-8)); route = findRouteByApiName("frameColumns"); } // /3/Frames/{frame_id} else { parms.put("frame_id", url.substring(10)); route = findRouteByApiName(method.equals("DELETE")? 
"deleteFrame" : "frame"); } } else if (url.startsWith("/3/ModelMetrics/predictions_frame/")){ route = findRouteByApiName("makeMetrics"); } //------------------------------------------ if (route == null) { // if the request is not known, treat as resource request, or 404 if not found if (uri.isGetMethod()) if (H2O.ARGS.disable_flow) return response404Plain(method + " " + url, "Access to H2O Flow is disabled"); else return getResource(type, url); else return response404(method + " " + url); } else { Schema response = route._handler.handle(uri.getVersion(), route, parms, post_body); PojoUtils.filterFields(response, (String)parms.get("_include_fields"), (String)parms.get("_exclude_fields")); return serveSchema(response, type); } } catch (H2OFailException e) { H2OError error = e.toH2OError(url); Log.fatal("Caught exception (fatal to the cluster): " + error.toString()); throw H2O.fail(serveError(error).toString()); } catch (H2OModelBuilderIllegalArgumentException e) { H2OModelBuilderError error = e.toH2OError(url); Log.warn("Caught exception: " + error.toString()); return serveSchema(new H2OModelBuilderErrorV3().fillFromImpl(error), RequestType.json); } catch (H2OAbstractRuntimeException e) { H2OError error = e.toH2OError(url); Log.warn("Caught exception: " + error.toString()); return serveError(error); } catch (AssertionError e) { H2OError error = new H2OError( System.currentTimeMillis(), url, e.toString(), e.toString(), HttpResponseStatus.INTERNAL_SERVER_ERROR.getCode(), new IcedHashMapGeneric.IcedHashMapStringObject(), e); Log.err("Caught assertion error: " + error.toString()); return serveError(error); } catch (Exception e) { // make sure that no Exception is ever thrown out from the request H2OError error = new H2OError(e, url); // some special cases for which we return 400 because it's likely a problem with the client request: if (e instanceof IllegalArgumentException || e instanceof FileNotFoundException || e instanceof MalformedURLException) error._http_status = HttpResponseStatus.BAD_REQUEST.getCode(); String parmsInfo = hideParameters ? "<hidden>" : String.valueOf(parms); Log.err("Caught exception: " + error.toString() + ";parms=" + parmsInfo); return serveError(error); } } /** * Log the request (unless it's an overly common one). * @return flag whether the request parameters might be sensitive or not */ private static boolean maybeLogRequest(RequestUri uri, Properties header, Properties parms) { LogFilterLevel level = LogFilterLevel.LOG; for (HttpLogFilter f : _filters) level = level.reduce(f.filter(uri, header, parms)); switch (level) { case DO_NOT_LOG: return false; // do not log the request by default but allow parameters to be logged on exceptional completion case URL_ONLY: Log.info(uri, ", parms: <hidden>"); return true; // parameters are sensitive - never log them default: Log.info(uri + ", parms: " + parms); return false; } } public enum LogFilterLevel { LOG(0), URL_ONLY(1), DO_NOT_LOG(Integer.MAX_VALUE); private int level; LogFilterLevel(int level) { this.level = level; } LogFilterLevel reduce(LogFilterLevel other) { if (other.level > this.level) { return other; } else { return this; } } } /** * Create a new HttpLogFilter. * * Implement this interface to create new filters used by maybeLogRequest */ public interface HttpLogFilter { LogFilterLevel filter(RequestUri uri, Properties header, Properties parms); } /** * Provide the default filters for H2O's HTTP logging. 
* @return the default HttpLogFilter instance */ public static HttpLogFilter defaultFilter() { return new HttpLogFilter() { // this is much prettier with 1.8 lambdas @Override public LogFilterLevel filter(RequestUri uri, Properties header, Properties parms) { // static web content String url = uri.getUrl(); if (url.endsWith(".css") || url.endsWith(".js") || url.endsWith(".png") || url.endsWith(".ico") ) { return LogFilterLevel.DO_NOT_LOG; } // endpoints that might take sensitive parameters (passwords and other credentials) String[] path = uri.getPath(); if (path[2].equals("PersistS3") || path[2].equals("ImportSQLTable") || path[2].equals("DecryptionSetup") ) { return LogFilterLevel.URL_ONLY; } // endpoints that are called very frequently AND DON'T accept any sensitive information in parameters if (path[2].equals("Cloud") || path[2].equals("Jobs") && uri.isGetMethod() || path[2].equals("Log") || path[2].equals("Progress") || path[2].equals("Typeahead") || path[2].equals("WaterMeterCpuTicks") || path[2].equals("Ping") ) { return LogFilterLevel.DO_NOT_LOG; } return LogFilterLevel.LOG; } }; } //------ Lookup tree for Routes -------------------------------------------------------------------------------------- private static class RouteTree { private String root; private boolean isWildcard; private HashMap<String, RouteTree> branches; private Route leaf; public RouteTree(String token) { isWildcard = isWildcardToken(token); root = isWildcard ? "*" : token; branches = new HashMap<>(); leaf = null; } public void add(RequestUri uri, Route route) { String[] path = uri.getPath(); addByPath(path, 0, route); } public Route lookup(RequestUri uri, Properties parms) { if (!uri.isApiUrl()) return null; String[] path = uri.getPath(); ArrayList<String> path_params = new ArrayList<>(3); Route route = this.lookupByPath(path, 0, path_params); // Fill in the path parameters if (parms != null && route != null) { String[] param_names = route._path_params; assert path_params.size() == param_names.length; for (int i = 0; i < param_names.length; i++) parms.put(param_names[i], path_params.get(i)); } return route; } private void addByPath(String[] path, int index, Route route) { if (index + 1 < path.length) { String nextToken = isWildcardToken(path[index+1])? "*" : path[index+1]; if (!branches.containsKey(nextToken)) branches.put(nextToken, new RouteTree(nextToken)); branches.get(nextToken).addByPath(path, index + 1, route); } else { assert leaf == null : "Duplicate path encountered: " + Arrays.toString(path); leaf = route; } } private Route lookupByPath(String[] path, int index, ArrayList<String> path_params) { assert isWildcard || root.equals(path[index]); if (index + 1 < path.length) { String nextToken = path[index+1]; // First attempt an exact match if (branches.containsKey(nextToken)) { Route route = branches.get(nextToken).lookupByPath(path, index+1, path_params); if (route != null) return route; } // Then match against a wildcard if (branches.containsKey("*")) { path_params.add(path[index+1]); Route route = branches.get("*").lookupByPath(path, index + 1, path_params); if (route != null) return route; path_params.remove(path_params.size() - 1); } // If we are at the deepest level of the tree and no match was found, attempt to look for alternative versions. // For example, if the user requests /4/About, and we only have /3/About, then we should deliver that version // instead.
if (index == path.length - 2) { int v = Integer.parseInt(nextToken); for (String key : branches.keySet()) { if (branches.get(key).leaf == null) continue; if (Integer.parseInt(key) <= v) { // We also create a new branch in the tree to memorize this new route path. RouteTree newBranch = new RouteTree(nextToken); newBranch.leaf = branches.get(key).leaf; branches.put(nextToken, newBranch); return newBranch.leaf; } } } } else { return leaf; } return null; } private static boolean isWildcardToken(String token) { return token.equals("*") || token.startsWith("{") && token.endsWith("}"); } } private static Route findRouteByApiName(String apiName) { for (Route route : routesList) { if (route._api_name.equals(apiName)) return route; } return null; } //------ Handling of Responses --------------------------------------------------------------------------------------- /** * Handle any URLs that bypass the standard route approach. This is stuff that has abnormal non-JSON response * payloads. * @param uri RequestUri object of the incoming request. * @return Response object, or null if the request does not require any special handling. */ private static NanoResponse maybeServeSpecial(RequestUri uri) { assert uri != null; if (uri.isHeadMethod()) { // Blank response used by R's uri.exists("/") if (uri.getUrl().equals("/")) return new NanoResponse(HTTP_OK, MIME_PLAINTEXT, ""); } if (uri.isGetMethod()) { // url "/3/Foo/bar" => path ["", "GET", "Foo", "bar", "3"] String[] path = uri.getPath(); if (path[2].equals("")) return redirectToFlow(); if (path[2].equals("Logs") && path[3].equals("download")) { // if archive type was specified path will look like ["", "GET", "Logs", "download", "<TYPE>", 3] // if archive type was not specified it will be just ["", "GET", "Logs", "download", 3] boolean containerTypeSpecified = path.length >= 6; LogArchiveContainer container = containerTypeSpecified ? LogArchiveContainer.valueOf(path[4].toUpperCase()) : LogArchiveContainer.ZIP; // use ZIP as default return LogsHandler.downloadLogsViaRestAPI(container); } if (path[2].equals("NodePersistentStorage.bin") && path.length == 6) return downloadNps(path[3], path[4]); } return null; } private static NanoResponse response404Plain(String what, String description) { String message = what + " not found" + (description != null ? ": " + description : ""); return new NanoResponse(H2OError.httpStatusHeader( HttpResponseStatus.NOT_FOUND.getCode()), MIME_PLAINTEXT, message); } private static NanoResponse response404(String what) { H2ONotFoundArgumentException e = new H2ONotFoundArgumentException(what + " not found", what + " not found"); H2OError error = e.toH2OError(what); Log.warn(error._dev_msg); return serveError(error); } private static NanoResponse serveSchema(Schema s, RequestType type) { // Convert Schema to desired output flavor String http_response_header = H2OError.httpStatusHeader(HttpResponseStatus.OK.getCode()); // If we're given an http response code use it. 
if (s instanceof SpecifiesHttpResponseCode) { http_response_header = H2OError.httpStatusHeader(((SpecifiesHttpResponseCode) s).httpStatus()); } // If we've gotten an error always return the error as JSON if (s instanceof SpecifiesHttpResponseCode && HttpResponseStatus.OK.getCode() != ((SpecifiesHttpResponseCode) s).httpStatus()) { type = RequestType.json; } if (s instanceof H2OErrorV3) { return new NanoResponse(http_response_header, MIME_JSON, s.toJsonString()); } if (s instanceof StreamingSchema) { StreamingSchema ss = (StreamingSchema) s; StreamWriter sw = ss.getStreamWriter(); NanoResponse r = new NanoStreamResponse(http_response_header, MIME_DEFAULT_BINARY, sw); // Needed to make file name match class name r.addHeader("Content-Disposition", "attachment; filename=\"" + ss.getFilename() + "\""); return r; } // TODO: remove this entire switch switch (type) { case html: // return JSON for html requests case json: return new NanoResponse(http_response_header, MIME_JSON, s.toJsonBytes()); case xml: throw H2O.unimpl("Unknown type: " + type.toString()); case java: if (s instanceof AssemblyV99) { // TODO: fix the AssemblyV99 response handler so that it produces the appropriate StreamingSchema Assembly ass = DKV.getGet(((AssemblyV99) s).assembly_id); NanoResponse r = new NanoResponse(http_response_header, MIME_DEFAULT_BINARY, ass.toJava(((AssemblyV99) s).file_name)); r.addHeader("Content-Disposition", "attachment; filename=\""+JCodeGen.toJavaId(((AssemblyV99) s).file_name)+".java\""); return r; } else { throw new H2OIllegalArgumentException("Cannot generate java for type: " + s.getClass().getSimpleName()); } default: throw H2O.unimpl("Unknown type to serveSchema(): " + type); } } @SuppressWarnings(value = "unchecked") private static NanoResponse serveError(H2OError error) { // Note: don't use Schema.schema(version, error) because we have to work at bootstrap: return serveSchema(new H2OErrorV3().fillFromImpl(error), RequestType.json); } private static NanoResponse redirectToFlow() { NanoResponse res = new NanoResponse(HTTP_REDIRECT, MIME_PLAINTEXT, ""); res.addHeader("Location", H2O.ARGS.context_path + "/flow/index.html"); return res; } private static NanoResponse downloadNps(String categoryName, String keyName) { NodePersistentStorage nps = H2O.getNPS(); AtomicLong length = new AtomicLong(); InputStream is = nps.get(categoryName, keyName, length); NanoResponse res = new NanoResponse(HTTP_OK, MIME_DEFAULT_BINARY, is); res.addHeader("Content-Length", Long.toString(length.get())); res.addHeader("Content-Disposition", "attachment; filename=" + keyName + ".flow"); return res; } // cache of all loaded resources @SuppressWarnings("MismatchedQueryAndUpdateOfCollection") // remove this once TO-DO below is addressed private static final NonBlockingHashMap<String,byte[]> _cache = new NonBlockingHashMap<>(); // Returns the response containing the given uri with the appropriate mime type. private static NanoResponse getResource(RequestType request_type, String url) { byte[] bytes = _cache.get(url); if (bytes == null) { // Try-with-resource try (InputStream resource = water.init.JarHash.getResource2(url)) { if( resource != null ) { try { bytes = toByteArray(resource); } catch (IOException e) { Log.err(e); } // PP 06-06-2014 Disable caching for now so that the browser // always gets the latest sources and assets when h2o-client is rebuilt. // TODO need to rethink caching behavior when h2o-dev is merged into h2o. 
// // if (bytes != null) { // byte[] res = _cache.putIfAbsent(url, bytes); // if (res != null) bytes = res; // Racey update; take what is in the _cache //} // } } catch( IOException ignore ) { } } if (bytes == null || bytes.length == 0) // No resource found? return response404("Resource " + url); int i = url.lastIndexOf('.'); String mime; switch (url.substring(i + 1)) { case "js": mime = MIME_JS; break; case "css": mime = MIME_CSS; break; case "htm":case "html": mime = MIME_HTML; break; case "jpg":case "jpeg": mime = MIME_JPEG; break; case "png": mime = MIME_PNG; break; case "svg": mime = MIME_SVG; break; case "gif": mime = MIME_GIF; break; case "woff": mime = MIME_WOFF; break; default: mime = MIME_DEFAULT_BINARY; } NanoResponse res = new NanoResponse(HTTP_OK, mime, new ByteArrayInputStream(bytes)); res.addHeader("Content-Length", Long.toString(bytes.length)); return res; } // Convenience utility private static byte[] toByteArray(InputStream is) throws IOException { try (ByteArrayOutputStream os = new ByteArrayOutputStream()) { byte[] buffer = new byte[0x2000]; for (int len; (len = is.read(buffer)) != -1; ) os.write(buffer, 0, len); return os.toByteArray(); } } /** * Dummy REST API context which redirects calls to the static RequestServer API. */ public static class DummyRestApiContext implements RestApiContext { @Override public Route registerEndpoint(String apiName, String methodUri, Class<? extends Handler> handlerClass, String handlerMethod, String summary) { return RequestServer.registerEndpoint(apiName, methodUri, handlerClass, handlerMethod, summary); } @Override public Route registerEndpoint(String apiName, String httpMethod, String url, Class<? extends Handler> handlerClass, String handlerMethod, String summary, HandlerFactory handlerFactory) { return RequestServer.registerEndpoint(apiName, httpMethod, url, handlerClass, handlerMethod, summary, handlerFactory); } @Override public Route registerEndpoint(String methodUri, Class<? extends RestApiHandler> handlerClass) { return RequestServer.registerEndpoint(methodUri, handlerClass); } private Set<Schema> allSchemas = new HashSet<>(); @Override public void registerSchema(Schema... schemas) { for (Schema schema : schemas) { allSchemas.add(schema); } } public Schema[] getAllSchemas() { return allSchemas.toArray(new Schema[0]); } }; }
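// Editor's sketch (not part of the H2O sources) of the lookup half of the machinery above:
// RouteTree.lookupByPath() tries an exact token match first, then a wildcard ({job_id}-style)
// match, then the version fallback. The job id below is hypothetical.
static Route resolveExample() throws java.net.MalformedURLException {
  RequestUri incoming = new RequestUri("GET", "/3/Jobs/example_job_id");
  return RequestServer.lookupRoute(incoming); // null when no registered route matches
}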
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/RequestType.java
package water.api; /** Request type. * * Requests can have multiple types. Basic types include the plain json type * in which the result is returned as a JSON object, an html type that renders as * a web page, or the help type that displays the extended help for the * request. */ enum RequestType { json { @Override String requestName(String url) { String s = "." + toString(); int i = url.indexOf(s); if( i == -1 ) return url; // No, or default, type return url.substring(0,i)+url.substring(i+s.length()); } }, // json type request, the result is a JSON structure html , // webpage request help , // should display the help on the given request query, // Displays the query for the argument in html mode png , // image, e.g. plot txt , // text, e.g. a script java , // java program xml , // xml request ; private static final RequestType[] _values = values(); /** * Returns the request type of a given URL. Missing / unknown type defaults to JSON. */ static RequestType requestType(String url) { int i = url.indexOf('.'); if (i == -1) return json; // Default for no extension String s = url.substring(i+1); int idx = s.indexOf('/'); if (idx >= 0) { s = s.substring(0, idx); } for( RequestType t : _values ) if( s.equals(t.name()) ) return t; return json; // None of the above; use json } /** Returns the name of the request, that is the request url without the * request suffix. E.g. converts "/GBM.html/crunk" into "/GBM/crunk" */ String requestName(String url) { return url; } }
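// Editor's illustration (not part of the H2O sources) of the suffix parsing above; the enum
// is package-private, so this helper only compiles inside the water.api package.
static void requestTypeExamples() {
  assert RequestType.requestType("/3/Frames") == RequestType.json;               // no suffix: json
  assert RequestType.requestType("/99/Assembly.java/foo") == RequestType.java;   // suffix ends at '/'
  assert RequestType.requestType("/3/Frames.csv") == RequestType.json;           // unknown suffix: json
  assert RequestType.json.requestName("/3/Frames.json/x").equals("/3/Frames/x"); // suffix stripped
}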
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/RequestUri.java
package water.api; import java.net.MalformedURLException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; import java.util.Set; import java.util.regex.Pattern; /** * Parsed representation of an HTTP request: the method/URL pair, with lazy computation of the * path array used for RouteTree lookups. */ public class RequestUri { private static Pattern version_pattern = Pattern.compile("^/(?:\\d+|EXPERIMENTAL|LATEST)/.*", Pattern.CASE_INSENSITIVE); private static Set<String> http_methods = new HashSet<>(Arrays.asList("HEAD", "GET", "POST", "DELETE")); private String method; private String url; private String[] path; private boolean is_api_url; public RequestUri(String request_method, String request_url) throws MalformedURLException { if (!http_methods.contains(request_method)) throw new MalformedURLException("Bad HTTP method: " + request_method); method = request_method; url = request_url; is_api_url = version_pattern.matcher(request_url).matches(); path = null; } public String getUrl() { return url; } public boolean isApiUrl() { return is_api_url; } public String getMethod() { return method; } public boolean isGetMethod() { return method.equals("GET"); } public boolean isPostMethod() { return method.equals("POST"); } public boolean isHeadMethod() { return method.equals("HEAD"); } public String[] getPath() { computePathIfNeeded(); return path; } public String[] getParamsList() { computePathIfNeeded(); ArrayList<String> params_list = new ArrayList<>(); for (int i = 2; i < path.length; i++) if (path[i].startsWith("{") && path[i].endsWith("}")) params_list.add(path[i].substring(1, path[i].length()-1)); return params_list.toArray(new String[params_list.size()]); } public int getVersion() { computePathIfNeeded(); String ver = path[path.length - 1]; return ver.isEmpty()? 0 : Integer.parseInt(ver); } public String toString() { return method + " " + url; } /** * Convert the provided HTTP_method/URL pair into a "path" suitable for lookups in the RouteTree. This is mostly * equivalent to url.split("/"), with a few caveats: * - if the url contains "special" version (LATEST/EXPERIMENTAL), it will be replaced with its numeric value; * - the order of url chunks is modified: the version is always moved to the end, its place taken by http_method; * Examples: * "GET", "/3/Models/{model_id}" => ["", "GET", "Models", "{model_id}", "3"] * "GET", "/" => ["", "GET", ""] * First chunk is always "" because that is the root of the RouteTree. */ private void computePathIfNeeded() { if (path == null) { // This will make sure path array has one extra element in the end, where we will store the version string. // Pass -1 because otherwise split() removes any trailing empty strings. path = (url + "/").split("/", -1); assert path[0].isEmpty() && path.length >= 3; String ver = path[1].toUpperCase(); if (ver.equals("EXPERIMENTAL")) ver = ((Integer) SchemaServer.getExperimentalVersion()).toString(); if (ver.equals("LATEST")) ver = ((Integer) SchemaServer.getLatestOrHighestSupportedVersion()).toString(); // Old clients (h2o-2) tend to append .json suffix to the endpoint's name -- fixing that if (path[2].endsWith(".json")) path[2] = path[2].substring(0, path[2].length() - 5); path[1] = method; path[path.length - 1] = ver; } } }
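// Editor's illustration (not part of the H2O sources) of the path layout produced by
// computePathIfNeeded(), using the Javadoc's own example:
static void requestUriExample() throws java.net.MalformedURLException {
  RequestUri uri = new RequestUri("GET", "/3/Models/{model_id}");
  assert Arrays.equals(uri.getPath(), new String[]{"", "GET", "Models", "{model_id}", "3"});
  assert uri.getVersion() == 3;                                        // version moved to the end
  assert Arrays.equals(uri.getParamsList(), new String[]{"model_id"}); // {}-wrapped tokens
}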
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/RestApiContext.java
package water.api; /** * REST API registration interfaces. * * This is an abstraction layer between a REST server and registration * modules. */ public interface RestApiContext { Route registerEndpoint(String apiName, String methodUri, Class<? extends Handler> handlerClass, String handlerMethod, String summary); Route registerEndpoint(String apiName, String httpMethod, String url, Class<? extends Handler> handlerClass, String handlerMethod, String summary, HandlerFactory handlerFactory); Route registerEndpoint(String methodUri, Class<? extends RestApiHandler> handlerClass); void registerSchema(Schema... schemas); }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/RestApiExtension.java
package water.api; import water.AbstractH2OExtension; import java.util.List; /** * REST API registration endpoint. * * The interface should be implemented by clients that would like * to provide additional REST API endpoints. * * The registration is divided into two parts: * - register Handlers to expose a new REST API endpoint (e.g., /3/ModelBuilder/XGBoost/) * - register Schemas to provide a new definition of REST API input/output */ public interface RestApiExtension { /** * Register REST API endpoints. * * @param context registration context provided by the REST server */ void registerEndPoints(RestApiContext context); /** * Register REST API schemas. * * @param context registration context provided by the REST server */ void registerSchemas(RestApiContext context); /** Provide name of the REST API extension. */ String getName(); /** List of core extensions on which this REST API extension depends. */ List<String> getRequiredCoreExtensions(); }
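// Editor's sketch (not part of the H2O sources) of a third-party extension; the handler,
// method name and schema class below are hypothetical placeholders.
public class ExampleRestApiExtension implements RestApiExtension {
  @Override
  public void registerEndPoints(RestApiContext context) {
    context.registerEndpoint("example_fetch", "GET /3/ExampleResource",
        ExampleHandler.class, "fetch", "Fetch the hypothetical example resource.");
  }
  @Override
  public void registerSchemas(RestApiContext context) {
    context.registerSchema(new ExampleResourceV3()); // hypothetical schema class
  }
  @Override
  public String getName() { return "ExampleExtension"; }
  @Override
  public java.util.List<String> getRequiredCoreExtensions() {
    return java.util.Collections.emptyList(); // no core-extension dependencies
  }
}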
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/RestApiHandler.java
package water.api; /** * Common interface for <s>all</s> some REST endpoint handlers. * <p> * This class is the preferred way of adding new REST endpoints. * * @param <IS> input schema class * @param <OS> output schema class */ public abstract class RestApiHandler<IS extends Schema, OS extends Schema> extends Handler { /** Suggested name for the endpoint in external libraries. */ public abstract String name(); /** Help for this endpoint (will be used in generated bindings). */ public abstract String help(); /** * Execute the endpoint, returning the result as the output schema. * * @param ignored TODO: remove this parameter * @param input input schema object * @return output schema object */ public abstract OS exec(int ignored, IS input); }
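// Editor's sketch (not part of the H2O sources) of a concrete RestApiHandler. EchoV3 is a
// stub schema defined inline (assuming water.api.schemas3.SchemaV3 as the base) so the
// example is self-contained; the framework instantiates the handler reflectively and uses
// name()/help() when the class is registered via the two-argument registerEndpoint form.
public class EchoHandler extends RestApiHandler<EchoHandler.EchoV3, EchoHandler.EchoV3> {
  public static class EchoV3 extends water.api.schemas3.SchemaV3<water.Iced, EchoV3> {
    @API(help = "Message to echo back.") public String message;
  }
  @Override public String name() { return "echo"; }
  @Override public String help() { return "Return the input schema unchanged."; }
  @Override public EchoV3 exec(int ignored, EchoV3 input) { return input; }
}
// Hypothetical registration: context.registerEndpoint("POST /3/Echo", EchoHandler.class);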
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/Route.java
package water.api; import water.H2O; import water.Iced; import water.util.MarkdownBuilder; import java.lang.reflect.Method; import java.util.Arrays; /** * Routing of an HTTP request to a handler method, with path parameter parsing. */ public final class Route extends Iced { static final int MIN_VERSION = 1; // TODO: handlers are now stateless, so create a single instance and stash it here // TODO: all fields should be final! // TODO: remove no-args ctor, since it is not used public String _http_method; public String _url; public String _summary; public String _api_name; public Class<? extends Handler> _handler_class; public Method _handler_method; // NOTE: Java 7 captures and lets you look up subpatterns by name but won't give you the list of names, so we need this redundant list: public String[] _path_params; // list of params we capture from the url pattern, e.g. for /17/MyComplexObj/(.*)/(.*) public Handler _handler; private RequestUri _uri; /** The handler factory configures how a handler is instantiated. * * PLEASE: do not remove it even though H2O is not using it. It is used by Sparkling Water, since * it needs to pass a Spark context to a new handler. */ final HandlerFactory _handler_factory; public Route() { _handler_factory = null; } public Route(RequestUri uri, String api_name, String summary, Class<? extends Handler> handler_class, String handler_method, HandlerFactory handler_factory) { assert uri != null && handler_class != null; assert handler_factory != null : "handler_factory should not be null, caller has to pass it!"; _uri = uri; _http_method = uri.getMethod(); _url = uri.getUrl(); _summary = summary; _api_name = api_name; _handler_class = handler_class; _handler_method = resolveMethod(handler_class, handler_method == null? "exec" : handler_method); _path_params = uri.getParamsList(); _handler_factory = handler_factory; try { _handler = _handler_factory.create(_handler_class); } catch (Exception ie) { throw H2O.fail("failed to register handler " + handler_class.getSimpleName() + "." + handler_method, ie); } } public RequestUri getUri() { return _uri; } public int getVersion() { return _uri.getVersion(); } /** * Generate Markdown documentation for this Route.
*/ public StringBuffer markdown(Schema sinput, Schema soutput) { MarkdownBuilder builder = new MarkdownBuilder(); builder.comment("Preview with http://jbt.github.io/markdown-editor"); builder.heading1(_http_method, _url); builder.hline(); builder.paragraph(_summary); // parameters and output tables builder.heading1("Input schema: "); builder.append(sinput.markdown(true, false)); builder.heading1("Output schema: "); builder.append(soutput.markdown(false, true)); return builder.stringBuffer(); } @Override public boolean equals(Object o) { if (o == this) return true; if (!(o instanceof Route)) return false; Route route = (Route) o; return _api_name.equals(route._api_name) && _handler_class.equals(route._handler_class) && _handler_method.equals(route._handler_method) && _http_method.equals(route._http_method) && _url.equals(route._url) && Arrays.equals(_path_params, route._path_params); } @Override public int hashCode() { return _api_name.hashCode(); } @Override public String toString() { return "Route{" + "_http_method='" + _http_method + '\'' + ", _url_pattern=" + _url + ", _summary='" + _summary + '\'' + ", _api_name='" + _api_name + "'" + ", _handler_class=" + _handler_class + ", _handler_method=" + _handler_method + ", _input_schema=" + Handler.getHandlerMethodInputSchema(_handler_method) + ", _output_schema=" + Handler.getHandlerMethodOutputSchema(_handler_method) + ", _path_params=" + Arrays.toString(_path_params) + '}'; } /** * Search the provided class (and all its superclasses) for the requested method. * @param handler_class Class to be searched * @param handler_method Name of the method to look for. The method must have signature (int, Schema). * @return The callable Method object. */ private static Method resolveMethod(Class<? extends Handler> handler_class, String handler_method) { for (Method method : handler_class.getMethods()) if (method.getName().equals(handler_method)) { Class[] pt = method.getParameterTypes(); if (pt != null && pt.length == 2 && pt[0] == Integer.TYPE && Schema.class.isAssignableFrom(pt[1])) return method; } throw H2O.fail("Failed to find handler method: " + handler_method + " in class: " + handler_class.getSimpleName()); } }
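// Editor's sketch (not part of the H2O sources) of the signature contract enforced by
// resolveMethod() above: a routable handler method takes the API version as an int plus a
// Schema subclass, and returns a Schema. "PingV3" is a hypothetical schema placeholder.
//
// public class PingHandler extends Handler {
//   public PingV3 ping(int version, PingV3 in) { return in; } // resolvable: (int, Schema)
// }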
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/SaveToHiveTableHandler.java
package water.api; import water.ExtensionManager; import water.Key; import water.api.schemas3.SaveToHiveTableV3; import water.fvec.Frame; public class SaveToHiveTableHandler extends Handler { public interface HiveFrameSaver { String NAME = "HiveFrameSaver"; enum Format { CSV, PARQUET } void saveFrameToHive( Key<Frame> frameKey, String jdbcUrl, String tableName, Format format, String tablePath, String tmpPath ); } private HiveFrameSaver getSaver() { return (HiveFrameSaver) ExtensionManager.getInstance().getCoreExtension(HiveFrameSaver.NAME); } @SuppressWarnings("unused") // called via reflection public SaveToHiveTableV3 saveToHiveTable(int version, SaveToHiveTableV3 request) { HiveFrameSaver saver = getSaver(); if (saver != null) { saver.saveFrameToHive( request.frame_id.key(), request.jdbc_url, request.table_name, request.format, request.table_path, request.tmp_path ); return request; } else { throw new IllegalStateException("HiveFrameSaver extension not enabled."); } } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/Schema.java
package water.api; import com.google.gson.Gson; import water.*; import water.api.schemas3.FrameV3; import water.api.schemas3.JobV3; import water.api.schemas3.KeyV3; import water.api.schemas3.ModelSchemaV3; import water.exceptions.H2OIllegalArgumentException; import water.exceptions.H2OKeyNotFoundArgumentException; import water.exceptions.H2ONotFoundArgumentException; import water.fvec.Frame; import water.util.*; import java.lang.annotation.Annotation; import java.lang.reflect.Array; import java.lang.reflect.Field; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Modifier; import java.util.*; /** * Base Schema class; all REST API Schemas inherit from here. * <p> * Schema is a primary interface of the REST APIs: all endpoints consume some schema object as an input, and produce * another schema object as an output (though some endpoints may return nothing). * <p> * Schemas, as an external interface, are required to be stable: fields may not be renamed or removed, their types or * meaning may not change, etc. It is allowed to add new fields to a schema, provided that they are optional, and * that their default values correspond to the old behavior of the endpoint. If these requirements cannot be met, * then a new version of a schema class must be created. * <p> * Many schemas are in direct correspondence with H2O objects. For example, JobV3 schema represents the Job object. * These "representative" Iced objects are called "implementation" or "impl", and are parametrized with type I. * Such representation is necessary in order to ensure stability of the interface: even as Job class evolves, the * interface of JobV3 schema must not. In the simplest case, when there is 1-to-1 correspondence between fields in * the impl class and in the schema, we use reflection magic to copy those fields over. The reflection magic is smart * enough to perform simple field name translations, and even certain type translations (like Keyed objects into Keys). * If there is no such correspondence, then special type adapters must be written. Right now this is done by * overriding the {@link #fillImpl(I) fillImpl} and {@link #fillFromImpl(I) fillFromImpl} methods. Usually they will * want to call super to get the default behavior, and then modify the results a bit (e.g., to map differently-named * fields, or to compute field values). Transient and static fields are ignored by the reflection magic. * <p> * There are also schemas that do not correspond to any H2O object. These are mostly the input schemas (schemas used * for inputs of api requests). Such schemas should be "implemented" by Iced. * <p> * All schemas are expected to be self-documenting, in the sense that all fields within those schemas should carry * detailed documentation about their meaning, as well as any additional hints about the field's usage. These should * be annotated using the {@link API @API} interface. If a schema contains a complicated object, then that object * itself should derive from Schema, so that its fields can also be properly documented. However if the internal * object is sufficiently simple (say, a Map), then it may be sufficient to document it as a whole and have it derived * from Iced, not from Schema. * <p> * Schema names (getSimpleName()) must be unique within an application. During Schema discovery and registration * there are checks to ensure this. Each schema is associated with exactly one implementation object, however some * Iced objects are mapped into multiple schemas. 
* <p> * For V3 Schemas each field had a "direction" (input / output / both), which allowed us to use the same schema as * both input and output for an endpoint. This is no longer possible in V4: two separate schema classes for input / * output should be created. * * <h1>Usage</h1> * <p> * {@link Handler} creates an input schema from the body/parameters of the HTTP request (using * {@link #fillFromParms(Properties) fillFromParms()}), and passes it on to the corresponding handler method. * <p> * Each handler method may modify the input schema and return it as the output schema (common for V3 endpoints, * should be avoided in V4). * <p> * Alternatively, a handler method may create a new output schema object from scratch, or from an existing * implementation object. * * <h1>Internal details</h1> * <p> * Most Java developers need not be concerned with the details that follow, because the * framework will make these calls as necessary. * <p> * To create a schema object and fill it from an existing impl object: * <pre>{@code S schema = new SomeSchemaClass().fillFromImpl(impl);}</pre> * <p> * To create an impl object and fill it from an existing schema object: * <pre>{@code I impl = schema.createAndFillImpl();}</pre> * <p> * Schemas that are used for HTTP requests are filled with the default values of their impl * class, and then any present HTTP parameters override those default values. * To create a schema object filled from the default values of its impl class and then * overridden by HTTP request params: * <pre>{@code S schema = new SomeSchemaClass().fillFromImpl().fillFromParms(parms);}</pre> * * @param <I> "implementation" (Iced) class for this schema * @param <S> reference to self: this should always be the same class as being declared. For example: * <pre>public class TimelineV3 extends Schema&lt;Timeline, TimelineV3&gt;</pre> */ public abstract class Schema<I extends Iced, S extends Schema<I,S>> extends Iced { // These fields are declared transient so that they do not get included when a schema is serialized into JSON. private transient Class<I> _impl_class; private transient int _schema_version; private transient String _schema_name; private transient String _schema_type; private transient static final Gson gson = H2oRestGsonHelper.createH2oCompatibleGson(); // stateless and thread safe /** Default constructor; triggers lazy schema registration. * @throws water.exceptions.H2OFailException if there is a name collision or * there is more than one schema which maps to the same Iced class */ public Schema() { init_meta(); SchemaServer.checkIfRegistered(this); } /** * Create a new Schema instance from an existing impl object. */ public Schema(I impl) { this(); this.fillFromImpl(impl); } public void init_meta() { if (_schema_name != null) return; _schema_name = this.getClass().getSimpleName(); _schema_version = extractVersionFromSchemaName(_schema_name); _schema_type = getImplClass().getSimpleName(); } /** Extract the version number from the schema class name. Returns -1 if * there's no version number at the end of the classname. */ public static int extractVersionFromSchemaName(String clz_name) { int idx = clz_name.lastIndexOf('V'); if (idx == -1) return -1; try { return Integer.valueOf(clz_name.substring(idx+1)); } catch( NumberFormatException ex) { return -1; } } /** Get the version number of this schema, for example 3 or 99. Note that 99 * is the "experimental" version, meaning that there are no stability * guarantees between H2O versions.
*/ public int getSchemaVersion() { return _schema_version; } public String getSchemaName() { return _schema_name; } public String getSchemaType() { return _schema_type; } /* Temporary hack to allow reassignment of schema_type by KeyV3 class */ public void setSchemaType_doNotCall(String s) { _schema_type = s; } /** * Create an appropriate implementation object and any child objects but does not fill them. * The standard purpose of a createImpl without a fillImpl is to be able to get the default * values for all the impl's fields. * <p> * For objects without children this method does all the required work. For objects * with children the subclass will need to override, e.g. by calling super.createImpl() * and then calling createImpl() on its children. * <p> * Note that impl objects for schemas which override this method don't need to have * a default constructor (e.g., a Keyed object constructor can still create and set * the Key), but they must not fill any fields which can be filled later from the schema. * <p> * TODO: We could handle the common case of children with the same field names here * by finding all of our fields that are themselves Schemas. */ public I createImpl() { try { return getImplClass().newInstance(); } catch (Exception e) { throw H2O.fail("Exception making a newInstance",e); } } protected I fillImpl(I impl, String[] fieldsToSkip) { PojoUtils.copyProperties(impl, this, PojoUtils.FieldNaming.CONSISTENT, fieldsToSkip); // TODO: make field names in the impl classes consistent and remove PojoUtils.copyProperties(impl, this, PojoUtils.FieldNaming.DEST_HAS_UNDERSCORES, fieldsToSkip); return impl; } /** Fill an impl object and any children from this schema and its children. * If a schema doesn't need to adapt any fields, it does not need to override * this method. */ public I fillImpl(I impl) { return fillImpl(impl, null); } /** Convenience helper which creates and fills an impl object from this schema. */ public final I createAndFillImpl() { return this.fillImpl(this.createImpl()); } /** * Fill this schema from the default impl, and then return self. */ public final S fillFromImpl() { return fillFromImpl(createImpl(), null); } /** * Fill this Schema from the given implementation object. If a schema doesn't need to adapt any fields, it does not * need to override this method. */ public S fillFromImpl(I impl) { return fillFromImpl(impl, null); } protected S fillFromImpl(I impl, String[] fieldsToSkip) { PojoUtils.copyProperties(this, impl, PojoUtils.FieldNaming.ORIGIN_HAS_UNDERSCORES, fieldsToSkip); PojoUtils.copyProperties(this, impl, PojoUtils.FieldNaming.CONSISTENT, fieldsToSkip); // TODO: make field names in the impl classes consistent and remove //noinspection unchecked (parameter <S> should be the derived class itself) return (S) this; } /** Return the class of the implementation type parameter I for the * given Schema class. Used by the metadata facilities and the * reflection-based field-copying magic in PojoUtils. */ public static Class<? extends Iced> getImplClass(Class<? extends Schema> clz) { Class<? extends Iced> impl_class = ReflectionUtils.findActualClassParameter(clz, 0); if (null == impl_class) Log.warn("Failed to find an impl class for Schema: " + clz); return impl_class; } /** Return the class of the implementation type parameter I for this Schema. * Used by generic code which deals with arbitrary schemas and their backing * impl classes. Never returns null. */ public Class<I> getImplClass() { return _impl_class != null ?
_impl_class : (_impl_class = ReflectionUtils.findActualClassParameter(this.getClass(), 0)); } /** * Fill this Schema object from a set of parameters. * * @param parms parameters - set of tuples (parameter name, parameter value) * @return this schema * * @see #fillFromParms(Properties, Properties, boolean) */ public S fillFromParms(Properties parms) { return fillFromParms(parms, true); } /** * Fill this Schema object from a set of parameters. * * @param parms parameters - set of tuples (parameter name, parameter value) * @param checkRequiredFields perform check for missing required fields * @return this schema * * @see #fillFromParms(Properties, Properties, boolean) */ public S fillFromParms(Properties parms, boolean checkRequiredFields) { return fillFromParms(parms, null, checkRequiredFields); } /** * Fill this Schema from a set of (generally HTTP) parameters. * <p> * Using reflection this process determines the type of the target field and * conforms the types if possible. For example, if the field is a Keyed type * the name (ID) will be looked up in the DKV and mapped appropriately. * <p> * The process ignores parameters which are not fields in the schema, and it * verifies that all fields marked as required are present in the parameters * list. * <p> * It also does various sanity checks for broken Schemas, for example fields must * not be private, and since input fields get filled here they must not be final. * @param parms Properties map of parameter values * @param unknownParms if not null, bad parameters won't cause an exception, * they will be collected in this Properties object instead * @param checkRequiredFields perform check for missing required fields * @return this schema * @throws H2OIllegalArgumentException for bad/missing parameters */ public S fillFromParms(Properties parms, Properties unknownParms, boolean checkRequiredFields) { // Get passed-in fields, assign into Schema Class thisSchemaClass = this.getClass(); Map<String, Field> fields = new HashMap<>(); Field current = null; // declare here so we can print in catch{} try { Class clz = thisSchemaClass; do { Field[] some_fields = clz.getDeclaredFields(); for (Field f : some_fields) { current = f; if (null == fields.get(f.getName())) fields.put(f.getName(), f); } clz = clz.getSuperclass(); } while (Iced.class.isAssignableFrom(clz.getSuperclass())); } catch (SecurityException e) { throw H2O.fail("Exception accessing field: " + current + " in class: " + this.getClass() + ": " + e); } for( String key : parms.stringPropertyNames() ) { try { Field f = fields.get(key); // No such field error, if parm is junk if (null == f) { if (unknownParms != null) { unknownParms.put(key, parms.getProperty(key)); continue; } else throw new H2OIllegalArgumentException("Unknown parameter: " + key, "Unknown parameter in fillFromParms: " + key + " for class: " + this.getClass().toString()); } int mods = f.getModifiers(); if( Modifier.isTransient(mods) || Modifier.isStatic(mods) ) { // Attempting to set a transient or static; treat same as junk fieldname throw new H2OIllegalArgumentException( "Bad parameter for field: " + key + " for class: " + this.getClass().toString(), "Bad parameter definition for field: " + key + " in fillFromParms for class: " + this.getClass().toString() + " (field was declared static or transient)"); } // Only support a single annotation which is an API, and is required Annotation[] apis = f.getAnnotations(); if( apis.length == 0 ) throw H2O.fail("Broken internal schema; missing API annotation for field: " + key); API api 
= (API)apis[0]; // Must have one of these set to be an input field if( api.direction() == API.Direction.OUTPUT ) { throw new H2OIllegalArgumentException( "Attempting to set output field: " + key + " for class: " + this.getClass().toString(), "Attempting to set output field: " + key + " in fillFromParms for class: " + this.getClass().toString() + " (field was annotated as API.Direction.OUTPUT)"); } // Parse value and set the field setField(this, f, key, parms.getProperty(key), api.required(), thisSchemaClass); } catch( IllegalAccessException iae ) { // Come here if field is final or private throw H2O.fail("Broken internal schema; field cannot be private nor final: " + key); } } // Here everything in 'parms' was set into some field - so we have already // checked for unknown or extra parms. // Confirm required fields are set if (checkRequiredFields) { for (Field f : fields.values()) { int mods = f.getModifiers(); if (Modifier.isTransient(mods) || Modifier.isStatic(mods)) continue; // Ignore transient & static try { API api = (API) f.getAnnotations()[0]; // TODO: is there a more specific way we can do this? if (api.required()) { if (parms.getProperty(f.getName()) == null) { IcedHashMapGeneric.IcedHashMapStringObject values = new IcedHashMapGeneric.IcedHashMapStringObject(); values.put("schema", this.getClass().getSimpleName()); values.put("argument", f.getName()); throw new H2OIllegalArgumentException( "Required field " + f.getName() + " not specified", "Required field " + f.getName() + " not specified for schema class: " + this.getClass(), values); } } } catch (ArrayIndexOutOfBoundsException e) { throw H2O.fail("Missing annotation for API field: " + f.getName()); } } } //noinspection unchecked (parameter <S> should be the derived class itself) return (S) this; } /** * Fills this Schema from the body content when available. * By default the body is interpreted as a JSON object. * * We use PojoUtils.fillFromJson() rather than just using "schema = Gson.fromJson(post_body)" * so that we have defaults: we only overwrite fields that the client has specified. * * @param body the post body (can't be null), converted to JSON by default * @return the filled schema */ public S fillFromBody(String body) { return (S) PojoUtils.fillFromJson(this, body); } /** * Fill this schema from an arbitrary source object. The base implementation supports no * conversions; subclasses may override to accept specific types. * @param o arbitrary source object * @return this schema, filled from {@code o}, when a subclass supports the conversion */ public S fillFromAny(Object o) { throw new IllegalArgumentException("can't convert object of type " + o.getClass() + " to schema " + this.getSchemaType()); } /** * Safe method to set the field on a given schema object * @param o schema object to modify * @param f field to modify * @param key name of field to modify * @param value string-based representation of value to set * @param required is the field required by the API * @param thisSchemaClass class of schema handling this (can be null) * @throws IllegalAccessException */ public static <T extends Schema> void setField(T o, Field f, String key, String value, boolean required, Class thisSchemaClass) throws IllegalAccessException { // Primitive parse by field type Object parse_result = parse(key, value, f.getType(), required, thisSchemaClass); if (parse_result != null && f.getType().isArray() && parse_result.getClass().isArray() && (f.getType().getComponentType() != parse_result.getClass().getComponentType())) { // We have to conform an array of primitives. There's got to be a better way. . .
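// For example (illustrative): parse() returns an Integer[] for an int[] field, so each branch
// below copies element-by-element to box or unbox the array into the field's exact type.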
if (parse_result.getClass().getComponentType() == int.class && f.getType().getComponentType() == Integer.class) { int[] from = (int[])parse_result; Integer[] copy = new Integer[from.length]; for (int i = 0; i < from.length; i++) copy[i] = from[i]; f.set(o, copy); } else if (parse_result.getClass().getComponentType() == Integer.class && f.getType().getComponentType() == int.class) { Integer[] from = (Integer[])parse_result; int[] copy = new int[from.length]; for (int i = 0; i < from.length; i++) copy[i] = from[i]; f.set(o, copy); } else if (parse_result.getClass().getComponentType() == Double.class && f.getType().getComponentType() == double.class) { Double[] from = (Double[])parse_result; double[] copy = new double[from.length]; for (int i = 0; i < from.length; i++) copy[i] = from[i]; f.set(o, copy); } else if (parse_result.getClass().getComponentType() == Float.class && f.getType().getComponentType() == float.class) { Float[] from = (Float[]) parse_result; float[] copy = new float[from.length]; for (int i = 0; i < from.length; i++) copy[i] = from[i]; f.set(o, copy); } else if (parse_result.getClass().getComponentType() == Boolean.class && f.getType().getComponentType() == boolean.class) { Boolean[] from = (Boolean[]) parse_result; boolean[] copy = new boolean[from.length]; for (int i = 0; i < from.length; i++) copy[i] = from[i]; f.set(o, copy); } else { throw H2O.fail("Don't know how to cast an array of: " + parse_result.getClass().getComponentType() + " to an array of: " + f.getType().getComponentType()); } } else { f.set(o, parse_result); } } static <E> Object parsePrimitive(String s, Class fclz) { if (fclz.equals(String.class)) return s; // Strings are already the right primitive type if (fclz.equals(int.class)) return parseInteger(s, int.class); if (fclz.equals(long.class)) return parseInteger(s, long.class); if (fclz.equals(short.class)) return parseInteger(s, short.class); if (fclz.equals(boolean.class)) { if (s.equals("0")) return Boolean.FALSE; if (s.equals("1")) return Boolean.TRUE; return Boolean.valueOf(s); } if (fclz.equals(byte.class)) return parseInteger(s, byte.class); if (fclz.equals(double.class)) return Double.valueOf(s); if (fclz.equals(float.class)) return Float.valueOf(s); //FIXME: if (fclz.equals(char.class)) return Character.valueOf(s); throw H2O.fail("Unknown primitive type to parse: " + fclz.getSimpleName()); } // URL parameter parse static <E> Object parse(String field_name, String s, Class fclz, boolean required, Class schemaClass) { if (fclz.isPrimitive() || String.class.equals(fclz)) { try { return parsePrimitive(s, fclz); } catch (NumberFormatException ne) { String msg = "Illegal argument for field: " + field_name + " of schema: " + schemaClass.getSimpleName() + ": cannot convert \"" + s + "\" to type " + fclz.getSimpleName(); throw new H2OIllegalArgumentException(msg); } } // An array?
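// For example (illustrative): for an int[] field, the string "[1, 2, 3]" has its brackets
// checked, is split on top-level commas, and each token is parsed recursively; a bare "7"
// is treated as the one-element array {7}.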
if (fclz.isArray()) { // Get component type Class<E> afclz = (Class<E>) fclz.getComponentType(); // Result E[] a = null; // Handle simple case with null-array if (s.equals("null") || s.length() == 0) return null; // Handling of "auto-parseable" cases if (AutoParseable.class.isAssignableFrom(afclz)) return gson.fromJson(s, fclz); // Split values String[] splits; // "".split(",") => {""} so handle the empty case explicitly if (s.startsWith("[") && s.endsWith("]")) { // It looks like an array read(s, 0, '[', fclz); read(s, s.length() - 1, ']', fclz); String inside = s.substring(1, s.length() - 1).trim(); if (inside.length() == 0) splits = new String[]{}; else splits = splitArgs(inside); } else { // Let's try to parse a single value as an array! // See PUBDEV-1955 splits = new String[]{s.trim()}; } // Can't cast an int[] to an Object[]. Sigh. if (afclz == int.class) { // TODO: other primitive types. . . a = (E[]) Array.newInstance(Integer.class, splits.length); } else if (afclz == double.class) { a = (E[]) Array.newInstance(Double.class, splits.length); } else if (afclz == float.class) { a = (E[]) Array.newInstance(Float.class, splits.length); } else if (afclz == boolean.class) { a = (E[]) Array.newInstance(Boolean.class, splits.length); } else { // Fails with primitive classes; need the wrapper class. Thanks, Java. a = (E[]) Array.newInstance(afclz, splits.length); } for (int i = 0; i < splits.length; i++) { if (String.class == afclz || KeyV3.class.isAssignableFrom(afclz)) { // strip quotes off string values inside array String stripped = splits[i].trim(); if ("null".equals(stripped.toLowerCase()) || "na".equals(stripped.toLowerCase())) { a[i] = null; continue; } // Quotes are now optional because standard clients will send arrays of length one as just strings. if (stripped.startsWith("\"") && stripped.endsWith("\"")) { stripped = stripped.substring(1, stripped.length() - 1); } a[i] = (E) parse(field_name, stripped, afclz, required, schemaClass); } else { a[i] = (E) parse(field_name, splits[i].trim(), afclz, required, schemaClass); } } return a; } // Are we parsing an object from a string? NOTE: we might want to make this check more restrictive. if (! fclz.isAssignableFrom(Schema.class) && s != null && s.startsWith("{") && s.endsWith("}")) { return gson.fromJson(s, fclz); } if (fclz.equals(Key.class)) if ((s == null || s.length() == 0) && required) throw new H2OKeyNotFoundArgumentException(field_name, s); else if (!required && (s == null || s.length() == 0)) return null; else return Key.make(s.startsWith("\"") ? s.substring(1, s.length() - 1) : s); // If the key name is in an array we need to trim surrounding quotes. if (KeyV3.class.isAssignableFrom(fclz)) { if ((s == null || s.length() == 0) && required) throw new H2OKeyNotFoundArgumentException(field_name, s); if (!required && (s == null || s.length() == 0)) return null; return KeyV3.make(fclz, Key.make(s.startsWith("\"") ? s.substring(1, s.length() - 1) : s)); // If the key name is in an array we need to trim surrounding quotes.
} if (Enum.class.isAssignableFrom(fclz)) { return EnumUtils.valueOf(fclz, s); } // TODO: these can be refactored into a single case using the facilities in Schema: if (FrameV3.class.isAssignableFrom(fclz)) { if ((s == null || s.length() == 0) && required) throw new H2OKeyNotFoundArgumentException(field_name, s); else if (!required && (s == null || s.length() == 0)) return null; else { Value v = DKV.get(s); if (null == v) return null; // not required if (!v.isFrame()) throw H2OIllegalArgumentException.wrongKeyType(field_name, s, "Frame", v.get().getClass()); return new FrameV3((Frame) v.get()); // TODO: version! } } if (JobV3.class.isAssignableFrom(fclz)) { if ((s == null || s.length() == 0) && required) throw new H2OKeyNotFoundArgumentException(s); else if (!required && (s == null || s.length() == 0)) return null; else { Value v = DKV.get(s); if (null == v) return null; // not required if (!v.isJob()) throw H2OIllegalArgumentException.wrongKeyType(field_name, s, "Job", v.get().getClass()); return new JobV3().fillFromImpl((Job) v.get()); // TODO: version! } } // TODO: for now handle the case where we're only passing the name through; later we need to handle the case // where the frame name is also specified. if (FrameV3.ColSpecifierV3.class.isAssignableFrom(fclz)) { return new FrameV3.ColSpecifierV3(s); } if (ModelSchemaV3.class.isAssignableFrom(fclz)) throw H2O.fail("Can't yet take ModelSchemaV3 as input."); /* if( (s==null || s.length()==0) && required ) throw new IllegalArgumentException("Missing key"); else if (!required && (s == null || s.length() == 0)) return null; else { Value v = DKV.get(s); if (null == v) return null; // not required if (! v.isModel()) throw new IllegalArgumentException("Model argument points to a non-model object."); return v.get(); } */ throw H2O.fail("Unimplemented schema fill from " + fclz.getSimpleName()); } // parse() /** * Helper functions for parse() **/ /** * Parses a string into an integer data type specified by parameter return_type. Accepts any format that * is accepted by Java's BigDecimal class. * - Throws a NumberFormatException if the evaluated string is not an integer or if the value is too large to * be stored into return_type without overflow. * - Throws an IllegalArgumentException if return_type is not an integer data type. **/ static private <T> T parseInteger(String s, Class<T> return_type) { try { java.math.BigDecimal num = new java.math.BigDecimal(s); T result = (T) num.getClass().getDeclaredMethod(return_type.getSimpleName() + "ValueExact", new Class[0]).invoke(num); return result; } catch (InvocationTargetException ite) { throw new NumberFormatException("The expression's numeric value is out of the range of type " + return_type.getSimpleName()); } catch (NoSuchMethodException nsme) { throw new IllegalArgumentException(return_type.getSimpleName() + " is not an integer data type"); } catch (IllegalAccessException iae) { throw H2O.fail("Cannot parse expression as " + return_type.getSimpleName() + " (Illegal Access)"); } } static private int read( String s, int x, char c, Class fclz ) { if( peek(s,x,c) ) return x+1; throw new IllegalArgumentException("Expected '"+c+"' while reading a "+fclz.getSimpleName()+", but found "+s); } static private boolean peek( String s, int x, char c ) { return x < s.length() && s.charAt(x) == c; } // Splits on commas, but ignores commas in double quotes.
Required, // since using a regex blows the stack on long column counts // TODO: detect and complain about malformed JSON private static String[] splitArgs(String argStr) { StringBuilder sb = new StringBuilder(argStr); StringBuilder arg = new StringBuilder(); List<String> splitArgList = new ArrayList<String> (); boolean inDoubleQuotes = false; boolean inSquareBrackets = false; // for arrays of arrays for (int i=0; i < sb.length(); i++) { if (sb.charAt(i) == '"' && !inDoubleQuotes && !inSquareBrackets) { inDoubleQuotes = true; arg.append(sb.charAt(i)); } else if (sb.charAt(i) == '"' && inDoubleQuotes && !inSquareBrackets) { inDoubleQuotes = false; arg.append(sb.charAt(i)); } else if (sb.charAt(i) == ',' && !inDoubleQuotes && !inSquareBrackets) { splitArgList.add(arg.toString()); // clear the field for next word arg.setLength(0); } else if (sb.charAt(i) == '[') { inSquareBrackets = true; arg.append(sb.charAt(i)); } else if (sb.charAt(i) == ']') { inSquareBrackets = false; arg.append(sb.charAt(i)); } else { arg.append(sb.charAt(i)); } } if (arg.length() > 0) splitArgList.add(arg.toString()); return splitArgList.toArray(new String[splitArgList.size()]); } /** * Returns a new Schema instance. Never returns null; fails with an H2OFailException if the class cannot be instantiated. * @return New instance of Schema Class 'clz'. */ public static <T extends Schema> T newInstance(Class<T> clz) { try { return clz.newInstance(); } catch (Exception e) { throw H2O.fail("Failed to instantiate schema of class: " + clz.getCanonicalName(),e); } } /** * For a given schema_name (e.g., "FrameV2") return an appropriate new schema object (e.g., a water.api.FrameV2). */ protected static Schema newInstance(String schema_name) { return Schema.newInstance(SchemaServer.getSchema(schema_name)); } /** * Generate Markdown documentation for this Schema, possibly including only the input or output fields. * @throws H2ONotFoundArgumentException if reflection on a field fails */ public StringBuffer markdown(boolean include_input_fields, boolean include_output_fields) { return markdown(new SchemaMetadata(this), include_input_fields, include_output_fields); } /** * Generate Markdown documentation for this Schema, given we already have the metadata constructed. * @throws H2ONotFoundArgumentException if reflection on a field fails */ public StringBuffer markdown(SchemaMetadata meta, boolean include_input_fields, boolean include_output_fields) { MarkdownBuilder builder = new MarkdownBuilder(); builder.comment("Preview with http://jbt.github.io/markdown-editor"); builder.heading1("schema ", this.getClass().getSimpleName()); builder.hline(); // builder.paragraph(metadata.summary); // TODO: refactor with Route.markdown(): // fields boolean first; // don't print the table at all if there are no rows try { if (include_input_fields) { first = true; builder.heading2("input fields"); for (SchemaMetadata.FieldMetadata field_meta : meta.fields) { if (field_meta.direction == API.Direction.INPUT || field_meta.direction == API.Direction.INOUT) { if (first) { builder.tableHeader("name", "required?", "level", "type", "schema?", "schema", "default", "description", "values", "is member of frames", "is mutually exclusive with"); first = false; } builder.tableRow( field_meta.name, String.valueOf(field_meta.required), field_meta.level.name(), field_meta.type, String.valueOf(field_meta.is_schema), field_meta.is_schema ? field_meta.schema_name : "", (null == field_meta.value ? "(null)" : field_meta.value.toString()), // Something better for toString()?
field_meta.help, (field_meta.values == null || field_meta.values.length == 0 ? "" : Arrays.toString(field_meta.values)), (field_meta.is_member_of_frames == null ? "[]" : Arrays.toString(field_meta.is_member_of_frames)), (field_meta.is_mutually_exclusive_with == null ? "[]" : Arrays.toString(field_meta.is_mutually_exclusive_with)) ); } } if (first) builder.paragraph("(none)"); } if (include_output_fields) { first = true; builder.heading2("output fields"); for (SchemaMetadata.FieldMetadata field_meta : meta.fields) { if (field_meta.direction == API.Direction.OUTPUT || field_meta.direction == API.Direction.INOUT) { if (first) { builder.tableHeader("name", "type", "schema?", "schema", "default", "description", "values", "is member of frames", "is mutually exclusive with"); first = false; } builder.tableRow( field_meta.name, field_meta.type, String.valueOf(field_meta.is_schema), field_meta.is_schema ? field_meta.schema_name : "", (null == field_meta.value ? "(null)" : field_meta.value.toString()), // something better than toString()? field_meta.help, (field_meta.values == null || field_meta.values.length == 0 ? "" : Arrays.toString(field_meta.values)), (field_meta.is_member_of_frames == null ? "[]" : Arrays.toString(field_meta.is_member_of_frames)), (field_meta.is_mutually_exclusive_with == null ? "[]" : Arrays.toString(field_meta.is_mutually_exclusive_with))); } } if (first) builder.paragraph("(none)"); } // TODO: render examples and other stuff, if it's passed in } catch (Exception e) { IcedHashMapGeneric.IcedHashMapStringObject values = new IcedHashMapGeneric.IcedHashMapStringObject(); values.put("schema", this); // TODO: This isn't quite the right exception type: throw new H2OIllegalArgumentException("Caught exception using reflection on schema: " + this, "Caught exception using reflection on schema: " + this + ": " + e, values); } return builder.stringBuffer(); } /** * This marker interface denotes classes that can be parsed directly by the GSON parser (skipping H2O's own parser). */ public interface AutoParseable { /* nothing here */} }
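// ---------------------------------------------------------------------------------------------
// Usage sketch (illustrative only; "RainV3" and "Rain" are hypothetical names, not part of this
// codebase). A typical request/response round trip through a schema looks like:
//
//   Properties parms = new Properties();
//   parms.setProperty("depth", "5");                // as sent by the HTTP client
//   RainV3 schema = new RainV3();                   // RainV3 extends Schema<Rain, RainV3>
//   schema.fillFromImpl(schema.createImpl());       // start from the impl's default values
//   schema.fillFromParms(parms);                    // then let HTTP params override them
//   Rain impl = schema.createAndFillImpl();         // hand the impl to the algorithm
// ---------------------------------------------------------------------------------------------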
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/SchemaMetadata.java
package water.api; import water.H2O; import water.Iced; import water.IcedWrapper; import water.Weaver; import water.api.schemas3.*; import water.exceptions.H2OIllegalArgumentException; import water.util.*; import java.lang.reflect.Field; import java.lang.reflect.Modifier; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; /** * The metadata info on all the fields in a Schema. This is used to help Schema be self-documenting, * and to generate language bindings for route handlers and entities. */ public final class SchemaMetadata extends Iced { public int version; public String name; public String superclass; public String type; public List<FieldMetadata> fields; public String markdown; // TODO: combine with ModelParameterSchemaV2. static public final class FieldMetadata extends Iced { /** * Field name in the POJO. Set through reflection. */ public String name; /** * Type for this field. Set through reflection. */ public String type; /** * Type for this field is itself a Schema. Set through reflection. */ public boolean is_schema; /** * Schema name for this field, if it is_schema. Set through reflection. */ public String schema_name; /** * Value for this field. Set through reflection. */ public Iced value; /** * A short help description to appear alongside the field in a UI. Set from the @API annotation. */ String help; /** * The label that should be displayed for the field if the name is insufficient. Set from the @API annotation. */ String label; /** * Is this field required, or is the default value generally sufficient? Set from the @API annotation. */ boolean required; /** * How important is this field? The web UI uses the level to do a slow reveal of the parameters. Set from the @API annotation. */ API.Level level; /** * Is this field an input, output or inout? Set from the @API annotation. */ API.Direction direction; /** * Is this field inherited from a class higher in the hierarchy? */ public boolean is_inherited; /** * If this field is inherited from a class higher in the hierarchy which one? */ public String inherited_from; /** * Is this field gridable? Set from the @API annotation. */ public boolean is_gridable; // The following are markers for *input* fields. /** * For enum-type fields the allowed values are specified using the values annotation. * This is used in UIs to tell the user the allowed values, and for validation. * Set from the @API annotation. */ String[] values; /** * Should this field be rendered in the JSON representation? Set from the @API annotation. */ boolean json; /** * For Vec-type fields this is the set of Frame-type fields which must contain the named column. * For example, for a SupervisedModel the response_column must be in both the training_frame * and (if it's set) the validation_frame. */ String[] is_member_of_frames; /** * For Vec-type fields this is the set of other Vec-type fields which must contain * mutually exclusive values. For example, for a SupervisedModel the response_column * must be mutually exclusive with the weights_column. */ String[] is_mutually_exclusive_with; public FieldMetadata() { } /** * Create a new FieldMetadata object for the given Field of the given Schema. 
* @param schema water.api.Schema object * @param f java.lang.reflect.Field for the Schema class */ public FieldMetadata(Schema schema, Field f, List<Field>superclassFields) { super(); try { f.setAccessible(true); // handle private and protected fields // Get annotation directly API annotation = f.getAnnotation(API.class); this.name = f.getName(); Object o = f.get(schema); this.value = consValue(o); // Enum is a field of enum type or of String type with defined and fixed set of values! boolean is_enum = isEnum(f.getType(), annotation) || (f.getType().isArray() && isEnum(f.getType().getComponentType(), annotation)); boolean is_fake_enum = isFakeEnum(f.getType(), annotation) || (f.getType().isArray() && isFakeEnum(f.getType().getComponentType(), annotation)); this.is_schema = Schema.class.isAssignableFrom(f.getType()) || (f.getType().isArray() && Schema.class.isAssignableFrom(f.getType().getComponentType())); this.type = consType(schema, ReflectionUtils.findActualFieldClass(schema.getClass(), f), f.getName(), annotation); // Note, this has to work when the field is null. In addition, if the field's type is a base class we want to see if we have a versioned schema for its Iced type and, if so, use it. if (this.is_schema) { // First, get the class of the field: NOTE: this gets the actual type for genericized fields, but not for arrays of genericized fields Class schema_class = f.getType().isArray()? f.getType().getComponentType() : ReflectionUtils.findActualFieldClass(schema.getClass(), f); this.schema_name = schema_class.getSimpleName(); } else if ((is_enum || is_fake_enum) && !f.getType().isArray()) { // We have enums of the same name defined in a few classes (e.g., Loss and Initialization) this.schema_name = getEnumSchemaName(is_enum ? f.getType() : annotation.valuesProvider()); } else if ((is_enum || is_fake_enum) && f.getType().isArray()) { // We have enums of the same name defined in a few classes (e.g., Loss and Initialization) this.schema_name = getEnumSchemaName(is_enum ? f.getType().getComponentType() : annotation.valuesProvider()); } this.is_inherited = (superclassFields.contains(f)); if (this.is_inherited) this.inherited_from = f.getDeclaringClass().getSimpleName(); if (null != annotation) { this.help = annotation.help(); this.label = this.name; this.required = annotation.required(); this.level = annotation.level(); this.direction = annotation.direction(); this.is_gridable = annotation.gridable(); this.values = annotation.valuesProvider() == ValuesProvider.NULL ? annotation.values() : getValues(annotation.valuesProvider()); this.json = annotation.json(); this.is_member_of_frames = annotation.is_member_of_frames(); this.is_mutually_exclusive_with = annotation.is_mutually_exclusive_with(); // TODO: need to form the transitive closure // If the field is an enum then the values annotation field had better be set. . . if (is_enum && (null == this.values || 0 == this.values.length)) { throw H2O.fail("Didn't find values annotation for enum field: " + this.name); } } } catch (Exception e) { throw H2O.fail("Caught exception accessing field: " + f + " for schema object: " + schema + ": " + e.toString()); } } // FieldMetadata(Schema, Field) /** * Factory method to create a new FieldMetadata instance if the Field has an @API annotation. 
* @param schema water.api.Schema object * @param f java.lang.reflect.Field for the Schema class * @return a new FieldMetadata instance if the Field has an @API annotation, else null */ public static FieldMetadata createIfApiAnnotation(Schema schema, Field f, List<Field> superclassFields) { f.setAccessible(true); // handle private and protected fields if (null != f.getAnnotation(API.class)) return new FieldMetadata(schema, f, superclassFields); if (!(Modifier.isPrivate(f.getModifiers()))) { // don't warn in case of private field without annotation Log.warn("Skipping field that lacks an annotation: " + schema.toString() + "." + f); } return null; } /** For a given Class generate a client-friendly type name (e.g., int[][] or Frame). */ public static String consType(Schema schema, Class clz, String field_name, API annotation) { boolean is_enum = isEnum(clz, null) || isFakeEnum(clz, annotation); boolean is_array = clz.isArray(); // built-in Java types: if (is_enum) return "enum"; if (String.class.isAssignableFrom(clz)) return "string"; // lower-case, to be less Java-centric if (clz.equals(Boolean.TYPE) || clz.equals(Byte.TYPE) || clz.equals(Short.TYPE) || clz.equals(Integer.TYPE) || clz.equals(Long.TYPE) || clz.equals(Float.TYPE) || clz.equals(Double.TYPE)) return clz.toString(); if (is_array) return consType(schema, clz.getComponentType(), field_name, annotation) + "[]"; if (Map.class.isAssignableFrom(clz)) { if (IcedHashMapGeneric.class.isAssignableFrom(clz) || IcedHashMapBase.class.isAssignableFrom(clz)) { String type0 = ReflectionUtils.findActualClassParameter(clz, 0).getSimpleName(); String type1 = ReflectionUtils.findActualClassParameter(clz, 1).getSimpleName(); if ("String".equals(type0)) type0 = "string"; if ("String".equals(type1)) type1 = "string"; return "Map<" + type0 + "," + type1 + ">"; } else { Log.warn("Schema Map field isn't a subclass of IcedHashMap, so its metadata won't have type parameters: " + schema.getClass().getSimpleName() + "." + field_name); return "Map"; } } if (List.class.isAssignableFrom(clz)) return "List"; // H2O-specific types: // TODO: NOTE, this is a mix of Schema types and Iced types; that's not right. . . // Should ONLY have schema types. // Also, this mapping could/should be moved to Schema. if (water.Key.class.isAssignableFrom(clz)) { Log.warn("Raw Key (not KeySchema) in Schema: " + schema.getClass() + " field: " + field_name); return "Key"; } if (KeyV3.class.isAssignableFrom(clz)) { return "Key<" + KeyV3.getKeyedClassType((Class<? 
extends KeyV3>) clz) + ">"; } if (Schema.class.isAssignableFrom(clz)) { return Schema.getImplClass((Class<Schema>)clz).getSimpleName(); // same as Schema.schema_type } if (Iced.class.isAssignableFrom(clz)) { if (clz == SchemaV3.Meta.class) { // Special case where we allow an Iced in a Schema so we don't get infinite meta-regress: return "Schema.Meta"; } else if (clz == JSONValue.class) { return "Polymorphic"; } else { // Special cases: polymorphic metadata fields that can contain scalars, Schemas (any Iced, actually), or arrays of these: if (schema instanceof ModelParameterSchemaV3 && ("default_value".equals(field_name) || "actual_value".equals(field_name) || "input_value".equals(field_name))) return "Polymorphic"; if ((schema instanceof FieldMetadataV3) && "value".equals(field_name)) return "Polymorphic"; if ((schema instanceof TwoDimTableV3) && "data".equals(field_name)) // IcedWrapper return "Polymorphic"; Log.warn("WARNING: found non-Schema Iced field: " + clz.toString() + " in Schema: " + schema.getClass() + " field: " + field_name); return clz.getSimpleName(); } } String msg = "Don't know how to generate a client-friendly type name for class: " + clz.toString() + " in Schema: " + schema.getClass() + " field: " + field_name; Log.warn(msg); throw H2O.fail(msg); } public static Iced consValue(Object o) { if (null == o) return null; Class clz = o.getClass(); if (water.Iced.class.isAssignableFrom(clz)) return (Iced)o; if (clz.isArray()) { return new IcedWrapper(o); } /* if (water.Keyed.class.isAssignableFrom(o.getClass())) { Keyed k = (Keyed)o; return k._key.toString(); } if (! o.getClass().isArray()) { if (Schema.class.isAssignableFrom(o.getClass())) { return new String(((Schema)o).writeJSON(new AutoBuffer()).buf()); } else { return o.toString(); } } StringBuilder sb = new StringBuilder(); sb.append("["); for (int i = 0; i < Array.getLength(o); i++) { if (i > 0) sb.append(", "); sb.append(consValue(Array.get(o, i))); } sb.append("]"); return sb.toString(); */ // Primitive type if (clz.isPrimitive()) return new IcedWrapper(o); if (o instanceof Number) return new IcedWrapper(o); if (o instanceof Boolean) return new IcedWrapper(o); if (o instanceof String) return new IcedWrapper(o); if (o instanceof Enum) return new IcedWrapper(o); throw new H2OIllegalArgumentException("o", "consValue", o); } } // FieldMetadata public SchemaMetadata() { fields = new ArrayList<>(); } public SchemaMetadata(Schema schema) { version = schema.getSchemaVersion(); name = schema.getSchemaName(); type = schema.getSchemaType(); superclass = schema.getClass().getSuperclass().getSimpleName(); // Get metadata of all annotated fields fields = getFieldMetadata(schema); // Also generates markdown markdown = schema.markdown(this, true, true).toString(); } /** * Returns metadata of all annotated fields. 
* * @param schema a schema instance * @return list of field metadata */ public static List<FieldMetadata> getFieldMetadata(Schema schema) { List<Field> superclassFields = Arrays.asList(Weaver.getWovenFields(schema.getClass().getSuperclass())); List<FieldMetadata> fields = new ArrayList<>(); // Fields up to but not including Schema for (Field field : Weaver.getWovenFields(schema.getClass())) { FieldMetadata fmd = FieldMetadata.createIfApiAnnotation(schema, field, superclassFields); if (null != fmd) // skip transient or other non-annotated fields fields.add(fmd); // NOTE: we include non-JSON fields here; remove them later if we don't want them } return fields; } public static SchemaMetadata createSchemaMetadata(String classname) throws IllegalArgumentException { try { Class<? extends Schema> clz = (Class<? extends Schema>) Class.forName(classname); Schema s = clz.newInstance(); s.fillFromImpl(s.createImpl()); // get defaults return new SchemaMetadata(s); } catch (Exception e) { String msg = "Caught exception fetching schema: " + classname + ": " + e; Log.warn(msg); throw new IllegalArgumentException(msg); } } public static String[] getValues(Class<? extends ValuesProvider> valuesProvider) { String[] values; try { ValuesProvider vp = valuesProvider.newInstance(); values = vp.values(); } catch (Throwable e) { values = null; } return values; } // Enum is a field of enum type or of String type with defined and fixed set of values! private static boolean isEnum(Class<?> type, API annotation) { return Enum.class.isAssignableFrom(type); } private static boolean isFakeEnum(Class<?> type, API annotation) { return (annotation != null && annotation.valuesProvider() != ValuesProvider.NULL && String.class.isAssignableFrom(type)); } private static String getEnumSchemaName(Class<?> type) { StringBuffer sb = new StringBuffer(type.getCanonicalName()); sb.delete(0, sb.indexOf(".")+1); sb.setCharAt(0, Character.toUpperCase(sb.charAt(0))); return sb.toString() .replaceAll("V\\d+\\.", "") // remove the version number in the middle (for internal classes) .replace(".", "") .replace("$", ""); } }
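// Usage sketch (illustrative; assumes TimelineV3 is a registered schema on the classpath):
//
//   SchemaMetadata meta = SchemaMetadata.createSchemaMetadata("water.api.schemas3.TimelineV3");
//   for (SchemaMetadata.FieldMetadata f : meta.fields)
//     System.out.println(f.name + " : " + f.type);
//
// createSchemaMetadata() instantiates the schema, fills it from its impl's defaults, and then
// reflects over every @API-annotated field to build the FieldMetadata list.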
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/SchemaServer.java
package water.api; import water.H2O; import water.Iced; import water.exceptions.H2OIllegalArgumentException; import water.exceptions.H2ONotFoundArgumentException; import water.util.Log; import water.util.Pair; import water.util.ReflectionUtils; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; import java.util.Collections; import java.util.HashMap; import java.util.Map; /** * Central registry of Schema classes: maps schema simple names to their classes and to their backing Iced * impl classes, and supports versioned lookups from Iced types back to schema classes. */ public class SchemaServer { private static final int HIGHEST_SUPPORTED_VERSION = 4; private static final int EXPERIMENTAL_VERSION = 99; private static final int STABLE_VERSION = 3; private static int LATEST_VERSION = -1; private static boolean schemas_registered = false; // Registry which maps a simple schema name to its class. NOTE: the simple names form a single namespace. // E.g., "DeepLearningParametersV2" -> hex.schemas.DeepLearningV2.DeepLearningParametersV2 private static Map<String, Class<? extends Schema>> schemas = new HashMap<>(); // Registry which maps a Schema simpleName to its Iced Class. // E.g., "DeepLearningParametersV2" -> hex.deeplearning.DeepLearning.DeepLearningParameters private static Map<String, Class<? extends Iced>> schema_to_iced = new HashMap<>(); // Registry which maps an Iced simpleName (type) and schema_version to its Schema Class. // E.g., (DeepLearningParameters, 2) -> "DeepLearningParametersV2" // // Note that iced_to_schema gets lazily filled if a higher version is asked for than is // available (e.g., if the highest version of Frame is FrameV2 and the client asks for // the schema for (Frame, 17) then FrameV2 will be returned, and all the mappings between // 17 and 3 will get added to the Map). private static Map<Pair<String, Integer>, Class<? extends Schema>> iced_to_schema = new HashMap<>(); /** * Get the highest schema version number that we've encountered during schema registration. */ public static int getLatestVersion() { return LATEST_VERSION; } /** * Get the highest schema version that we support. This bounds the search for a schema if we haven't yet * registered all schemas and don't yet know the latest_version. */ public static int getHighestSupportedVersion() { return HIGHEST_SUPPORTED_VERSION; } /** * Combines getLatestVersion() and getHighestSupportedVersion(). */ public static int getLatestOrHighestSupportedVersion() { return LATEST_VERSION == -1? HIGHEST_SUPPORTED_VERSION : LATEST_VERSION; } /** * Get the experimental schema version, which indicates that a schema is not guaranteed to be stable between H2O * releases. */ public static int getExperimentalVersion() { return EXPERIMENTAL_VERSION; } public static int getStableVersion() { return STABLE_VERSION; } public static void checkIfRegistered(Schema schema) { if (schemas_registered && !schema_to_iced.containsKey(schema.getSchemaName())) throw H2O.fail("Schema " + schema.getSchemaName() + " was instantiated before it was registered...\n" + "Did you forget to add an entry into your META-INF/services/water.api.Schema file?"); } /** * Register the given schema class. * @throws water.exceptions.H2OFailException if there is a name collision, if the type parameters are bad, or if * the version is bad */ public static void register(Schema schema) { Class clz = schema.getClass(); synchronized(clz) { String clzname = clz.getSimpleName(); // Was there a race to get here? If so, return. Class<?
extends Schema> existing = schemas.get(clzname); if (existing != null) { if (clz != existing) throw H2O.fail("Two schema classes have the same simpleName: " + clz + " and " + existing + "."); return; } // Check that the Schema has the correct type parameters: if (clz.getGenericSuperclass() instanceof ParameterizedType) { Type[] schema_type_parms = ((ParameterizedType) (clz.getGenericSuperclass())).getActualTypeArguments(); if (schema_type_parms.length < 2) throw H2O.fail("Found a Schema that does not pass at least two type parameters. Each Schema needs to be " + "parameterized on the backing class (if any, or Iced if not) and itself: " + clz); Class parm0 = ReflectionUtils.findActualClassParameter(clz, 0); Class parm1 = ReflectionUtils.findActualClassParameter(clz, 1); String clzstr = clzname + "<" + parm0.getSimpleName() + "," + parm1.getSimpleName() + ">"; if (!Iced.class.isAssignableFrom(parm0)) throw H2O.fail("Schema " + clzstr + " has bad type parameters: first arg should be a subclass of Iced"); if (Schema.class.isAssignableFrom(parm0)) throw H2O.fail("Schema " + clzstr + " has bad type parameters: first arg cannot be a Schema"); if (!Schema.class.isAssignableFrom(parm1)) throw H2O.fail("Schema " + clzstr + " has bad type parameters: second arg should be a subclass of Schema"); if (!parm1.getSimpleName().equals(clzname)) throw H2O.fail("Schema " + clzstr + " has bad type parameters: second arg should refer to the schema itself"); } else { throw H2O.fail("Found a Schema that does not have a parameterized superclass. Each Schema needs to be " + "parameterized on the backing class (if any, or Iced if not) and itself: " + clz); } // Check the version, and bump the LATEST_VERSION // NOTE: we now allow non-versioned schemas, for example base classes like ModelMetricsBaseV3, so that we can // fetch the metadata for them. int version = Schema.extractVersionFromSchemaName(clzname); if (version > HIGHEST_SUPPORTED_VERSION && version != EXPERIMENTAL_VERSION) throw H2O.fail("Found a schema with a version higher than the highest supported version; you probably want " + "to bump the highest supported version: " + clz); if (version > LATEST_VERSION && version != EXPERIMENTAL_VERSION) synchronized (Schema.class) { if (version > LATEST_VERSION) LATEST_VERSION = version; } Class<? extends Iced> impl_class = ReflectionUtils.findActualClassParameter(clz, 0); Log.debug(String.format("Registering schema: %-40s (v = %2d, implemented by %s)", clz.getCanonicalName(), version, impl_class.getCanonicalName())); schemas.put(clzname, clz); schema_to_iced.put(clzname, impl_class); // Check that it is possible to create a schema object try { // Validate the fields: SchemaMetadata meta = new SchemaMetadata(schema); for (SchemaMetadata.FieldMetadata field_meta : meta.fields) { String name = field_meta.name; // TODO: make a new @API field "ignore_naming_rules", and set to true for the names hardcoded below: // After that all these checks could be eliminated...
if (name.equals("__meta") || name.equals("__http_status") || name.equals("_exclude_fields") || name.equals("__schema") || name.equals("_fields")) continue; if (name.equals("Gini")) continue; // proper name if (name.equals("pr_auc")) continue; if (name.endsWith("AUC")) continue; // trainAUC, validAUC // TODO: remove after we move these into a TwoDimTable: if ("f0point5".equals(name) || "f0point5_for_criteria".equals(name) || "f1_for_criteria".equals(name) || "f2_for_criteria".equals(name)) continue; if (name.startsWith("_")) Log.warn("Found schema field which violates the naming convention; name starts with underscore: " + meta.name + "." + name); // allow AUC but not residualDeviance // Note: class Word2VecParametersV3 is left as an exception, since it's already a published schema, and it // is not possible to alter its field names. However no other exceptions should be created! if (!name.equals(name.toLowerCase()) && !name.equals(name.toUpperCase())) if (!clzname.equals("Word2VecParametersV3")) Log.warn("Found schema field which violates the naming convention; name has mixed lowercase and " + "uppercase characters: " + meta.name + "." + name); } } catch (Exception e) { throw H2O.fail("Failed to instantiate schema class " + clzname + " because: " + e); } if (impl_class != Iced.class) { Pair<String, Integer> versioned = new Pair<>(impl_class.getSimpleName(), version); // Check for conflicts // The check is invalid: there could be multiple schemas mapping to the same Iced object with the same version. // This is why all calls that depend on this mapping should ideally be eliminated (they cannot by // type-checked anyways, so they greatly increase bugginness of the code...) // if (iced_to_schema.containsKey(versioned)) // throw H2O.fail("Found two schemas mapping to the same Iced class with the same version: " + // iced_to_schema.get(versioned) + " and " + clz + " both map to " + // "version: " + version + " of Iced class: " + impl_class); iced_to_schema.put(versioned, clz); } } } /** * Find all schemas using reflection and register them. */ synchronized static public void registerAllSchemasIfNecessary(Schema ... schemas) { if (schemas_registered) return; long startTime = System.currentTimeMillis(); for (Schema schema : schemas) { register(schema); } Log.info("Registered: " + schemas().size() + " schemas in " + (System.currentTimeMillis() - startTime) + "ms"); schemas_registered = true; } /** * Return an immutable Map of all the schemas: schema_name -> schema Class. */ public static Map<String, Class<? extends Schema>> schemas() { return Collections.unmodifiableMap(new HashMap<>(schemas)); } /** * Lookup schema by name. * @throws H2ONotFoundArgumentException if an appropriate schema is not found */ public static Class<? extends Schema> getSchema(String name) { Class<? extends Schema> clz = schemas.get(name); if (clz == null) throw new H2ONotFoundArgumentException("Failed to find schema for schema_name: " + name, "Failed to find schema for schema_name: " + name + "\n" + "Did you forget to add an entry into META-INF/services/water.api.Schema?"); return clz; } /** * For a given version and type (Iced class simpleName) return the appropriate Schema * class, if any. * <p> * If a higher version is asked for than is available (e.g., if the highest version of * Frame is FrameV2 and the client asks for the schema for (Frame, 17) then FrameV2 will * be returned. This compatibility lookup is cached. * @deprecated */ public static Class<? 
extends Schema> schemaClass(int version, String type) { if (version < 1) return null; Class<? extends Schema> clz = iced_to_schema.get(new Pair<>(type, version)); if (clz != null) return clz; // found! clz = schemaClass(version==EXPERIMENTAL_VERSION? HIGHEST_SUPPORTED_VERSION : version-1, type); if (clz != null) iced_to_schema.put(new Pair<>(type, version), clz); // found a lower-numbered schema: cache return clz; } /** * For a given version and Iced object return an appropriate Schema instance, if any. * @see #schema(int, java.lang.String) * @deprecated */ public static Schema schema(int version, Iced impl) { if (version == -1) version = getLatestVersion(); return schema(version, impl.getClass().getSimpleName()); } public static Schema schema(Iced impl) { return schema(STABLE_VERSION, impl); } /** * For a given version and Iced class return an appropriate Schema instance, if any. * @param version Version of the schema to create, or pass -1 to use the latest version. * @param impl_class Create a schema corresponding to this implementation class. * @throws H2OIllegalArgumentException if Class.newInstance() throws * @see #schema(int, java.lang.String) * @deprecated */ public static Schema schema(int version, Class<? extends Iced> impl_class) { if (version == -1) version = getLatestVersion(); return schema(version, impl_class.getSimpleName()); } /** * For a given version and type (Iced class simpleName) return an appropriate new Schema object, if any. * <p> * If a higher version is asked for than is available (e.g., if the highest version of * Frame is FrameV2 and the client asks for the schema for (Frame, 17) then an instance * of FrameV2 will be returned). This compatibility lookup is cached. * @throws H2ONotFoundArgumentException if an appropriate schema is not found * @deprecated */ private static Schema schema(int version, String type) { Class<? extends Schema> clz = schemaClass(version, type); if (clz == null) clz = schemaClass(EXPERIMENTAL_VERSION, type); if (clz == null) throw new H2ONotFoundArgumentException("Failed to find schema for version: " + version + " and type: " + type, "Failed to find schema for version: " + version + " and type: " + type + "\n" + "Did you forget to add an entry into META-INF/services/water.api.Schema?"); return Schema.newInstance(clz); } }
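// Usage sketch (illustrative):
//
//   SchemaServer.registerAllSchemasIfNecessary(new TimelineV3() /*, ...all other schemas */);
//   Class<? extends Schema> clz = SchemaServer.getSchema("TimelineV3");  // by simple name
//   Schema s = Schema.newInstance(clz);
//
// The versioned lookups schema(int, ...) / schemaClass(int, String) are deprecated; prefer the
// name-based registry above.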
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/SegmentModelsBuilderHandler.java
package water.api; import hex.ModelBuilder; import water.api.schemas3.SegmentModelsParametersV3; import hex.segments.SegmentModels; import hex.segments.SegmentModelsBuilder; import hex.schemas.ModelBuilderSchema; import water.H2O; import water.Job; import water.api.schemas3.JobV3; import water.api.schemas3.ModelParametersSchemaV3; import java.util.Properties; public class SegmentModelsBuilderHandler<B extends ModelBuilder, S extends ModelBuilderSchema<B,S,P>, P extends ModelParametersSchemaV3> extends Handler { // Invoke the handler with parameters. Can throw any exception the called handler can throw. @Override public JobV3 handle(int version, Route route, Properties parms, String postBody) { if (! "segment_train".equals(route._handler_method.getName())) { throw new IllegalStateException("Only supports `segment_train` handler method"); } Properties modelParms = new Properties(); SegmentModelsBuilder.SegmentModelsParameters smParms = new SegmentModelsParametersV3() .fillFromParms(parms, modelParms, true) .fillImpl(new SegmentModelsBuilder.SegmentModelsParameters()); final String algoURLName = ModelBuilderHandlerUtils.parseAlgoURLName(route); final B builder = ModelBuilderHandlerUtils.makeBuilder(version, algoURLName, modelParms); Job<SegmentModels> job = new SegmentModelsBuilder(smParms, builder._parms).buildSegmentModels(); JobV3 schema = new JobV3(); schema.fillFromImpl(job); return schema; } @SuppressWarnings("unused") // formally required but never actually called because handle() is overridden public S segment_train(int version, S schema) { throw H2O.fail(); } }
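// Request flow sketch (parameter names illustrative): a single POST to a segment_train endpoint
// carries both segment-level and model-level parameters in one bag, e.g.
//
//   segment_columns=["region"]&ntrees=50&training_frame=my_frame
//
// fillFromParms(parms, modelParms, true) consumes the SegmentModelsParametersV3 fields and
// collects the leftovers (ntrees, training_frame, ...) into modelParms, which are then used to
// configure the per-segment ModelBuilder.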
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/ShutdownHandler.java
package water.api; import water.H2O; import water.Iced; import water.api.schemas3.ShutdownV3; public class ShutdownHandler extends Handler { public static final class Shutdown extends Iced { } @SuppressWarnings("unused") public ShutdownV3 shutdown (int version, ShutdownV3 s) { Shutdown t = s.createAndFillImpl(); H2O.requestShutdown(); return s.fillFromImpl(t); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/SpecifiesHttpResponseCode.java
package water.api; /** * Interface which allows a Schema, if returned by a handler method, to specify the HTTP response code. */ public interface SpecifiesHttpResponseCode { public int httpStatus(); }
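// Implementation sketch (hypothetical schema, not part of this codebase):
//
//   public class CreatedV3 extends water.api.schemas3.SchemaV3<water.Iced, CreatedV3>
//       implements SpecifiesHttpResponseCode {
//     @Override public int httpStatus() { return 201; } // HTTP 201 Created
//   }
//
// When a handler returns such a schema, the framework presumably uses httpStatus() in place of
// the default response code.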
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/SplitFrameHandler.java
package water.api; import hex.SplitFrame; import water.Job; import water.api.schemas3.KeyV3; import water.api.schemas3.SplitFrameV3; public class SplitFrameHandler extends Handler { public SplitFrameV3 run(int version, SplitFrameV3 sf) { SplitFrame splitFrame = sf.createAndFillImpl(); Job job = splitFrame.exec(); SplitFrameV3 spv3 = new SplitFrameV3(splitFrame); spv3.key = new KeyV3.JobKeyV3(job._key); return spv3; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/SteamMetricsHandler.java
package water.api; import water.api.schemas3.SteamMetricsV3; import water.H2O; public class SteamMetricsHandler extends Handler { @SuppressWarnings("unused") // called through reflection by RequestServer public SteamMetricsV3 fetch(int version, SteamMetricsV3 s) { s.version = 0; // Fields filled in for version 0. s.idle_millis = H2O.getIdleTimeMillis(); return s; } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/StreamWriteOption.java
package water.api; public interface StreamWriteOption { }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/StreamWriter.java
package water.api; import java.io.OutputStream; /** * Stream provider. */ public interface StreamWriter { /** * The implementation writes its content to the given output stream. * @param os output stream provided by the framework */ void writeTo(OutputStream os, StreamWriteOption... options); }
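// Implementation sketch (illustrative): StreamWriter has a single abstract method, so a lambda
// suffices, e.g.
//
//   StreamWriter hello = (os, options) -> {
//     try { os.write("hello".getBytes(java.nio.charset.StandardCharsets.UTF_8)); }
//     catch (java.io.IOException e) { throw new RuntimeException(e); }
//   };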
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/StreamingSchema.java
package water.api; import water.Iced; import water.api.schemas3.SchemaV3; /** * Schema wrapping a {@link StreamWriter}, used to stream raw content to the client. */ public class StreamingSchema extends SchemaV3<Iced, StreamingSchema> { private final transient StreamWriter streamWriter; private final transient String filename; public StreamingSchema() { this(null); } public StreamingSchema(StreamWriter streamWriter) { this(streamWriter, null); } public StreamingSchema(StreamWriter streamWriter, String filename) { this.streamWriter = streamWriter; this.filename = filename; } /** * Returns the stream writer that produces the streamed content. * @return writer writing to a given output stream. */ public StreamWriter getStreamWriter() { return this.streamWriter; } public String getFilename() { return filename; } }
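// Usage sketch (illustrative): a handler can stream raw bytes back to the client by returning
//
//   new StreamingSchema(writer, "export.csv")   // "writer" as sketched in StreamWriter above
//
// The framework then invokes getStreamWriter().writeTo(...) on the response stream, and
// getFilename(), when non-null, presumably supplies the suggested download file name.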
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/TabulateHandler.java
package water.api; import water.api.schemas3.TabulateV3; import water.util.Tabulate; public class TabulateHandler extends Handler { public TabulateV3 run(int version, TabulateV3 spv3) { Tabulate sp = spv3.createAndFillImpl(); return new TabulateV3(sp.execImpl()); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/TimelineHandler.java
package water.api; import water.H2O; import water.Iced; import water.TimeLine; import water.api.schemas3.TimelineV3; import water.init.TimelineSnapshot; /** UDP Timeline * Created by tomasnykodym on 6/5/14. */ public class TimelineHandler extends Handler { public static final class Timeline extends Iced { public TimelineSnapshot snapshot; } // TODO: should return a base class for TimelineVx @SuppressWarnings("unused") // called through reflection by RequestServer public TimelineV3 fetch(int version, TimelineV3 s) { Timeline t = s.createAndFillImpl(); t.snapshot = new TimelineSnapshot(H2O.CLOUD,TimeLine.system_snapshot()); return s.fillFromImpl(t); } }
0
java-sources/ai/h2o/h2o-core/3.46.0.7/water
java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/TypeaheadHandler.java
package water.api; import water.H2O; import water.api.schemas3.TypeaheadV3; import java.util.List; class TypeaheadHandler extends Handler { @SuppressWarnings("unused") // called through reflection by RequestServer public TypeaheadV3 files(int version, TypeaheadV3 t) { List<String> matches = H2O.getPM().calcTypeaheadMatches(t.src, t.limit); t.matches = matches.toArray(new String[0]); return t; } }
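// Request sketch (route shown as typically registered by RequestServer; illustrative):
//
//   GET /3/Typeahead/files?src=/data/tr&limit=10
//
// fillFromParms() populates t.src and t.limit from the query string, and files() returns up to
// `limit` filesystem paths matching the src prefix.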