| index (int64) | repo_id (string) | file_path (string) | content (string) |
|---|---|---|---|
| 0 | java-sources/ai/h2o/h2o-classic/2.8 | java-sources/ai/h2o/h2o-classic/2.8/water/ExternalInterface.java |
package water;
import dontweave.gson.JsonObject;
import java.io.InputStream;
/**
* External interface for H2O.
*
* All functions are delegated through an InternalInterface object
* which will use the H2O.Boot class loader.
*/
public interface ExternalInterface {
public Object makeKey ( String key_name );
public Object makeValue( Object key, byte[] bits );
public void put ( Object key, Object val );
public Object getValue ( Object key );
public byte[] getBytes ( Object val );
public Object ingestRFModelFromR(Object key, InputStream is);
public float[] scoreKey ( Object modelKey, String [] colNames, String [][] domains, double[] row );
public float[] scoreModel( Object model , String [] colNames, String [][] domains, double[] row );
public JsonObject cloudStatus( );
}
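// Usage sketch (hypothetical caller; a concrete implementation is obtained through the
// H2O.Boot class loader, per the class comment above):
//   ExternalInterface h2o = ...;              // acquired via reflection from Boot (assumption)
//   Object key = h2o.makeKey("my-key");
//   Object val = h2o.makeValue(key, bytes);   // wrap raw bytes as a Value
//   h2o.put(key, val);
//   byte[] bits = h2o.getBytes(h2o.getValue(key));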
|
| 0 | java-sources/ai/h2o/h2o-classic/2.8 | java-sources/ai/h2o/h2o-classic/2.8/water/FJPacket.java |
package water;
import water.H2O.H2OCountedCompleter;
/**
* A class to handle the work of a received UDP packet. Typically we'll do a
* small amount of work based on the packet contents (such as returning a Value
* requested by another Node, or recording a heartbeat).
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
class FJPacket extends H2OCountedCompleter {
final AutoBuffer _ab;
final int _ctrl; // 1st byte of packet
FJPacket( AutoBuffer ab, int ctrl ) { _ab = ab; _ctrl = ctrl; }
@Override public void compute2() {
_ab.getPort(); // skip past the port
if( _ctrl <= UDP.udp.ack.ordinal() )
UDP.udp.UDPS[_ctrl]._udp.call(_ab).close();
else
RPC.remote_exec(_ab);
tryComplete();
}
// Run at max priority until we decrypt the packet enough to get priorities out
static private byte[] UDP_PRIORITIES =
new byte[]{-1,
H2O.MAX_PRIORITY, // Heartbeat
H2O.MAX_PRIORITY, // Rebooted
H2O.MAX_PRIORITY, // Timeline
H2O.ACK_ACK_PRIORITY,// Ack Ack
H2O.FETCH_ACK_PRIORITY, // Class/ID mapping ACK
H2O.ACK_PRIORITY, // Ack
H2O.DESERIAL_PRIORITY}; // Exec is very high, so we deserialize early
@Override public byte priority() { return UDP_PRIORITIES[_ctrl]; }
}
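// Hedged sketch (not verified against the receiver code): the UDP receiving thread decodes the
// control byte and hands each datagram off roughly like
//   H2O.submitTask(new FJPacket(ab, ctrl));
// so that compute2() runs on the F/J pool matching priority().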
|
| 0 | java-sources/ai/h2o/h2o-classic/2.8 | java-sources/ai/h2o/h2o-classic/2.8/water/FetchClazz.java |
package water;
// Helper to fetch classForName strings from IDs from the leader
public class FetchClazz extends DTask<FetchClazz> {
final int _id;
String _clazz;
private FetchClazz(int id) { _id=id; }
public static String fetchClazz(int id) {
String clazz = RPC.call(H2O.CLOUD.leader(), new FetchClazz(id)).get()._clazz;
assert clazz != null : "No class matching id "+id;
return clazz;
}
@Override public void compute2() { _clazz = TypeMap.className(_id); tryComplete(); }
@Override public byte priority() { return H2O.FETCH_ACK_PRIORITY; }
}
|
| 0 | java-sources/ai/h2o/h2o-classic/2.8 | java-sources/ai/h2o/h2o-classic/2.8/water/FetchId.java |
package water;
// Helper to fetch class IDs from class Strings from the leader
public class FetchId extends DTask<FetchId> {
final String _clazz;
int _id;
private FetchId(String s) { _clazz=s; }
static public int fetchId(String s) { return RPC.call(H2O.CLOUD.leader(), new FetchId(s)).get()._id; }
@Override public void compute2() { _id = TypeMap.onIce(_clazz); tryComplete(); }
@Override public byte priority() { return H2O.FETCH_ACK_PRIORITY; }
}
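// Round-trip sketch (both helpers block on an RPC to the cloud leader; example class name only):
//   int id      = FetchId.fetchId("water.fvec.Frame");   // class name -> small int id
//   String name = FetchClazz.fetchClazz(id);             // id -> class name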
|
| 0 | java-sources/ai/h2o/h2o-classic/2.8 | java-sources/ai/h2o/h2o-classic/2.8/water/Freezable.java |
package water;
/**
 * Auto-serialization interface. Used by the auto-serializer to inject implementations of these methods.
*/
public interface Freezable {
/** Serialize the 'this' object into the AutoBuffer, returning the AutoBuffer. */
public AutoBuffer write(AutoBuffer bb);
/** Deserialize from the AutoBuffer into a pre-existing 'this' object. */
public <T extends Freezable> T read(AutoBuffer bb);
/** Make a new instance of class 'this' with the empty constructor */
public <T extends Freezable> T newInstance();
/** Return the cluster-wide-unique 2-byte type ID for instances of this class */
public int frozenType();
/** Serialize the 'this' object into the AutoBuffer, returning the AutoBuffer.
Output is legal JSON. */
public AutoBuffer writeJSONFields(AutoBuffer bb);
/** Reflective list of fields */
public water.api.DocGen.FieldDoc[] toDocField();
}
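// Note (hedged): user classes rarely implement Freezable directly; they typically extend Iced,
// and the method bodies declared above are injected by the auto-serializer (class weaving) at
// load time, e.g.
//   class MyPojo extends Iced { int _x; double[] _ys; }   // hypothetical example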
|
| 0 | java-sources/ai/h2o/h2o-classic/2.8 | java-sources/ai/h2o/h2o-classic/2.8/water/Func.java |
/**
*
*/
package water;
import water.api.DocGen;
import water.fvec.Frame;
import water.fvec.Vec;
import java.util.HashSet;
/**
* Short-time computation which is not a job.
*/
public abstract class Func extends Request2 {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
/** A set containing temporary vectors which are <strong>automatically</strong> deleted when the job is done.
* Deletion happens via the {@link #cleanup()} call. */
private transient HashSet<Key> _gVecTrash = new HashSet<Key>();
/** Local trash which can be deleted by user call.
* @see #emptyLTrash() */
private transient HashSet<Key> _lVecTrash = new HashSet<Key>();
/** Invoke this function in blocking way. */
public void invoke() {
init();
exec();
}
/** The function implementation.
* <p>It is a blocking call which performs cleanup after
* the function finishes.
* </p><p>
* The method should not handle exceptions it cannot deal with; it should let
* them propagate to the upper levels.
* </p>
*/
protected final void exec() {
try {
execImpl();
} finally {
cleanup(); // Perform job cleanup
}
}
/** The real implementation which should be provided by ancestors. */
protected void execImpl() { throw new RuntimeException("Job does not support exec call! Please implement execImpl method!"); };
/**
* Invoked before the run. This is the place to check that arguments are valid, or to throw
* an IllegalArgumentException. It is invoked from both the Web and Java APIs.
*
* @throws IllegalArgumentException if initialization fails to ensure
* a correct job runtime environment.
*/
protected void init() throws IllegalArgumentException { }
@Override protected Response serve() {
invoke();
return Response.done(this);
}
/** Clean-up code which is executed after each {@link Job#exec()} call in any case (normal/exceptional). */
protected void cleanup() {
// Clean-up global list of temporary vectors
Futures fs = new Futures();
cleanupTrash(_gVecTrash, fs);
cleanupTrash(_lVecTrash, fs);
fs.blockForPending();
}
/** User call which empties the local trash of vectors. */
protected final void emptyLTrash() {
if (_lVecTrash.isEmpty()) return;
Futures fs = new Futures();
cleanupTrash(_lVecTrash, fs);
fs.blockForPending();
}
/** Append all vectors from given frame to a global clean up list.
* If the Frame itself is in the K-V store, then trash that too.
* @see #cleanup()
* @see #_gVecTrash */
protected final void gtrash(Frame fr) { gtrash(fr.vecs()); if (fr._key != null && UKV.get(fr._key) != null) _gVecTrash.add(fr._key); }
/** Append given vector to clean up list.
* @see #cleanup()*/
protected final void gtrash(Vec ...vec) { appendToTrash(_gVecTrash, vec); }
/** Put given frame vectors into local trash which can be emptied by a user calling the {@link #emptyLTrash()} method.
* @see #emptyLTrash() */
protected final void ltrash(Frame fr) { ltrash(fr.vecs()); if (fr._key != null && UKV.get(fr._key) != null) _lVecTrash.add(fr._key); }
/** Put given vectors into the local trash.
* @see #emptyLTrash() */
protected final void ltrash(Vec ...vec) { appendToTrash(_lVecTrash, vec); }
/** Put given vectors into a given trash. */
private void appendToTrash(HashSet<Key> t, Vec[] vec) {
for (Vec v : vec) t.add(v._key);
}
/** Delete all vectors in given trash. */
private void cleanupTrash(HashSet<Key> trash, Futures fs) {
for (Key k : trash) UKV.remove(k, fs);
}
}
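// Sketch of a typical subclass (hypothetical names), showing the trash helpers above:
//   public class MyFunc extends Func {
//     @Override protected void execImpl() {
//       Frame tmp = makeSomeTempFrame();   // hypothetical helper producing a temporary Frame
//       ltrash(tmp);                       // register for user-driven cleanup
//       useIt(tmp);                        // hypothetical work on the temporary data
//       emptyLTrash();                     // free it as soon as we are done
//     }                                    // gtrash'd keys are removed automatically in cleanup()
//   }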
|
| 0 | java-sources/ai/h2o/h2o-classic/2.8 | java-sources/ai/h2o/h2o-classic/2.8/water/Futures.java |
package water;
import java.util.Arrays;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import water.util.Log;
/**
* A collection of Futures. We can add more, or block on the whole collection.
* Undefined if you try to add Futures while blocking.
* <p><p>
* Used as a service to sub-tasks, collect pending-but-not-yet-done future
* tasks, that need to complete prior to *this* task completing... or if the
* caller of this task is knowledgeable, pass these pending tasks along to him
* to block on before he completes. */
public class Futures {
// implemented as an exposed array mostly because ArrayList doesn't offer
// synchronization and constant-time removal.
Future[] _pending = new Future[1];
int _pending_cnt;
/** Some Future task which needs to complete before this task completes */
synchronized public Futures add( Future f ) {
if( f == null ) return this;
if( f.isDone() ) return this;
// NPE here if this Futures has already been added to some other Futures
// list, and should be added to again.
if( _pending_cnt == _pending.length ) {
cleanCompleted();
if( _pending_cnt == _pending.length )
_pending = Arrays.copyOf(_pending,_pending_cnt<<1);
}
_pending[_pending_cnt++] = f;
return this;
}
/** Merge pending-task lists as part of doing a 'reduce' step */
public void add( Futures fs ) {
if( fs == null ) return;
assert fs != this; // No recursive death, please
for( int i=0; i<fs._pending_cnt; i++ )
add(fs._pending[i]); // NPE here if using a dead Future
fs._pending = null; // You are dead, should never be inserted into again
}
/** Clean out from the list any pending-tasks which are already done. Note
* that this drops the algorithm from O(n) to O(1) in practice, since mostly
* things clean out as fast as new ones are added and the list never gets
* very large. */
synchronized private void cleanCompleted() {
for( int i=0; i<_pending_cnt; i++ )
if( _pending[i].isDone() ) // Done?
// Do cheap array compression to remove from list
_pending[i--] = _pending[--_pending_cnt];
}
/** Block until all pending futures have completed */
public final void blockForPending() {
try {
// Block until the last Future finishes.
while( true ) {
Future f = null;
synchronized(this) {
if( _pending_cnt == 0 ) return;
f = _pending[--_pending_cnt];
}
f.get();
}
} catch( InterruptedException e ) {
throw Log.errRTExcept(e);
} catch( ExecutionException e ) {
throw Log.errRTExcept(e);
} catch(Throwable t){
throw new RuntimeException(t);
}
}
}
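// Usage sketch (hypothetical names):
//   Futures fs = new Futures();
//   fs.add(someFuture);        // collect pending work started by a sub-task
//   fs.add(childFutures);      // or merge another task's pending list into ours
//   fs.blockForPending();      // block until everything has completed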
|
| 0 | java-sources/ai/h2o/h2o-classic/2.8 | java-sources/ai/h2o/h2o-classic/2.8/water/H2O.java |
package water;
import java.io.*;
import java.net.*;
import java.nio.ByteBuffer;
import java.nio.channels.DatagramChannel;
import java.util.*;
import jsr166y.*;
import water.Job.JobCancelledException;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.nbhm.NonBlockingHashMap;
import water.persist.*;
import water.util.*;
import water.util.Log.Tag.Sys;
import water.license.LicenseManager;
/**
* Start point for creating or joining an <code>H2O</code> Cloud.
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public final class H2O {
public static volatile AbstractEmbeddedH2OConfig embeddedH2OConfig;
public static volatile ApiIpPortWatchdogThread apiIpPortWatchdog;
public static volatile LicenseManager licenseManager;
public static String VERSION = "(unknown)";
public static long START_TIME_MILLIS = -1;
// User name for this Cloud (either the username or the argument for the option -name)
public static String NAME;
// The default port for finding a Cloud
public static int DEFAULT_PORT = 54321;
public static int UDP_PORT; // Fast/small UDP transfers
public static int API_PORT; // RequestServer and the new API HTTP port
// Whether to use single precision as the upper limit for storing floating point numbers
public static boolean SINGLE_PRECISION = false;
// Max. number of factor levels per column (before flipping all to NAs)
public static int DATA_MAX_FACTOR_LEVELS = 65000;
// The multicast discovery port
static MulticastSocket CLOUD_MULTICAST_SOCKET;
static NetworkInterface CLOUD_MULTICAST_IF;
static InetAddress CLOUD_MULTICAST_GROUP;
static int CLOUD_MULTICAST_PORT ;
// Default NIO Datagram channel
static DatagramChannel CLOUD_DGRAM;
// Myself, as a Node in the Cloud
public static H2ONode SELF = null;
public static InetAddress SELF_ADDRESS;
public static String DEFAULT_ICE_ROOT() {
String username = System.getProperty("user.name");
if (username == null) username = "";
String u2 = username.replaceAll(" ", "_");
if (u2.length() == 0) u2 = "unknown";
return "/tmp/h2o-" + u2;
}
public static URI ICE_ROOT;
// Initial arguments
public static String[] ARGS;
public static final PrintStream OUT = System.out;
public static final PrintStream ERR = System.err;
public static final int NUMCPUS = Runtime.getRuntime().availableProcessors();
// Convenience error
public static RuntimeException unimpl(String msg) { return new RuntimeException("unimplemented: " + msg); }
public static RuntimeException unimpl() { return new RuntimeException("unimplemented"); }
public static RuntimeException fail() { return new RuntimeException("do not call"); }
public static RuntimeException fail(String msg) { return new RuntimeException("FAILURE: " + msg); }
// Central /dev/null for ignored exceptions
public static void ignore(Throwable e) { ignore(e,"[h2o] Problem ignored: "); }
public static void ignore(Throwable e, String msg) { ignore(e, msg, true); }
public static void ignore(Throwable e, String msg, boolean printException) { Log.debug(Sys.WATER, msg + (printException? e.toString() : "")); }
// --------------------------------------------------------------------------
// Embedded configuration for a full H2O node to be implanted in another
// piece of software (e.g. Hadoop mapper task).
/**
* Register embedded H2O configuration object with H2O instance.
*/
public static void setEmbeddedH2OConfig(AbstractEmbeddedH2OConfig c) { embeddedH2OConfig = c; }
public static AbstractEmbeddedH2OConfig getEmbeddedH2OConfig() { return embeddedH2OConfig; }
/**
* Tell the embedding software that this H2O instance belongs to
* a cloud of a certain size.
* This may be nonblocking.
*
* @param ip IP address this H2O can be reached at.
* @param port Port this H2O can be reached at (for REST API and browser).
* @param size Number of H2O instances in the cloud.
*/
public static void notifyAboutCloudSize(InetAddress ip, int port, int size) {
if (embeddedH2OConfig == null) { return; }
embeddedH2OConfig.notifyAboutCloudSize(ip, port, size);
}
/**
* Notify embedding software instance H2O wants to exit.
* @param status H2O's requested process exit value.
*/
public static void exit(int status) {
// embeddedH2OConfig is only valid if this H2O node is living inside
// another software instance (e.g. a Hadoop mapper task).
//
// Expect embeddedH2OConfig to be null if H2O is run standalone.
// Cleanly shutdown internal H2O services.
if (apiIpPortWatchdog != null) {
apiIpPortWatchdog.shutdown();
}
if (embeddedH2OConfig == null) {
// Standalone H2O path.
System.exit (status);
}
// Embedded H2O path (e.g. inside Hadoop mapper task).
embeddedH2OConfig.exit(status);
// Should never reach here.
System.exit(222);
}
/** Shutdown itself by sending a shutdown UDP packet. */
public void shutdown() {
UDPRebooted.T.shutdown.send(H2O.SELF);
H2O.exit(0);
}
// --------------------------------------------------------------------------
// The Current Cloud. A list of all the Nodes in the Cloud. Changes if we
// decide to change Clouds via atomic Cloud update.
static public volatile H2O CLOUD = new H2O(new H2ONode[0],0,0);
// ---
// A dense array indexing all Cloud members. Fast reversal from "member#" to
// Node. No holes. Cloud size is _members.length.
public final H2ONode[] _memary;
public final int _hash;
//public boolean _healthy;
// A dense integer identifier that rolls over rarely. Rollover limits the
// number of simultaneous nested Clouds we are operating on in-parallel.
// Really capped to 1 byte, under the assumption we won't have 256 nested
// Clouds. Capped at 1 byte so it can be part of an atomically-assigned
// 'long' holding info specific to this Cloud.
public final char _idx; // no unsigned byte, so unsigned char instead
// Is nnn larger than old (accounting for wrap-around)? Gets confused if we
// start seeing a mix of more than 128 unique clouds at the same time. Used
// to tell the order in which Clouds appeared.
static public boolean larger( int nnn, int old ) {
assert (0 <= nnn && nnn <= 255);
assert (0 <= old && old <= 255);
return ((nnn-old)&0xFF) < 64;
}
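// Worked examples: larger(2, 250) == true  since ((2-250)&0xFF) ==   8 <  64 (wrapped ahead)
//                  larger(250, 2) == false since ((250-2)&0xFF) == 248 >= 64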
static public boolean isHealthy() {
H2O cloud = H2O.CLOUD;
for (H2ONode h2o : cloud._memary) {
if(!h2o._node_healthy) return false;
}
return true;
}
// Static list of acceptable Cloud members
public static HashSet<H2ONode> STATIC_H2OS = null;
// Reverse cloud index to a cloud; limit of 256 old clouds.
static private final H2O[] CLOUDS = new H2O[256];
// Enables debug features like more logging and multiple instances per JVM
public static final String DEBUG_ARG = "h2o.debug";
public static final boolean DEBUG = System.getProperty(DEBUG_ARG) != null;
// Construct a new H2O Cloud from the member list
public H2O( H2ONode[] h2os, int hash, int idx ) {
_memary = h2os; // Need to clone?
Arrays.sort(_memary); // ... sorted!
_hash = hash; // And record hash for cloud rollover
_idx = (char)(idx&0x0ff); // Roll-over at 256
}
// One-shot atomic setting of the next Cloud, with an empty K/V store.
// Called single-threaded from Paxos. Constructs the new H2O Cloud from a
// member list.
void set_next_Cloud( H2ONode[] h2os, int hash ) {
synchronized(this) {
int idx = _idx+1; // Unique 1-byte Cloud index
if( idx == 256 ) idx=1; // wrap, avoiding zero
CLOUDS[idx] = CLOUD = new H2O(h2os,hash,idx);
}
SELF._heartbeat._cloud_size=(char)CLOUD.size();
}
public final int size() { return _memary.length; }
public final H2ONode leader() { return _memary[0]; }
public static void waitForCloudSize(int x) {
waitForCloudSize(x, 10000);
}
public static void waitForCloudSize(int x, long ms) {
long start = System.currentTimeMillis();
while( System.currentTimeMillis() - start < ms ) {
if( CLOUD.size() >= x && Paxos._commonKnowledge )
break;
try { Thread.sleep(100); } catch( InterruptedException ie ) { }
}
if( H2O.CLOUD.size() < x )
throw new RuntimeException("Cloud size under " + x);
}
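// e.g. (sketch): block the caller until at least 3 nodes have joined, waiting up to 30s:
//   H2O.waitForCloudSize(3, 30000);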
// Find the node index for this H2ONode, or a negative number on a miss
public int nidx( H2ONode h2o ) { return Arrays.binarySearch(_memary,h2o); }
public boolean contains( H2ONode h2o ) { return nidx(h2o) >= 0; }
// BIG WARNING: do not change this toString() method since the cloud hash value depends on it
@Override public String toString() {
return Arrays.toString(_memary);
}
public String toPrettyString() {
if (_memary==null || _memary.length==0) return "[]";
int iMax = _memary.length - 1;
StringBuilder sb = new StringBuilder();
sb.append('[');
for (int i = 0; ; i++) {
sb.append(String.valueOf(_memary[i]));
if (_memary[i]!=null) sb.append(" (").append(PrettyPrint.msecs(_memary[i].runtime(),false)).append(')');
if (i==iMax) return sb.append(']').toString();
sb.append(", ");
}
}
/**
* Return a list of interfaces sorted by importance (most important first).
* This is the order we want to test for matches when selecting an interface.
*/
private static ArrayList<NetworkInterface> calcPrioritizedInterfaceList() {
ArrayList<NetworkInterface> networkInterfaceList = null;
try {
Enumeration<NetworkInterface> nis = NetworkInterface.getNetworkInterfaces();
ArrayList<NetworkInterface> tmpList = Collections.list(nis);
Comparator<NetworkInterface> c = new Comparator<NetworkInterface>() {
@Override public int compare(NetworkInterface lhs, NetworkInterface rhs) {
// Handle null inputs.
if ((lhs == null) && (rhs == null)) { return 0; }
if (lhs == null) { return 1; }
if (rhs == null) { return -1; }
// If the names are equal, then they are equal.
if (lhs.getName().equals (rhs.getName())) { return 0; }
// If both are bond drivers, choose a precedence.
if (lhs.getName().startsWith("bond") && (rhs.getName().startsWith("bond"))) {
Integer li = lhs.getName().length();
Integer ri = rhs.getName().length();
// Bond with most number of characters is always highest priority.
if (li.compareTo(ri) != 0) {
return li.compareTo(ri);
}
// Otherwise, sort lexicographically by name.
return lhs.getName().compareTo(rhs.getName());
}
// If only one is a bond driver, give that precedence.
if (lhs.getName().startsWith("bond")) { return -1; }
if (rhs.getName().startsWith("bond")) { return 1; }
// Everything that isn't a bond driver is equal.
return 0;
}
};
Collections.sort(tmpList, c);
networkInterfaceList = tmpList;
} catch( SocketException e ) { Log.err(e); }
return networkInterfaceList;
}
/**
* Return a list of internet addresses sorted by importance (most important first).
* This is the order we want to test for matches when selecting an internet address.
*/
public static ArrayList<java.net.InetAddress> calcPrioritizedInetAddressList() {
ArrayList<java.net.InetAddress> ips = new ArrayList<java.net.InetAddress>();
{
ArrayList<NetworkInterface> networkInterfaceList = calcPrioritizedInterfaceList();
for (int i = 0; i < networkInterfaceList.size(); i++) {
NetworkInterface ni = networkInterfaceList.get(i);
Enumeration<InetAddress> ias = ni.getInetAddresses();
while( ias.hasMoreElements() ) {
InetAddress ia;
ia = ias.nextElement();
ips.add(ia);
Log.info("Possible IP Address: " + ni.getName() + " (" + ni.getDisplayName() + "), " + ia.getHostAddress());
}
}
}
return ips;
}
public static InetAddress findInetAddressForSelf() throws Error {
if(SELF_ADDRESS == null) {
if ((OPT_ARGS.ip != null) && (OPT_ARGS.network != null)) {
Log.err("ip and network options must not be used together");
H2O.exit(-1);
}
ArrayList<UserSpecifiedNetwork> networkList = UserSpecifiedNetwork.calcArrayList(OPT_ARGS.network);
if (networkList == null) {
Log.err("Exiting.");
H2O.exit(-1);
}
// Get a list of all valid IPs on this machine.
ArrayList<InetAddress> ips = calcPrioritizedInetAddressList();
InetAddress local = null; // My final choice
// Check for an "-ip xxxx" option and accept a valid user choice; required
// if there are multiple valid IP addresses.
InetAddress arg = null;
if (OPT_ARGS.ip != null) {
try{
arg = InetAddress.getByName(OPT_ARGS.ip);
} catch( UnknownHostException e ) {
Log.err(e);
H2O.exit(-1);
}
if( !(arg instanceof Inet4Address) ) {
Log.warn("Only IP4 addresses allowed.");
H2O.exit(-1);
}
if( !ips.contains(arg) ) {
Log.warn("IP address not found on this machine");
H2O.exit(-1);
}
local = arg;
} else if (networkList.size() > 0) {
// Return the first match from the list, if any.
// If there are no matches, then exit.
Log.info("Network list was specified by the user. Searching for a match...");
for( InetAddress ip : ips ) {
Log.info(" Considering " + ip.getHostAddress() + " ...");
for ( UserSpecifiedNetwork n : networkList ) {
if (n.inetAddressOnNetwork(ip)) {
Log.info(" Matched " + ip.getHostAddress());
local = ip;
SELF_ADDRESS = local;
return SELF_ADDRESS;
}
}
}
Log.err("No interface matches the network list from the -network option. Exiting.");
H2O.exit(-1);
}
else {
// No user-specified IP address. Attempt auto-discovery. Roll through
// all the network choices looking for a single Inet4 address.
ArrayList<InetAddress> validIps = new ArrayList();
for( InetAddress ip : ips ) {
// make sure the given IP address can be found here
if( ip instanceof Inet4Address &&
!ip.isLoopbackAddress() &&
!ip.isLinkLocalAddress() ) {
validIps.add(ip);
}
}
if( validIps.size() == 1 ) {
local = validIps.get(0);
} else {
local = guessInetAddress(validIps);
}
}
// The above fails with no network connection, in that case go for a truly
// local host.
if( local == null ) {
try {
Log.warn("Failed to determine IP, falling back to localhost.");
// set default ip address to be 127.0.0.1 /localhost
local = InetAddress.getByName("127.0.0.1");
} catch( UnknownHostException e ) {
throw Log.errRTExcept(e);
}
}
SELF_ADDRESS = local;
}
return SELF_ADDRESS;
}
private static InetAddress guessInetAddress(List<InetAddress> ips) {
String m = "Multiple local IPs detected:\n";
for(InetAddress ip : ips) m+=" " + ip;
m+="\nAttempting to determine correct address...\n";
Socket s = null;
try {
// using google's DNS server as an external IP to find
// Add a timeout to the touch of google.
// https://0xdata.atlassian.net/browse/HEX-743
s = new Socket();
// only 3000 milliseconds before giving up
// Exceptions: IOException, SocketTimeoutException, plus two Illegal* exceptions
s.connect(new InetSocketAddress("8.8.8.8", 53), 3000);
m+="Using " + s.getLocalAddress() + "\n";
return s.getLocalAddress();
} catch( java.net.SocketException se ) {
return null; // No network at all? (Laptop w/wifi turned off?)
} catch( java.net.SocketTimeoutException se ) {
return null; // could be firewall?
} catch( Throwable t ) {
Log.err(t);
return null;
} finally {
Log.info(m);
Utils.close(s);
}
}
// --------------------------------------------------------------------------
// The (local) set of Key/Value mappings.
static final NonBlockingHashMap<Key,Value> STORE = new NonBlockingHashMap<Key, Value>();
// Dummy shared volatile for ordering games
static public volatile int VOLATILE;
// PutIfMatch
// - Atomically update the STORE, returning the old Value on success
// - Kick the persistence engine as needed
// - Return existing Value on fail, no change.
//
// Keys are interned here: I always keep the existing Key, if any. The
// existing Key is blind jammed into the Value prior to atomically inserting
// it into the STORE and interning.
//
// Because of the blind jam, there is a narrow unusual race where the Key
// might exist but be stale (deleted, mapped to a TOMBSTONE), a fresh put()
// can find it and jam it into the Value, then the Key can be deleted
// completely (e.g. via an invalidate), the table can resize flushing the
// stale Key, an unrelated weak-put can re-insert a matching Key (but as a
// new Java object), and delete it, and then the original thread can do a
// successful put_if_later over the missing Key and blow the invariant that a
// stored Value always points to the physically equal Key that maps to it
// from the STORE. If this happens, some of replication management bits in
// the Key will be set in the wrong Key copy... leading to extra rounds of
// replication.
public static Value putIfMatch( Key key, Value val, Value old ) {
if( old != null ) // Have an old value?
key = old._key; // Use prior key
if( val != null )
val._key = key;
// Insert into the K/V store
Value res = STORE.putIfMatchUnlocked(key,val,old);
if( res != old ) return res; // Return the failure cause
// Persistence-tickle.
// If the K/V mapping is going away, remove the old guy.
// If the K/V mapping is changing, let the store cleaner just overwrite.
// If the K/V mapping is new, let the store cleaner just create
if( old != null && val == null ) old.removeIce(); // Remove the old guy
if( val != null ) {
dirty_store(); // Start storing the new guy
Scope.track(key);
}
return old; // Return success
}
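// Compare-and-swap usage sketch (hypothetical call site): replace only if the mapping is still 'cur'.
//   Value cur = H2O.get(key);
//   Value res = H2O.putIfMatch(key, newVal, cur);
//   boolean swapped = (res == cur);   // on failure the current (conflicting) Value is returned instead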
// Raw put; no marking the memory as out-of-sync with disk. Used to import
// initial keys from local storage, or to intern keys.
public static Value putIfAbsent_raw( Key key, Value val ) {
Value res = STORE.putIfMatchUnlocked(key,val,null);
assert res == null;
return res;
}
// Get the value from the store
public static Value get( Key key ) { return STORE.get(key); }
public static Value raw_get( Key key ) { return STORE.get(key); }
public static Key getk( Key key ) { return STORE.getk(key); }
public static Set<Key> localKeySet( ) { return STORE.keySet(); }
public static Collection<Value> values( ) { return STORE.values(); }
public static int store_size() { return STORE.size(); }
// --------------------------------------------------------------------------
// The worker pools - F/J pools with different priorities.
// These priorities are carefully ordered and asserted for... modify with
// care. The real problem here is that we can get into cyclic deadlock
// unless we spawn a thread of priority "X+1" in order to allow progress
// on a queue which might be flooded with a large number of "<=X" tasks.
//
// Example of deadlock: suppose TaskPutKey and the Invalidate ran at the same
// priority on a 2-node cluster. Both nodes flood their own queues with
// writes to unique keys, which require invalidates to run on the other node.
// Suppose the flooding depth exceeds the thread-limit (e.g. 99); then each
// node might have all 99 worker threads blocked in TaskPutKey, awaiting
// remote invalidates - but the other nodes' threads are also all blocked
// awaiting invalidates!
//
// We fix this by being willing to always spawn a thread working on jobs at
// priority X+1, and guaranteeing there are no jobs above MAX_PRIORITY -
// i.e., jobs running at MAX_PRIORITY cannot block, and when those jobs are
// done, the next lower level jobs get unblocked, etc.
public static final byte MAX_PRIORITY = Byte.MAX_VALUE-1;
public static final byte ACK_ACK_PRIORITY = MAX_PRIORITY-0;
public static final byte FETCH_ACK_PRIORITY = MAX_PRIORITY-1;
public static final byte ACK_PRIORITY = MAX_PRIORITY-2;
public static final byte DESERIAL_PRIORITY = MAX_PRIORITY-3;
public static final byte INVALIDATE_PRIORITY = MAX_PRIORITY-3;
public static final byte GET_KEY_PRIORITY = MAX_PRIORITY-4;
public static final byte PUT_KEY_PRIORITY = MAX_PRIORITY-5;
public static final byte ATOMIC_PRIORITY = MAX_PRIORITY-6;
public static final byte GUI_PRIORITY = MAX_PRIORITY-7;
public static final byte MIN_HI_PRIORITY = MAX_PRIORITY-7;
public static final byte MIN_PRIORITY = 0;
// F/J threads that remember the priority of the last task they started
// working on.
public static class FJWThr extends ForkJoinWorkerThread {
public int _priority;
FJWThr(ForkJoinPool pool) {
super(pool);
_priority = ((ForkJoinPool2)pool)._priority;
setPriority( _priority == Thread.MIN_PRIORITY
? Thread.NORM_PRIORITY-1
: Thread. MAX_PRIORITY-1 );
setName("FJ-"+_priority+"-"+getPoolIndex());
}
}
// Factory for F/J threads, with caps that vary with priority.
static class FJWThrFact implements ForkJoinPool.ForkJoinWorkerThreadFactory {
private final int _cap;
FJWThrFact( int cap ) { _cap = cap; }
@Override public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
int cap = 4 * NUMCPUS;
return pool.getPoolSize() <= cap ? new FJWThr(pool) : null;
}
}
// A standard FJ Pool, with an expected priority level.
static class ForkJoinPool2 extends ForkJoinPool {
final int _priority;
private ForkJoinPool2(int p, int cap) {
super((OPT_ARGS == null || OPT_ARGS.nthreads <= 0) ? NUMCPUS : OPT_ARGS.nthreads,
new FJWThrFact(cap),
null,
p<MIN_HI_PRIORITY);
_priority = p;
}
private H2OCountedCompleter poll2() { return (H2OCountedCompleter)pollSubmission(); }
}
// Hi-priority work, sorted into individual queues per-priority.
// Capped at a small number of threads per pool.
private static final ForkJoinPool2 FJPS[] = new ForkJoinPool2[MAX_PRIORITY+1];
static {
// Only need 1 thread for the AckAck work, as it cannot block
FJPS[ACK_ACK_PRIORITY] = new ForkJoinPool2(ACK_ACK_PRIORITY,1);
for( int i=MIN_HI_PRIORITY+1; i<MAX_PRIORITY; i++ )
FJPS[i] = new ForkJoinPool2(i,NUMCPUS); // All CPUs, but no more for blocking purposes
FJPS[GUI_PRIORITY] = new ForkJoinPool2(GUI_PRIORITY,2);
}
// Easy peeks at the FJ queues
static int getWrkQueueSize (int i) { return FJPS[i]==null ? -1 : FJPS[i].getQueuedSubmissionCount();}
static int getWrkThrPoolSize(int i) { return FJPS[i]==null ? -1 : FJPS[i].getPoolSize(); }
// Submit to the correct priority queue
public static H2OCountedCompleter submitTask( H2OCountedCompleter task ) {
int priority = task.priority();
assert MIN_PRIORITY <= priority && priority <= MAX_PRIORITY:"priority " + priority + " is out of range, expected range is < " + MIN_PRIORITY + "," + MAX_PRIORITY + ">";
if( FJPS[priority]==null )
synchronized( H2O.class ) { if( FJPS[priority] == null ) FJPS[priority] = new ForkJoinPool2(priority,-1); }
FJPS[priority].submit(task);
return task;
}
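// Sketch: a task that must run above normal work supplies its own priority (hypothetical names):
//   class MyHiTask extends H2OCountedCompleter {
//     @Override public void compute2() { doWork(); tryComplete(); }   // doWork() is hypothetical
//     @Override public byte priority() { return H2O.GET_KEY_PRIORITY; }
//   }
//   H2O.submitTask(new MyHiTask());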
// Simple wrapper over F/J CountedCompleter to support priority queues. F/J
// queues are simple unordered (and extremely light weight) queues. However,
// we frequently need priorities to avoid deadlock and to promote efficient
// throughput (e.g. failure to respond quickly to TaskGetKey can block an
// entire node for lack of some small piece of data). So each attempt to do
// lower-priority F/J work starts with an attempt to work & drain the
// higher-priority queues.
public static abstract class
H2OCountedCompleter<T extends H2OCountedCompleter> extends CountedCompleter implements Cloneable {
public H2OCountedCompleter(){}
protected H2OCountedCompleter(H2OCountedCompleter completer){super(completer);}
// Once per F/J task, drain the high priority queue before doing any low
// priority work.
@Override public final void compute() {
FJWThr t = (FJWThr)Thread.currentThread();
int pp = ((ForkJoinPool2)t.getPool())._priority;
// Drain the high priority queues before the normal F/J queue
H2OCountedCompleter h2o = null;
try {
assert priority() == pp; // Job went to the correct queue?
assert t._priority <= pp; // Thread attempting the job is only a low-priority?
final int p2 = Math.max(pp,MIN_HI_PRIORITY);
for( int p = MAX_PRIORITY; p > p2; p-- ) {
if( FJPS[p] == null ) continue;
h2o = FJPS[p].poll2();
if( h2o != null ) { // Got a hi-priority job?
t._priority = p; // Set & do it now!
t.setPriority(Thread.MAX_PRIORITY-1);
h2o.compute2(); // Do it ahead of normal F/J work
p++; // Check again the same queue
}
}
} catch( Throwable ex ) {
// If the higher priority job popped an exception, complete it
// exceptionally... but then carry on and do the lower priority job.
if( h2o != null ) h2o.onExceptionalCompletion(ex, h2o.getCompleter());
else ex.printStackTrace();
} finally {
t._priority = pp;
if( pp == MIN_PRIORITY ) t.setPriority(Thread.NORM_PRIORITY-1);
}
// Now run the task as planned
compute2();
}
// Do the actually intended work
public abstract void compute2();
@Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller) {
if(!(ex instanceof JobCancelledException) && !(ex instanceof IllegalArgumentException) && this.getCompleter() == null)
ex.printStackTrace();
return true;
}
// In order to prevent deadlock, threads that block waiting for a reply
// from a remote node, need the remote task to run at a higher priority
// than themselves. This field tracks the required priority.
public byte priority() { return MIN_PRIORITY; }
@Override public T clone(){
try { return (T)super.clone(); }
catch( CloneNotSupportedException e ) { throw water.util.Log.errRTExcept(e); }
}
}
public static abstract class H2OCallback<T extends H2OCountedCompleter> extends H2OCountedCompleter{
public H2OCallback(){}
public H2OCallback(H2OCountedCompleter cc){super(cc);}
@Override public void compute2(){throw new UnsupportedOperationException();}
@Override public void onCompletion(CountedCompleter caller){callback((T) caller);}
public abstract void callback(T t);
}
public static class H2OEmptyCompleter extends H2OCountedCompleter{
public H2OEmptyCompleter(){}
public H2OEmptyCompleter(H2OCountedCompleter cc){super(cc);}
@Override public void compute2(){throw new UnsupportedOperationException();}
}
// --------------------------------------------------------------------------
public static OptArgs OPT_ARGS = new OptArgs();
public static class OptArgs extends Arguments.Opt {
public String name; // set_cloud_name_and_mcast()
public String flatfile; // set_cloud_name_and_mcast()
public int baseport; // starting number to search for open ports
public int port; // set_cloud_name_and_mcast()
public String ip; // Named IP4/IP6 address instead of the default
public String network; // Network specification for acceptable interfaces to bind to.
public String ice_root; // ice root directory
public String hdfs; // HDFS backend
public String hdfs_version; // version of the filesystem
public String hdfs_config; // configuration file of the HDFS
public String hdfs_skip = null; // used by hadoop driver to not unpack and load any hdfs jar file at runtime.
public String aws_credentials; // properties file for aws credentials
public String keepice; // Do not delete ice on startup
public String soft = null; // soft launch for demos
public String random_udp_drop = null; // test only, randomly drop udp incoming
public int pparse_limit = Integer.MAX_VALUE;
public String no_requests_log = null; // disable logging of Web requests
public boolean check_rest_params = true; // enable checking for unused/unknown REST params; e.g., -check_rest_params=false disables the check
public int nthreads=NUMCPUS; // desired F/J parallelism level for low priority queues.
public String license; // License file
public String h = null;
public String help = null;
public String version = null;
public String single_precision = null;
public int data_max_factor_levels;
public String beta = null;
public String mem_watchdog = null; // For developer debugging
public boolean md5skip = false;
}
public static void printHelp() {
String s =
"Start an H2O node.\n" +
"\n" +
"Usage: java [-Xmx<size>] -jar h2o.jar [options]\n" +
" (Note that every option has a default and is optional.)\n" +
"\n" +
" -h | -help\n" +
" Print this help.\n" +
"\n" +
" -version\n" +
" Print version info and exit.\n" +
"\n" +
" -name <h2oCloudName>\n" +
" Cloud name used for discovery of other nodes.\n" +
" Nodes with the same cloud name will form an H2O cloud\n" +
" (also known as an H2O cluster).\n" +
"\n" +
" -flatfile <flatFileName>\n" +
" Configuration file explicitly listing H2O cloud node members.\n" +
"\n" +
" -ip <ipAddressOfNode>\n" +
" IP address of this node.\n" +
"\n" +
" -port <port>\n" +
" Port number for this node (note: port+1 is also used).\n" +
" (The default port is " + DEFAULT_PORT + ".)\n" +
"\n" +
" -network <IPv4network1Specification>[,<IPv4network2Specification> ...]\n" +
" The IP address discovery code will bind to the first interface\n" +
" that matches one of the networks in the comma-separated list.\n" +
" Use instead of -ip when a broad range of addresses is legal.\n" +
" (Example network specification: '10.1.2.0/24' allows 256 legal\n" +
" possibilities.)\n" +
"\n" +
" -ice_root <fileSystemPath>\n" +
" The directory where H2O spills temporary data to disk.\n" +
" (The default is '" + DEFAULT_ICE_ROOT() + "'.)\n" +
"\n" +
" -single_precision\n" +
" Reduce the max. (storage) precision for floating point numbers\n" +
" from double to single precision to save memory of numerical data.\n" +
" (The default is double precision.)\n" +
"\n" +
" -data_max_factor_levels <integer>\n" +
" The maximum number of factor levels for categorical columns.\n" +
" Columns with more than the specified number of factor levels\n" +
" are converted into all missing values.\n" +
" (The default is " + DATA_MAX_FACTOR_LEVELS + ".)\n" +
"\n" +
" -nthreads <#threads>\n" +
" Maximum number of threads in the low priority batch-work queue.\n" +
" (The default is 4*numcpus.)\n" +
"\n" +
" -license <licenseFilePath>\n" +
" Path to license file on local filesystem.\n" +
"\n" +
"Cloud formation behavior:\n" +
"\n" +
" New H2O nodes join together to form a cloud at startup time.\n" +
" Once a cloud is given work to perform, it locks out new members\n" +
" from joining.\n" +
"\n" +
"Examples:\n" +
"\n" +
" Start an H2O node with 4GB of memory and a default cloud name:\n" +
" $ java -Xmx4g -jar h2o.jar\n" +
"\n" +
" Start an H2O node with 6GB of memory and a specify the cloud name:\n" +
" $ java -Xmx6g -jar h2o.jar -name MyCloud\n" +
"\n" +
" Start an H2O cloud with three 2GB nodes and a default cloud name:\n" +
" $ java -Xmx2g -jar h2o.jar &\n" +
" $ java -Xmx2g -jar h2o.jar &\n" +
" $ java -Xmx2g -jar h2o.jar &\n" +
"\n";
System.out.print(s);
}
public static boolean IS_SYSTEM_RUNNING = false;
/** Load the H2O build version, or return the default unknown version.
* @return never returns null
*/
public static AbstractBuildVersion getBuildVersion() {
try {
Class klass = Class.forName("water.BuildVersion");
java.lang.reflect.Constructor constructor = klass.getConstructor();
AbstractBuildVersion abv = (AbstractBuildVersion) constructor.newInstance();
return abv;
// it exists on the classpath
} catch (Exception e) {
return AbstractBuildVersion.UNKNOWN_VERSION;
}
}
/**
* If logging has not been set up yet, then Log.info will only print to stdout.
* This allows for early processing of the '-version' option without unpacking
* the jar file and other startup stuff.
*/
public static void printAndLogVersion() {
// Try to load a version
AbstractBuildVersion abv = getBuildVersion();
String build_branch = abv.branchName();
String build_hash = abv.lastCommitHash();
String build_describe = abv.describe();
String build_project_version = abv.projectVersion();
String build_by = abv.compiledBy();
String build_on = abv.compiledOn();
Log.info ("----- H2O started -----");
Log.info ("Build git branch: " + build_branch);
Log.info ("Build git hash: " + build_hash);
Log.info ("Build git describe: " + build_describe);
Log.info ("Build project version: " + build_project_version);
Log.info ("Built by: '" + build_by + "'");
Log.info ("Built on: '" + build_on + "'");
Runtime runtime = Runtime.getRuntime();
double ONE_GB = 1024 * 1024 * 1024;
Log.info ("Java availableProcessors: " + runtime.availableProcessors());
Log.info ("Java heap totalMemory: " + String.format("%.2f gb", runtime.totalMemory() / ONE_GB));
Log.info ("Java heap maxMemory: " + String.format("%.2f gb", runtime.maxMemory() / ONE_GB));
Log.info ("Java version: " + String.format("Java %s (from %s)", System.getProperty("java.version"), System.getProperty("java.vendor")));
Log.info ("OS version: " + String.format("%s %s (%s)", System.getProperty("os.name"), System.getProperty("os.version"), System.getProperty("os.arch")));
long totalMemory = OSUtils.getTotalPhysicalMemory();
Log.info ("Machine physical memory: " + (totalMemory==-1 ? "NA" : String.format("%.2f gb", totalMemory / ONE_GB)));
}
/**
* We had a report from a user that H2O didn't start properly on MacOS X in a
* case where the user was part of the root group. So warn about it.
*/
public static void printWarningIfRootOnMac() {
String os_name = System.getProperty("os.name");
if (os_name.equals("Mac OS X")) {
String user_name = System.getProperty("user.name");
if (user_name.equals("root")) {
Log.warn("Running as root on MacOS; check if java binary is unintentionally setuid");
}
}
}
public static String getVersion() {
String build_project_version = "(unknown)";
try {
Class klass = Class.forName("water.BuildVersion");
java.lang.reflect.Constructor constructor = klass.getConstructor();
AbstractBuildVersion abv = (AbstractBuildVersion) constructor.newInstance();
build_project_version = abv.projectVersion();
// it exists on the classpath
} catch (Exception e) {
// it does not exist on the classpath
}
return build_project_version;
}
// Start up an H2O Node and join any local Cloud
public static void main( String[] args ) {
Log.POST(300,"");
// To support launching from JUnit, JUnit expects to call main() repeatedly.
// We need exactly 1 call to main to startup all the local services.
if (IS_SYSTEM_RUNNING) return;
IS_SYSTEM_RUNNING = true;
VERSION = getVersion(); // Pick this up from build-specific info.
START_TIME_MILLIS = System.currentTimeMillis();
// Parse args
Arguments arguments = new Arguments(args);
arguments.extract(OPT_ARGS);
ARGS = arguments.toStringArray();
printAndLogVersion();
printWarningIfRootOnMac();
if (OPT_ARGS.baseport != 0) {
DEFAULT_PORT = OPT_ARGS.baseport;
}
SINGLE_PRECISION = OPT_ARGS.single_precision != null;
if (SINGLE_PRECISION) Log.info("Using single precision for floating-point numbers.");
if (OPT_ARGS.data_max_factor_levels != 0) {
DATA_MAX_FACTOR_LEVELS = OPT_ARGS.data_max_factor_levels;
Log.info("Max. number of factor levels per column: " + DATA_MAX_FACTOR_LEVELS);
}
// Get ice path before loading Log or Persist class
String ice = DEFAULT_ICE_ROOT();
if( OPT_ARGS.ice_root != null ) ice = OPT_ARGS.ice_root.replace("\\", "/");
try {
ICE_ROOT = new URI(ice);
} catch(URISyntaxException ex) {
throw new RuntimeException("Invalid ice_root: " + ice + ", " + ex.getMessage());
}
Log.info ("ICE root: '" + ICE_ROOT + "'");
findInetAddressForSelf();
//if (OPT_ARGS.rshell.equals("false"))
Log.POST(310,"");
Log.wrap(); // Logging does not wrap when the rshell is on.
// Start the local node
startLocalNode();
Log.POST(320,"");
String logDir = (Log.getLogDir() != null) ? Log.getLogDir() : "(unknown)";
Log.info ("Log dir: '" + logDir + "'");
// Load up from disk and initialize the persistence layer
initializePersistence();
Log.POST(340, "");
initializeLicenseManager();
Log.POST(345, "");
// Start network services, including heartbeats & Paxos
startNetworkServices(); // start server services
Log.POST(350,"");
startApiIpPortWatchdog(); // Check if the API port becomes unreachable
Log.POST(360,"");
if (OPT_ARGS.mem_watchdog != null) {
startMemoryWatchdog();
Log.POST(370, "");
}
startupFinalize(); // finalizes the startup & tests (if any)
Log.POST(380,"");
}
/** Starts the local k-v store.
* Initializes the local k-v store, local node and the local cloud with itself
* as the only member.
*/
private static void startLocalNode() {
// Print this first, so if any network stuff is affected it's clear this is going on.
if (OPT_ARGS.random_udp_drop != null) {
Log.warn("Debugging option RANDOM UDP DROP is ENABLED, make sure you really meant it");
}
// Figure self out; this is surprisingly hard
initializeNetworkSockets();
// Do not forget to put SELF into the static configuration (to simulate
// proper multicast behavior)
if( STATIC_H2OS != null && !STATIC_H2OS.contains(SELF)) {
Log.warn("Flatfile configuration does not include self: " + SELF+ " but contains " + STATIC_H2OS);
STATIC_H2OS.add(SELF);
}
Log.info ("H2O cloud name: '" + NAME + "'");
Log.info("(v"+VERSION+") '"+NAME+"' on " + SELF+(OPT_ARGS.flatfile==null
? (", discovery address "+CLOUD_MULTICAST_GROUP+":"+CLOUD_MULTICAST_PORT)
: ", static configuration based on -flatfile "+OPT_ARGS.flatfile));
Log.info("If you have trouble connecting, try SSH tunneling from your local machine (e.g., via port 55555):\n" +
" 1. Open a terminal and run 'ssh -L 55555:localhost:"
+ API_PORT + " " + System.getProperty("user.name") + "@" + SELF_ADDRESS.getHostAddress() + "'\n" +
" 2. Point your browser to http://localhost:55555");
// Create the starter Cloud with 1 member
SELF._heartbeat._jar_md5 = Boot._init._jarHash;
Paxos.doHeartbeat(SELF);
assert SELF._heartbeat._cloud_hash != 0;
}
/** Initializes the network services of the local node.
*
* Starts the worker threads, receiver threads, heartbeats and all other
* network related services.
*/
private static void startNetworkServices() {
// We've rebooted the JVM recently. Tell other Nodes they can ignore
// prior tasks by us. Do this before we receive any packets.
UDPRebooted.T.reboot.broadcast();
// Start the UDPReceiverThread, to listen for requests from other Cloud
// Nodes. There should be only 1 of these, and it never shuts down.
// Started first, so we can start parsing UDP packets
new UDPReceiverThread().start();
// Start the MultiReceiverThread, to listen for multi-cast requests from
// other Cloud Nodes. There should be only 1 of these, and it never shuts
// down. Started soon, so we can start parsing multicast UDP packets
new MultiReceiverThread().start();
// Start the Persistent meta-data cleaner thread, which updates the K/V
// mappings periodically to disk. There should be only 1 of these, and it
// never shuts down. Needs to start BEFORE the HeartBeatThread to build
// an initial histogram state.
new Cleaner().start();
// Start the heartbeat thread, to publish the Clouds' existence to other
// Clouds. This will typically trigger a round of Paxos voting so we can
// join an existing Cloud.
new HeartBeatThread().start();
// Start a UDP timeout worker thread. This guy only handles requests for
// which we have not received a timely response and probably need to
// arrange for a re-send to cover a dropped UDP packet.
new UDPTimeOutThread().start();
new H2ONode.AckAckTimeOutThread().start();
// Start the TCPReceiverThread, to listen for TCP requests from other Cloud
// Nodes. There should be only 1 of these, and it never shuts down.
new TCPReceiverThread().start();
// Start the Nano HTTP server thread
water.api.RequestServer.start();
}
/** Initializes a watchdog thread to make sure the API IP:Port is reachable.
*
* The IP and port are meant to be accessible from outside this
* host, much less inside. The real reason behind this check is the
* one-node cloud case where people move their laptop around and
* DHCP assigns them a new IP address.
*/
private static void startApiIpPortWatchdog() {
apiIpPortWatchdog = new ApiIpPortWatchdogThread();
apiIpPortWatchdog.start();
}
private static void startMemoryWatchdog() {
new MemoryWatchdogThread().start();
}
// Used to update the Throwable detailMessage field.
private static java.lang.reflect.Field DETAILMESSAGE;
public static <T extends Throwable> T setDetailMessage( T t, String s ) {
try { if( DETAILMESSAGE != null ) DETAILMESSAGE.set(t,s); }
catch( IllegalAccessException iae) {}
return t;
}
/** Finalizes the node startup.
*
* Displays the startup message and runs the tests (if applicable).
*/
private static void startupFinalize() {
// Allow Throwable detailMessage's to be updated on the fly. Ugly, ugly,
// but I want to add info without rethrowing/rebuilding whole exceptions.
try {
DETAILMESSAGE = Throwable.class.getDeclaredField("detailMessage");
DETAILMESSAGE.setAccessible(true);
} catch( NoSuchFieldException nsfe ) { }
// Sleep a bit so all my other threads can 'catch up'
try { Thread.sleep(100); } catch( InterruptedException e ) { }
}
public static DatagramChannel _udpSocket;
public static ServerSocket _apiSocket;
// Parse arguments and set cloud name in any case. Strip out "-name NAME"
// and "-flatfile <filename>". Ignore the rest. Set multi-cast port as a hash
// function of the name. Parse node ip addresses from the filename.
static void initializeNetworkSockets( ) {
// Assign initial ports
API_PORT = OPT_ARGS.port != 0 ? OPT_ARGS.port : DEFAULT_PORT;
while (true) {
UDP_PORT = API_PORT+1;
if( API_PORT<0 || API_PORT>65534 ) // 65535 is max, implied for udp port
Log.die("Attempting to use system illegal port, either "+API_PORT+" or "+UDP_PORT);
try {
// kbn. seems like we need to set SO_REUSEADDR before binding?
// http://www.javadocexamples.com/java/net/java.net.ServerSocket.html#setReuseAddress:boolean
// When a TCP connection is closed the connection may remain in a timeout state
// for a period of time after the connection is closed (typically known as the
// TIME_WAIT state or 2MSL wait state). For applications using a well known socket address
// or port it may not be possible to bind a socket to the required SocketAddress
// if there is a connection in the timeout state involving the socket address or port.
// Enabling SO_REUSEADDR prior to binding the socket using bind(SocketAddress)
// allows the socket to be bound even though a previous connection is in a timeout state.
// cnc: this is busted on windows. Back to the old code.
// If the user specified the -ip flag, honor it for the Web UI interface bind.
// Otherwise bind to all interfaces.
_apiSocket = OPT_ARGS.ip == null
? new ServerSocket(API_PORT)
: new ServerSocket(API_PORT, -1/*defaultBacklog*/, SELF_ADDRESS);
_apiSocket.setReuseAddress(true);
_udpSocket = DatagramChannel.open();
_udpSocket.socket().setReuseAddress(true);
_udpSocket.socket().bind(new InetSocketAddress(SELF_ADDRESS, UDP_PORT));
break;
} catch (IOException e) {
try { if( _apiSocket != null ) _apiSocket.close(); } catch( IOException ohwell ) { Log.err(ohwell); }
Utils.close(_udpSocket);
_apiSocket = null;
_udpSocket = null;
if( OPT_ARGS.port != 0 )
Log.die("On " + SELF_ADDRESS +
" some of the required ports " + (OPT_ARGS.port+0) +
", " + (OPT_ARGS.port+1) +
" are not available, change -port PORT and try again.");
}
API_PORT += 2;
}
SELF = H2ONode.self(SELF_ADDRESS);
Log.info("Internal communication uses port: ",UDP_PORT,"\nListening for HTTP and REST traffic on http://",SELF_ADDRESS.getHostAddress(),":"+_apiSocket.getLocalPort()+"/");
String embeddedConfigFlatfile = null;
AbstractEmbeddedH2OConfig ec = getEmbeddedH2OConfig();
if (ec != null) {
ec.notifyAboutEmbeddedWebServerIpPort (SELF_ADDRESS, API_PORT);
if (ec.providesFlatfile()) {
try {
embeddedConfigFlatfile = ec.fetchFlatfile();
}
catch (Exception e) {
Log.err("Failed to get embedded config flatfile");
Log.err(e);
H2O.exit(1);
}
}
}
NAME = OPT_ARGS.name==null? System.getProperty("user.name") : OPT_ARGS.name;
// Read a flatfile of allowed nodes
if (embeddedConfigFlatfile != null) {
STATIC_H2OS = parseFlatFileFromString(embeddedConfigFlatfile);
}
else {
STATIC_H2OS = parseFlatFile(OPT_ARGS.flatfile);
}
// Multi-cast ports are in the range E1.00.00.00 to EF.FF.FF.FF
int hash = NAME.hashCode()&0x7fffffff;
int port = (hash % (0xF0000000-0xE1000000))+0xE1000000;
byte[] ip = new byte[4];
for( int i=0; i<4; i++ )
ip[i] = (byte)(port>>>((3-i)<<3));
try {
CLOUD_MULTICAST_GROUP = InetAddress.getByAddress(ip);
} catch( UnknownHostException e ) { throw Log.errRTExcept(e); }
CLOUD_MULTICAST_PORT = (port>>>16);
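// Worked reading of the derivation above (hedged): 'port' always lands in
// [0xE1000000, 0xF0000000), so ip[0] is 0xE1..0xEF and the group address falls in
// 225.x.x.x .. 239.x.x.x (valid multicast), while CLOUD_MULTICAST_PORT is the top
// 16 bits of that value, i.e. roughly 57600..61439.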
}
// Multicast send-and-close. Very similar to udp_send, except to the
// multicast port (or all the individuals we can find, if multicast is
// disabled).
static void multicast( ByteBuffer bb ) {
try { multicast2(bb); }
catch (Exception xe) {}
}
static private void multicast2( ByteBuffer bb ) {
if( H2O.STATIC_H2OS == null ) {
byte[] buf = new byte[bb.remaining()];
bb.get(buf);
synchronized( H2O.class ) { // Sync'd so single-thread socket create/destroy
assert H2O.CLOUD_MULTICAST_IF != null;
try {
if( CLOUD_MULTICAST_SOCKET == null ) {
CLOUD_MULTICAST_SOCKET = new MulticastSocket();
// Allow multicast traffic to go across subnets
CLOUD_MULTICAST_SOCKET.setTimeToLive(2);
CLOUD_MULTICAST_SOCKET.setNetworkInterface(H2O.CLOUD_MULTICAST_IF);
}
// Make and send a packet from the buffer
CLOUD_MULTICAST_SOCKET.send(new DatagramPacket(buf, buf.length, CLOUD_MULTICAST_GROUP,CLOUD_MULTICAST_PORT));
} catch( Exception e ) { // On any error from anybody, close all sockets & re-open
// and if not a soft launch (hibernate mode)
if(H2O.OPT_ARGS.soft == null)
Log.err("Multicast Error ",e);
if( CLOUD_MULTICAST_SOCKET != null )
try { CLOUD_MULTICAST_SOCKET.close(); }
catch( Exception e2 ) { Log.err("Got",e2); }
finally { CLOUD_MULTICAST_SOCKET = null; }
}
}
} else { // Multicast Simulation
// The multicast simulation is a little bit tricky. To achieve the union of all
// specified nodes' flatfiles (via option -flatfile), the simulated
// multicast has to send packets not only to nodes listed in the node's
// flatfile (H2O.STATIC_H2OS), but also to all cloud members (they do not
// need to be specified in THIS node's flatfile but can be part of cloud
// due to another node's flatfile).
//
// Furthermore, the packets also have to be sent to Paxos proposed members
// for Paxos to function correctly. A typical situation is when
// this node receives a Paxos heartbeat packet from a node which is not
// listed in the node's flatfile -- it means that this node is listed in
// another node's flatfile (and wants to create a cloud). Hence, to
// allow cloud creation, this node has to reply.
//
// Typical example is:
// node A: flatfile (B)
// node B: flatfile (C), i.e., A -> (B), B-> (C), C -> (A)
// node C: flatfile (A)
// Cloud configuration: (A, B, C)
//
// Hideous O(n) algorithm for broadcast - avoid the memory allocation in
// this method (since it is heavily used)
HashSet<H2ONode> nodes = (HashSet<H2ONode>)H2O.STATIC_H2OS.clone();
nodes.addAll(Paxos.PROPOSED.values());
bb.mark();
for( H2ONode h2o : nodes ) {
bb.reset();
try {
H2O.CLOUD_DGRAM.send(bb, h2o._key);
} catch( IOException e ) {
Log.warn("Multicast Error to "+h2o+e);
}
}
}
}
/**
* Read a set of Nodes from a file. Format is:
*
* name/ip_address:port
* - name is unused and optional
* - port is optional
* - leading '#' indicates a comment
*
* For example:
*
* 10.10.65.105:54322
* # disabled for testing
* # 10.10.65.106
* /10.10.65.107
* # run two nodes on 108
* 10.10.65.108:54322
* 10.10.65.108:54325
*/
private static HashSet<H2ONode> parseFlatFile( String fname ) {
if( fname == null ) return null;
File f = new File(fname);
if( !f.exists() ) {
Log.warn("-flatfile specified but not found: " + fname);
return null; // No flat file
}
HashSet<H2ONode> h2os = new HashSet<H2ONode>();
List<FlatFileEntry> list = parseFlatFile(f);
for(FlatFileEntry entry : list)
h2os.add(H2ONode.intern(entry.inet, entry.port+1));// use the UDP port here
return h2os;
}
public static HashSet<H2ONode> parseFlatFileFromString( String s ) {
HashSet<H2ONode> h2os = new HashSet<H2ONode>();
InputStream is = new ByteArrayInputStream(s.getBytes());
List<FlatFileEntry> list = parseFlatFile(is);
for(FlatFileEntry entry : list)
h2os.add(H2ONode.intern(entry.inet, entry.port+1));// use the UDP port here
return h2os;
}
public static class FlatFileEntry {
public InetAddress inet;
public int port;
}
public static List<FlatFileEntry> parseFlatFile( File f ) {
InputStream is = null;
try {
is = new FileInputStream(f);
}
catch (Exception e) { Log.die(e.toString()); }
return parseFlatFile(is);
}
public static List<FlatFileEntry> parseFlatFile( InputStream is ) {
List<FlatFileEntry> list = new ArrayList<FlatFileEntry>();
BufferedReader br = null;
int port = DEFAULT_PORT;
try {
br = new BufferedReader(new InputStreamReader(is));
String strLine = null;
while( (strLine = br.readLine()) != null) {
strLine = strLine.trim();
// be user friendly and skip comments and empty lines
if (strLine.startsWith("#") || strLine.isEmpty()) continue;
String ip = null, portStr = null;
int slashIdx = strLine.indexOf('/');
int colonIdx = strLine.indexOf(':');
if( slashIdx == -1 && colonIdx == -1 ) {
ip = strLine;
} else if( slashIdx == -1 ) {
ip = strLine.substring(0, colonIdx);
portStr = strLine.substring(colonIdx+1);
} else if( colonIdx == -1 ) {
ip = strLine.substring(slashIdx+1);
} else if( slashIdx > colonIdx ) {
Log.die("Invalid format, must be name/ip[:port], not '"+strLine+"'");
} else {
ip = strLine.substring(slashIdx+1, colonIdx);
portStr = strLine.substring(colonIdx+1);
}
InetAddress inet = InetAddress.getByName(ip);
if( !(inet instanceof Inet4Address) )
Log.die("Only IP4 addresses allowed: given " + ip);
if( portStr!=null && !portStr.equals("") ) {
try {
port = Integer.decode(portStr);
} catch( NumberFormatException nfe ) {
Log.die("Invalid port #: "+portStr);
}
}
FlatFileEntry entry = new FlatFileEntry();
entry.inet = inet;
entry.port = port;
list.add(entry);
}
} catch( Exception e ) { Log.die(e.toString()); }
finally { Utils.close(br); }
return list;
}
static void initializePersistence() {
HdfsLoader.loadJars();
if( OPT_ARGS.aws_credentials != null ) {
try {
PersistS3.getClient();
} catch( IllegalArgumentException e ) { Log.err(e); }
}
Persist.initialize();
}
static void initializeLicenseManager() {
licenseManager = new LicenseManager();
if (OPT_ARGS.license != null) {
LicenseManager.Result r = licenseManager.readLicenseFile(OPT_ARGS.license);
if (r == LicenseManager.Result.OK) {
Log.info("Successfully read license file ("+ OPT_ARGS.license + ")");
licenseManager.logLicensedFeatures();
}
else {
Log.err("readLicenseFile failed (" + r + ")");
}
}
}
// Cleaner ---------------------------------------------------------------
// msec time at which the STORE was dirtied.
// Long.MAX_VALUE if clean.
static private volatile long _dirty; // When was store dirtied
static void dirty_store() { dirty_store(System.currentTimeMillis()); }
static void dirty_store( long x ) {
// Keep earliest dirty time seen
if( x < _dirty ) _dirty = x;
}
public abstract static class KVFilter {
public abstract boolean filter(KeyInfo k);
}
public static final class KeyInfo extends Iced implements Comparable<KeyInfo>{
public final Key _key;
public final int _type;
public final boolean _rawData;
public final int _sz;
public final int _ncols;
public final long _nrows;
public final byte _backEnd;
public KeyInfo(Key k, Value v){
      assert k!=null : "Key should not be null!";
      assert v!=null : "Value should not be null!";
_key = k;
_type = v.type();
_rawData = v.isRawData();
if(v.isFrame()){
Frame f = v.get();
// NOTE: can't get byteSize here as it may invoke RollupStats! :(
// _sz = f.byteSize();
_sz = v._max;
// do at least nrows/ncols instead
_ncols = f.numCols();
_nrows = f.numRows();
} else {
_sz = v._max;
_ncols = 0;
_nrows = 0;
}
_backEnd = v.backend();
}
@Override public int compareTo(KeyInfo ki){ return _key.compareTo(ki._key);}
public boolean isFrame(){
return _type == TypeMap.onIce(Frame.class.getName());
}
public boolean isLockable(){
return TypeMap.newInstance(_type) instanceof Lockable;
}
}
public static class KeySnapshot extends Iced {
private static volatile long _lastUpdate;
private static final long _updateInterval = 1000;
private static volatile KeySnapshot _cache;
public final KeyInfo [] _keyInfos;
public long lastUpdated(){return _lastUpdate;}
public KeySnapshot cache(){return _cache;}
public KeySnapshot filter(KVFilter kvf){
ArrayList<KeyInfo> res = new ArrayList<KeyInfo>();
for(KeyInfo kinfo: _keyInfos)
if(kvf.filter(kinfo))res.add(kinfo);
return new KeySnapshot(res.toArray(new KeyInfo[res.size()]));
}
KeySnapshot(KeyInfo [] snapshot){
_keyInfos = snapshot;}
public Key [] keys(){
Key [] res = new Key[_keyInfos.length];
for(int i = 0; i < _keyInfos.length; ++i)
res[i] = _keyInfos[i]._key;
return res;
}
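    // fetchAll materializes all values of the requested type from this snapshot
    // via DKV lookups. A minimal, hypothetical usage sketch (the key names and
    // counts below are illustrative only):
    //   KeySnapshot snap = KeySnapshot.globalSnapshot();
    //   Map<String,Frame> frames = snap.fetchAll(Frame.class, false, 0, 20);
    //   // 'frames' maps key names to at most 20 Frames; with exact=false,
    //   // subclasses of Frame are included as well.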
public <T extends Iced> Map<String, T> fetchAll(Class<T> c) { return fetchAll(c,false,0,Integer.MAX_VALUE);}
public <T extends Iced> Map<String, T> fetchAll(Class<T> c, boolean exact) { return fetchAll(c,exact,0,Integer.MAX_VALUE);}
public <T extends Iced> Map<String, T> fetchAll(Class<T> c, boolean exact, int offset, int limit) {
TreeMap<String, T> res = new TreeMap<String, T>();
final int typeId = TypeMap.onIce(c.getName());
for (KeyInfo kinfo : _keyInfos) {
if (kinfo._type == typeId || (!exact && c.isAssignableFrom(TypeMap.clazz(kinfo._type)))) {
if (offset > 0) {
--offset;
continue;
}
Value v = DKV.get(kinfo._key);
if (v != null) {
T t = v.get();
res.put(kinfo._key.toString(), t);
if (res.size() == limit)
break;
}
}
}
return res;
}
public static KeySnapshot localSnapshot(){return localSnapshot(false);}
public static KeySnapshot localSnapshot(boolean homeOnly){
Object [] kvs = STORE.raw_array();
ArrayList<KeyInfo> res = new ArrayList<KeyInfo>();
for(int i = 2; i < kvs.length; i+= 2){
Object ok = kvs[i], ov = kvs[i+1];
if( !(ok instanceof Key ) || ov==null ) continue; // Ignore tombstones or deleted values
Key key = (Key) ok;
if(!key.user_allowed())continue;
if(homeOnly && !key.home())continue;
      // The raw array can contain regular Values and also values wrapped in the
      // Prime marker class:
      // - if we see a Value object, create a KeyInfo instance from it
      // - if we do not see a Value object, try to unwrap it by calling H2O.get
      //   and then look at the wrapped value again.
      if (!(ov instanceof Value)) {
        ov = H2O.get(key); // H2O.get returns a Value object or null
if (ov==null) continue;
}
res.add(new KeyInfo(key,(Value)ov));
}
final KeyInfo [] arr = res.toArray(new KeyInfo[res.size()]);
Arrays.sort(arr);
return new KeySnapshot(arr);
}
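    // globalSnapshot returns a cluster-wide key snapshot backed by a small cache.
    // If the cached snapshot is missing or older than timeTolerance, it is rebuilt
    // synchronously on all nodes; otherwise, if it is merely older than
    // _updateInterval, a background refresh is submitted and the slightly stale
    // snapshot is returned immediately. The no-argument form (timeTolerance = -1)
    // always rebuilds synchronously.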
public static KeySnapshot globalSnapshot(){ return globalSnapshot(-1);}
public static KeySnapshot globalSnapshot(long timeTolerance){
KeySnapshot res = _cache;
final long t = System.currentTimeMillis();
if(res == null || (t - _lastUpdate) > timeTolerance)
res = new KeySnapshot(new GlobalUKeySetTask().invokeOnAllNodes()._res);
else if(t - _lastUpdate > _updateInterval)
H2O.submitTask(new H2OCountedCompleter() {
@Override
public void compute2() {
new GlobalUKeySetTask().invokeOnAllNodes();
}
});
return res;
}
private static class GlobalUKeySetTask extends DRemoteTask<GlobalUKeySetTask> {
KeyInfo [] _res;
@Override public byte priority(){return H2O.GET_KEY_PRIORITY;}
@Override public void lcompute(){
_res = localSnapshot(true)._keyInfos;
tryComplete();
}
@Override public void reduce(GlobalUKeySetTask gbt){
if(_res == null)_res = gbt._res;
else if(gbt._res != null){ // merge sort keys together
KeyInfo [] res = new KeyInfo[_res.length + gbt._res.length];
int j = 0, k = 0;
for(int i = 0; i < res.length; ++i)
res[i] = j < gbt._res.length && (k == _res.length || gbt._res[j].compareTo(_res[k]) < 0)?gbt._res[j++]:_res[k++];
_res = res;
}
}
@Override public void postGlobal(){
_cache = new KeySnapshot(_res);
_lastUpdate = System.currentTimeMillis();
}
}
}
// Periodically write user keys to disk
public static class Cleaner extends Thread {
// Desired cache level. Set by the MemoryManager asynchronously.
static public volatile long DESIRED;
// Histogram used by the Cleaner
private final Histo _myHisto;
boolean _diskFull = false;
public Cleaner() {
super("MemCleaner");
setDaemon(true);
setPriority(MAX_PRIORITY-2);
_dirty = Long.MAX_VALUE; // Set to clean-store
_myHisto = new Histo(); // Build/allocate a first histogram
_myHisto.compute(0); // Compute lousy histogram; find eldest
H = _myHisto; // Force to be the most recent
_myHisto.histo(true); // Force a recompute with a good eldest
MemoryManager.set_goals("init",false);
}
static boolean lazyPersist(){ // free disk > our DRAM?
return H2O.SELF._heartbeat.get_free_disk() > MemoryManager.MEM_MAX;
}
static boolean isDiskFull(){ // free disk space < 5K?
long space = Persist.getIce().getUsableSpace();
return space != Persist.UNKNOWN && space < (5 << 10);
}
@Override public void run() {
boolean diskFull = false;
while( true ) {
// Sweep the K/V store, writing out Values (cleaning) and free'ing
// - Clean all "old" values (lazily, optimistically)
// - Clean and free old values if above the desired cache level
// Do not let optimistic cleaning get in the way of emergency cleaning.
// Get a recent histogram, computing one as needed
Histo h = _myHisto.histo(false);
long now = System.currentTimeMillis();
long dirty = _dirty; // When things first got dirtied
        // Start cleaning if: "dirty" was set a "long" time ago, or we are beyond
// the desired cache levels. Inverse: go back to sleep if the cache
// is below desired levels & nothing has been dirty awhile.
if( h._cached < DESIRED && // Cache is low and
(now-dirty < 5000) ) { // not dirty a long time
// Block asleep, waking every 5 secs to check for stuff, or when poked
Boot.block_store_cleaner();
continue; // Awoke; loop back and re-check histogram.
}
now = System.currentTimeMillis();
        _dirty = Long.MAX_VALUE; // Reset, since we are going to write stuff out
MemoryManager.set_goals("preclean",false);
// The age beyond which we need to toss out things to hit the desired
// caching levels. If forced, be exact (toss out the minimal amount).
// If lazy, store-to-disk things down to 1/2 the desired cache level
// and anything older than 5 secs.
boolean force = (h._cached >= DESIRED); // Forced to clean
if( force && diskFull )
diskFull = isDiskFull();
long clean_to_age = h.clean_to(force ? DESIRED : (DESIRED>>1));
        // If not forced cleaning, expand the cleaning age so that only Values
        // more than 5 sec old get cleaned
if( !force ) clean_to_age = Math.max(clean_to_age,now-5000);
// No logging if under memory pressure: can deadlock the cleaner thread
if( Log.flag(Sys.CLEAN) ) {
String s = h+" DESIRED="+(DESIRED>>20)+"M dirtysince="+(now-dirty)+" force="+force+" clean2age="+(now-clean_to_age);
if( MemoryManager.canAlloc() ) Log.debug(Sys.CLEAN ,s);
else Log.unwrap(System.err,s);
}
long cleaned = 0;
long freed = 0;
// For faster K/V store walking get the NBHM raw backing array,
// and walk it directly.
Object[] kvs = STORE.raw_array();
// Start the walk at slot 2, because slots 0,1 hold meta-data
for( int i=2; i<kvs.length; i += 2 ) {
// In the raw backing array, Keys and Values alternate in slots
Object ok = kvs[i], ov = kvs[i+1];
if( !(ok instanceof Key ) ) continue; // Ignore tombstones and Primes and null's
Key key = (Key )ok;
if( !(ov instanceof Value) ) continue; // Ignore tombstones and Primes and null's
Value val = (Value)ov;
byte[] m = val.rawMem();
Object p = val.rawPOJO();
if( m == null && p == null ) continue; // Nothing to throw out
if( val.isLockable() ) continue; // we do not want to throw out Lockables.
boolean isChunk = p instanceof Chunk;
// Ignore things younger than the required age. In particular, do
// not spill-to-disk all dirty things we find.
long touched = val._lastAccessedTime;
if( touched > clean_to_age ) { // Too recently touched?
// But can toss out a byte-array if already deserialized & on disk
// (no need for both forms). Note no savings for Chunks, for which m==p._mem
if( val.isPersisted() && m != null && p != null && !isChunk ) {
val.freeMem(); // Toss serialized form, since can rebuild from POJO
freed += val._max;
}
dirty_store(touched); // But may write it out later
continue; // Too young
}
// Should I write this value out to disk?
// Should I further force it from memory?
if( !val.isPersisted() && !diskFull && (force || (lazyPersist() && lazy_clean(key)))) {
try {
val.storePersist(); // Write to disk
if( m == null ) m = val.rawMem();
if( m != null ) cleaned += m.length;
} catch(IOException e) {
if( isDiskFull() )
Log.warn(Sys.CLEAN,"Disk full! Disabling swapping to disk." + (force?" Memory low! Please free some space in " + Persist.getIce().getPath() + "!":""));
else
Log.warn(Sys.CLEAN,"Disk swapping failed! " + e.getMessage());
// Something is wrong so mark disk as full anyways so we do not
// attempt to write again. (will retry next run when memory is low)
diskFull = true;
}
}
// And, under pressure, free all
if( force && val.isPersisted() ) {
val.freeMem (); if( m != null ) freed += val._max; m = null;
val.freePOJO(); if( p != null ) freed += val._max; p = null;
if( isChunk ) freed -= val._max; // Double-counted freed mem for Chunks since val._pojo._mem & val._mem are the same.
}
// If we have both forms, toss the byte[] form - can be had by
// serializing again.
if( m != null && p != null && !isChunk ) {
val.freeMem();
freed += val._max;
}
}
h = _myHisto.histo(true); // Force a new histogram
MemoryManager.set_goals("postclean",false);
// No logging if under memory pressure: can deadlock the cleaner thread
if( Log.flag(Sys.CLEAN) ) {
String s = h+" cleaned="+(cleaned>>20)+"M, freed="+(freed>>20)+"M, DESIRED="+(DESIRED>>20)+"M";
if( MemoryManager.canAlloc() ) Log.debug(Sys.CLEAN ,s);
else Log.unwrap(System.err,s);
}
}
}
// Rules on when to write & free a Key, when not under memory pressure.
boolean lazy_clean( Key key ) {
// Only data chunks are worth tossing out even lazily.
if( !key.isChunkKey() ) // Not arraylet?
        return false; // Not enough savings to write it unless memory pressure forces us
// If this is a chunk of a system-defined array, then assume it has
// short lifetime, and we do not want to spin the disk writing it
// unless we're under memory pressure.
Key veckey = key.getVecKey();
return veckey.user_allowed(); // Write user keys but not system keys
}
// Current best histogram
static private volatile Histo H;
// Histogram class
public static class Histo {
final long[] _hs = new long[128];
long _oldest; // Time of the oldest K/V discovered this pass
long _eldest; // Time of the eldest K/V found in some prior pass
long _hStep; // Histogram step: (now-eldest)/histogram.length
long _cached; // Total alive data in the histogram
long _when; // When was this histogram computed
Value _vold; // For assertions: record the oldest Value
boolean _clean; // Was "clean" K/V when built?
// Return the current best histogram
static Histo best_histo() { return H; }
// Return the current best histogram, recomputing in-place if it is
      // getting stale. Synchronized so that concurrent callers compute into the
      // same histogram one-at-a-time.
synchronized Histo histo( boolean force ) {
final Histo h = H; // Grab current best histogram
if( !force && System.currentTimeMillis() < h._when+100 )
return h; // It is recent; use it
if( h._clean && _dirty==Long.MAX_VALUE )
return h; // No change to the K/V store, so no point
compute(h._oldest); // Use last oldest value for computing the next histogram in-place
return (H = this); // Record current best histogram & return it
}
// Compute a histogram
public void compute( long eldest ) {
Arrays.fill(_hs, 0);
_when = System.currentTimeMillis();
_eldest = eldest; // Eldest seen in some prior pass
_hStep = Math.max(1,(_when-eldest)/_hs.length);
boolean clean = _dirty==Long.MAX_VALUE;
// Compute the hard way
Object[] kvs = STORE.raw_array();
long cached = 0; // Total K/V cached in ram
long oldest = Long.MAX_VALUE; // K/V with the longest time since being touched
Value vold = null;
// Start the walk at slot 2, because slots 0,1 hold meta-data
for( int i=2; i<kvs.length; i += 2 ) {
// In the raw backing array, Keys and Values alternate in slots
Object ok = kvs[i+0], ov = kvs[i+1];
if( !(ok instanceof Key ) ) continue; // Ignore tombstones and Primes and null's
if( !(ov instanceof Value) ) continue; // Ignore tombstones and Primes and null's
Value val = (Value)ov;
int len = 0;
byte[] m = val.rawMem();
Object p = val.rawPOJO();
if( m != null ) len += val._max;
if( p != null ) len += val._max;
if( p instanceof Chunk ) len -= val._max; // Do not double-count Chunks
if( len == 0 ) continue;
cached += len; // Accumulate total amount of cached keys
if( val._lastAccessedTime < oldest ) { // Found an older Value?
vold = val; // Record oldest Value seen
oldest = val._lastAccessedTime;
}
// Compute histogram bucket
int idx = (int)((val._lastAccessedTime - eldest)/_hStep);
if( idx < 0 ) idx = 0;
else if( idx >= _hs.length ) idx = _hs.length-1;
_hs[idx] += len; // Bump histogram bucket
}
_cached = cached; // Total cached; NOTE: larger than sum of histogram buckets
_oldest = oldest; // Oldest seen in this pass
_vold = vold;
_clean = clean && _dirty==Long.MAX_VALUE; // Looks like a clean K/V the whole time?
}
      // Compute the age (in msec) beyond which we need to throw out things
      // in order to free enough memory to hit the desired cache level.
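      // Illustrative walk-through (numbers invented): with _eldest=t0, _hStep=100ms,
      // buckets [10M, 30M, 50M, ...], _cached=200M and desired=150M, the loop tosses
      // 10M (age t0+100ms), then 30M (t0+200ms), then 50M (t0+300ms); at that point
      // 200M-90M < 150M, so clean_to returns t0+300ms and everything last touched
      // before that age is eligible for cleaning.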
long clean_to( long desired ) {
long age = _eldest; // Age of bucket zero
if( _cached < desired ) return age; // Already there; nothing to remove
long s = 0; // Total amount toss out
for( long t : _hs ) { // For all buckets...
s += t; // Raise amount tossed out
age += _hStep; // Raise age beyond which you need to go
if( _cached - s < desired ) break;
}
return age;
}
// Pretty print
@Override
public String toString() {
long x = _eldest;
long now = System.currentTimeMillis();
return "H("+(_cached>>20)+"M, "+x+"ms < +"+(_oldest-x)+"ms <...{"+_hStep+"ms}...< +"+(_hStep*128)+"ms < +"+(now-x)+")";
}
}
}
// API IP Port Watchdog ---------------------------------------------------------------
// Monitor API IP:Port for availability.
//
// This thread is only a watchdog. You can comment this thread out
// so it does not run without affecting any service functionality.
public static class ApiIpPortWatchdogThread extends Thread {
final private String threadName = "ApiPortWatchdog";
private volatile boolean gracefulShutdownInitiated; // Thread-safe.
// Failure-tracking.
private int consecutiveFailures;
private long failureStartTimestampMillis;
// Timing things that can be tuned if needed.
final private int maxFailureSeconds = 180;
final private int maxConsecutiveFailures = 20;
final private int checkIntervalSeconds = 10;
final private int timeoutSeconds = 30;
final private int millisPerSecond = 1000;
final private int timeoutMillis = timeoutSeconds * millisPerSecond;
final private int sleepMillis = checkIntervalSeconds * millisPerSecond;
// Constructor.
public ApiIpPortWatchdogThread() {
super("ApiWatch"); // Only 9 characters get printed in the log.
setDaemon(true);
setPriority(MAX_PRIORITY-2);
reset();
gracefulShutdownInitiated = false;
}
// Exit this watchdog thread.
public void shutdown() {
gracefulShutdownInitiated = true;
}
// Sleep method.
    private void mySleep(int millis) {
      try {
        Thread.sleep (millis);
      }
      catch (Exception xe)
      {}
    }
// Print some help for the user if a failure occurs.
private void printPossibleCauses() {
Log.info(threadName + ": A possible cause is DHCP (e.g. changing WiFi networks)");
Log.info(threadName + ": A possible cause is your laptop going to sleep (if running on a laptop)");
Log.info(threadName + ": A possible cause is the network interface going down");
Log.info(threadName + ": A possible cause is this host being overloaded");
}
// Reset the failure counting when a successful check() occurs.
private void reset() {
consecutiveFailures = 0;
failureStartTimestampMillis = 0;
}
// Count the impact of one failure.
@SuppressWarnings("unused")
private void failed() {
printPossibleCauses();
if (consecutiveFailures == 0) {
failureStartTimestampMillis = System.currentTimeMillis();
}
consecutiveFailures++;
}
// Check if enough failures have occurred or time has passed to
// shut down this node.
private void testForFailureShutdown() {
if (consecutiveFailures >= maxConsecutiveFailures) {
Log.err(threadName + ": Too many failures (>= " + maxConsecutiveFailures + "), H2O node shutting down");
H2O.exit(1);
}
if (consecutiveFailures > 0) {
final long now = System.currentTimeMillis();
final long deltaMillis = now - failureStartTimestampMillis;
final long thresholdMillis = (maxFailureSeconds * millisPerSecond);
if (deltaMillis > thresholdMillis) {
Log.err(threadName + ": Failure time threshold exceeded (>= " +
thresholdMillis +
" ms), H2O node shutting down");
H2O.exit(1);
}
}
}
// Do the watchdog check.
private void check() {
final Socket s = new Socket();
final InetSocketAddress apiIpPort = new InetSocketAddress(H2O.SELF_ADDRESS, H2O.API_PORT);
Exception e=null;
String msg=null;
try {
s.connect (apiIpPort, timeoutMillis);
reset();
}
catch (SocketTimeoutException se) { e= se; msg=": Timed out"; }
catch (IOException ioe) { e=ioe; msg=": Failed"; }
catch (Exception ee) { e= ee; msg=": Failed unexpectedly"; }
finally {
if (gracefulShutdownInitiated) { return; }
if( e != null ) {
Log.err(threadName+msg+" trying to connect to REST API IP and Port (" +
H2O.SELF_ADDRESS + ":" + H2O.API_PORT + ", " + timeoutMillis + " ms)");
        failed();
}
testForFailureShutdown();
try { s.close(); } catch (Exception xe) {}
}
}
// Class main thread.
@Override
public void run() {
Log.debug (threadName + ": Thread run() started");
reset();
while (true) {
mySleep (sleepMillis);
if (gracefulShutdownInitiated) { break; }
check();
if (gracefulShutdownInitiated) { break; }
}
}
}
/**
* Log physical (RSS) memory usage periodically.
* Used by developers to look for memory leaks.
* Currently this only works for Linux.
*/
private static class MemoryWatchdogThread extends Thread {
final private String threadName = "MemoryWatchdog";
private volatile boolean gracefulShutdownInitiated; // Thread-safe.
// Timing things that can be tuned if needed.
final private int checkIntervalSeconds = 5;
final private int millisPerSecond = 1000;
final private int sleepMillis = checkIntervalSeconds * millisPerSecond;
// Constructor.
public MemoryWatchdogThread() {
super("MemWatch"); // Only 9 characters get printed in the log.
setDaemon(true);
setPriority(MAX_PRIORITY - 2);
gracefulShutdownInitiated = false;
}
// Exit this watchdog thread.
public void shutdown() {
gracefulShutdownInitiated = true;
}
// Sleep method.
    private void mySleep(int millis) {
      try {
        Thread.sleep (millis);
      }
      catch (Exception xe)
      {}
    }
// Do the watchdog check.
private void check() {
water.util.LinuxProcFileReader r = new LinuxProcFileReader();
r.read();
long rss = -1;
try {
rss = r.getProcessRss();
}
catch (AssertionError xe) {}
Log.info("RSS: " + rss);
}
// Class main thread.
@Override
public void run() {
Log.debug(threadName + ": Thread run() started");
while (true) {
mySleep (sleepMillis);
if (gracefulShutdownInitiated) { break; }
check();
if (gracefulShutdownInitiated) { break; }
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/H2ONode.java
|
package water;
import water.RPC.RPCCall;
import water.api.DocGen;
import water.api.Request.API;
import water.api.TaskStatus.GetTaskInfo;
import water.nbhm.NonBlockingHashMap;
import water.nbhm.NonBlockingHashMapLong;
import water.util.Log;
import water.util.UnsafeUtils;
import java.io.IOException;
import java.net.*;
import java.nio.channels.DatagramChannel;
import java.nio.channels.SocketChannel;
import java.util.*;
import java.util.Map.Entry;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.atomic.AtomicInteger;
/**
* A <code>Node</code> in an <code>H2O</code> Cloud.
* Basically a worker-bee with CPUs, Memory and Disk.
 * One of these is the self-Node, but the rest are remote Nodes.
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class H2ONode extends Iced implements Comparable {
public int _unique_idx; // Dense integer index, skipping 0. NOT cloud-wide unique.
public long _last_heard_from; // Time in msec since we last heard from this Node
public boolean _announcedLostContact; // True if heartbeat published a no-contact msg
public volatile HeartBeat _heartbeat; // My health info. Changes 1/sec.
public int _tcp_readers; // Count of started TCP reader threads
public boolean _node_healthy;
// A JVM is uniquely named by machine IP address and port#
public H2Okey _key;
public static final class H2Okey extends InetSocketAddress implements Comparable {
final int _ipv4; // cheapo ipv4 address
public H2Okey(InetAddress inet, int port) {
super(inet,port);
byte[] b = inet.getAddress();
_ipv4 = ((b[0]&0xFF)<<0)+((b[1]&0xFF)<<8)+((b[2]&0xFF)<<16)+((b[3]&0xFF)<<24);
}
public int htm_port() { return getPort()-1; }
public int udp_port() { return getPort() ; }
@Override public String toString() { return getAddress()+":"+htm_port(); }
AutoBuffer write( AutoBuffer ab ) {
return ab.put4(_ipv4).put2((char)udp_port());
}
static H2Okey read( AutoBuffer ab ) {
InetAddress inet;
try { inet = InetAddress.getByAddress(ab.getA1(4)); }
catch( UnknownHostException e ) { throw Log.errRTExcept(e); }
int port = ab.get2();
return new H2Okey(inet,port);
}
// Canonical ordering based on inet & port
@Override public int compareTo( Object x ) {
if( x == null ) return -1; // Always before null
if( x == this ) return 0;
H2Okey key = (H2Okey)x;
// Must be unsigned long-math, or overflow will make a broken sort
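      // (Illustrative: an address whose packed _ipv4 has the high bit set becomes a
      // negative int; plain int arithmetic could overflow and mis-order it, while
      // masking with 0xFFFFFFFFL compares both values as unsigned 32-bit numbers.)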
long res = (_ipv4&0xFFFFFFFFL) - (key._ipv4&0xFFFFFFFFL);
if( res != 0 ) return res < 0 ? -1 : 1;
return udp_port() - key.udp_port();
}
}
public final int ip4() { return _key._ipv4; }
// These are INTERN'd upon construction, and are uniquely numbered within the
// same run of a JVM. If a remote Node goes down, then back up... it will
// come back with the SAME IP address, and the same unique_idx and history
// relative to *this* Node. They can be compared with pointer-equality. The
// unique idx is used to know which remote Nodes have cached which Keys, even
// if the Home#/Replica# change for a Key due to an unrelated change in Cloud
// membership. The unique_idx is *per Node*; not all Nodes agree on the same
// indexes.
private H2ONode( H2Okey key, int unique_idx ) {
_key = key;
_unique_idx = unique_idx;
_last_heard_from = System.currentTimeMillis();
_heartbeat = new HeartBeat();
_node_healthy = true;
}
// ---------------
// A dense integer index for every unique IP ever seen, since the JVM booted.
// Used to track "known replicas" per-key across Cloud change-ups. Just use
// an array-of-H2ONodes, and a limit of 255 unique H2ONodes
static private final NonBlockingHashMap<H2Okey,H2ONode> INTERN = new NonBlockingHashMap<H2Okey,H2ONode>();
static private final AtomicInteger UNIQUE = new AtomicInteger(1);
static public H2ONode IDX[] = new H2ONode[1];
// Create and/or re-use an H2ONode. Each gets a unique dense index, and is
// *interned*: there is only one per InetAddress.
public static final H2ONode intern( H2Okey key ) {
H2ONode h2o = INTERN.get(key);
if( h2o != null ) return h2o;
final int idx = UNIQUE.getAndIncrement();
h2o = new H2ONode(key,idx);
H2ONode old = INTERN.putIfAbsent(key,h2o);
if( old != null ) return old;
synchronized(H2O.class) {
while( idx >= IDX.length )
IDX = Arrays.copyOf(IDX,IDX.length<<1);
IDX[idx] = h2o;
}
return h2o;
}
public static final H2ONode intern( InetAddress ip, int port ) { return intern(new H2Okey(ip,port)); }
public static H2ONode intern( byte[] bs, int off ) {
byte[] b = new byte[4];
UnsafeUtils.set4(b, 0, UnsafeUtils.get4(bs, off));
int port = UnsafeUtils.get2(bs,off+4)&0xFFFF;
try { return intern(InetAddress.getByAddress(b),port); }
catch( UnknownHostException e ) { throw Log.errRTExcept(e); }
}
public static final H2ONode intern( int ip, int port ) {
byte[] b = new byte[4];
b[0] = (byte)(ip>> 0);
b[1] = (byte)(ip>> 8);
b[2] = (byte)(ip>>16);
b[3] = (byte)(ip>>24);
try {
return intern(InetAddress.getByAddress(b),port);
} catch( UnknownHostException e ) {
Log.err(e);
return null;
}
}
// Read & return interned from wire
@Override public AutoBuffer write( AutoBuffer ab ) { return _key.write(ab); }
@Override public H2ONode read( AutoBuffer ab ) { return intern(H2Okey.read(ab)); }
public H2ONode( ) { }
// Get a nice Node Name for this Node in the Cloud. Basically it's the
// InetAddress we use to communicate to this Node.
static H2ONode self(InetAddress local) {
assert H2O.UDP_PORT != 0;
try {
// Figure out which interface matches our IP address
      List<NetworkInterface> matchingIfs = new ArrayList<NetworkInterface>();
Enumeration<NetworkInterface> netIfs = NetworkInterface.getNetworkInterfaces();
while( netIfs.hasMoreElements() ) {
NetworkInterface netIf = netIfs.nextElement();
Enumeration<InetAddress> addrs = netIf.getInetAddresses();
while( addrs.hasMoreElements() ) {
InetAddress addr = addrs.nextElement();
if( addr.equals(local) ) {
matchingIfs.add(netIf);
break;
}
}
}
switch( matchingIfs.size() ) {
case 0: H2O.CLOUD_MULTICAST_IF = null; break;
case 1: H2O.CLOUD_MULTICAST_IF = matchingIfs.get(0); break;
default:
String msg = "Found multiple network interfaces for ip address " + local;
for( NetworkInterface ni : matchingIfs ) {
msg +="\n\t" + ni;
}
msg +="\nUsing " + matchingIfs.get(0) + " for UDP broadcast";
Log.warn(msg);
H2O.CLOUD_MULTICAST_IF = matchingIfs.get(0);
}
} catch( SocketException e ) {
throw Log.errRTExcept(e);
}
try {
assert H2O.CLOUD_DGRAM == null;
H2O.CLOUD_DGRAM = DatagramChannel.open();
} catch( Exception e ) {
throw Log.errRTExcept(e);
}
return intern(new H2Okey(local,H2O.UDP_PORT));
}
// Happy printable string
@Override public String toString() { return _key.toString(); }
@Override public int hashCode() { return _key.hashCode(); }
@Override public boolean equals(Object o) { return _key.equals (((H2ONode)o)._key); }
@Override public int compareTo( Object o) { return _key.compareTo(((H2ONode)o)._key); }
// index of this node in the current cloud... can change at the next cloud.
public int index() { return H2O.CLOUD.nidx(this); }
// max memory for this node.
// no need to ask the (possibly not yet populated) heartbeat if we want to know the local max memory.
public long get_max_mem() { return this == H2O.SELF ? Runtime.getRuntime().maxMemory() : _heartbeat.get_max_mem(); }
// ---------------
// A queue of available TCP sockets
// Public re-usable TCP socket opened to this node, or null.
// This is essentially a BlockingQueue/Stack that allows null.
private SocketChannel _socks[] = new SocketChannel[2];
private int _socksAvail=_socks.length;
// Count of concurrent TCP requests both incoming and outgoing
public static final AtomicInteger TCPS = new AtomicInteger(0);
public SocketChannel getTCPSocket() throws IOException {
// Under lock, claim an existing open socket if possible
synchronized(this) {
// Limit myself to the number of open sockets from node-to-node
while( _socksAvail == 0 )
try { wait(); } catch( InterruptedException ie ) { }
// Claim an open socket
SocketChannel sock = _socks[--_socksAvail];
if( sock != null ) {
if( sock.isOpen() ) return sock; // Return existing socket!
        // Else it's an already-closed socket; lower the open TCP count
assert TCPS.get() > 0;
TCPS.decrementAndGet();
}
}
// Must make a fresh socket
SocketChannel sock2 = SocketChannel.open();
sock2.socket().setReuseAddress(true);
sock2.socket().setSendBufferSize(AutoBuffer.BBSIZE);
boolean res = sock2.connect( _key );
assert res && !sock2.isConnectionPending() && sock2.isBlocking() && sock2.isConnected() && sock2.isOpen();
TCPS.incrementAndGet(); // Cluster-wide counting
return sock2;
}
public synchronized void freeTCPSocket( SocketChannel sock ) {
assert 0 <= _socksAvail && _socksAvail < _socks.length;
if( sock != null && !sock.isOpen() ) sock = null;
_socks[_socksAvail++] = sock;
assert TCPS.get() > 0;
if( sock == null ) TCPS.decrementAndGet();
notify();
}
// ---------------
// The *outgoing* client-side calls; pending tasks this Node wants answered.
private final NonBlockingHashMapLong<RPC> _tasks = new NonBlockingHashMapLong();
public void taskPut(int tnum, RPC rpc ) { _tasks.put(tnum,rpc); }
public RPC taskGet(int tnum) { return _tasks.get(tnum); }
public void taskRemove(int tnum) { _tasks.remove(tnum); }
public Collection<RPC> tasks() { return _tasks.values(); }
public int taskSize() { return _tasks.size(); }
// The next unique task# sent *TO* the 'this' Node.
private final AtomicInteger _created_task_ids = new AtomicInteger(1);
public int nextTaskNum() { return _created_task_ids.getAndIncrement(); }
// ---------------
// The Work-In-Progress list. Each item is a UDP packet's worth of work.
  // Once the RPCCall is _computed, it's Completed work instead of
  // work-in-progress. Completed work can be short-circuit replied-to by
  // resending the RPC._dt back. Work that we're sure this Node has seen
// the reply to can be removed - but we must remember task-completion for all
// time (because UDP packets can be dup'd and arrive very very late and
// should not be confused with new work).
private final NonBlockingHashMapLong<RPC.RPCCall> _work = new NonBlockingHashMapLong();
// We must track even dead/completed tasks for All Time (lest a very very
// delayed UDP packet look like New Work). The easy way to do this is leave
// all work packets/RPCs in the _work HashMap for All Time - but this amounts
// to a leak. Instead we "roll up" the eldest completed work items, just
// remembering their completion status. Task id's older (smaller) than the
// _removed_task_ids are both completed, and rolled-up to a single integer.
private final AtomicInteger _removed_task_ids = new AtomicInteger(0);
// A Golden Completed Task: it's a shared completed task used to represent
// all instances of tasks that have been completed and are no longer being
// tracked separately.
private final RPC.RPCCall _removed_task = new RPC.RPCCall(null,this,0);
RPC.RPCCall has_task( int tnum ) {
if( tnum <= _removed_task_ids.get() ) return _removed_task;
return _work.get(tnum);
}
// Record a task-in-progress, or return the prior RPC if one already exists.
// The RPC will flip to "_completed" once the work is done. The RPC._dtask
// can be repeatedly ACKd back to the caller, and the _dtask is removed once
// an ACKACK appears - and the RPC itself is removed once all prior RPCs are
// also ACKACK'd.
RPC.RPCCall record_task( RPC.RPCCall rpc ) {
// Task removal (and roll-up) suffers from classic race-condition, which we
// fix by a classic Dekker's algo; a task# is always in either the _work
// HashMap, or rolled-up in the _removed_task_ids counter, or both (for
    // short intervals during the handoff). We can never have a window where
    // it's in neither, or else a late UDP may attempt to "resurrect" the
// already completed task. Hence we must always check the "removed ids"
// AFTER we insert in the HashMap (we can check before also, but that's a
// simple optimization and not sufficient for correctness).
final RPC.RPCCall x = _work.putIfAbsent(rpc._tsknum,rpc);
if( x != null ) return x; // Return pre-existing work
// If this RPC task# is very old, we just return a Golden Completed task.
// The task is not just completed, but also we have already received
// verification that the client got the answer. So this is just a really
// old attempt to restart a long-completed task.
if( rpc._tsknum > _removed_task_ids.get() ) return null; // Task is new
_work.remove(rpc._tsknum); // Bogus insert, need to remove it
return _removed_task; // And return a generic Golden Completed object
}
// Record the final return value for a DTask. Should happen only once.
// Recorded here, so if the client misses our ACK response we can resend the
// same answer back.
void record_task_answer( RPC.RPCCall rpcall ) {
assert rpcall._started == 0 || rpcall._dt.hasException();
rpcall._started = System.currentTimeMillis();
rpcall._retry = RPC.RETRY_MS; // Start the timer on when to resend
AckAckTimeOutThread.PENDING.add(rpcall);
}
// Stop tracking a remote task, because we got an ACKACK.
void remove_task_tracking( int task ) {
RPC.RPCCall rpc = _work.get(task);
if( rpc == null ) return; // Already stopped tracking
// Atomically attempt to remove the 'dt'. If we win, we are the sole
// thread running the dt.onAckAck. Also helps GC: the 'dt' is done (sent
// to client and we received the ACKACK), but the rpc might need to stick
// around a long time - and the dt might be big.
DTask dt = rpc._dt; // The existing DTask, if any
if( dt != null && RPC.RPCCall.CAS_DT.compareAndSet(rpc,dt,null) ) {
assert rpc._computed : "Still not done #"+task+" "+dt.getClass()+" from "+rpc._client;
AckAckTimeOutThread.PENDING.remove(rpc);
dt.onAckAck(); // One-time call on stop-tracking
}
// Roll-up as many done RPCs as we can, into the _removed_task_ids list
while( true ) {
int t = _removed_task_ids.get(); // Last already-removed ID
RPC.RPCCall rpc2 = _work.get(t+1); // RPC of 1st not-removed ID
if( rpc2 == null || rpc2._dt != null || !_removed_task_ids.compareAndSet(t,t+1) )
break; // Stop when we hit in-progress tasks
_work.remove(t+1); // Else we can remove the tracking now
}
}
// Resend ACK's, in case the UDP ACKACK got dropped. Note that even if the
// ACK was sent via TCP, the ACKACK might be dropped. Further: even if we
// *know* the client got our TCP response, we do not know *when* he'll
// process it... so we cannot e.g. eagerly do an ACKACK on this side. We
// must wait for the real ACKACK - which can drop. So we *must* resend ACK's
// occasionally to force a resend of ACKACKs.
static public class AckAckTimeOutThread extends Thread {
public AckAckTimeOutThread() { super("ACKTimeout"); }
// List of DTasks with results ready (and sent!), and awaiting an ACKACK.
static DelayQueue<RPC.RPCCall> PENDING = new DelayQueue<RPC.RPCCall>();
// Started by main() on a single thread, handle timing-out UDP packets
@Override public void run() {
Thread.currentThread().setPriority(Thread.MAX_PRIORITY-1);
while( true ) {
RPC.RPCCall r;
try { r = PENDING.take(); }
// Interrupted while waiting for a packet?
// Blow it off and go wait again...
catch( InterruptedException e ) { continue; }
assert r._computed : "Found RPCCall not computed "+r._tsknum;
r._ackResendCnt++;
if(r._ackResendCnt % 50 == 0)
Log.err("Possibly broken network, can not send ack through, got " + r._ackResendCnt + " resends.");
if( !H2O.CLOUD.contains(r._client) ) { // RPC from somebody who dropped out of cloud?
r._client.remove_task_tracking(r._tsknum);
continue;
}
if( r._dt != null ) { // Not yet run the ACKACK?
r.resend_ack(); // Resend ACK, hoping for ACKACK
PENDING.add(r); // And queue up to send again
}
}
}
}
// This Node rebooted recently; we can quit tracking prior work history
void rebooted() {
_work.clear();
}
/** Returns run time for this node based on last heartbeat. */
public long runtime() {
return _heartbeat!=null ? _heartbeat._jvm_boot_msec==0 ? 0 : System.currentTimeMillis()-_heartbeat._jvm_boot_msec : -1;
}
  public enum task_status {INIT, CMP, DONE, RTCP, RUDP}
public static class TaskInfo extends Iced {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="Task name")
public final String task;
@API(help="Task Id, unique id per pair of nodes")
public final long taskId;
@API(help="")
public final int nodeId;
@API(help="")
public final int retriesCnt;
@API(help="")
public final task_status taskStatus;
public TaskInfo(DTask task,long tid, int nid, task_status ts, int retriesCnt){
this.task = task == null?"null":task.toString();
taskId = tid;
nodeId = nid;
taskStatus = ts;
this.retriesCnt = retriesCnt;
}
@Override
public String toString(){
return task +"#" + taskId +" [" + taskStatus + ", " + retriesCnt+"]";
}
}
public TaskInfo [] currentTasksInfo() {
Set<Entry<Long,RPCCall>> s = _work.entrySet();
TaskInfo [] res = new TaskInfo[s.size()];
int i = 0;
for(Entry<Long,RPCCall> e:s){
RPCCall rpc = e.getValue();
if(rpc._dt instanceof GetTaskInfo)
continue;
if(i < res.length) {
DTask dt = rpc._dt;
if(dt != null) // else we got ackack -> not interested!
res[i++] = new TaskInfo(rpc._dt, e.getKey(), _unique_idx, rpc._computedAndReplied ? (dt._repliedTcp ? task_status.RTCP : task_status.RUDP) : rpc._computed ? task_status.DONE : rpc._cmpStarted > 0 ? task_status.CMP : task_status.INIT,(rpc._callCnt+rpc._ackResendCnt));
}
}
return Arrays.copyOf(res,i);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/HeartBeat.java
|
package water;
import java.util.Arrays;
/**
* Struct holding H2ONode health info.
* @author <a href="mailto:cliffc@0xdata.com"></a>
*/
public class HeartBeat extends Iced {
public int _hb_version;
public int _cloud_hash; // Cloud-membership hash?
public boolean _common_knowledge; // Cloud shares common knowledge
public char _cloud_size; // Cloud-size this guy is reporting
public long _jvm_boot_msec; // Boot time of JVM
public byte[] _jar_md5; // JAR file digest
public char _num_cpus; // Number of CPUs for this Node, limit of 65535
public double _gflops; // Number of GFlops for this node
public double _membw; // Memory bandwidth in GB/s
public float _system_load_average;
public long _system_idle_ticks;
public long _system_total_ticks;
public long _process_total_ticks;
public int _process_num_open_fds;
public int _cpus_allowed; // Number of CPUs allowed by process
public int _nthreads;
public String _pid;
// Scaled by K or by M setters & getters.
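  // (Illustrative example of the precision trade-off: set_free_disk(3_500_000_000L)
  // stores 3_500_000_000 >> 20 = 3337 (MB), and get_free_disk() then returns
  // 3337 << 20 = 3_499_098_112 bytes; values are rounded down to KB or MB granularity.)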
private int _free_mem; // Free memory in K (goes up and down with GC)
public void set_free_mem (long n) { _free_mem = (int)(n>>10); }
public long get_free_mem () { return ((long) _free_mem)<<10 ; }
int _tot_mem; // Total memory in K (should track virtual mem?)
public void set_tot_mem (long n) { _tot_mem = (int)(n>>10); }
public long get_tot_mem () { return ((long) _tot_mem)<<10 ; }
int _max_mem; // Max memory in K (max mem limit for JVM)
public void set_max_mem (long n) { _max_mem = (int)(n>>10); }
public long get_max_mem () { return ((long) _max_mem)<<10 ; }
public int _keys; // Number of LOCAL keys in this node, cached or homed
int _valsz; // Sum of value bytes used, in K
public void set_valsz(long n) { _valsz = (int)(n>>10); }
public long get_valsz() { return ((long)_valsz)<<10 ; }
int _free_disk; // Free disk (internally stored in megabyte precision)
public void set_free_disk(long n) { _free_disk = (int)(n>>20); }
public long get_free_disk() { return ((long)_free_disk)<<20 ; }
int _max_disk; // Disk size (internally stored in megabyte precision)
public void set_max_disk (long n) { _max_disk = (int)(n>>20); }
public long get_max_disk () { return ((long)_max_disk)<<20 ; }
public boolean check_jar_md5() {
return Arrays.equals(Boot._init._jarHash, _jar_md5);
}
public char _rpcs; // Outstanding DFutureTasks
// Number of elements & threads in high FJ work queues
public short _fjthrds[]; // Number of threads (not all are runnable)
public short _fjqueue[]; // Number of elements in FJ work queue
public char _tcps_active; // Threads trying do a TCP send
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/HeartBeatThread.java
|
package water;
import java.lang.management.ManagementFactory;
import javax.management.*;
import water.persist.Persist;
import water.util.LinuxProcFileReader;
import water.util.Log;
/**
* Starts a thread publishing multicast HeartBeats to the local subnet: the
* Leader of this Cloud.
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class HeartBeatThread extends Thread {
public HeartBeatThread() {
super("Heartbeat");
setDaemon(true);
}
// Time between heartbeats. Strictly several iterations less than the
// timeout.
static final int SLEEP = 1000;
// Timeout in msec before we decide to not include a Node in the next round
// of Paxos Cloud Membership voting.
static public final int TIMEOUT = 60000;
// Timeout in msec before we decide a Node is suspect, and call for a vote
// to remove him. This must be strictly greater than the TIMEOUT.
static final int SUSPECT = TIMEOUT+500;
// My Histogram. Called from any thread calling into the MM.
// Singleton, allocated now so I do not allocate during an OOM event.
static private final H2O.Cleaner.Histo myHisto = new H2O.Cleaner.Histo();
// uniquely number heartbeats for better timelines
static private int HB_VERSION;
// The Run Method.
// Started by main() on a single thread, this code publishes Cloud membership
// to the Cloud once a second (across all members). If anybody disagrees
// with the membership Heartbeat, they will start a round of Paxos group
// discovery.
public void run() {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName os;
try {
os = new ObjectName("java.lang:type=OperatingSystem");
} catch( MalformedObjectNameException e ) {
throw Log.errRTExcept(e);
}
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
int counter = 0;
while( true ) {
// Once per second, for the entire cloud a Node will multi-cast publish
// itself, so other unrelated Clouds discover each other and form up.
try { Thread.sleep(SLEEP); } // Only once-sec per entire Cloud
catch( InterruptedException ignore ) { }
// Update the interesting health self-info for publication also
H2O cloud = H2O.CLOUD;
HeartBeat hb = H2O.SELF._heartbeat;
hb._hb_version = HB_VERSION++;
hb._jvm_boot_msec= TimeLine.JVM_BOOT_MSEC;
final Runtime run = Runtime.getRuntime();
hb.set_free_mem (run. freeMemory());
hb.set_max_mem (run. maxMemory());
hb.set_tot_mem (run.totalMemory());
hb._keys = (H2O.STORE.size ());
hb.set_valsz (myHisto.histo(false)._cached);
hb._num_cpus = (char)run.availableProcessors();
      // Run a mini-benchmark every 5 mins. However, on startup do not have
      // all JVMs immediately launch an all-core benchmark - they would fight
      // with each other. Stagger them using the hashcode.
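      // (With SLEEP=1000ms the counter advances roughly once per second, so the
      // modulo-300 check below fires about every 5 minutes, at a per-node offset
      // derived from this node's hashcode.)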
if( (counter+Math.abs(H2O.SELF.hashCode())) % 300 == 0) {
hb._gflops = Linpack.run(hb._cpus_allowed);
hb._membw = MemoryBandwidth.run(hb._cpus_allowed);
}
Object load = null;
try {
load = mbs.getAttribute(os, "SystemLoadAverage");
} catch( Exception e ) {
// Ignore, data probably not available on this VM
}
hb._system_load_average = load instanceof Double ? ((Double) load).floatValue() : 0;
int rpcs = 0;
for( H2ONode h2o : cloud._memary )
rpcs += h2o.taskSize();
hb._rpcs = (char)rpcs;
// Scrape F/J pool counts
hb._fjthrds = new short[H2O.MAX_PRIORITY+1];
hb._fjqueue = new short[H2O.MAX_PRIORITY+1];
for( int i=0; i<hb._fjthrds.length; i++ ) {
hb._fjthrds[i] = (short)H2O.getWrkThrPoolSize(i);
hb._fjqueue[i] = (short)H2O.getWrkQueueSize(i);
}
hb._tcps_active= (char)H2ONode.TCPS.get();
// get the usable and total disk storage for the partition where the
// persistent KV pairs are stored
hb.set_free_disk(Persist.getIce().getUsableSpace());
hb.set_max_disk(Persist.getIce().getTotalSpace());
// get cpu utilization for the system and for this process. (linux only.)
LinuxProcFileReader lpfr = new LinuxProcFileReader();
lpfr.read();
if (lpfr.valid()) {
hb._system_idle_ticks = lpfr.getSystemIdleTicks();
hb._system_total_ticks = lpfr.getSystemTotalTicks();
hb._process_total_ticks = lpfr.getProcessTotalTicks();
hb._process_num_open_fds = lpfr.getProcessNumOpenFds();
}
else {
hb._system_idle_ticks = -1;
hb._system_total_ticks = -1;
hb._process_total_ticks = -1;
hb._process_num_open_fds = -1;
}
hb._cpus_allowed = lpfr.getProcessCpusAllowed();
if (H2O.OPT_ARGS.nthreads < hb._cpus_allowed) {
hb._cpus_allowed = H2O.OPT_ARGS.nthreads;
}
hb._nthreads = H2O.OPT_ARGS.nthreads;
hb._pid = lpfr.getProcessID();
// Announce what Cloud we think we are in.
// Publish our health as well.
UDPHeartbeat.build_and_multicast(cloud, hb);
// If we have no internet connection, then the multicast goes
// nowhere and we never receive a heartbeat from ourselves!
// Fake it now.
long now = System.currentTimeMillis();
H2O.SELF._last_heard_from = now;
// Look for napping Nodes & propose removing from Cloud
for( H2ONode h2o : cloud._memary ) {
long delta = now - h2o._last_heard_from;
if( delta > SUSPECT ) {// We suspect this Node has taken a dirt nap
if( !h2o._announcedLostContact ) {
Paxos.print("hart: announce suspect node",cloud._memary,h2o.toString());
h2o._announcedLostContact = true;
}
} else if( h2o._announcedLostContact ) {
Paxos.print("hart: regained contact with node",cloud._memary,h2o.toString());
h2o._announcedLostContact = false;
}
}
counter++;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Iced.java
|
package water;
/**
* Empty marker class. Used by the auto-serializer.
*/
public abstract class Iced implements Freezable, Cloneable {
// The abstract methods to be filled in by subclasses. These are automatically
// filled in by any subclass of Iced during class-load-time, unless one
// is already defined. These methods are NOT DECLARED ABSTRACT, because javac
// thinks they will be called by subclasses relying on the auto-gen.
private RuntimeException barf() {
return new RuntimeException(getClass().toString()+" should be automatically overridden in the subclass by the auto-serialization code");
}
@Override public AutoBuffer write(AutoBuffer bb) { return bb; }
@Override public <T extends Freezable> T read(AutoBuffer bb) { return (T)this; }
@Override public <T extends Freezable> T newInstance() { throw barf(); }
@Override public int frozenType() { throw barf(); }
@Override public AutoBuffer writeJSONFields(AutoBuffer bb) { return bb; }
public AutoBuffer writeJSON(AutoBuffer bb) { return writeJSONFields(bb.put1('{')).put1('}'); }
@Override public water.api.DocGen.FieldDoc[] toDocField() { return null; }
public Iced init( Key k ) { return this; }
@Override public Iced clone() {
try { return (Iced)super.clone(); }
catch( CloneNotSupportedException e ) { throw water.util.Log.errRTExcept(e); }
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/InternalInterface.java
|
package water;
import java.io.InputStream;
import water.api.Cloud;
import water.util.Log;
import dontweave.gson.JsonObject;
public class InternalInterface implements water.ExternalInterface {
@Override public Key makeKey( String key_name ) { return Key.make(key_name); }
@Override public Value makeValue( Object key, byte[] bits ) { return new Value((Key)key,bits); }
@Override public void put( Object key, Object val ) { UKV.put((Key)key,(Value)val); }
@Override public Value getValue( Object key ) { return UKV.getValue((Key)key); }
@Override public byte[] getBytes( Object val ) { return ((Value)val).memOrLoad(); }
@Override public Model ingestRFModelFromR( Object key, InputStream is ) {
return null;
}
// All-in-one call to lookup a model, map the columns and score
@Override public float[] scoreKey( Object modelKey, String [] colNames, String domains[][], double[] row ) {
Key key = (Key)modelKey;
String sk = key.toString();
Value v = DKV.get(key);
if (v == null)
throw new IllegalArgumentException("Key "+sk+" not found!");
try {
return scoreModel(v.get(),colNames,domains,row);
} catch(Throwable t) {
Log.err(t);
throw new IllegalArgumentException("Key "+sk+" is not a Model key");
}
}
@Override public float[] scoreModel(Object model, String[] colNames, String domains[][], double[] row) {
return ((Model)model).score(colNames,domains,false,row);
}
@Override public JsonObject cloudStatus( ) { return new Cloud().serve().toJson(); }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Job.java
|
package water;
import static water.util.Utils.difference;
import static water.util.Utils.isEmpty;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Arrays;
import java.util.HashMap;
import water.H2O.H2OCountedCompleter;
import water.H2O.H2OEmptyCompleter;
import water.api.*;
import water.api.Request.Validator.NOPValidator;
import water.api.RequestServer.API_VERSION;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.*;
import water.util.Utils.ExpectedExceptionForDebug;
import dontweave.gson.*;
public abstract class Job extends Func {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
/** A system key for global list of Job keys. */
public static final Key LIST = Key.make(Constants.BUILT_IN_KEY_JOBS, (byte) 0, Key.BUILT_IN_KEY);
/** Shared empty int array. */
private static final int[] EMPTY = new int[0];
@API(help = "Job key")
public Key job_key;
@API(help = "Destination key", filter = Default.class, json = true, validator = DestKeyValidator.class)
public Key destination_key; // Key holding final value after job is removed
static class DestKeyValidator extends NOPValidator<Key> {
@Override public void validateRaw(String value) {
if (Utils.contains(value, Key.ILLEGAL_USER_KEY_CHARS))
throw new IllegalArgumentException("Key '" + value + "' contains illegal character! Please avoid these characters: " + Key.ILLEGAL_USER_KEY_CHARS);
}
}
// Output parameters
@API(help = "Job description") public String description;
@API(help = "Job start time") public long start_time;
@API(help = "Job end time") public long end_time;
@API(help = "Exception") public String exception;
@API(help = "Job state") public JobState state;
transient public H2OCountedCompleter _fjtask; // Top-level task you can block on
transient protected boolean _cv;
/** Possible job states. */
public static enum JobState {
CREATED, // Job was created
RUNNING, // Job is running
CANCELLED, // Job was cancelled by user
FAILED, // Job crashed, error message/exception is available
DONE // Job was successfully finished
}
public Job(Key jobKey, Key dstKey){
job_key = jobKey;
destination_key = dstKey;
state = JobState.CREATED;
}
public Job() {
job_key = defaultJobKey();
description = getClass().getSimpleName();
state = JobState.CREATED;
}
/** Private copy constructor used by {@link JobHandle}. */
private Job(final Job prior) {
this(prior.job_key, prior.destination_key);
this.description = prior.description;
this.start_time = prior.start_time;
this.end_time = prior.end_time;
this.state = prior.state;
this.exception = prior.exception;
}
public Key self() { return job_key; }
public Key dest() { return destination_key; }
public int gridParallelism() {
return 1;
}
protected Key defaultJobKey() {
    // Pinned to this node (i.e., the node that invoked the computation), because it is almost always updated locally
return Key.make((byte) 0, Key.JOB, H2O.SELF);
}
protected Key defaultDestKey() {
return Key.make(getClass().getSimpleName() + Key.rand());
}
/** Start this task based on given top-level fork-join task representing job computation.
* @param fjtask top-level job computation task.
* @return this job in {@link JobState#RUNNING} state
*
* @see JobState
* @see H2OCountedCompleter
*/
public /** FIXME: should be final or at least protected */ Job start(final H2OCountedCompleter fjtask) {
assert state == JobState.CREATED : "Trying to run job which was already run?";
    assert fjtask != null : "Starting a job with null working task is not permitted! Fix your API";
_fjtask = fjtask;
start_time = System.currentTimeMillis();
state = JobState.RUNNING;
// Save the full state of the job
UKV.put(self(), this);
// Update job list
new TAtomic<List>() {
@Override public List atomic(List old) {
if( old == null ) old = new List();
Key[] jobs = old._jobs;
old._jobs = Arrays.copyOf(jobs, jobs.length + 1);
old._jobs[jobs.length] = job_key;
return old;
}
}.invoke(LIST);
return this;
}
/** Return progress of this job.
*
   * @return a value in the interval [0,1] representing job progress.
*/
public float progress() {
Freezable f = UKV.get(destination_key);
if( f instanceof Progress )
return ((Progress) f).progress();
return 0;
}
/** Blocks and get result of this job.
* <p>
* The call blocks on working task which was passed via {@link #start(H2OCountedCompleter)} method
* and returns the result which is fetched from UKV based on job destination key.
* </p>
* @return result of this job fetched from UKV by destination key.
* @see #start(H2OCountedCompleter)
* @see UKV
*/
public <T> T get() {
_fjtask.join(); // Block until top-level job is done
T ans = (T) UKV.get(destination_key);
remove(); // Remove self-job
return ans;
}
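  // A minimal, hypothetical usage sketch of the start()/get() pair; the subclass
  // and variable names below are illustrative only:
  //   Job j = new SomeConcreteJob(...);     // a concrete Job subclass
  //   j.start(topLevelTask);                // switch to RUNNING, register in LIST
  //   SomeResult r = j.get();               // block, fetch destination_key, remove job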
/** Signal cancellation of this job.
* <p>The job will be switched to state {@link JobState#CANCELLED} which signals that
* the job was cancelled by a user. */
public void cancel() {
cancel((String)null, JobState.CANCELLED);
}
/** Signal exceptional cancellation of this job.
* @param ex exception causing the termination of job.
*/
public void cancel(Throwable ex){
if(ex instanceof JobCancelledException || ex.getMessage() != null && ex.getMessage().contains("job was cancelled"))
return;
if(ex instanceof IllegalArgumentException || ex.getCause() instanceof IllegalArgumentException) {
cancel("Illegal argument: " + ex.getMessage());
return;
}
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
ex.printStackTrace(pw);
String stackTrace = sw.toString();
cancel("Got exception '" + ex.getClass() + "', with msg '" + ex.getMessage() + "'\n" + stackTrace, JobState.FAILED);
if(_fjtask != null && !_fjtask.isDone()) _fjtask.completeExceptionally(ex);
}
/** Signal exceptional cancellation of this job.
   * @param msg cancellation message explaining the reason for cancellation
*/
public void cancel(final String msg) {
JobState js = msg == null ? JobState.CANCELLED : JobState.FAILED;
cancel(msg, js);
}
private void cancel(final String msg, JobState resultingState ) {
if(resultingState == JobState.CANCELLED) {
Log.info("Job " + self() + "(" + description + ") was cancelled.");
}
else {
Log.err("Job " + self() + "(" + description + ") failed.");
Log.err(msg);
}
exception = msg;
state = resultingState;
// replace finished job by a job handle
replaceByJobHandle();
DKV.write_barrier();
final Job job = this;
H2O.submitTask(new H2OCountedCompleter() {
@Override public void compute2() {
job.onCancelled();
}
});
}
/**
* Callback which is called after job cancellation (by user, by exception).
*/
protected void onCancelled() {
}
/** Returns true if the job was cancelled by the user or crashed.
* @return true if the job is in state {@link JobState#CANCELLED} or {@link JobState#FAILED}
*/
public boolean isCancelledOrCrashed() {
return state == JobState.CANCELLED || state == JobState.FAILED;
}
/** Returns true if the job was terminated by unexpected exception.
* @return true, if the job was terminated by unexpected exception.
*/
public boolean isCrashed() { return state == JobState.FAILED; }
/** Returns true if this job is correctly finished.
* @return returns true if the job finished and it was not cancelled or crashed by an exception.
*/
public boolean isDone() { return state == JobState.DONE; }
/** Returns true if this job is running
* @return returns true only if this job is in running state.
*/
public boolean isRunning() { return state == JobState.RUNNING; }
public JobState getState() { return state; }
/** Returns a list of all jobs in a system.
* @return list of all jobs including running, done, cancelled, crashed jobs.
*/
public static Job[] all() {
List list = UKV.get(LIST);
Job[] jobs = new Job[list==null?0:list._jobs.length];
int j=0;
for( int i=0; i<jobs.length; i++ ) {
Job job = UKV.get(list._jobs[i]);
if( job != null ) jobs[j++] = job;
}
if( j<jobs.length ) jobs = Arrays.copyOf(jobs,j);
return jobs;
}
/** Check if given job is running.
*
* @param job_key job key
* @return true if job is still running else returns false.
*/
public static boolean isRunning(Key job_key) {
Job j = UKV.get(job_key);
    assert j!=null : "Job should always be in DKV!";
return j.isRunning();
}
/**
* Returns true if job is not running.
* The job can be cancelled, crashed, or already done.
*
* @param jobkey job identification key
* @return true if job is done, cancelled, or crashed, else false
*/
public static boolean isEnded(Key jobkey) { return !isRunning(jobkey); }
/**
* Marks job as finished and records job end time.
*/
public void remove() {
end_time = System.currentTimeMillis();
if( state == JobState.RUNNING )
state = JobState.DONE;
// Overwrite handle - copy end_time, state, msg
replaceByJobHandle();
}
/** Finds a job with given key or returns null.
*
* @param jobkey job key
* @return returns a job with given job key or null if a job is not found.
*/
public static Job findJob(final Key jobkey) { return UKV.get(jobkey); }
/** Finds a job with given dest key or returns null */
public static Job findJobByDest(final Key destKey) {
Job job = null;
for( Job current : Job.all() ) {
if( current.dest().equals(destKey) ) {
job = current;
break;
}
}
return job;
}
  /** Returns job execution time in milliseconds.
   * If the job has already finished, returns its total run time; otherwise returns the elapsed time so far. */
public final long runTimeMs() {
long until = end_time != 0 ? end_time : System.currentTimeMillis();
return until - start_time;
}
/** Description of a speed criteria: msecs/frob */
public String speedDescription() { return null; }
/** Value of the described speed criteria: msecs/frob */
public long speedValue() { return 0; }
@Override protected Response serve() {
fork();
return redirect();
}
protected Response redirect() {
return Progress2.redirect(this, job_key, destination_key);
}
/**
* Forks computation of this job.
*
* <p>The call does not block.</p>
* @return always returns this job.
*/
public Job fork() {
init();
H2OCountedCompleter task = new H2OCountedCompleter() {
@Override public void compute2() {
try {
try {
// Exec always waits till the end of computation
Job.this.exec();
Job.this.remove();
} catch (Throwable t) {
if(!(t instanceof ExpectedExceptionForDebug))
Log.err(t);
Job.this.cancel(t);
}
} finally {
tryComplete();
}
}
};
start(task);
H2O.submitTask(task);
return this;
}
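  // A minimal usage sketch of the fork/poll/fetch life-cycle; MyJob is a
  // hypothetical Job subclass whose exec() writes its result under destination_key:
  //
  //   MyJob job = new MyJob();                 // state == JobState.CREATED
  //   job.fork();                              // non-blocking: init() + exec() run asynchronously
  //   Job.waitUntilJobEnded(job.self());       // poll until DONE/CANCELLED/FAILED
  //   Object result = UKV.get(job.dest());     // result stored under the destination key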
@Override public void invoke() {
init();
start(new H2OEmptyCompleter()); // mark job started
exec(); // execute the implementation
remove(); // remove the job
}
/**
   * Invoked before the job runs. This is the place to check that arguments are valid or to throw an
   * IllegalArgumentException. It is invoked from both the Web and Java APIs.
*
* @throws IllegalArgumentException throws the exception if initialization fails to ensure
* correct job runtime environment.
*/
@Override protected void init() throws IllegalArgumentException {
if (destination_key == null) destination_key = defaultDestKey();
}
/**
* Block synchronously waiting for a job to end, success or not.
* @param jobkey Job to wait for.
* @param pollingIntervalMillis Polling interval sleep time.
*/
public static void waitUntilJobEnded(Key jobkey, int pollingIntervalMillis) {
while (true) {
if (Job.isEnded(jobkey)) {
return;
}
try { Thread.sleep (pollingIntervalMillis); } catch (Exception ignore) {}
}
}
/**
* Block synchronously waiting for a job to end, success or not.
* @param jobkey Job to wait for.
*/
public static void waitUntilJobEnded(Key jobkey) {
int THREE_SECONDS_MILLIS = 3 * 1000;
waitUntilJobEnded(jobkey, THREE_SECONDS_MILLIS);
}
public static class ChunkProgress extends Iced implements Progress {
final long _nchunks;
final long _count;
private final Status _status;
final String _error;
public enum Status { Computing, Done, Cancelled, Error }
public Status status() { return _status; }
public boolean isDone() { return _status == Status.Done || _status == Status.Error; }
public String error() { return _error; }
public ChunkProgress(long chunksTotal) {
_nchunks = chunksTotal;
_count = 0;
_status = Status.Computing;
_error = null;
}
private ChunkProgress(long nchunks, long computed, Status s, String err) {
_nchunks = nchunks;
_count = computed;
_status = s;
_error = err;
}
public ChunkProgress update(int count) {
if( _status == Status.Cancelled || _status == Status.Error )
return this;
long c = _count + count;
return new ChunkProgress(_nchunks, c, Status.Computing, null);
}
public ChunkProgress done() {
return new ChunkProgress(_nchunks, _nchunks, Status.Done, null);
}
public ChunkProgress cancel() {
return new ChunkProgress(0, 0, Status.Cancelled, null);
}
public ChunkProgress error(String msg) {
return new ChunkProgress(0, 0, Status.Error, msg);
}
@Override public float progress() {
if( _status == Status.Done ) return 1.0f;
return Math.min(0.99f, (float) ((double) _count / (double) _nchunks));
}
}
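  // Sketch of how a ChunkProgress value typically evolves (each update returns a
  // new immutable instance; the numbers are illustrative):
  //
  //   ChunkProgress p = new ChunkProgress(10); // 10 chunks total, progress() == 0.0f
  //   p = p.update(4);                         // 4 chunks processed, progress() == 0.4f
  //   p = p.done();                            // progress() == 1.0f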
public static class ChunkProgressJob extends Job {
Key _progress;
public ChunkProgressJob(long chunksTotal, Key destinationKey) {
destination_key = destinationKey;
_progress = Key.make(Key.make()._kb, (byte) 0, Key.DFJ_INTERNAL_USER, destinationKey.home_node());
UKV.put(_progress, new ChunkProgress(chunksTotal));
}
public void updateProgress(final int c) { // c == number of processed chunks
if( isRunning(self()) ) {
new TAtomic<ChunkProgress>() {
@Override public ChunkProgress atomic(ChunkProgress old) {
if( old == null ) return null;
return old.update(c);
}
}.fork(_progress);
}
}
@Override public void remove() {
super.remove();
UKV.remove(_progress);
}
public final Key progressKey() { return _progress; }
public void onException(Throwable ex) {
UKV.remove(dest());
Value v = DKV.get(progressKey());
if( v != null ) {
ChunkProgress p = v.get();
p = p.error(ex.getMessage());
DKV.put(progressKey(), p);
}
cancel(ex);
}
}
public static boolean checkIdx(Frame source, int[] idx) {
for (int i : idx) if (i<0 || i>source.vecs().length-1) return false;
return true;
}
/* Update end_time, state, msg, preserve start_time */
private void replaceByJobHandle() {
assert state != JobState.RUNNING : "Running job cannot be replaced.";
final Job self = this;
new TAtomic<Job>() {
@Override public Job atomic(Job old) {
if( old == null ) return null;
JobHandle jh = new JobHandle(self);
jh.start_time = old.start_time;
return jh;
}
}.fork(job_key);
}
/**
* A job which operates with a frame.
*
* INPUT frame
*/
public static abstract class FrameJob extends Job {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
@API(help = "Source frame", required = true, filter = Default.class, json = true)
public Frame source;
/**
* Annotate the number of columns and rows of the training data set in the job parameter JSON
* @return JsonObject annotated with num_cols and num_rows of the training data set
*/
@Override public JsonObject toJSON() {
JsonObject jo = super.toJSON();
if (source != null) {
jo.getAsJsonObject("source").addProperty("num_cols", source.numCols());
jo.getAsJsonObject("source").addProperty("num_rows", source.numRows());
}
return jo;
}
}
/**
* A job which has an input represented by a frame and frame column filter.
* The filter can be specified by ignored columns or by used columns.
*
* INPUT list ignored columns by idx XOR list of ignored columns by name XOR list of used columns
*
* @see FrameJob
*/
public static abstract class ColumnsJob extends FrameJob {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
@API(help = "Input columns (Indexes start at 0)", filter=colsFilter.class, hide=true)
public int[] cols;
class colsFilter extends MultiVecSelect { public colsFilter() { super("source"); } }
@API(help = "Ignored columns by name and zero-based index", filter=colsNamesIdxFilter.class, displayName="Ignored columns")
public int[] ignored_cols;
class colsNamesIdxFilter extends MultiVecSelect { public colsNamesIdxFilter() {super("source", MultiVecSelectType.NAMES_THEN_INDEXES); } }
@API(help = "Ignored columns by name", filter=colsNamesFilter.class, displayName="Ignored columns by name", hide=true)
public int[] ignored_cols_by_name = EMPTY;
class colsNamesFilter extends MultiVecSelect { public colsNamesFilter() {super("source", MultiVecSelectType.NAMES_ONLY); } }
/**
* Annotate the used and ignored columns in the job parameter JSON
* For both the used and the ignored columns, the following rules apply:
     * If the number of columns is less than or equal to 100, a dense list of used columns is reported.
     * If the number of columns is greater than 100, only the number of columns is reported.
     * If the number of columns is 0, a JSON null is reported.
* @return JsonObject annotated with used/ignored columns
*/
@Override public JsonObject toJSON() {
JsonObject jo = super.toJSON();
if (!jo.has("source") || source==null) return jo;
HashMap<String, int[]> map = new HashMap<String, int[]>();
map.put("used_cols", cols);
map.put("ignored_cols", ignored_cols);
for (String key : map.keySet()) {
int[] val = map.get(key);
if (val != null) {
if(val.length>100) jo.getAsJsonObject("source").addProperty("num_" + key, val.length);
else if(val.length>0) {
StringBuilder sb = new StringBuilder();
for (int c : val) sb.append(c + ",");
jo.getAsJsonObject("source").addProperty(key, sb.toString().substring(0, sb.length()-1));
} else {
jo.getAsJsonObject("source").add(key, JsonNull.INSTANCE);
}
}
}
return jo;
}
@Override protected void init() {
super.init();
if (_cv) return;
// At most one of the following may be specified.
int specified = 0;
if (!isEmpty(cols)) { specified++; }
if (!isEmpty(ignored_cols)) { specified++; }
if (!isEmpty(ignored_cols_by_name)) { specified++; }
if (specified > 1) throw new IllegalArgumentException("Arguments 'cols', 'ignored_cols_by_name', and 'ignored_cols' are exclusive");
// Unify all ignored cols specifiers to ignored_cols.
{
if (!isEmpty(ignored_cols_by_name)) {
assert (isEmpty(ignored_cols));
ignored_cols = ignored_cols_by_name;
ignored_cols_by_name = EMPTY;
}
if (ignored_cols == null) {
ignored_cols = new int[0];
}
}
// At this point, ignored_cols_by_name is dead.
assert (isEmpty(ignored_cols_by_name));
// Create map of ignored columns for speed.
HashMap<Integer,Integer> ignoredColsMap = new HashMap<Integer,Integer>();
for ( int i = 0; i < ignored_cols.length; i++) {
int value = ignored_cols[i];
ignoredColsMap.put(new Integer(value), new Integer(1));
}
// Add UUID cols to ignoredColsMap. Duplicates get folded into one entry.
Vec[] vecs = source.vecs();
for( int i = 0; i < vecs.length; i++ ) {
if (vecs[i].isUUID()) {
ignoredColsMap.put(new Integer(i), new Integer(1));
}
}
// Rebuild ignored_cols from the map. Sort it.
{
ignored_cols = new int[ignoredColsMap.size()];
int j = 0;
for (Integer key : ignoredColsMap.keySet()) {
ignored_cols[j] = key.intValue();
j++;
}
Arrays.sort(ignored_cols);
}
// If the columns are not specified, then select everything.
if (isEmpty(cols)) {
cols = new int[source.vecs().length];
for( int i = 0; i < cols.length; i++ )
cols[i] = i;
} else {
if (!checkIdx(source, cols)) throw new IllegalArgumentException("Argument 'cols' specified invalid column!");
}
// Make a set difference between cols and ignored_cols.
if (!isEmpty(ignored_cols)) {
int[] icols = ! isEmpty(ignored_cols) ? ignored_cols : ignored_cols_by_name;
if (!checkIdx(source, icols)) throw new IllegalArgumentException("Argument 'ignored_cols' or 'ignored_cols_by_name' specified invalid column!");
cols = difference(cols, icols);
// Setup all variables in consistent way
ignored_cols = icols;
ignored_cols_by_name = icols;
}
if( cols.length == 0 ) {
throw new IllegalArgumentException("No column selected");
}
}
protected final Vec[] selectVecs(Frame frame) {
Vec[] vecs = new Vec[cols.length];
for( int i = 0; i < cols.length; i++ )
vecs[i] = frame.vecs()[cols[i]];
return vecs;
}
protected final Frame selectFrame(Frame frame) {
Vec[] vecs = new Vec[cols.length];
String[] names = new String[cols.length];
for( int i = 0; i < cols.length; i++ ) {
vecs[i] = frame.vecs()[cols[i]];
names[i] = frame.names()[cols[i]];
}
return new Frame(names, vecs);
}
}
/**
* A columns job that requires a response.
*
* INPUT response column from source
*/
public static abstract class ColumnsResJob extends ColumnsJob {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
@API(help="Column to use as class", required=true, filter=responseFilter.class, json = true)
public Vec response;
class responseFilter extends VecClassSelect { responseFilter() { super("source"); } }
@Override protected void registered(API_VERSION ver) {
super.registered(ver);
Argument c = find("ignored_cols");
Argument r = find("response");
int ci = _arguments.indexOf(c);
int ri = _arguments.indexOf(r);
_arguments.set(ri, c);
_arguments.set(ci, r);
((FrameKeyMultiVec) c).ignoreVec((FrameKeyVec)r);
}
/**
* Annotate the name of the response column in the job parameter JSON
* @return JsonObject annotated with the name of the response column
*/
@Override public JsonObject toJSON() {
JsonObject jo = super.toJSON();
if (source!=null) {
int idx = source.find(response);
if( idx == -1 ) {
Vec vm = response.masterVec();
if( vm != null ) idx = source.find(vm);
}
jo.getAsJsonObject("response").add("name", new JsonPrimitive(idx == -1 ? "null" : source._names[idx]));
}
return jo;
}
@Override protected void init() {
super.init();
      // Check if it makes sense to build a model
if (source.numRows()==0)
throw new H2OIllegalArgumentException(find("source"), "Cannot build a model on empty dataset!");
      // Do not alter the response to an enum column when classification is
      // requested; instead, the classification flag decides between
      // classification and regression.
Vec[] vecs = source.vecs();
for( int i = cols.length - 1; i >= 0; i-- )
if( vecs[cols[i]] == response )
cols = Utils.remove(cols,i);
final boolean has_constant_response = response.isEnum() ?
response.domain().length <= 1 : response.min() == response.max();
if (has_constant_response)
throw new H2OIllegalArgumentException(find("response"), "Constant response column!");
}
}
/**
* A job producing a model.
*
* INPUT response column from source
*/
public static abstract class ModelJob extends ModelJobWithoutClassificationField {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
@API(help="Do classification or regression", filter=myClassFilter.class, json = true)
public boolean classification = true; // we need 3-state boolean: unspecified, true/false BUT we solve that by checking UI layer to see if the classification parameter was passed
class myClassFilter extends DoClassBoolean { myClassFilter() { super("source"); } }
@Override protected void init() {
super.init();
// Reject request if classification is required and response column is float
//Argument a4class = find("classification"); // get UI control
//String p4class = input("classification"); // get value from HTTP requests
// if there is UI control and classification field was passed
final boolean classificationFieldSpecified = true; // ROLLBACK: a4class!=null ? p4class!=null : /* we are not in UI so expect that parameter is specified correctly */ true;
if (!classificationFieldSpecified) { // can happen if a client sends a request which does not specify classification parameter
classification = response.isEnum();
Log.warn("Classification field is not specified - deriving according to response! The classification field set to " + classification);
} else {
if ( classification && response.isFloat()) throw new H2OIllegalArgumentException(find("classification"), "Requested classification on float column!");
if (!classification && response.isEnum() ) throw new H2OIllegalArgumentException(find("classification"), "Requested regression on enum column!");
}
}
}
/**
* A job producing a model that has no notion of Classification or Regression.
*
* INPUT response column from source
*/
public static abstract class ModelJobWithoutClassificationField extends ColumnsResJob {
// This exists to support GLM2, which determines classification/regression using the
// family field, not a second separate field.
}
/**
* Job which produces model and validate it on a given dataset.
* INPUT validation frame
*/
public static abstract class ValidatedJob extends ModelJob {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
protected transient Vec[] _train, _valid;
/** Validation vector extracted from validation frame. */
protected transient Vec _validResponse;
    /** Validation response domain, or null if validation is not specified or the response is float. */
protected transient String[] _validResponseDomain;
/** Source response domain or null if response is float. */
protected transient String[] _sourceResponseDomain;
/** CM domain derived from {@link #_validResponseDomain} and {@link #_sourceResponseDomain}. */
protected transient String[] _cmDomain;
/** Names of columns */
protected transient String[] _names;
/** Name of validation response. Should be same as source response. */
public transient String _responseName;
/** Adapted validation frame to a computed model. */
private transient Frame _adaptedValidation;
private transient Vec _adaptedValidationResponse; // Validation response adapted to computed CM domain
private transient int[][] _fromModel2CM; // Transformation for model response to common CM domain
private transient int[][] _fromValid2CM; // Transformation for validation response to common CM domain
@API(help = "Validation frame", filter = Default.class, mustExist = true, json = true)
public Frame validation;
@API(help = "Number of folds for cross-validation (if no validation data is specified)", filter = Default.class, json = true)
public int n_folds = 0;
@API(help = "Keep cross-validation dataset splits", filter = Default.class, json = true)
public boolean keep_cross_validation_splits = false;
@API(help = "Cross-validation models", json = true)
public Key[] xval_models;
public int _cv_count = 0;
/**
* Helper to compute the actual progress if we're doing cross-validation.
* This method is supposed to be called by the progress() implementation for CV-capable algos.
* @param p Progress reported by the main job
* @return actual progress if CV is done, otherwise returns p
*/
public float cv_progress(float p) {
if (n_folds >= 2) {
return (p + _cv_count) / (n_folds + 1); //divide by 1 more to account for final scoring as extra work
}
return p;
}
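    // Illustrative example of the formula above: with n_folds == 5 and _cv_count == 2
    // folds already finished, a main-job progress of p == 0.5 maps to
    // (0.5 + 2) / (5 + 1) ~= 0.42, so each fold (plus the final scoring pass)
    // contributes at most 1/(n_folds+1) to the overall progress bar.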
/**
* Helper to specify which arguments trigger a refresh on change
* @param ver
*/
@Override
protected void registered(RequestServer.API_VERSION ver) {
super.registered(ver);
for (Argument arg : _arguments) {
if ( arg._name.equals("validation")) {
arg.setRefreshOnChange();
}
}
}
/**
* Helper to handle arguments based on existing input values
* @param arg
* @param inputArgs
*/
@Override protected void queryArgumentValueSet(Argument arg, java.util.Properties inputArgs) {
super.queryArgumentValueSet(arg, inputArgs);
if (arg._name.equals("n_folds") && validation != null) {
arg.disable("Only if no validation dataset is provided.");
n_folds = 0;
}
}
/**
* Cross-Validate this Job (to be overridden for each instance, which also calls genericCrossValidation)
* @param splits Frames containing train/test splits
* @param cv_preds Store the predictions for each cross-validation run
* @param offsets Array to store the offsets of starting row indices for each cross-validation run
* @param i Which fold of cross-validation to perform
*/
public void crossValidate(Frame[] splits, Frame[] cv_preds, long[] offsets, int i) { throw H2O.unimpl(); }
/**
* Helper to perform the generic part of cross validation
* Expected to be called from each specific instance's crossValidate method
* @param splits Frames containing train/test splits
* @param offsets Array to store the offsets of starting row indices for each cross-validation run
* @param i Which fold of cross-validation to perform
*/
final protected void genericCrossValidation(Frame[] splits, long[] offsets, int i) {
int respidx = source.find(_responseName);
assert(respidx != -1) : "response is not found in source!";
job_key = Key.make(job_key.toString() + "_xval" + i); //make a new Job for CV
assert(xval_models != null);
destination_key = xval_models[i];
source = splits[0];
validation = splits[1];
response = source.vecs()[respidx];
n_folds = 0;
state = Job.JobState.CREATED; //Hack to allow this job to run
DKV.put(self(), this); //Needed to pass the Job.isRunning(cvdl.self()) check in FrameTask
offsets[i + 1] = offsets[i] + validation.numRows();
_cv = true; //Hack to allow init() to pass for ColumnsJob (allow cols/ignored_cols to co-exist)
invoke();
}
/**
* Annotate the number of columns and rows of the validation data set in the job parameter JSON
* @return JsonObject annotated with num_cols and num_rows of the validation data set
*/
@Override public JsonObject toJSON() {
JsonObject jo = super.toJSON();
if (validation != null) {
jo.getAsJsonObject("validation").addProperty("num_cols", validation.numCols());
jo.getAsJsonObject("validation").addProperty("num_rows", validation.numRows());
}
return jo;
}
@Override protected void init() {
if ( validation != null && n_folds != 0 ) throw new UnsupportedOperationException("Cannot specify a validation dataset and non-zero number of cross-validation folds.");
if ( n_folds < 0 ) throw new UnsupportedOperationException("The number of cross-validation folds must be >= 0.");
super.init();
xval_models = new Key[n_folds];
for (int i=0; i<xval_models.length; ++i)
xval_models[i] = Key.make(dest().toString() + "_xval" + i);
int rIndex = 0;
for( int i = 0; i < source.vecs().length; i++ )
if( source.vecs()[i] == response ) {
rIndex = i;
break;
}
_responseName = source._names != null && rIndex >= 0 ? source._names[rIndex] : "response";
_train = selectVecs(source);
_names = new String[cols.length];
for( int i = 0; i < cols.length; i++ )
_names[i] = source._names[cols[i]];
// Compute source response domain
if (classification) _sourceResponseDomain = getVectorDomain(response);
// Is validation specified?
if( validation != null ) {
// Extract a validation response
int idx = validation.find(source.names()[rIndex]);
if( idx == -1 ) throw new IllegalArgumentException("Validation set does not have a response column called "+_responseName);
_validResponse = validation.vecs()[idx];
// Compute output confusion matrix domain for classification:
// - if validation dataset is specified then CM domain is union of train and validation response domains
// else it is only domain of response column.
if (classification) {
_validResponseDomain = getVectorDomain(_validResponse);
if (_validResponseDomain!=null) {
_cmDomain = Utils.domainUnion(_sourceResponseDomain, _validResponseDomain);
if (!Arrays.deepEquals(_sourceResponseDomain, _validResponseDomain)) {
_fromModel2CM = Model.getDomainMapping(_cmDomain, _sourceResponseDomain, false); // transformation from model produced response ~> cmDomain
_fromValid2CM = Model.getDomainMapping(_cmDomain, _validResponseDomain , false); // transformation from validation response domain ~> cmDomain
}
} else _cmDomain = _sourceResponseDomain;
} /* end of if classification */
} else if (classification) _cmDomain = _sourceResponseDomain;
}
protected String[] getVectorDomain(final Vec v) {
assert v==null || v.isInt() || v.isEnum() : "Cannot get vector domain!";
if (v==null) return null;
String[] r;
if (v.isEnum()) {
r = v.domain();
} else {
Vec tmp = v.toEnum();
r = tmp.domain();
UKV.remove(tmp._key);
}
return r;
}
    /** Returns true if the job has a validation dataset specified. */
protected final boolean hasValidation() { return validation!=null; }
/** Returns a domain for confusion matrix. */
protected final String[] getCMDomain() { return _cmDomain; }
    /** Returns the validation dataset, adapted to the model if necessary. */
    protected final Frame getValidation() { return _adaptedValidation!=null ? _adaptedValidation : validation; }
/** Returns original validation dataset. */
protected final Frame getOrigValidation() { return validation; }
public final Response2CMAdaptor getValidAdaptor() { return new Response2CMAdaptor(); }
    /** Prepares the validation dataset adapted to the given model, including the response transformation to the CM domain. */
protected final void prepareValidationWithModel(final Model model) {
if (validation == null) return;
Frame[] av = model.adapt(validation, false);
_adaptedValidation = av[0];
gtrash(av[1]); // delete this after computation
if (_fromValid2CM!=null) {
assert classification : "Validation response transformation should be declared only for classification!";
assert _fromModel2CM != null : "Model response transformation should exist if validation response transformation exists!";
Vec tmp = _validResponse.toEnum();
_adaptedValidationResponse = tmp.makeTransf(_fromValid2CM, getCMDomain()); // Add an original response adapted to CM domain
gtrash(_adaptedValidationResponse); // Add the created vector to a clean-up list
gtrash(tmp);
}
}
/** A micro helper for transforming model/validation responses to confusion matrix domain. */
public class Response2CMAdaptor {
/** Adapt given vector produced by a model to confusion matrix domain. Always return a new vector which needs to be deleted. */
public Vec adaptModelResponse2CM(final Vec v) { return v.makeTransf(_fromModel2CM, getCMDomain()); }
/** Adapt given validation vector to confusion matrix domain. Always return a new vector which needs to be deleted. */
public Vec adaptValidResponse2CM(final Vec v) { return v.makeTransf(_fromValid2CM, getCMDomain()); }
/** Returns validation dataset. */
public Frame getValidation() { return ValidatedJob.this.getValidation(); }
/** Return cached validation response already adapted to CM domain. */
public Vec getAdaptedValidationResponse2CM() { return _adaptedValidationResponse; }
/** Return cm domain. */
public String[] getCMDomain() { return ValidatedJob.this.getCMDomain(); }
/** Returns true if model/validation responses need to be adapted to confusion matrix domain. */
public boolean needsAdaptation2CM() { return _fromModel2CM != null; }
/** Return the adapted response name */
public String adaptedValidationResponse(final String response) { return response + ".adapted"; }
}
}
  /**
   * Simple interface for reporting the progress of a computation as a value in [0,1].
   */
public interface Progress {
float progress();
}
public interface ProgressMonitor {
public void update(long n);
}
public static class Fail extends Iced {
public final String _message;
public Fail(String message) { _message = message; }
}
public static final class List extends Iced {
Key[] _jobs = new Key[0];
@Override
public List clone(){
List l = new List();
l._jobs = _jobs.clone();
for(int i = 0; i < l._jobs.length; ++i)
l._jobs[i] = (Key)l._jobs[i].clone();
return l;
}
}
  /** A lightweight job handle containing the same content
   * as the plain Job class.
   */
public static class JobHandle extends Job {
public JobHandle(final Job job) { super(job); }
}
public static class JobCancelledException extends RuntimeException {
public JobCancelledException(){super("job was cancelled!");}
public JobCancelledException(String msg){super("job was cancelled! with msg '" + msg + "'");}
}
  /** Hygienic method to prevent accidental capture of unwanted values. */
public static <T extends FrameJob> T hygiene(T job) {
job.source = null;
return job;
}
public static <T extends ValidatedJob> T hygiene(T job) {
job.source = null;
job.validation = null;
return job;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Key.java
|
package water;
import java.util.Arrays;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLongFieldUpdater;
import water.util.UnsafeUtils;
/**
* Keys
*
* This class defines:
* - A Key's bytes (name) and hash
* - Known Disk and memory replicas.
* - A cache of somewhat expensive to compute stuff related to the current
* Cloud, plus a byte of the desired replication factor.
*
* Keys are expected to be a high-count item, hence the care about size.
*
* Keys are *interned* in the local K/V store, a non-blocking hash set and are
* kept pointer equivalent (via the interning) for cheap compares. The
* interning only happens after a successful insert in the local H2O.STORE via
* H2O.put_if_later.
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public final class Key extends Iced implements Comparable {
// The Key!!!
// Limited to 512 random bytes - to fit better in UDP packets.
public static final int KEY_LENGTH = 512;
public byte[] _kb; // Key bytes, wire-line protocol
transient int _hash; // Hash on key alone (and not value)
  // User keys must be ASCII, so the values 0..31 are reserved for system
  // keys. When you create a system key, please add its number to this list.
public static final byte BUILT_IN_KEY = 2; // C.f. Constants.BUILT_IN_KEY_*
public static final byte JOB = 3;
public static final byte VEC = 4;
public static final byte DVEC = 5;
public static final byte VGROUP = 6; // vector group
public static final byte DFJ_INTERNAL_USER = 7;
public static final byte HIDDEN_USER_KEY = 31;
public static final byte USER_KEY = 32;
// *Desired* distribution function on keys & replication factor. Replica #0
// is the master, replica #1, 2, 3, etc represent additional desired
// replication nodes. Note that this function is just the distribution
// function - it does not DO any replication, nor does it dictate any policy
// on how fast replication occurs. Returns -1 if the desired replica
// is nonsense, e.g. asking for replica #3 in a 2-Node system.
int D( int repl ) {
int hsz = H2O.CLOUD.size();
// See if this is a specifically homed Key
if( !user_allowed() && repl < _kb[1] ) { // Asking for a replica# from the homed list?
assert _kb[0] != Key.DVEC;
H2ONode h2o = H2ONode.intern(_kb,2+repl*(4+2/*serialized bytesize of H2OKey*/));
// Reverse the home to the index
int idx = h2o.index();
if( idx >= 0 ) return idx;
// Else homed to a node which is no longer in the cloud!
// Fall back to the normal home mode
}
// Distribution of Fluid Vectors is a special case.
// Fluid Vectors are grouped into vector groups, each of which must have
// the same distribution of chunks so that MRTask2 run over group of
// vectors will keep data-locality. The fluid vecs from the same group
// share the same key pattern + each has 4 bytes identifying particular
// vector in the group. Since we need the same chunks end up on the same
// node in the group, we need to skip the 4 bytes containing vec# from the
// hash. Apart from that, we keep the previous mode of operation, so that
// ByteVec would have first 64MB distributed around cloud randomly and then
// go round-robin in 64MB chunks.
if( _kb[0] == DVEC ) {
// Homed Chunk?
if( _kb[1] != -1 ) throw H2O.unimpl();
// For round-robin on Chunks in the following pattern:
// 1 Chunk-per-node, until all nodes have 1 chunk (max parallelism).
// Then 2 chunks-per-node, once around, then 4, then 8, then 16.
// Getting several chunks-in-a-row on a single Node means that stencil
// calculations that step off the end of one chunk into the next won't
// force a chunk local - replicating the data. If all chunks round robin
// exactly, then any stencil calc will double the cached volume of data
      // (every node will have its own chunk, plus a cached next-chunk).
// Above 16-chunks-in-a-row we hit diminishing returns.
int cidx = UnsafeUtils.get4(_kb, 1 + 1 + 4); // Chunk index
int x = cidx/hsz; // Multiples of cluster size
// 0 -> 1st trip around the cluster; nidx= (cidx- 0*hsz)>>0
// 1,2 -> 2nd & 3rd trip; allocate in pairs: nidx= (cidx- 1*hsz)>>1
// 3,4,5,6 -> next 4 rounds; allocate in quads: nidx= (cidx- 3*hsz)>>2
// 7-14 -> next 8 rounds in octets: nidx= (cidx- 7*hsz)>>3
// 15+ -> remaining rounds in groups of 16: nidx= (cidx-15*hsz)>>4
int z = x==0 ? 0 : (x<=2 ? 1 : (x<=6 ? 2 : (x<=14 ? 3 : 4)));
int nidx = (cidx-((1<<z)-1)*hsz)>>z;
return ((nidx+repl)&0x7FFFFFFF) % hsz;
}
// Easy Cheesy Stupid:
return ((_hash+repl)&0x7FFFFFFF) % hsz;
}
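  // Worked example of the DVEC round-robin above, assuming hsz == 4 nodes and
  // repl == 0 (purely illustrative):
  //   cidx 0..3  : x == 0,    z == 0, nidx == cidx          -> nodes 0,1,2,3 (1 chunk each)
  //   cidx 4..11 : x == 1..2, z == 1, nidx == (cidx-4)>>1   -> nodes 0,0,1,1,2,2,3,3
  //   cidx 12..27: x == 3..6, z == 2, nidx == (cidx-12)>>2  -> 4 consecutive chunks per node
  // i.e. 1 chunk per node on the first pass, then 2, then 4, as described above.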
/** List of illegal characters which are not allowed in user keys. */
public static final CharSequence ILLEGAL_USER_KEY_CHARS = " !@#$%^&*()+={}[]|\\;:\"'<>,/?";
// 64 bits of Cloud-specific cached stuff. It is changed atomically by any
// thread that visits it and has the wrong Cloud. It has to be read *in the
// context of a specific Cloud*, since a re-read may be for another Cloud.
private transient volatile long _cache;
private static final AtomicLongFieldUpdater<Key> _cacheUpdater =
AtomicLongFieldUpdater.newUpdater(Key.class, "_cache");
public final boolean isVec () { return _kb != null && _kb.length > 0 && _kb[0] == VEC; }
public final boolean isChunkKey() { return _kb != null && _kb.length > 0 && _kb[0] == DVEC; }
public final Key getVecKey() { assert isChunkKey(); return water.fvec.Vec.getVecKey(this); }
// Accessors and updaters for the Cloud-specific cached stuff.
// The Cloud index, a byte uniquely identifying the last 256 Clouds. It
// changes atomically with the _cache word, so we can tell which Cloud this
// data is a cache of.
private static int cloud( long cache ) { return (int)(cache>>> 0)&0x00FF; }
// Shortcut node index for Home replica#0. This replica is responsible for
// breaking ties on writes. 'char' because I want an unsigned 16bit thing,
// limit of 65534 Cloud members. -1 is reserved for a bare-key
private static int home ( long cache ) { return (int)(cache>>> 8)&0xFFFF; }
// Our replica #, or -1 if we're not one of the first 127 replicas. This
// value is found using the Cloud distribution function and changes for a
// changed Cloud.
private static int replica(long cache) { return (byte)(cache>>>24)&0x00FF; }
  // Desired replication factor. Can be zero for temp keys. Not allowed to change
  // later, because it messes with e.g. meta-data on disk.
private static int desired(long cache) { return (int)(cache>>>32)&0x00FF; }
private static long build_cache( int cidx, int home, int replica, int desired ) {
return // Build the new cache word
((long)(cidx &0xFF)<< 0) |
((long)(home &0xFFFF)<< 8) |
((long)(replica&0xFF)<<24) |
((long)(desired&0xFF)<<32) |
((long)(0 )<<40);
}
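  // Cache word layout, low to high bits: [0..7] cloud idx, [8..23] home node
  // index, [24..31] replica#, [32..39] desired replication factor. For example
  // (illustrative values): build_cache(2, 5, 0, 3) == 0x3_0000_0502L.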
public int home ( H2O cloud ) { return home (cloud_info(cloud)); }
public int replica( H2O cloud ) { return replica(cloud_info(cloud)); }
public int desired( ) { return desired(_cache); }
public boolean home() { return home_node()==H2O.SELF; }
public H2ONode home_node( ) {
H2O cloud = H2O.CLOUD;
return cloud._memary[home(cloud)];
}
// Update the cache, but only to strictly newer Clouds
private boolean set_cache( long cache ) {
while( true ) { // Spin till get it
long old = _cache; // Read once at the start
if( !H2O.larger(cloud(cache),cloud(old)) ) // Rolling backwards?
// Attempt to set for an older Cloud. Blow out with a failure; caller
// should retry on a new Cloud.
return false;
assert cloud(cache) != cloud(old) || cache == old;
if( old == cache ) return true; // Fast-path cutout
if( _cacheUpdater.compareAndSet(this,old,cache) ) return true;
// Can fail if the cache is really old, and just got updated to a version
// which is still not the latest, and we are trying to update it again.
}
}
// Return the info word for this Cloud. Use the cache if possible
public long cloud_info( H2O cloud ) {
long x = _cache;
// See if cached for this Cloud. This should be the 99% fast case.
if( cloud(x) == cloud._idx ) return x;
    // Cache missed! Probably it just needs (atomic) updating.
// But we might be holding the stale cloud...
// Figure out home Node in this Cloud
char home = (char)D(0);
// Figure out what replica # I am, if any
int desired = desired(x);
int replica = -1;
for( int i=0; i<desired; i++ ) {
int idx = D(i);
if( idx >= 0 && cloud._memary[idx] == H2O.SELF ) {
replica = i;
break;
}
}
long cache = build_cache(cloud._idx,home,replica,desired);
set_cache(cache); // Attempt to upgrade cache, but ignore failure
return cache; // Return the magic word for this Cloud
}
// Default desired replication factor. Unless specified otherwise, all new
// k-v pairs start with this replication factor.
public static final byte DEFAULT_DESIRED_REPLICA_FACTOR = 2;
// Construct a new Key.
private Key(byte[] kb) {
if( kb.length > KEY_LENGTH ) throw new IllegalArgumentException("Key length would be "+kb.length);
_kb = kb;
// Quicky hash: http://en.wikipedia.org/wiki/Jenkins_hash_function
int hash = 0;
for( byte b : kb ) {
hash += b;
hash += (hash << 10);
hash ^= (hash >> 6);
}
hash += (hash << 3);
hash ^= (hash >> 11);
hash += (hash << 15);
_hash = hash;
}
// Make new Keys. Optimistically attempt interning, but no guarantee.
static public Key make(byte[] kb, byte rf) {
if( rf == -1 ) throw new IllegalArgumentException();
Key key = new Key(kb);
Key key2 = H2O.getk(key); // Get the interned version, if any
if( key2 != null ) // There is one! Return it instead
return key2;
// Set the cache with desired replication factor, and a fake cloud index
H2O cloud = H2O.CLOUD; // Read once
key._cache = build_cache(cloud._idx-1,0,0,rf);
key.cloud_info(cloud); // Now compute & cache the real data
return key;
}
// A random string, useful as a Key name or partial Key suffix.
static public String rand() {
UUID uid = UUID.randomUUID();
long l1 = uid.getLeastSignificantBits();
long l2 = uid. getMostSignificantBits();
return "_"+Long.toHexString(l1)+Long.toHexString(l2);
}
static public Key make(byte[] kb) { return make(kb,DEFAULT_DESIRED_REPLICA_FACTOR); }
static public Key make(String s) { return make(decodeKeyName(s));}
static public Key make(String s, byte rf) { return make(decodeKeyName(s), rf);}
static public Key make() { return make(rand()); }
  // Make a particular system key that is homed to a given node and possibly
  // also specifies up to 2 other replicas. Works for both IPv4 and IPv6 addresses.
  // If no addresses are specified, returns a key with no home information.
static public Key make(String s, byte rf, byte systemType, H2ONode... replicas) {
return make(decodeKeyName(s),rf,systemType,replicas);
}
static public Key make(byte rf, byte systemType, H2ONode... replicas) {
return make(rand(),rf,systemType,replicas);
}
// Make a Key which is homed to specific nodes.
static public Key make(byte[] kb, byte rf, byte systemType, H2ONode... replicas) {
// no more than 3 replicas allowed to be stored in the key
assert 0 <=replicas.length && replicas.length<=3;
assert systemType<32; // only system keys allowed
// Key byte layout is:
// 0 - systemType, from 0-31
// 1 - replica-count, plus up to 3 bits for ip4 vs ip6
// 2-n - zero, one, two or 3 IP4 (4+2 bytes) or IP6 (16+2 bytes) addresses
// 2-5- 4 bytes of chunk#, or -1 for masters
// n+ - repeat of the original kb
AutoBuffer ab = new AutoBuffer();
ab.put1(systemType).put1(replicas.length);
for( H2ONode h2o : replicas )
h2o.write(ab);
ab.put4(-1);
ab.putA1(kb,kb.length);
return make(Arrays.copyOf(ab.buf(),ab.position()),rf);
}
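  // Sketch of building a node-homed system key (the name is illustrative):
  //
  //   Key k = Key.make("myjob" + Key.rand(), DEFAULT_DESIRED_REPLICA_FACTOR,
  //                    Key.JOB, H2O.SELF);
  //
  // Per the layout comment above, the resulting bytes are: JOB, a replica count
  // of 1, the serialized H2O.SELF address (4+2 bytes for IPv4), 4 bytes of -1
  // as the chunk# placeholder, and finally the original name bytes.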
// Hide a user key by turning it into a system key of type HIDDEN_USER_KEY
final public static Key makeSystem(String s) {
byte[] kb= decodeKeyName(s);
byte[] kb2 = new byte[kb.length+1];
System.arraycopy(kb,0,kb2,1,kb.length);
kb2[0] = Key.BUILT_IN_KEY;
return Key.make(kb2);
}
// Custom Serialization Reader: Keys must be interned on construction.
@Override public final Key read(AutoBuffer bb) { return make(bb.getA1()); }
@Override public final AutoBuffer write(AutoBuffer bb) { return bb.putA1(_kb); }
@Override public final AutoBuffer writeJSON(AutoBuffer bb) { return bb.putJSONStr(toString()); }
// User keys must be all ASCII, but we only check the 1st byte
public boolean user_allowed() {
return (_kb[0]&0xFF) >= 32;
}
// Returns the type of the key.
public int type() {
return ((_kb[0]&0xff)>=32) ? USER_KEY : (_kb[0]&0xff);
}
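  // Example: a key created via Key.make("myframe") starts with 'm' (0x6D >= 32),
  // so user_allowed() is true and type() reports USER_KEY; a key made with
  // systemType JOB starts with byte 3 and is therefore a system key.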
public static final char MAGIC_CHAR = '$';
private static final char[] HEX = "0123456789abcdef".toCharArray();
/** Converts the key to HTML displayable string.
*
   * For user keys returns the key itself; for system keys the non-printable
   * prefix is hex-encoded between MAGIC_CHAR markers and the printable tail is appended.
*
* @return key as a printable string
*/
@Override
public String toString() {
int len = _kb.length;
while( --len >= 0 ) {
char a = (char) _kb[len];
if (' ' <= a && a <= '#') continue;
// then we have $ which is not allowed
if ('%' <= a && a <= '~') continue;
// already in the one above
//if( 'a' <= a && a <= 'z' ) continue;
//if( 'A' <= a && a <= 'Z' ) continue;
//if( '0' <= a && a <= '9' ) continue;
break;
}
if (len>=0) {
StringBuilder sb = new StringBuilder();
sb.append(MAGIC_CHAR);
for( int i = 0; i <= len; ++i ) {
byte a = _kb[i];
sb.append(HEX[(a >> 4) & 0x0F]);
sb.append(HEX[(a >> 0) & 0x0F]);
}
sb.append(MAGIC_CHAR);
for( int i = len + 1; i < _kb.length; ++i ) sb.append((char)_kb[i]);
return sb.toString();
} else {
return new String(_kb);
}
}
private static byte[] decodeKeyName(String what) {
if( what==null ) return null;
if( what.length()==0 ) return null;
if (what.charAt(0) == MAGIC_CHAR) {
int len = what.indexOf(MAGIC_CHAR,1);
String tail = what.substring(len+1);
byte[] res = new byte[(len-1)/2 + tail.length()];
int r = 0;
for( int i = 1; i < len; i+=2 ) {
char h = what.charAt(i);
char l = what.charAt(i+1);
h -= Character.isDigit(h) ? '0' : ('a' - 10);
l -= Character.isDigit(l) ? '0' : ('a' - 10);
res[r++] = (byte)(h << 4 | l);
}
System.arraycopy(tail.getBytes(), 0, res, r, tail.length());
return res;
} else {
return what.getBytes();
}
}
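  // Round-trip sketch of toString()/decodeKeyName() (the bytes are illustrative):
  //   user key  : _kb == "myframe".getBytes()     -> toString() == "myframe"
  //   system key: _kb == {3, 0, 'j', 'o', 'b'}    -> toString() == "$0300$job"
  // decodeKeyName() inverts the encoding, so Key.make(key.toString()) produces a
  // key with the same bytes as the original.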
@Override public int hashCode() { return _hash; }
@Override public boolean equals( Object o ) {
if( o == null || ((Key)(o))._kb == null || _kb == null) return false;
if( this == o ) return true;
Key k = (Key)o;
return Arrays.equals(k._kb,_kb);
}
@Override public int compareTo(Object o) {
assert (o instanceof Key);
return this.toString().compareTo(o.toString());
}
// Simple wrapper class defining an array-of-keys that is serializable.
// Note that if you modify any fields of a POJO that is part of a Value,
// - this is not the recommended programming style,
// - those changes are visible to all on the node,
// - but not to other nodes
// - and the POJO might be dropped by the MemoryManager and reconstituted from
//   disk and/or the byte array back to its original form.
public static class Ary extends Iced {
public final Key[] _keys;
Ary( Key[] keys ) { _keys = keys; }
}
public static String toPrettyString(Key k) {
StringBuilder sb = new StringBuilder("Key { type: ");
switch( k._kb[0] ) {
case 0: sb.append("arraylet chunk"); break;
case 2: sb.append("build-in"); break;
case 3: sb.append("job"); break;
case 4: sb.append("vec"); break;
case 5: sb.append("dvec"); break;
case 6: sb.append("vgroup"); break;
case 7: sb.append("DFJ internal"); break;
case 31: sb.append("hidden user"); break;
case 32: sb.append("user"); break;
default: sb.append("UNKNOWN"); break;
}
sb.append(",replicas: ").append(k._kb[1]).append(",");
sb.append(k.toString()).append("}");
return sb.toString();
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Linpack.java
|
package water;
/*
Modified 7/12/14 by Arno E. Candel arno.candel@gmail.com
Added support for repeating the main loop to improve timing.
Added support for warming up the JIT.
Added support for nanosecond timer.
Added support for multi-threading.
Modified 3/3/97 by David M. Doolin (dmd) doolin@cs.utk.edu
Fixed error in matgen() method. Added some comments.
Modified 1/22/97 by Paul McMahan mcmahan@cs.utk.edu
Added more MacOS options to form.
Optimized by Jonathan Hardwick (jch@cs.cmu.edu), 3/28/96
Compare to Linkpack.java.
Optimizations performed:
- added "final" modifier to performance-critical methods.
- changed lines of the form "a[i] = a[i] + x" to "a[i] += x".
- minimized array references using common subexpression elimination.
- eliminated unused variables.
- undid an unrolled loop.
- added temporary 1D arrays to hold frequently-used columns of 2D arrays.
- wrote my own abs() method
See http://www.cs.cmu.edu/~jch/java/linpack.html for more details.
Ported to Java by Reed Wade (wade@cs.utk.edu) 2/96
built using JDK 1.0 on solaris
using "javac -O Linpack.java"
Translated to C by Bonnie Toy 5/88
(modified on 2/25/94 to fix a problem with daxpy for
unequal increments or equal increments not equal to 1.
Jack Dongarra)
*/
import water.util.Log;
import water.util.Utils;
public class Linpack {
public static void main(String[] args) {
int num_threads = Runtime.getRuntime().availableProcessors();
double sumgflops = run(num_threads);
Log.info("CPU speed (" + num_threads + " cores) : " + sumgflops + " Gflops.");
}
/**
* Compute system CPU speed in Gflops
*/
public static double run(int num_threads) {
final double gflops[] = new double[num_threads];
Thread[] threads = new Thread[num_threads];
for (int t=0;t<num_threads;++t) {
final int thread_num = t;
threads[t] = new Thread() {
public void run() {
Linpack l = new Linpack();
gflops[thread_num] = l.run_benchmark();
}
};
}
for (int t=0;t<num_threads;++t) {
threads[t].start();
}
for (int t=0;t<num_threads;++t) {
try {
threads[t].join();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
return Utils.sum(gflops);
}
final double abs (double d) {
return (d >= 0) ? d : -d;
}
double second_orig = -1;
double second()
{
if (second_orig==-1) {
second_orig = System.currentTimeMillis();
}
return (System.currentTimeMillis() - second_orig)/1000;
}
public double run_benchmark()
{
double gflops_result = 0.0;
double residn_result = 0.0;
double time_result = 0.0;
double eps_result = 0.0;
double a[][] = new double[200][201];
double b[] = new double[200];
double x[] = new double[200];
double cray,ops,total,norma,normx;
double resid,time;
double kf;
int n,i,ntimes,info,lda,ldaa,kflops;
int ipvt[] = new int[200];
//double gflops_result;
//double residn_result;
//double time_result;
//double eps_result;
lda = 201;
ldaa = 200;
cray = .056;
n = 200;
ops = (2.0e0*(n*n*n))/3.0 + 2.0*(n*n);
norma = matgen(a,lda,n,b);
int repeats = 200;
//warmup JIT
for (int r=0; r<10; ++r) {
info = dgefa(a, lda, n, ipvt);
dgesl(a, lda, n, ipvt, b, 0);
}
//actual run
Timer timer = new Timer(); //ms
for (int r=0; r<repeats; ++r) {
info = dgefa(a, lda, n, ipvt);
dgesl(a, lda, n, ipvt, b, 0);
}
total = (double)timer.time()/1000.;
for (i = 0; i < n; i++) {
x[i] = b[i];
}
norma = matgen(a,lda,n,b);
for (i = 0; i < n; i++) {
b[i] = -b[i];
}
dmxpy(n,b,n,lda,x,a);
resid = 0.0;
normx = 0.0;
for (i = 0; i < n; i++) {
resid = (resid > abs(b[i])) ? resid : abs(b[i]);
normx = (normx > abs(x[i])) ? normx : abs(x[i]);
}
eps_result = epslon(1.0);
/*
residn_result = resid/( n*norma*normx*eps_result );
time_result = total;
gflops_result = ops/(1.0e6*total);
return ("Mflops/s: " + gflops_result +
" Time: " + time_result + " secs" +
" Norm Res: " + residn_result +
" Precision: " + eps_result);
*/
residn_result = resid/( n*norma*normx*eps_result );
residn_result += 0.005; // for rounding
residn_result = (int)(residn_result*100);
residn_result /= 100;
time_result = total;
time_result += 0.005; // for rounding
time_result = (int)(time_result*100);
time_result /= 100;
gflops_result = ops/(1.0e9*total)*repeats;
gflops_result += 0.0005; // for rounding
gflops_result = (int)(gflops_result*1000);
gflops_result /= 1000;
// System.out.println("Gflops/s: " + gflops_result +
// " Time: " + time_result + " secs" +
// " Norm Res: " + residn_result +
// " Precision: " + eps_result);
return gflops_result;
}
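  // The reported figure follows from `ops` above: (2/3*n^3 + 2*n^2) flops per
  // solve (about 5.41e6 for n == 200), times `repeats` solves, divided by the
  // measured wall time in seconds and by 1.0e9 to convert to Gflops.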
final double matgen (double a[][], int lda, int n, double b[])
{
double norma;
int init, i, j;
init = 1325;
norma = 0.0;
/* Next two for() statements switched. Solver wants
matrix in column order. --dmd 3/3/97
*/
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
init = 3125*init % 65536;
a[j][i] = (init - 32768.0)/16384.0;
norma = (a[j][i] > norma) ? a[j][i] : norma;
}
}
for (i = 0; i < n; i++) {
b[i] = 0.0;
}
for (j = 0; j < n; j++) {
for (i = 0; i < n; i++) {
b[i] += a[j][i];
}
}
return norma;
}
/*
dgefa factors a double precision matrix by gaussian elimination.
dgefa is usually called by dgeco, but it can be called
directly with a saving in time if rcond is not needed.
(time for dgeco) = (1 + 9/n)*(time for dgefa) .
on entry
a double precision[n][lda]
the matrix to be factored.
lda integer
the leading dimension of the array a .
n integer
the order of the matrix a .
on return
a an upper triangular matrix and the multipliers
which were used to obtain it.
the factorization can be written a = l*u where
l is a product of permutation and unit lower
triangular matrices and u is upper triangular.
ipvt integer[n]
an integer vector of pivot indices.
info integer
= 0 normal value.
= k if u[k][k] .eq. 0.0 . this is not an error
condition for this subroutine, but it does
indicate that dgesl or dgedi will divide by zero
if called. use rcond in dgeco for a reliable
indication of singularity.
linpack. this version dated 08/14/78.
cleve moler, university of new mexico, argonne national lab.
functions
blas daxpy,dscal,idamax
*/
final int dgefa( double a[][], int lda, int n, int ipvt[])
{
double[] col_k, col_j;
double t;
int j,k,kp1,l,nm1;
int info;
// gaussian elimination with partial pivoting
info = 0;
nm1 = n - 1;
if (nm1 >= 0) {
for (k = 0; k < nm1; k++) {
col_k = a[k];
kp1 = k + 1;
// find l = pivot index
l = idamax(n-k,col_k,k,1) + k;
ipvt[k] = l;
// zero pivot implies this column already triangularized
if (col_k[l] != 0) {
// interchange if necessary
if (l != k) {
t = col_k[l];
col_k[l] = col_k[k];
col_k[k] = t;
}
// compute multipliers
t = -1.0/col_k[k];
dscal(n-(kp1),t,col_k,kp1,1);
// row elimination with column indexing
for (j = kp1; j < n; j++) {
col_j = a[j];
t = col_j[l];
if (l != k) {
col_j[l] = col_j[k];
col_j[k] = t;
}
daxpy(n-(kp1),t,col_k,kp1,1,
col_j,kp1,1);
}
}
else {
info = k;
}
}
}
ipvt[n-1] = n-1;
if (a[(n-1)][(n-1)] == 0) info = n-1;
return info;
}
/*
dgesl solves the double precision system
a * x = b or trans(a) * x = b
using the factors computed by dgeco or dgefa.
on entry
a double precision[n][lda]
the output from dgeco or dgefa.
lda integer
the leading dimension of the array a .
n integer
the order of the matrix a .
ipvt integer[n]
the pivot vector from dgeco or dgefa.
b double precision[n]
the right hand side vector.
job integer
= 0 to solve a*x = b ,
= nonzero to solve trans(a)*x = b where
trans(a) is the transpose.
on return
b the solution vector x .
error condition
a division by zero will occur if the input factor contains a
zero on the diagonal. technically this indicates singularity
but it is often caused by improper arguments or improper
setting of lda . it will not occur if the subroutines are
called correctly and if dgeco has set rcond .gt. 0.0
or dgefa has set info .eq. 0 .
to compute inverse(a) * c where c is a matrix
with p columns
dgeco(a,lda,n,ipvt,rcond,z)
if (!rcond is too small){
for (j=0,j<p,j++)
dgesl(a,lda,n,ipvt,c[j][0],0);
}
linpack. this version dated 08/14/78 .
cleve moler, university of new mexico, argonne national lab.
functions
blas daxpy,ddot
*/
final void dgesl( double a[][], int lda, int n, int ipvt[], double b[], int job)
{
double t;
int k,kb,l,nm1,kp1;
nm1 = n - 1;
if (job == 0) {
// job = 0 , solve a * x = b. first solve l*y = b
if (nm1 >= 1) {
for (k = 0; k < nm1; k++) {
l = ipvt[k];
t = b[l];
if (l != k){
b[l] = b[k];
b[k] = t;
}
kp1 = k + 1;
daxpy(n-(kp1),t,a[k],kp1,1,b,kp1,1);
}
}
// now solve u*x = y
for (kb = 0; kb < n; kb++) {
k = n - (kb + 1);
b[k] /= a[k][k];
t = -b[k];
daxpy(k,t,a[k],0,1,b,0,1);
}
}
else {
// job = nonzero, solve trans(a) * x = b. first solve trans(u)*y = b
for (k = 0; k < n; k++) {
t = ddot(k,a[k],0,1,b,0,1);
b[k] = (b[k] - t)/a[k][k];
}
// now solve trans(l)*x = y
if (nm1 >= 1) {
for (kb = 1; kb < nm1; kb++) {
k = n - (kb+1);
kp1 = k + 1;
b[k] += ddot(n-(kp1),a[k],kp1,1,b,kp1,1);
l = ipvt[k];
if (l != k) {
t = b[l];
b[l] = b[k];
b[k] = t;
}
}
}
}
}
/*
constant times a vector plus a vector.
jack dongarra, linpack, 3/11/78.
*/
final void daxpy( int n, double da, double dx[], int dx_off, int incx,
double dy[], int dy_off, int incy)
{
int i,ix,iy;
if ((n > 0) && (da != 0)) {
if (incx != 1 || incy != 1) {
// code for unequal increments or equal increments not equal to 1
ix = 0;
iy = 0;
if (incx < 0) ix = (-n+1)*incx;
if (incy < 0) iy = (-n+1)*incy;
for (i = 0;i < n; i++) {
dy[iy +dy_off] += da*dx[ix +dx_off];
ix += incx;
iy += incy;
}
return;
} else {
// code for both increments equal to 1
for (i=0; i < n; i++)
dy[i +dy_off] += da*dx[i +dx_off];
}
}
}
/*
forms the dot product of two vectors.
jack dongarra, linpack, 3/11/78.
*/
final double ddot( int n, double dx[], int dx_off, int incx, double dy[],
int dy_off, int incy)
{
double dtemp;
int i,ix,iy;
dtemp = 0;
if (n > 0) {
if (incx != 1 || incy != 1) {
// code for unequal increments or equal increments not equal to 1
ix = 0;
iy = 0;
if (incx < 0) ix = (-n+1)*incx;
if (incy < 0) iy = (-n+1)*incy;
for (i = 0;i < n; i++) {
dtemp += dx[ix +dx_off]*dy[iy +dy_off];
ix += incx;
iy += incy;
}
} else {
// code for both increments equal to 1
for (i=0;i < n; i++)
dtemp += dx[i +dx_off]*dy[i +dy_off];
}
}
return(dtemp);
}
/*
scales a vector by a constant.
jack dongarra, linpack, 3/11/78.
*/
final void dscal( int n, double da, double dx[], int dx_off, int incx)
{
int i,nincx;
if (n > 0) {
if (incx != 1) {
// code for increment not equal to 1
nincx = n*incx;
for (i = 0; i < nincx; i += incx)
dx[i +dx_off] *= da;
} else {
// code for increment equal to 1
for (i = 0; i < n; i++)
dx[i +dx_off] *= da;
}
}
}
/*
finds the index of element having max. absolute value.
jack dongarra, linpack, 3/11/78.
*/
final int idamax( int n, double dx[], int dx_off, int incx)
{
double dmax, dtemp;
int i, ix, itemp=0;
if (n < 1) {
itemp = -1;
} else if (n ==1) {
itemp = 0;
} else if (incx != 1) {
// code for increment not equal to 1
dmax = abs(dx[0 +dx_off]);
ix = 1 + incx;
for (i = 1; i < n; i++) {
dtemp = abs(dx[ix + dx_off]);
if (dtemp > dmax) {
itemp = i;
dmax = dtemp;
}
ix += incx;
}
} else {
// code for increment equal to 1
itemp = 0;
dmax = abs(dx[0 +dx_off]);
for (i = 1; i < n; i++) {
dtemp = abs(dx[i + dx_off]);
if (dtemp > dmax) {
itemp = i;
dmax = dtemp;
}
}
}
return (itemp);
}
/*
estimate unit roundoff in quantities of size x.
this program should function properly on all systems
satisfying the following two assumptions,
1. the base used in representing dfloating point
numbers is not a power of three.
2. the quantity a in statement 10 is represented to
the accuracy used in dfloating point variables
that are stored in memory.
the statement number 10 and the go to 10 are intended to
force optimizing compilers to generate code satisfying
assumption 2.
under these assumptions, it should be true that,
a is not exactly equal to four-thirds,
b has a zero for its last bit or digit,
c is not exactly equal to one,
eps measures the separation of 1.0 from
 the next larger floating point number.
the developers of eispack would appreciate being informed
about any systems where these assumptions do not hold.
*****************************************************************
this routine is one of the auxiliary routines used by eispack iii
to avoid machine dependencies.
*****************************************************************
this version dated 4/6/83.
*/
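  // Hedged note: on IEEE-754 double arithmetic the loop below exits on its
  // first pass with eps roughly 2.22e-16 (the double machine epsilon), so
  // epslon(1.0) estimates the unit roundoff as intended.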
final double epslon (double x)
{
double a,b,c,eps;
a = 4.0e0/3.0e0;
eps = 0;
while (eps == 0) {
b = a - 1.0;
c = b + b + b;
eps = abs(c-1.0);
}
return(eps*abs(x));
}
/*
purpose:
multiply matrix m times vector x and add the result to vector y.
parameters:
n1 integer, number of elements in vector y, and number of rows in
matrix m
y double [n1], vector of length n1 to which is added
the product m*x
n2 integer, number of elements in vector x, and number of columns
in matrix m
ldm integer, leading dimension of array m
x double [n2], vector of length n2
m double [ldm][n2], matrix of n1 rows and n2 columns
*/
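  // Hedged worked example of the storage convention above: m[j][i] is the
  // element in column j, row i, so for
  //   m = {{1,3},{2,4}}  (columns of the 2x2 matrix [[1,2],[3,4]]),
  //   x = {5,6}, y = {0,0}
  // dmxpy(2, y, 2, 2, x, m) leaves y = {1*5+2*6, 3*5+4*6} = {17, 39}.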
final void dmxpy ( int n1, double y[], int n2, int ldm, double x[], double m[][])
{
int j,i;
// cleanup odd vector
for (j = 0; j < n2; j++) {
for (i = 0; i < n1; i++) {
y[i] += x[j]*m[j][i];
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Lockable.java
|
package water;
import water.api.DocGen;
import water.api.Request.API;
import water.util.Log;
import java.util.Arrays;
/**
* Lockable Keys - locked during long running jobs, to prevent overwriting
* in-use keys. e.g. model-building: expected to read-lock input ValueArray and
* Frames, and write-lock the output Model. Parser should write-lock the
* output VA/Frame, to guard against double-parsing.
*
* Supports:
* lock-and-delete-old-and-update (for new Keys)
* lock-and-delete (for removing old Keys)
* unlock
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public abstract class Lockable<T extends Lockable<T>> extends Iced {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
/** The Key being locked */
@API(help="My Key")
public final Key _key;
  /** Write-locker job is in _lockers[0].  Can be a null locker.
   *  Read -locker jobs are in _lockers[1+].
   *  Unlocked has _lockers equal to null.
* Only 1 situation will be true at a time; atomically updated.
* Transient, because this data is only valid on the master node.
*/
@API(help="Jobs locking this key")
public transient Key _lockers[];
// Create unlocked
public Lockable( Key key ) { _key = key; }
// -----------
// Atomic create+overwrite of prior key.
// If prior key exists, block until acquire a write-lock.
  // Then call delete_impl, removing all of a prior key.
  // Then replace this object as the new Lockable, still write-locked.
// "locker" can be null, meaning the special no-Job locker; for use by expected-fast operations
//
// Example: write-lock & remove an old VA, and replace with a new locked Frame
// Local-Node Master-Node
// (1) FR,VA -->write_lock(job)--> VA
// (2) FR,VA.waiting... FR,VA+job-locked atomic xtn loop
// (3) VA.delete_impl onSuccess
// (4) FR <--update success <-- FR+job-locked
// Write-lock 'this', returns OLD guy
public Lockable write_lock( Key job_key ) {
Log.debug(Log.Tag.Sys.LOCKS,"write-lock "+_key+" by job "+job_key);
return ((PriorWriteLock)new PriorWriteLock(job_key).invoke(_key))._old;
}
// Write-lock 'this', delete any old thing, returns NEW guy
public T delete_and_lock( Key job_key ) {
Lockable old = write_lock(job_key);
if( old != null ) {
Log.debug(Log.Tag.Sys.LOCKS,"lock-then-clear "+_key+" by job "+job_key);
old.delete_impl(new Futures()).blockForPending();
}
return (T)this;
}
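  // Hedged usage sketch (hypothetical job-side code, not part of this class):
  // a long-running job typically locks its output up front, publishes progress
  // via update(), and unlocks when done, e.g.
  //   model.delete_and_lock(jobKey);  // write-lock, clearing any prior value
  //   ... build, calling model.update(jobKey) to publish intermediate state ...
  //   model.unlock(jobKey);           // release the write-lock when finished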
// Obtain the write-lock on _key, which may already exist, using the current 'this'.
private class PriorWriteLock extends TAtomic<Lockable> {
final Key _job_key; // Job doing the locking
Lockable _old; // Return the old thing, for deleting later
PriorWriteLock( Key job_key ) { _job_key = job_key; }
@Override public Lockable atomic(Lockable old) {
_old = old;
if( old != null ) { // Prior Lockable exists?
assert !old.is_wlocked(_job_key) : "Key "+_key+" already locked; lks="+Arrays.toString(old._lockers); // No double locking by same job
if( old.is_locked(_job_key) ) // read-locked by self? (double-write-lock checked above)
old.set_unlocked(old._lockers,_job_key); // Remove read-lock; will atomically upgrade to write-lock
if( !old.is_unlocked() ) // Blocking for some other Job to finish???
throw new IllegalArgumentException(old.errStr()+" "+_key+" is already in use. Unable to use it now. Consider using a different destination name.");
assert old.is_unlocked() : "Not unlocked when locking "+Arrays.toString(old._lockers)+" for "+_job_key;
}
// Update & set the new value
set_write_lock(_job_key);
return Lockable.this;
}
}
// -----------
// Atomic lock & remove self. Nothing remains when done.
// Write-lock & delete 'k'. Will fail if 'k' is locked by anybody.
public static void delete( Key k ) { delete(k,null); }
// Write-lock & delete 'k'. Will fail if 'k' is locked by anybody other than 'job_key'
public static void delete( Key k, Key job_key ) {
if( k == null ) return;
Value val = DKV.get(k);
if( val == null ) return; // Or just nothing there to delete
if( !val.isLockable() ) UKV.remove(k); // Simple things being deleted
else ((Lockable)val.get()).delete(job_key,0.0f); // Lockable being deleted
}
// Will fail if locked by anybody.
public void delete( ) { delete(null,0.0f); }
// Will fail if locked by anybody other than 'job_key'
public void delete( Key job_key, float dummy ) {
if( _key != null ) {
Log.debug(Log.Tag.Sys.LOCKS,"lock-then-delete "+_key+" by job "+job_key);
new PriorWriteLock(job_key).invoke(_key);
}
Futures fs = new Futures();
delete_impl(fs);
if( _key != null ) DKV.remove(_key,fs); // Delete self also
fs.blockForPending();
}
// -----------
// Atomically get a read-lock, preventing future deletes or updates
public static void read_lock( Key k, Key job_key ) {
Value val = DKV.get(k);
if( val.isLockable() )
((Lockable)val.get()).read_lock(job_key); // Lockable being locked
}
public void read_lock( Key job_key ) {
if( _key != null ) {
Log.debug(Log.Tag.Sys.LOCKS,"shared-read-lock "+_key+" by job "+job_key);
new ReadLock(job_key).invoke(_key);
}
}
// Obtain read-lock
static private class ReadLock extends TAtomic<Lockable> {
final Key _job_key; // Job doing the unlocking
ReadLock( Key job_key ) { _job_key = job_key; }
@Override public Lockable atomic(Lockable old) {
if( old == null ) throw new IllegalArgumentException("Nothing to lock!");
if( old.is_wlocked() )
throw new IllegalArgumentException( old.errStr()+" "+_key+" is being created; Unable to read it now.");
old.set_read_lock(_job_key);
return old;
}
}
// -----------
// Atomically set a new version of self
public void update( Key job_key ) {
Log.debug(Log.Tag.Sys.LOCKS,"update write-locked "+_key+" by job "+job_key);
new Update(job_key).invoke(_key);
}
// Freshen 'this' and leave locked
private class Update extends TAtomic<Lockable> {
final Key _job_key; // Job doing the unlocking
Update( Key job_key ) { _job_key = job_key; }
@Override public Lockable atomic(Lockable old) {
assert old != null && old.is_wlocked();
_lockers = old._lockers; // Keep lock state
return Lockable.this; // Freshen this
}
}
public static void unlock_lockable(final Key lockable, final Key job){
new DTask.DKeyTask<DTask.DKeyTask,Lockable>(null,lockable){
@Override
public void map(Lockable l) { l.unlock(job);}
}.invokeTask();
}
// -----------
// Atomically set a new version of self & unlock.
public void unlock( Key job_key ) {
if( _key != null ) {
Log.debug(Log.Tag.Sys.LOCKS,"unlock "+_key+" by job "+job_key);
new Unlock(job_key).invoke(_key);
}
}
// Freshen 'this' and unlock
private class Unlock extends TAtomic<Lockable> {
final Key _job_key; // Job doing the unlocking
Unlock( Key job_key ) { _job_key = job_key; }
@Override public Lockable atomic(Lockable old) {
assert old.is_locked(_job_key) : old.getClass().getSimpleName() + " cannot be unlocked (not locked by job " + _job_key + ").";
set_unlocked(old._lockers,_job_key);
return Lockable.this;
}
}
// -----------
  // Accessors for locking state. Minimal self-checking; primitive results.
private boolean is_locked(Key job_key) {
if( _lockers==null ) return false;
for( int i=(_lockers.length==1?0:1); i<_lockers.length; i++ ) {
Key k = _lockers[i];
if( job_key==k || (job_key != null && k != null && job_key.equals(k)) ) return true;
}
return false;
}
protected boolean is_wlocked() { return _lockers!=null && _lockers.length==1; }
private boolean is_wlocked(Key job_key) { return is_wlocked() && (_lockers[0] == job_key || _lockers[0] != null && _lockers[0].equals(job_key)); }
protected boolean is_unlocked() { return _lockers== null; }
private void set_write_lock( Key job_key ) {
_lockers=new Key[]{job_key};
assert is_locked(job_key) : "Job " + job_key + " must be locked.";
}
private void set_read_lock(Key job_key) {
assert !is_locked(job_key) : this.getClass().getSimpleName() + " is already locked by job " + job_key + "."; // no double locking
assert !is_wlocked() : this.getClass().getSimpleName() + " is already write locked."; // not write locked
_lockers = _lockers == null ? new Key[2] : Arrays.copyOf(_lockers,_lockers.length+1);
_lockers[_lockers.length-1] = job_key;
assert is_locked(job_key);
}
private void set_unlocked(Key lks[], Key job_key) {
if( lks.length==1 ) { // Is write-locked?
assert job_key==lks[0] || job_key.equals(lks[0]);
_lockers = null; // Then unlocked
} else if( lks.length==2 ) { // One reader
assert lks[0]==null; // Not write-locked
assert lks[1]==job_key || (job_key != null && job_key.equals(lks[1]));
_lockers = null; // So unlocked
} else { // Else one of many readers
assert lks.length>2;
_lockers = Arrays.copyOf(lks,lks.length-1);
int j=1; // Skip the initial null slot
for( int i=1; i<lks.length; i++ )
if(job_key != null && !job_key.equals(lks[i]) || (job_key == null && lks[i] != null)){
_lockers[j++] = lks[i];
}
assert j==lks.length-1; // Was locked exactly once
}
assert !is_locked(job_key);
}
// Unlock from all lockers
public void unlock_all() {
if( _key != null )
for (Key k : _lockers) new UnlockSafe(k).invoke(_key);
}
private class UnlockSafe extends TAtomic<Lockable> {
final Key _job_key; // potential job doing the unlocking
UnlockSafe( Key job_key ) { _job_key = job_key; }
@Override public Lockable atomic(Lockable old) {
if (old.is_locked(_job_key))
set_unlocked(old._lockers,_job_key);
return Lockable.this;
}
}
// Remove any subparts before removing the whole thing
protected abstract Futures delete_impl( Futures fs );
// Pretty string when locking fails
protected abstract String errStr();
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/MRTask.java
|
package water;
import jsr166y.CountedCompleter;
/** Map/Reduce style distributed computation. */
public abstract class MRTask<T extends MRTask> extends DRemoteTask<T> {
transient protected int _lo, _hi; // Range of keys to work on
transient private T _left, _rite; // In-progress execution tree
// This method is another backpressure mechanism to make sure we do not
  // exhaust the system's resources by running too many tasks at the same time.
  // Tasks are expected to reserve memory before proceeding with their
  // execution and to make sure they release it when done.
public long memOverheadPerChunk() { return 0; }
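  // log2 below computes ceil(log2(a)) for a > 0, e.g. log2(8)==3 and log2(9)==4;
  // it bounds the depth of the divide-and-conquer task tree when sizing
  // memory reservations.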
static final long log2(long a) {
long x = a, y = 0;
while( (x >>= 1) > 0 ) ++y;
return (a > (1L << y)) ? y+1 : y;
}
@Override public void init() {
_lo = 0;
_hi = _keys.length;
long reqMem = (log2(_hi - _lo)+2)*memOverheadPerChunk();
MemoryManager.reserveTaskMem(reqMem); // min. memory required to run at least single threaded
_reservedMem = reqMem;
}
/** Run some useful function over this <strong>local</strong> key, and
   * record the results in <em>this</em> MRTask. */
abstract public void map( Key key );
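  // Hedged illustrative subclass (hypothetical, not part of H2O): count the
  // keys visited on this node's slice of the key list.
  //   class CountKeys extends MRTask<CountKeys> {
  //     long _n;
  //     @Override public void map(Key key) { _n++; }
  //     @Override public void reduce(CountKeys other) { _n += other._n; }
  //   }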
protected boolean _runSingleThreaded = false;
transient long _reservedMem;
/** Do all the keys in the list associated with this Node. Roll up the
* results into <em>this</em> MRTask. */
@Override public final void lcompute() {
if( _hi-_lo >= 2 ) { // Multi-key case: just divide-and-conquer to 1 key
final int mid = (_lo+_hi)>>>1; // Mid-point
assert _left == null && _rite == null;
T l = clone();
T r = clone();
_left = l; l._reservedMem = 0;
_rite = r; r._reservedMem = 0;
_left._hi = mid; // Reset mid-point
_rite._lo = mid; // Also set self mid-point
setPendingCount(1);
// Compute min. memory required to run the right branch in parallel. Min
      // memory equals the max memory used if the right branch will be
// executed single threaded (but in parallel with our left branch).
// Assuming all memory is kept in the tasks and it is halved by reduce
// operation, the min memory is proportional to the depth of the right
// subtree.
long reqMem = (log2(_hi - mid)+3)*memOverheadPerChunk();
if(!_runSingleThreaded && MemoryManager.tryReserveTaskMem(reqMem)){
_reservedMem += reqMem; // Remember the amount of reserved memory to free it later.
_left.fork(); // Runs in another thread/FJ instance
} else {
_left.compute2();
}
_rite.compute2(); // Runs in THIS F/J thread
} else {
if( _hi > _lo ) { // Single key?
try {
map(_keys[_lo]); // Get it, run it locally
} catch( RuntimeException re ) { // Catch user-map-thrown exceptions
throw H2O.setDetailMessage(re,re.getMessage()+" while mapping key "+_keys[_lo]);
} catch( AssertionError re ) { // Catch user-map-thrown exceptions
throw H2O.setDetailMessage(re,re.getMessage()+" while mapping key "+_keys[_lo]);
} catch( OutOfMemoryError re ) { // Catch user-map-thrown exceptions
throw H2O.setDetailMessage(re,re.getMessage()+" while mapping key "+_keys[_lo]);
}
}
tryComplete(); // And this task is complete
}
}
private final void returnReservedMemory(){
if(_reservedMem > 0)MemoryManager.freeTaskMem(_reservedMem);
}
@Override public void lonCompletion( CountedCompleter caller ) {
// Reduce results into 'this' so they collapse going up the execution tree.
// NULL out child-references so we don't accidentally keep large subtrees
// alive: each one may be holding large partial results.
if( _left != null ) reduceAlsoBlock(_left); _left = null;
if( _rite != null ) reduceAlsoBlock(_rite); _rite = null;
returnReservedMemory();
}
@Override public final boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller ) {
_left = null;
_rite = null;
returnReservedMemory();
return super.onExceptionalCompletion(ex, caller);
}
// Caveat Emptor:
// Hopefully used for debugging only... not only are these likely to change
// in the near future, there's very few guarantees placed on these values.
// At various points they are chunk-number ranges (before & during maps), and
// stale values that *look* like ranges but are not (during reduces) or maybe
// they will morph into row#'s (new not-yet-ready api) and/or forms of
// "visited" flags (also new api).
public final int lo() { return _lo; }
public final int hi() { return _hi; }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/MRTask2.java
|
package water;
import jsr166y.CountedCompleter;
import jsr166y.ForkJoinPool;
import water.H2O.H2OCountedCompleter;
import water.fvec.*;
import water.fvec.Vec.VectorGroup;
/**
* Map/Reduce style distributed computation.
* <br>
* MRTask2 provides several <code>map</code> and <code>reduce</code> methods that can be
 * overridden to specify a computation. Several instances of this class will be
* created to distribute the computation over F/J threads and machines. Non-transient
* fields are copied and serialized to instances created for map invocations. Reduce
* methods can store their results in fields. Results are serialized and reduced all the
* way back to the invoking node. When the last reduce method has been called, fields
 * of the initial MRTask2 instance contain the computation results.
* <br>
 * Apart from the small reduced POJO returned to the calling node, MRTask2 can
 * also produce output vector(s) as a result. These will have chunks co-located
 * with the input dataset; however, their number of lines will generally
 * differ, so they won't be strictly compatible with the original. To produce
 * output vectors, call the doAll/dfork version with the required number of outputs
 * and override the appropriate <code>map</code> call taking the required number of
* NewChunks. MRTask2 will automatically close the new Appendable vecs and
* produce an output frame with newly created Vecs.
*/
public abstract class MRTask2<T extends MRTask2<T>> extends DTask implements Cloneable, ForkJoinPool.ManagedBlocker {
public MRTask2() { }
public MRTask2(H2OCountedCompleter completer){super(completer); }
/** The Vectors to work on. */
public Frame _fr;
// appendables are treated separately (roll-ups computed in map/reduce style, can not be passed via K/V store).
protected AppendableVec [] _appendables;
private int _vid;
private int _noutputs;
// If TRUE, run entirely local - which will pull all the data locally.
private boolean _run_local;
private byte _priority = H2O.MIN_PRIORITY;
@Override public byte priority() { return _priority; }
private void raisePriority() {
// Always 1 higher priority than calling thread... because the caller will
// block & burn a thread waiting for this MRTask2 to complete.
Thread cThr = Thread.currentThread();
_priority = (byte)((cThr instanceof H2O.FJWThr) ? ((H2O.FJWThr)cThr)._priority+1 : super.priority());
}
public Frame outputFrame(String [] names, String [][] domains){ return outputFrame(null,names,domains); }
public Frame outputFrame(Key key, String [] names, String [][] domains){
Futures fs = new Futures();
Frame res = outputFrame(key, names, domains, fs);
fs.blockForPending();
return res;
}
public Frame outputFrame(Key key, String [] names, String [][] domains, Futures fs){
if(_noutputs == 0)return null;
Vec [] vecs = new Vec[_noutputs];
for(int i = 0; i < _noutputs; ++i) {
if( _appendables==null ) // Zero rows?
vecs[i] = _fr.anyVec().makeZero();
else {
_appendables[i]._domain = domains==null ? null : domains[i];
vecs[i] = _appendables[i].close(fs);
}
}
return new Frame(key,names,vecs);
}
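  // Hedged sketch (hypothetical user code) of the output-vec path described in
  // the class comment: request one output, append values in map(), then
  // materialize the result with outputFrame().
  //   class Doubler extends MRTask2<Doubler> {
  //     @Override public void map(Chunk c, NewChunk nc) {
  //       for( int r = 0; r < c._len; r++ ) nc.addNum(2*c.at0(r));
  //     }
  //   }
  //   Frame out = new Doubler().doAll(1, fr).outputFrame(new String[]{"x2"}, null);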
/** Override with your map implementation. This overload is given a single
* <strong>local</strong> input Chunk. It is meant for map/reduce jobs that use a
   * single column in an input Frame. All map variants are called, but only one is
* expected to be overridden. */
public void map( Chunk c ) { }
public void map( Chunk c, NewChunk nc ) { }
/** Override with your map implementation. This overload is given two
* <strong>local</strong> Chunks. All map variants are called, but only one
* is expected to be overridden. */
public void map( Chunk c0, Chunk c1 ) { }
public void map( Chunk c0, Chunk c1, NewChunk nc) { }
public void map( Chunk c0, Chunk c1, NewChunk nc1, NewChunk nc2 ) { }
/** Override with your map implementation. This overload is given three
* <strong>local</strong> input Chunks. All map variants are called, but only one
* is expected to be overridden. */
public void map( Chunk c0, Chunk c1, Chunk c2 ) { }
public void map( Chunk c0, Chunk c1, Chunk c2, NewChunk nc ) { }
public void map( Chunk c0, Chunk c1, Chunk c2, NewChunk nc1, NewChunk nc2 ) { }
/** Override with your map implementation. This overload is given an array
* of <strong>local</strong> input Chunks, for Frames with arbitrary column
* numbers. All map variants are called, but only one is expected to be
* overridden. */
public void map( Chunk cs[] ) { }
public void map( Chunk cs[], NewChunk nc ) { }
public void map( Chunk cs[], NewChunk nc1, NewChunk nc2 ) { }
public void map( Chunk cs[], NewChunk [] ncs ) { }
/** Override to combine results from 'mrt' into 'this' MRTask2. Both 'this'
* and 'mrt' are guaranteed to either have map() run on them, or be the
* results of a prior reduce(). Reduce is optional if, e.g., the result is
* some output vector. */
public void reduce( T mrt ) { }
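  // Hedged sketch (hypothetical user code): sum a numeric column by overriding
  // one map() variant plus reduce(), then read the field after the blocking doAll().
  //   class SumTask extends MRTask2<SumTask> {
  //     double _sum;
  //     @Override public void map(Chunk c) { for( int r = 0; r < c._len; r++ ) _sum += c.at0(r); }
  //     @Override public void reduce(SumTask other) { _sum += other._sum; }
  //   }
  //   double total = new SumTask().doAll(vec)._sum;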
/** Override to do any remote initialization on the 1st remote instance of
* this object, for initializing node-local shared data structures. */
  protected void setupLocal() {} // load the vecs in a non-racy way (we will definitely need them, and if we don't have a cached version there would be unnecessary racy updates from multiple maps at the same time)!
/** Override to do any remote cleaning on the last remote instance of
* this object, for disposing of node-local shared data structures. */
protected void closeLocal() { }
/** Internal field to track a range of remote nodes/JVMs to work on */
protected short _nxx, _nhi; // Range of Nodes to work on - remotely
private int addShift( int x ) { x += _nxx; int sz = H2O.CLOUD.size(); return x < sz ? x : x-sz; }
private int subShift( int x ) { x -= _nxx; int sz = H2O.CLOUD.size(); return x < 0 ? x+sz : x; }
/** Internal field to track the left and right remote nodes/JVMs to work on */
transient protected RPC<T> _nleft, _nrite;
/** Internal field to track if this is a top-level local call */
transient protected boolean _topLocal; // Top-level local call, returning results over the wire
/** Internal field to track a range of local Chunks to work on */
transient protected int _lo, _hi; // Range of Chunks to work on - locally
/** Internal field to track the left and right sub-range of chunks to work on */
transient protected T _left, _rite; // In-progress execution tree
transient private T _res; // Result
/** We can add more things to block on - in case we want a bunch of lazy
* tasks produced by children to all end before this top-level task ends.
* Semantically, these will all complete before we return from the top-level
* task. Pragmatically, we block on a finer grained basis. */
transient protected Futures _fs; // More things to block on
// Profiling support. Time for each subpart of a single M/R task, plus any
// nested MRTasks. All numbers are CTM stamps or millisecond times.
private static class MRProfile extends Iced {
String _clz;
public MRProfile(MRTask2 mrt) {
_clz = mrt.getClass().toString();
_localdone = System.currentTimeMillis();
}
// See where these are set to understand their meaning. If we split the
// job, then _lstart & _rstart are the start of left & right jobs. If we
// do NOT split, then _rstart is 0 and _lstart is for the user map job(s).
long _localstart, _rpcLstart, _rpcRstart, _rpcRdone, _localdone; // Local setup, RPC network i/o times
long _mapstart, _userstart, _closestart, _mapdone; // MAP phase
long _onCstart, _reducedone, _remoteBlkDone, _localBlkDone, _onCdone; // REDUCE phase
// If we split the job left/right, then we get a total recording of the
// last job, and the exec time & completion time of 1st job done.
long _time1st, _done1st;
int _size_rez0, _size_rez1; // i/o size in bytes during reduce
MRProfile _last;
long sumTime() { return _onCdone - (_localstart==0 ? _mapstart : _localstart); }
void gather( MRProfile p, int size_rez ) {
p._clz=null;
if( _last == null ) _last=p;
else {
MRProfile first = _last._onCdone <= p._onCdone ? _last : p;
MRProfile last = _last._onCdone > p._onCdone ? _last : p;
_last = last;
if( first._onCdone > _done1st ) { _time1st = first.sumTime(); _done1st = first._onCdone; }
}
if( size_rez !=0 ) // Record i/o result size
if( _size_rez0 == 0 ) { _size_rez0=size_rez; }
else { /*assert _size_rez1==0;*/ _size_rez1=size_rez; }
assert _last._onCdone >= _done1st;
}
@Override public String toString() { return toString(new StringBuilder(),0).toString(); }
private StringBuilder toString(StringBuilder sb, int d) {
if( d==0 ) sb.append(_clz).append("\n");
for( int i=0; i<d; i++ ) sb.append(" ");
if( _localstart != 0 ) sb.append("Node local ").append(_localdone - _localstart).append("ms, ");
if( _userstart == 0 ) { // Forked job?
sb.append("Slow wait ").append(_mapstart-_localdone).append("ms + work ").append(_last.sumTime()).append("ms, ");
sb.append("Fast work ").append(_time1st).append("ms + wait ").append(_onCstart-_done1st).append("ms\n");
_last.toString(sb,d+1); // Nested slow-path print
for( int i=0; i<d; i++ ) sb.append(" ");
sb.append("join-i/o ").append(_onCstart-_last._onCdone).append("ms, ");
} else { // Leaf map call?
sb.append("Map ").append(_mapdone - _mapstart).append("ms (prep ").append(_userstart - _mapstart);
sb.append("ms, user ").append(_closestart-_userstart);
sb.append("ms, closeChk ").append(_mapdone-_closestart).append("ms), ");
}
sb.append("Red ").append(_onCdone - _onCstart).append("ms (locRed ");
sb.append(_reducedone-_onCstart).append("ms");
if( _remoteBlkDone!=0 ) {
sb.append(", remBlk ").append(_remoteBlkDone-_reducedone).append("ms, locBlk ");
sb.append(_localBlkDone-_remoteBlkDone).append("ms, close ");
sb.append(_onCdone-_localBlkDone).append("ms, size ");
sb.append(PrettyPrint.bytes(_size_rez0)).append("+").append(PrettyPrint.bytes(_size_rez1));
}
sb.append(")\n");
return sb;
}
}
MRProfile _profile;
public String profString() { return _profile.toString(); }
// Support for fluid-programming with strong types
private final T self() { return (T)this; }
/** Returns a Vec from the Frame. */
public final Vec vecs(int i) { return _fr.vecs()[i]; }
/** Invokes the map/reduce computation over the given Vecs. This call is
* blocking. */
public final T doAll( Vec... vecs ) { return doAll(0,vecs); }
public final T doAll(int outputs, Vec... vecs ) { return doAll(outputs,new Frame(null,vecs), false); }
/** Invokes the map/reduce computation over the given Frame. This call is
* blocking. */
public final T doAll( Frame fr, boolean run_local) { return doAll(0,fr, run_local); }
public final T doAll( Frame fr ) { return doAll(0,fr, false); }
public final T doAll( int outputs, Frame fr) {return doAll(outputs,fr,false);}
public final T doAll( int outputs, Frame fr, boolean run_local) {
dfork(outputs,fr, run_local);
return getResult();
}
public final void asyncExec(Vec... vecs){asyncExec(0,new Frame(vecs),false);}
public final void exec(Vec... vecs){exec(0, new Frame(vecs), false);}
public final void asyncExec(Frame fr){asyncExec(0,fr,false);}
public final void exec(Frame fr){exec(0, fr, false);}
public final void exec( int outputs, Frame fr, boolean run_local){
// Use first readable vector to gate home/not-home
fr.checkCompatible(); // Check for compatible vectors
if((_noutputs = outputs) > 0) _vid = fr.anyVec().group().reserveKeys(outputs);
_fr = fr; // Record vectors to work on
_nxx = (short)H2O.SELF.index(); _nhi = (short)H2O.CLOUD.size(); // Do Whole Cloud
_run_local = run_local; // Run locally by copying data, or run globally?
setupLocal0(); // Local setup
compute2();
}
/**
* Fork the task in strictly non-blocking fashion.
*
   * Same functionality as dfork, but does not raise priority, so the user should
   * *never* block on it.
*/
public final void asyncExec( int outputs, Frame fr, boolean run_local){
// Use first readable vector to gate home/not-home
fr.checkCompatible(); // Check for compatible vectors
if((_noutputs = outputs) > 0) _vid = fr.anyVec().group().reserveKeys(outputs);
_fr = fr; // Record vectors to work on
_nxx = (short)H2O.SELF.index(); _nhi = (short)H2O.CLOUD.size(); // Do Whole Cloud
_run_local = run_local; // Run locally by copying data, or run globally?
setupLocal0(); // Local setup
H2O.submitTask(this); // Begin normal execution on a FJ thread
}
/** Invokes the map/reduce computation over the given Frame. This call is
* asynchronous. It returns 'this', on which getResult() can be invoked
* later to wait on the computation. */
public final T dfork( Vec...vecs ) {return dfork(0,vecs);}
public T dfork( Frame fr ) {return dfork(0,fr,false);}
public final T dfork( int outputs, Vec... vecs) {
return dfork(outputs,new Frame(vecs),false);
}
public final T dfork( int outputs, Frame fr, boolean run_local) {
raisePriority();
asyncExec(outputs,fr,run_local);
return self();
}
/** Block for and get any final results from a dfork'd MRTask2.
* Note: the desired name 'get' is final in ForkJoinTask. */
public final T getResult() {
try {
try {
ForkJoinPool.managedBlock(this);
} catch (InterruptedException e) {
}
return self();
}catch(Throwable t){
throw new RuntimeException(t);
}
}
// Return true if blocking is unnecessary, which is true if the Task isDone.
public boolean isReleasable() { return isDone(); }
// Possibly blocks the current thread. Returns true if isReleasable would
// return true. Used by the FJ Pool management to spawn threads to prevent
// deadlock is otherwise all threads would block on waits.
public boolean block() {
while( !isDone() ) join();
return true;
}
/** Called once on remote at top level, probably with a subset of the cloud.
   * Called internally by D/F/J. Not expected to be user-called. */
@Override public final void dinvoke(H2ONode sender) {
setupLocal0(); // Local setup
compute2(); // Do The Main Work
// nothing here... must do any post-work-cleanup in onCompletion
}
// Setup for local work: fire off any global work to cloud neighbors; do all
// chunks; call user's init.
private final void setupLocal0() {
assert _profile==null;
_fs = new Futures();
_profile = new MRProfile(this);
_profile._localstart = System.currentTimeMillis();
_topLocal = true;
// Check for global vs local work
int selfidx = H2O.SELF.index();
int nlo = subShift(selfidx);
assert nlo < _nhi;
final int nmid = (nlo+_nhi)>>>1; // Mid-point
if( !_run_local && nlo+1 < _nhi ) { // Have global work?
_profile._rpcLstart = System.currentTimeMillis();
_nleft = remote_compute(nlo+1,nmid);
_profile._rpcRstart = System.currentTimeMillis();
_nrite = remote_compute( nmid,_nhi);
_profile._rpcRdone = System.currentTimeMillis();
}
_lo = 0; _hi = _fr.anyVec().nChunks(); // Do All Chunks
// If we have any output vectors, make a blockable Futures for them to
// block on.
// get the Vecs from the K/V store, to avoid racing fetches from the map calls
_fr.vecs();
setupLocal(); // Setup any user's shared local structures
_profile._localdone = System.currentTimeMillis();
}
// Make an RPC call to some node in the middle of the given range. Add a
// pending completion to self, so that we complete when the RPC completes.
private final RPC<T> remote_compute( int nlo, int nhi ) {
// No remote work?
if( !(nlo < nhi) ) return null;
int node = addShift(nlo);
assert node != H2O.SELF.index();
T rpc = clone();
rpc.setCompleter(null);
rpc._nhi = (short)nhi;
addToPendingCount(1); // Not complete until the RPC returns
// Set self up as needing completion by this RPC: when the ACK comes back
// we'll get a wakeup.
return new RPC(H2O.CLOUD._memary[node], rpc).addCompleter(this).call();
}
protected long _t0;
/** Called from FJ threads to do local work. The first called Task (which is
* also the last one to Complete) also reduces any global work. Called
   * internally by F/J. Not expected to be user-called. */
@Override public final void compute2() {
_t0 = System.nanoTime();
assert _left == null && _rite == null && _res == null;
_profile._mapstart = System.currentTimeMillis();
if( _hi-_lo >= 2 ) { // Multi-chunk case: just divide-and-conquer to 1 chunk
final int mid = (_lo+_hi)>>>1; // Mid-point
_left = clone();
_rite = clone();
_left._profile = new MRProfile(this);
_rite._profile = new MRProfile(this);
_left._hi = mid; // Reset mid-point
_rite._lo = mid; // Also set self mid-point
addToPendingCount(1); // One fork awaiting completion
_left.fork(); // Runs in another thread/FJ instance
_rite.compute2(); // Runs in THIS F/J thread
_profile._mapdone = System.currentTimeMillis();
return; // Not complete until the fork completes
}
    // Zero or 1 chunks, and furthermore the chunk might not be homed here
if( _hi > _lo ) { // Single chunk?
Vec v0 = _fr.anyVec();
if( _run_local || v0.chunkKey(_lo).home() ) { // And chunk is homed here?
// Make decompression chunk headers for these chunks
Vec vecs[] = _fr.vecs();
Chunk bvs[] = new Chunk[vecs.length];
NewChunk [] appendableChunks = null;
for( int i=0; i<vecs.length; i++ )
if( vecs[i] != null ) {
assert _run_local || vecs[i].chunkKey(_lo).home()
: "Chunk="+_lo+" v0="+v0+", k="+v0.chunkKey(_lo)+" v["+i+"]="+vecs[i]+", k="+vecs[i].chunkKey(_lo);
try{
bvs[i] = vecs[i].chunkForChunkIdx(_lo);
} catch(Throwable t){
System.err.println("missing chunk in MRTask " + getClass().getName());
t.printStackTrace();
throw new RuntimeException(t);
}
}
if(_noutputs > 0){
final VectorGroup vg = vecs[0].group();
_appendables = new AppendableVec[_noutputs];
appendableChunks = new NewChunk[_noutputs];
for(int i = 0; i < _appendables.length; ++i){
_appendables[i] = new AppendableVec(vg.vecKey(_vid+i));
appendableChunks[i] = (NewChunk)_appendables[i].chunkForChunkIdx(_lo);
}
}
// Call all the various map() calls that apply
_profile._userstart = System.currentTimeMillis();
if( _fr.vecs().length == 1 ) map(bvs[0]);
if( _fr.vecs().length == 2 ) map(bvs[0], bvs[1]);
if( _fr.vecs().length == 3 ) map(bvs[0], bvs[1], bvs[2]);
if( true ) map(bvs );
if(_noutputs == 1){ // convenience versions for cases with single output.
if( _fr.vecs().length == 1 ) map(bvs[0], appendableChunks[0]);
if( _fr.vecs().length == 2 ) map(bvs[0], bvs[1],appendableChunks[0]);
if( _fr.vecs().length == 3 ) map(bvs[0], bvs[1], bvs[2],appendableChunks[0]);
if( true ) map(bvs, appendableChunks[0]);
}
if(_noutputs == 2){ // convenience versions for cases with 2 outputs (e.g split).
if( _fr.vecs().length == 1 ) map(bvs[0], appendableChunks[0],appendableChunks[1]);
if( _fr.vecs().length == 2 ) map(bvs[0], bvs[1],appendableChunks[0],appendableChunks[1]);
if( _fr.vecs().length == 3 ) map(bvs[0], bvs[1], bvs[2],appendableChunks[0],appendableChunks[1]);
if( true ) map(bvs, appendableChunks[0],appendableChunks[1]);
}
map(bvs,appendableChunks);
_res = self(); // Save results since called map() at least once!
// Further D/K/V put any new vec results.
_profile._closestart = System.currentTimeMillis();
for( Chunk bv : bvs ) bv.close(_lo,_fs);
if(_noutputs > 0) for(NewChunk nch:appendableChunks)nch.close(_lo, _fs);
}
}
_profile._mapdone = System.currentTimeMillis();
tryComplete(); // And this task is complete
}
/** OnCompletion - reduce the left and right into self. Called internal by
* F/J. Not expected to be user-called. */
@Override public final void onCompletion( CountedCompleter caller ) {
_profile._onCstart = System.currentTimeMillis();
// Reduce results into 'this' so they collapse going up the execution tree.
// NULL out child-references so we don't accidentally keep large subtrees
// alive since each one may be holding large partial results.
reduce2(_left); _left = null;
reduce2(_rite); _rite = null;
// Only on the top local call, have more completion work
_profile._reducedone = System.currentTimeMillis();
if( _topLocal ) postLocal();
_profile._onCdone = System.currentTimeMillis();
}
// Call 'reduce' on pairs of mapped MRTask2's.
// Collect all pending Futures from both parties as well.
private void reduce2( MRTask2<T> mrt ) {
if( mrt == null ) return;
_profile.gather(mrt._profile,0);
if( _res == null ) _res = mrt._res;
else if( mrt._res != null ) _res.reduce4(mrt._res);
// Futures are shared on local node and transient (so no remote updates)
assert _fs == mrt._fs;
}
protected void postGlobal(){}
// Work done after all the main local work is done.
// Gather/reduce remote work.
// Block for other queued pending tasks.
// Copy any final results into 'this', such that a return of 'this' has the results.
private final void postLocal() {
reduce3(_nleft); // Reduce global results from neighbors.
reduce3(_nrite);
_profile._remoteBlkDone = System.currentTimeMillis();
_fs.blockForPending();
_profile._localBlkDone = System.currentTimeMillis();
// Finally, must return all results in 'this' because that is the API -
// what the user expects
int nlo = subShift(H2O.SELF.index());
int nhi = _nhi; // Save before copyOver crushes them
if( _res == null ) _nhi=-1; // Flag for no local results *at all*
    else if( _res != this ) {      // There is a local result, and it's not self
_res._profile = _profile; // Use my profile (not childs)
copyOver(_res); // So copy into self
}
closeLocal();
if( nlo==0 && nhi == H2O.CLOUD.size() ) {
// Do any post-writing work (zap rollup fields, etc)
_fr.reloadVecs();
for( int i=0; i<_fr.numCols(); i++ )
_fr.vecs()[i].postWrite();
postGlobal();
}
}
// Block for RPCs to complete, then reduce global results into self results
private void reduce3( RPC<T> rpc ) {
if( rpc == null ) return;
T mrt = rpc.get(); // This is a blocking remote call
// Note: because _fs is transient it is not set or cleared by the RPC.
// Because the MRT object is a clone of 'self' it's likely to contain a ptr
// to the self _fs which will be not-null and still have local pending
// blocks. Not much can be asserted there.
_profile.gather(mrt._profile, rpc.size_rez());
// Unlike reduce2, results are in mrt directly not mrt._res.
if( mrt._nhi != -1L ) { // Any results at all?
if( _res == null ) _res = mrt;
else _res.reduce4(mrt);
}
}
/** Call user's reduction. Also reduce any new AppendableVecs. Called
   * internally by F/J. Not expected to be user-called. */
protected void reduce4( T mrt ) {
// Reduce any AppendableVecs
if( _noutputs > 0 )
for( int i=0; i<_appendables.length; i++ )
_appendables[i].reduce(mrt._appendables[i]);
// User's reduction
reduce(mrt);
}
  /** Cancel/kill all work as best we can, then rethrow... do not invisibly swallow
   * exceptions (which is the F/J default). Called internally by F/J. Not
* expected to be user-called. */
@Override public final boolean onExceptionalCompletion(Throwable ex, CountedCompleter caller ) {
//if( _nleft != null ) _nleft.cancel(true); _nleft = null;
//if( _nrite != null ) _nrite.cancel(true); _nrite = null;
//if( _left != null ) _left.cancel(true); _left = null;
//if( _rite != null ) _rite.cancel(true); _rite = null;
_nleft = _nrite = null;
_left = _rite = null;
return super.onExceptionalCompletion(ex, caller);
}
/** Local Clone - setting final-field completer */
@Override public T clone() {
T x = (T)super.clone();
x.setCompleter(this); // Set completer, what used to be a final field
x._topLocal = false; // Not a top job
x._nleft = x._nrite = null;
x. _left = x. _rite = null;
x._fs = _fs;
x._profile = null; // Clone needs its own profile
x.setPendingCount(0); // Volatile write for completer field; reset pending count also
return x;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/MemoryBandwidth.java
|
package water;
import water.util.Log;
import water.util.Utils;
public class MemoryBandwidth {
public static void main(String[] args) {
int num_threads = Runtime.getRuntime().availableProcessors();
double membw = run(num_threads);
Log.info("Memory bandwidth (" + num_threads + " cores) : " + membw + " GB/s.");
}
/**
* Compute memory bandwidth in bytes / second
*/
static double run(int num_threads) {
final double membw[] = new double[num_threads];
Thread[] threads = new Thread[num_threads];
for (int t=0;t<num_threads;++t) {
final int thread_num = t;
threads[t] = new Thread() {
public void run() {
MemoryBandwidth l = new MemoryBandwidth();
membw[thread_num] = l.run_benchmark();
}
};
}
for (int t=0;t<num_threads;++t) {
threads[t].start();
}
for (int t=0;t<num_threads;++t) {
try {
threads[t].join();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
return Utils.sum(membw);
}
// memory bandwidth in bytes / second
double run_benchmark() {
// use the lesser of 40MB or 10% of Heap
final long M = Math.min(10000000l, Runtime.getRuntime().maxMemory()/40);
int[] vals = MemoryManager.malloc4((int)M);
double total;
int repeats = 20;
Timer timer = new Timer(); //ms
long sum = 0;
// write repeats * M ints
// read repeats * M ints
for (int l=repeats-1; l>=0; --l) {
for (int i=0; i<M; ++i) {
vals[i] = i + l;
}
sum = 0;
for (int i=0; i<M; ++i) {
sum += vals[i];
}
}
total = (double)timer.time()/1000./repeats;
//use the sum in a way that doesn't affect the result (don't want the compiler to optimize it away)
double time = total + ((M*(M-1)/2) - sum); // == total
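    // 'time' is seconds per repeat; each repeat writes M ints and reads M ints,
    // i.e. 2*M*4 bytes, so the quotient below is bytes per second.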
return (double)2*M*4/time; //(read+write) * 4 bytes
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/MemoryManager.java
|
package water;
import java.lang.management.*;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicLong;
import javax.management.Notification;
import javax.management.NotificationEmitter;
import jsr166y.ForkJoinPool;
import jsr166y.ForkJoinPool.ManagedBlocker;
import water.util.Log;
import water.util.Log.Tag.Sys;
/**
* Manages memory assigned to key/value pairs. All byte arrays used in
* keys/values should be allocated through this class - otherwise we risking
* running out of java memory, and throw unexpected OutOfMemory errors. The
* theory here is that *most* allocated bytes are allocated in large chunks by
* allocating new Values - with large backing arrays. If we intercept these
* allocation points, we cover most Java allocations. If such an allocation
* might trigger an OOM error we first free up some other memory.
*
* MemoryManager monitors memory used by the K/V store (by walking through the
* store (see Cleaner) and overall heap usage by hooking into gc.
*
* Memory is freed if either the cached memory is above the limit or if the
* overall heap usage is too high (in which case we want to use less mem for
* cache). There is also a lower limit on the amount of cache so that we never
* delete all the cache and therefore some computation should always be able to
* progress.
*
* The amount of memory to be freed is determined as the max of cached mem above
* the limit and heap usage above the limit.
*
* One of the primary control inputs is FullGC cycles: we check heap usage and
* set guidance for cache levels. We assume after a FullGC that the heap only
* has POJOs (Plain Old Java Objects, unknown size) and K/V Cached stuff
* (counted by us). We compute the free heap as MEM_MAX-heapUsage (after GC),
* and we compute POJO size as (heapUsage - K/V cache usage).
*
* @author tomas
* @author cliffc
*/
public abstract class MemoryManager {
// max heap memory
static public final long MEM_MAX = Runtime.getRuntime().maxMemory();
// Callbacks from GC
static final HeapUsageMonitor HEAP_USAGE_MONITOR = new HeapUsageMonitor();
// Keep the K/V store below this threshold AND this is the FullGC call-back
// threshold - which is limited in size to the old-gen pool size.
static long MEM_CRITICAL = HEAP_USAGE_MONITOR._gc_callback;
// Block allocations?
private static volatile boolean CAN_ALLOC = true;
private static volatile boolean MEM_LOW_CRITICAL = false;
// Lock for blocking on allocations
private static Object _lock = new Object();
// My Histogram. Called from any thread calling into the MM.
// Singleton, allocated now so I do not allocate during an OOM event.
static private final H2O.Cleaner.Histo myHisto = new H2O.Cleaner.Histo();
  // A monotonically increasing total count of memory allocated via MemoryManager.
// Useful in tracking total memory consumed by algorithms - just ask for the
// before & after amounts and diff them.
public static final AtomicLong MEM_ALLOC = new AtomicLong();
public static void setMemGood() {
if( CAN_ALLOC ) return;
synchronized(_lock) { CAN_ALLOC = true; _lock.notifyAll(); }
// NO LOGGING UNDER LOCK!
Log.info(Sys.CLEAN,"Continuing after swapping");
}
public static void setMemLow() {
if( !CAN_ALLOC ) return;
synchronized(_lock) { CAN_ALLOC = false; }
// NO LOGGING UNDER LOCK!
Log.info(Sys.CLEAN,"Pausing to swap to disk; more memory may help");
}
public static boolean canAlloc() { return CAN_ALLOC; }
public static void set_goals( String msg, boolean oom){
set_goals(msg, oom, 0);
}
// Set K/V cache goals.
// Allow (or disallow) allocations.
// Called from the Cleaner, when "cacheUsed" has changed significantly.
// Called from any FullGC notification, and HEAP/POJO_USED changed.
// Called on any OOM allocation
public static void set_goals( String msg, boolean oom , long bytes) {
// Our best guess of free memory, as of the last GC cycle
final long heapUsed = Boot.HEAP_USED_AT_LAST_GC;
final long timeGC = Boot.TIME_AT_LAST_GC;
final long freeHeap = MEM_MAX - heapUsed;
assert freeHeap >= 0 : "I am really confused about the heap usage; MEM_MAX="+MEM_MAX+" heapUsed="+heapUsed;
// Current memory held in the K/V store.
final long cacheUsage = myHisto.histo(false)._cached;
// Our best guess of POJO object usage: Heap_used minus cache used
final long pojoUsedGC = Math.max(heapUsed - cacheUsage,0);
// Block allocations if:
// the cache is > 7/8 MEM_MAX, OR
// we cannot allocate an equal amount of POJOs, pojoUsedGC > freeHeap.
// Decay POJOS_USED by 1/8th every 5 sec: assume we got hit with a single
// large allocation which is not repeating - so we do not need to have
// double the POJO amount.
// Keep at least 1/8th heap for caching.
// Emergency-clean the cache down to the blocking level.
long d = MEM_CRITICAL;
// Decay POJO amount
long p = pojoUsedGC;
long age = (System.currentTimeMillis() - timeGC); // Age since last FullGC
age = Math.min(age,10*60*1000 ); // Clip at 10mins
while( (age-=5000) > 0 ) p = p-(p>>3); // Decay effective POJO by 1/8th every 5sec
d -= 2*p - bytes; // Allow for the effective POJO, and again to throttle GC rate
d = Math.max(d,MEM_MAX>>3); // Keep at least 1/8th heap
H2O.Cleaner.DESIRED = d;
String m="";
if( cacheUsage > H2O.Cleaner.DESIRED ) {
m = (CAN_ALLOC?"Blocking! ":"blocked: ");
if( oom ) setMemLow(); // Stop allocations; trigger emergency clean
Boot.kick_store_cleaner();
} else { // Else we are not *emergency* cleaning, but may be lazily cleaning.
setMemGood(); // Cache is as low as we'll go; unblock
if( oom ) { // But still have an OOM?
m = "Unblock allocations; cache emptied but memory is low: ";
// Means the heap is full of uncached POJO's - which cannot be spilled.
        // Here we enter the zone of possibly dying from OOM. There's no point
// in blocking allocations, as no more memory can be freed by more
// cache-flushing. Might as well proceed on a "best effort" basis.
Log.warn(Sys.CLEAN,m+" OOM but cache is emptied: MEM_MAX = " + PrettyPrint.bytes(MEM_MAX) + ", DESIRED_CACHE = " + PrettyPrint.bytes(d) +", CACHE = " + PrettyPrint.bytes(cacheUsage) + ", POJO = " + PrettyPrint.bytes(p) + ", this request bytes = " + PrettyPrint.bytes(bytes));
} else {
m = "MemGood: ";
}
}
// No logging if under memory pressure: can deadlock the cleaner thread
if( Log.flag(Sys.CLEAN) ) {
String s = m+msg+", HEAP_LAST_GC="+(heapUsed>>20)+"M, KV="+(cacheUsage>>20)+"M, POJO="+(pojoUsedGC>>20)+"M, free="+(freeHeap>>20)+"M, MAX="+(MEM_MAX>>20)+"M, DESIRED="+(H2O.Cleaner.DESIRED>>20)+"M"+(oom?" OOM!":" NO-OOM");
if( CAN_ALLOC ) Log.debug(Sys.CLEAN ,s);
else Log.unwrap(System.err,s);
}
}
/**
* Monitors the heap usage after full gc run and tells Cleaner to free memory
* if mem usage is too high. Stops new allocation if mem usage is critical.
* @author tomas
*/
private static class HeapUsageMonitor implements javax.management.NotificationListener {
MemoryMXBean _allMemBean = ManagementFactory.getMemoryMXBean(); // general
MemoryPoolMXBean _oldGenBean;
public long _gc_callback;
HeapUsageMonitor() {
int c = 0;
for( MemoryPoolMXBean m : ManagementFactory.getMemoryPoolMXBeans() ) {
if( m.getType() != MemoryType.HEAP ) // only interested in HEAP
continue;
if( m.isCollectionUsageThresholdSupported()
&& m.isUsageThresholdSupported()) {
// should be Old pool, get called when memory is critical
_oldGenBean = m;
_gc_callback = MEM_MAX;
// Really idiotic API: no idea what the usageThreshold is, so I have
// to guess. Start high, catch IAE & lower by 1/8th and try again.
while( true ) {
try {
m.setCollectionUsageThreshold(_gc_callback);
break;
} catch( IllegalArgumentException iae ) {
// Do NOT log this exception, it is expected and unavoidable and
// entirely handled.
_gc_callback -= (_gc_callback>>3);
}
}
NotificationEmitter emitter = (NotificationEmitter) _allMemBean;
emitter.addNotificationListener(this, null, m);
++c;
}
}
assert c == 1;
}
/**
* Callback routine called by JVM after full gc run. Has two functions:
* 1) sets the amount of memory to be cleaned from the cache by the Cleaner
* 2) sets the CAN_ALLOC flag to false if memory level is critical
*
* The callback happens in a system thread, and hence not through the usual
* water.Boot loader - and so any touched classes are in the wrong class
* loader and you end up with new classes with uninitialized global vars.
     * Limit this code to touching global vars in the Boot class.
*/
public void handleNotification(Notification notification, Object handback) {
String notifType = notification.getType();
if( notifType.equals(MemoryNotificationInfo.MEMORY_COLLECTION_THRESHOLD_EXCEEDED)) {
// Memory used after this FullGC
Boot.TIME_AT_LAST_GC = System.currentTimeMillis();
Boot.HEAP_USED_AT_LAST_GC = _allMemBean.getHeapMemoryUsage().getUsed();
MEM_LOW_CRITICAL = Boot.HEAP_USED_AT_LAST_GC > (MEM_MAX - (MEM_MAX >> 2));
if(Boot.HEAP_USED_AT_LAST_GC > (MEM_MAX - (MEM_MAX >> 1))) { // emergency measure - really low on memory, stop allocations right now!
setMemLow();
} else // enable new allocations (even if cleaner is still running, we have enough RAM)
setMemGood();
Boot.kick_store_cleaner();
}
}
}
// Allocates memory with cache management
// Will block until there is enough available memory.
// Catches OutOfMemory, clears cache & retries.
public static Object malloc(int elems, long bytes, int type, Object orig, int from ) {
return malloc(elems,bytes,type,orig,from,false);
}
public static Object malloc(int elems, long bytes, int type, Object orig, int from , boolean force) {
// Do not assert on large-size here. RF's temp internal datastructures are
// single very large arrays.
//assert bytes < Value.MAX : "malloc size=0x"+Long.toHexString(bytes);
while( true ) {
if( (!MEM_LOW_CRITICAL && !force) && !CAN_ALLOC && // Not allowing allocations?
bytes > 256 && // Allow tiny ones in any case
// To prevent deadlock, we cannot block the cleaner thread in any
// case. This is probably an allocation for logging (ouch! shades of
// logging-induced deadlock!) which will probably be recycled quickly.
!(Thread.currentThread() instanceof H2O.Cleaner) ) {
synchronized(_lock) {
try { _lock.wait(3*1000); } catch (InterruptedException ex) { }
}
}
MEM_ALLOC.addAndGet(bytes);
try {
switch( type ) {
case 1: return new byte [elems];
case 2: return new short [elems];
case 4: return new int [elems];
case 8: return new long [elems];
case 5: return new float [elems];
case 9: return new double [elems];
case 0: return new boolean[elems];
case -1: return Arrays.copyOfRange((byte [])orig,from,elems);
case -4: return Arrays.copyOfRange((int [])orig,from,elems);
case -8: return Arrays.copyOfRange((long [])orig,from,elems);
case -9: return Arrays.copyOfRange((double[])orig,from,elems);
default: throw H2O.unimpl();
}
}
catch( OutOfMemoryError e ) {
// Do NOT log OutOfMemory, it is expected and unavoidable and handled
// in most cases by spilling to disk.
if( H2O.Cleaner.isDiskFull() )
UDPRebooted.suicide(UDPRebooted.T.oom, H2O.SELF);
}
set_goals("OOM",true, bytes); // Low memory; block for swapping
}
}
// Allocates memory with cache management
public static byte [] malloc1 (int size) { return malloc1(size,false); }
public static byte [] malloc1 (int size, boolean force) { return (byte [])malloc(size,size*1, 1,null,0,force); }
public static short [] malloc2 (int size) { return (short [])malloc(size,size*2, 2,null,0); }
public static int [] malloc4 (int size) { return (int [])malloc(size,size*4, 4,null,0); }
public static long [] malloc8 (int size) { return (long [])malloc(size,size*8, 8,null,0); }
public static float [] malloc4f(int size) { return (float [])malloc(size,size*4, 5,null,0); }
public static double [] malloc8d(int size) { return (double [])malloc(size,size*8, 9,null,0); }
public static boolean[] mallocZ (int size) { return (boolean[])malloc(size,size*1, 0,null,0); }
public static byte [] arrayCopyOfRange(byte [] orig, int from, int sz) { return (byte []) malloc(sz,(sz-from)*1,-1,orig,from); }
public static int [] arrayCopyOfRange(int [] orig, int from, int sz) { return (int []) malloc(sz,(sz-from)*4,-4,orig,from); }
public static long [] arrayCopyOfRange(long [] orig, int from, int sz) { return (long []) malloc(sz,(sz-from)*8,-8,orig,from); }
public static double [] arrayCopyOfRange(double[] orig, int from, int sz) { return (double[]) malloc(sz,(sz-from)*8,-9,orig,from); }
public static byte [] arrayCopyOf( byte [] orig, int sz) { return arrayCopyOfRange(orig,0,sz); }
public static int [] arrayCopyOf( int [] orig, int sz) { return arrayCopyOfRange(orig,0,sz); }
public static long [] arrayCopyOf( long [] orig, int sz) { return arrayCopyOfRange(orig,0,sz); }
public static double [] arrayCopyOf( double[] orig, int sz) { return arrayCopyOfRange(orig,0,sz); }
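  // Hedged usage note: callers allocate through these wrappers instead of 'new'
  // (e.g. byte[] buf = MemoryManager.malloc1(len);) so that an OutOfMemoryError
  // triggers cache spilling and a retry instead of propagating to the caller.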
// Memory available for tasks (we assume 3/4 of the heap is available for tasks)
static final AtomicLong _taskMem = new AtomicLong(MEM_MAX-(MEM_MAX>>2));
/**
* Try to reserve memory needed for task execution and return true if
* succeeded. Tasks have a shared pool of memory which they should ask for
* in advance before they even try to allocate it.
*
* This method is another backpressure mechanism to make sure we do not
 * exhaust the system's resources by running too many tasks at the same time.
 * Tasks are expected to reserve memory before proceeding with their
 * execution and to make sure they release it when done.
*
* @param m - requested number of bytes
* @return true if there is enough free memory
*/
public static boolean tryReserveTaskMem(long m){
if(!CAN_ALLOC)return false;
if( m == 0 ) return true;
assert m >= 0:"m < 0: " + m;
long current = _taskMem.addAndGet(-m);
if(current < 0){
_taskMem.addAndGet(m);
return false;
}
return true;
}
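  // Hedged usage sketch (hypothetical task code): reserve before allocating and
  // always release in a finally block so the shared task-memory pool is not leaked.
  //   long req = nChunks * memOverheadPerChunk();      // caller-estimated bytes
  //   MemoryManager.reserveTaskMem(req);               // may block until memory frees up
  //   try { /* memory-hungry work */ }
  //   finally { MemoryManager.freeTaskMem(req); }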
private static Object _taskMemLock = new Object();
public static void reserveTaskMem(long m){
final long bytes = m;
while(!tryReserveTaskMem(bytes)){
try {
ForkJoinPool.managedBlock(new ManagedBlocker() {
@Override
public boolean isReleasable() {return _taskMem.get() >= bytes;}
@Override
public boolean block() throws InterruptedException {
synchronized(_taskMemLock){
try {_taskMemLock.wait();} catch( InterruptedException e ) {}
}
return isReleasable();
}
});
} catch (InterruptedException e){throw Log.errRTExcept(e); }
}
}
/**
* Free the memory successfully reserved by task.
   * @param m - number of bytes to release
*/
public static void freeTaskMem(long m){
if(m == 0)return;
_taskMem.addAndGet(m);
synchronized(_taskMemLock){
_taskMemLock.notifyAll();
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Model.java
|
package water;
import hex.ConfusionMatrix;
import hex.VarImp;
import javassist.*;
import water.api.*;
import water.api.Request.API;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.TransfVec;
import water.fvec.Vec;
import water.serial.AutoBufferSerializer;
import water.util.*;
import water.util.Log.Tag.Sys;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import static water.util.JCodeGen.toStaticVar;
import static water.util.Utils.contains;
/**
* A Model models reality (hopefully).
* A model can be used to 'score' a row, or a collection of rows on any
* compatible dataset - meaning the row has all the columns with the same names
 * as used to build the model.
*/
public abstract class Model extends Lockable<Model> {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
/** Dataset key used to *build* the model, for models for which this makes
* sense, or null otherwise. Not all models are built from a dataset (eg
* artificial models), or are built from a single dataset (various ensemble
* models), so this key has no *mathematical* significance in the model but
* is handy during common model-building and for the historical record. */
@API(help="Datakey used to *build* the model")
public final Key _dataKey;
/** Columns used in the model and are used to match up with scoring data
* columns. The last name is the response column name. */
@API(help="Column names used to build the model")
public final String _names[];
/** Categorical/factor/enum mappings, per column. Null for non-enum cols.
* The last column holds the response col enums. */
@API(help="Column names used to build the model")
public final String _domains[][];
@API(help = "Relative class distribution factors in original data")
public final float[] _priorClassDist;
@API(help = "Relative class distribution factors used for model building")
protected float[] _modelClassDist;
  // WARNING: be really careful when modifying this POJO because
  // modification does not involve an update in the DKV
public void setModelClassDistribution(float[] classdist) {
_modelClassDist = classdist.clone();
}
private final UniqueId uniqueId;
/** The start time in mS since the epoch for model training. */
public long training_start_time = 0L;
/** The duration in mS for model training. */
public long training_duration_in_ms = 0L;
/** Any warnings thrown during model building. */
@API(help="warnings")
public String[] warnings = new String[0];
/** Whether or not this model has cross-validated results stored. */
protected boolean _have_cv_results;
/** Full constructor from frame: Strips out the Vecs to just the names needed
* to match columns later for future datasets.
*/
public Model( Key selfKey, Key dataKey, Frame fr, float[] priorClassDist ) {
this(selfKey,dataKey,fr.names(),fr.domains(), priorClassDist, null, 0, 0);
}
public Model( Key selfKey, Key dataKey, String names[], String domains[][], float[] priorClassDist, float[] modelClassDist) {
this(selfKey,dataKey,names,domains,priorClassDist,modelClassDist,0,0);
}
/** Full constructor */
public Model( Key selfKey, Key dataKey, String names[], String domains[][], float[] priorClassDist, float[] modelClassDist, long training_start_time, long training_duration_in_ms ) {
super(selfKey);
this.uniqueId = new UniqueId(_key);
if( domains == null ) domains=new String[names.length+1][];
assert domains.length==names.length;
assert names.length >= 1;
assert names[names.length-1] != null; // Have a valid response-column name?
_dataKey = dataKey;
_names = names;
_domains = domains;
_priorClassDist = priorClassDist;
_modelClassDist = modelClassDist;
this.training_duration_in_ms = training_duration_in_ms;
this.training_start_time = training_start_time;
}
// Currently only implemented by GLM2, DeepLearning, GBM and DRF:
public Request2 get_params() { throw new UnsupportedOperationException("get_params() has not yet been implemented in class: " + this.getClass()); }
// NOTE: this is a local copy of the Job; to get the real state you need to get it from the DKV.
// Currently only implemented by GLM2, DeepLearning, GBM and DRF:
public Request2 job() { throw new UnsupportedOperationException("job() has not yet been implemented in class: " + this.getClass()); }
public enum ModelCategory {
Unknown,
Binomial,
Multinomial,
Regression,
Clustering;
}
// TODO: override in KMeansModel once that's rewritten on water.Model
public ModelCategory getModelCategory() {
return (isClassifier() ?
(nclasses() > 2 ? ModelCategory.Multinomial : ModelCategory.Binomial) :
ModelCategory.Regression);
}
/** Remove any Model internal Keys */
@Override public Futures delete_impl(Futures fs) { return fs; /* None in the default Model */ }
@Override public String errStr() { return "Model"; }
public void addWarning(String warning) {
if(this.warnings == null || this.warnings.length == 0)
this.warnings = new String[]{warning};
else {
this.warnings = Arrays.copyOf(this.warnings,this.warnings.length+1);
this.warnings[this.warnings.length-1] = warning;
}
}
public boolean isSupervised() { return true; }
public UniqueId getUniqueId() {
return this.uniqueId;
}
public void start_training(long training_start_time) {
Log.info("setting training_start_time to: " + training_start_time + " for Model: " + this._key.toString() + " (" + this.getClass().getSimpleName() + "@" + System.identityHashCode(this) + ")");
final long t = training_start_time;
new TAtomic<Model>() {
@Override public Model atomic(Model m) {
if (m != null) {
m.training_start_time = t;
} return m;
}
}.invoke(_key);
this.training_start_time = training_start_time;
}
public void start_training(Model previous) {
training_start_time = System.currentTimeMillis();
Log.info("setting training_start_time to: " + training_start_time + " for Model: " + this._key.toString() + " (" + this.getClass().getSimpleName() + "@" + System.identityHashCode(this) + ") [checkpoint case]");
if (null != previous)
training_duration_in_ms += previous.training_duration_in_ms;
final long t = training_start_time;
final long d = training_duration_in_ms;
new TAtomic<Model>() {
@Override public Model atomic(Model m) {
if (m != null) {
m.training_start_time = t;
m.training_duration_in_ms = d;
} return m;
}
}.invoke(_key);
}
public void stop_training() {
training_duration_in_ms += (System.currentTimeMillis() - training_start_time);
Log.info("setting training_duration_in_ms to: " + training_duration_in_ms + " for Model: " + this._key.toString() + " (" + this.getClass().getSimpleName() + "@" + System.identityHashCode(this) + ")");
final long d = training_duration_in_ms;
new TAtomic<Model>() {
@Override public Model atomic(Model m) {
if (m != null) {
m.training_duration_in_ms = d;
} return m;
}
}.invoke(_key);
}
public String responseName() { return _names[ _names.length-1]; }
public String[] classNames() { return _domains[_domains.length-1]; }
public boolean isClassifier() { return classNames() != null ; }
public int nclasses() {
String cns[] = classNames();
return cns==null ? 1 : cns.length;
}
/** Returns number of input features */
public int nfeatures() { return _names.length - 1; }
/** For classifiers, confusion matrix on validation set. */
public ConfusionMatrix cm() { return null; }
/** Returns mse for validation set. */
public double mse() { return Double.NaN; }
/** Variable importance of individual input features measured by this model. */
public VarImp varimp() { return null; }
/** Bulk score for given <code>fr</code> frame.
* The frame is always adapted to this model.
*
* @param fr frame to be scored
* @return frame holding predicted values
*
* @see #score(Frame, boolean)
*/
public Frame score(Frame fr) {
return score(fr, true);
}
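  // Illustrative sketch (not part of the original source): typical bulk-scoring
  // use, assuming a hypothetical already-built `model` and test Frame `test`.
  //
  //   Frame preds = model.score(test);    // test is adapted to the model internally
  //   // classifiers: vec 0 = predicted label, remaining vecs = class probabilities;
  //   // regression : a single vec of predicted values
  //   try { /* ... use preds ... */ }
  //   finally { preds.delete(); }         // the caller owns the result frame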
/** Bulk score the frame <code>fr</code>, producing a Frame result; the 1st Vec is the
* predicted class, the remaining Vecs are the probability distributions.
* For Regression (single-class) models, the 1st and only Vec is the
* prediction value.
*
   * The flag <code>adapt</code> controls whether <code>fr</code> is first adapted to this model.
   * @param fr frame which should be scored
   * @param adapt a flag enforcing an adaptation of <code>fr</code> to this model. If the flag
   * is <code>false</code>, the scoring code expects that <code>fr</code> is already adapted.
   * @return a new frame containing predicted values. For classification it contains a column with
   * the prediction and the distribution over all response classes. For regression it contains only
   * one column with predicted values.
*/
public final Frame score(Frame fr, boolean adapt) {
if (isSupervised()) {
int ridx = fr.find(responseName());
if (ridx != -1) { // drop the response for scoring!
fr = new Frame(fr);
fr.remove(ridx);
}
}
// Adapt the Frame layout - returns adapted frame and frame containing only
// newly created vectors
Frame[] adaptFrms = adapt ? adapt(fr,false) : null;
// Adapted frame containing all columns - mix of original vectors from fr
// and newly created vectors serving as adaptors
Frame adaptFrm = adapt ? adaptFrms[0] : fr;
// Contains only newly created vectors. The frame eases deletion of these vectors.
Frame onlyAdaptFrm = adapt ? adaptFrms[1] : null;
// Invoke scoring
Frame output = scoreImpl(adaptFrm);
    // Be nice to DKV and delete the vectors which we created :-)
if (adapt) onlyAdaptFrm.delete();
return output;
}
  /** Score an already adapted frame.
   *
   * @param adaptFrm frame already adapted to this model's columns and domains
   * @return frame holding the predicted values
   */
protected Frame scoreImpl(Frame adaptFrm) {
if (isSupervised()) {
int ridx = adaptFrm.find(responseName());
assert ridx == -1 : "Adapted frame should not contain response in scoring method!";
assert nfeatures() == adaptFrm.numCols() : "Number of model features " + nfeatures() + " != number of test set columns: " + adaptFrm.numCols();
assert adaptFrm.vecs().length == nfeatures() : "Scoring data set contains wrong number of columns: " + adaptFrm.vecs().length + " instead of " + nfeatures();
}
// Create a new vector for response
// If the model produces a classification/enum, copy the domain into the
// result vector.
int nc = nclasses();
Vec [] newVecs = new Vec[]{adaptFrm.anyVec().makeZero(classNames())};
if(nc > 1)
newVecs = Utils.join(newVecs,adaptFrm.anyVec().makeZeros(nc));
String [] names = new String[newVecs.length];
names[0] = "predict";
for(int i = 1; i < names.length; ++i)
names[i] = classNames()[i-1];
final int num_features = nfeatures();
new MRTask2() {
@Override public void map( Chunk chks[] ) {
double tmp [] = new double[num_features]; // We do not need the last field representing response
float preds[] = new float [nclasses()==1?1:nclasses()+1];
int len = chks[0]._len;
for( int row=0; row<len; row++ ) {
float p[] = score0(chks,row,tmp,preds);
for( int c=0; c<preds.length; c++ )
chks[num_features+c].set0(row,p[c]);
}
}
}.doAll(Utils.join(adaptFrm.vecs(),newVecs));
// Return just the output columns
return new Frame(names,newVecs);
}
/** Single row scoring, on a compatible Frame. */
public final float[] score( Frame fr, boolean exact, int row ) {
double tmp[] = new double[fr.numCols()];
for( int i=0; i<tmp.length; i++ )
tmp[i] = fr.vecs()[i].at(row);
return score(fr.names(),fr.domains(),exact,tmp);
}
/** Single row scoring, on a compatible set of data. Fairly expensive to adapt. */
public final float[] score( String names[], String domains[][], boolean exact, double row[] ) {
return score(adapt(names,domains,exact),row,new float[nclasses()]);
}
/** Single row scoring, on a compatible set of data, given an adaption vector */
public final float[] score( int map[][][], double row[], float[] preds ) {
/*FIXME final int[][] colMap = map[map.length-1]; // Response column mapping is the last array
assert colMap.length == _names.length-1 : " "+Arrays.toString(colMap)+" "+Arrays.toString(_names);
double tmp[] = new double[colMap.length]; // The adapted data
for( int i=0; i<colMap.length; i++ ) {
// Column mapping, or NaN for missing columns
double d = colMap[i]==-1 ? Double.NaN : row[colMap[i]];
if( map[i] != null ) { // Enum mapping
int e = (int)d;
if( e < 0 || e >= map[i].length ) d = Double.NaN; // User data is out of adapt range
else {
e = map[i][e];
d = e==-1 ? Double.NaN : (double)e;
}
}
tmp[i] = d;
}
return score0(tmp,preds); // The results. */
return null;
}
/** Build an adaption array. The length is equal to the Model's vector length.
* Each inner 2D-array is a
* compressed domain map from data domains to model domains - or null for non-enum
* columns, or null for identity mappings. The extra final int[] is the
   * column mapping itself, mapping from model columns to data columns, or -1
   * if missing.
   * If 'exact' is true, will throw if there are:
   * any columns in the model but not in the input set;
   * any enums in the data that the model does not understand; or
   * any enums returned by the model that the data does not have a mapping for.
* If 'exact' is false, these situations will use or return NA's instead.
*/
private int[][][] adapt( String names[], String domains[][], boolean exact) {
int maplen = names.length;
int map[][][] = new int[maplen][][];
// Make sure all are compatible
for( int c=0; c<names.length;++c) {
// Now do domain mapping
String ms[] = _domains[c]; // Model enum
String ds[] = domains[c]; // Data enum
if( ms == ds ) { // Domains trivially equal?
} else if( ms == null ) {
throw new IllegalArgumentException("Incompatible column: '" + _names[c] + "', expected (trained on) numeric, was passed a categorical");
} else if( ds == null ) {
if( exact )
throw new IllegalArgumentException("Incompatible column: '" + _names[c] + "', expected (trained on) categorical, was passed a numeric");
throw H2O.unimpl(); // Attempt an asEnum?
} else if( !Arrays.deepEquals(ms, ds) ) {
map[c] = getDomainMapping(_names[c], ms, ds, exact);
} // null mapping is equal to identity mapping
}
return map;
}
/**
   * Value used for columns that are missing during adaptation between train/test datasets.
   * Override this method for models that have sparse data handling;
   * otherwise, NaN is used.
* @return real-valued number (can be NaN)
*/
protected double missingColumnsType() { return Double.NaN; }
/** Build an adapted Frame from the given Frame. Useful for efficient bulk
* scoring of a new dataset to an existing model. Same adaption as above,
* but expressed as a Frame instead of as an int[][]. The returned Frame
* does not have a response column.
* It returns a <b>two element array</b> containing an adapted frame and a
   * frame which contains only the vectors which were adapted (the purpose of the
   * second frame is to make it easy to delete all adapted vectors by deleting
   * that frame). */
public Frame[] adapt( final Frame fr, boolean exact) {
return adapt(fr, exact, true);
}
public Frame[] adapt( final Frame fr, boolean exact, boolean haveResponse) {
Frame vfr = new Frame(fr); // To avoid modification of original frame fr
int n = _names.length;
if (haveResponse && isSupervised()) {
int ridx = vfr.find(_names[_names.length - 1]);
if (ridx != -1 && ridx != vfr._names.length - 1) { // Unify frame - put response to the end
String name = vfr._names[ridx];
vfr.add(name, vfr.remove(ridx));
}
n = ridx == -1 ? _names.length - 1 : _names.length;
}
String [] names = isSupervised() ? Arrays.copyOf(_names, n) : _names.clone();
Frame [] subVfr;
// replace missing columns with NaNs (or 0s for DeepLearning with sparse data)
subVfr = vfr.subframe(names, missingColumnsType());
vfr = subVfr[0]; // extract only subframe but keep the rest for delete later
Vec[] frvecs = vfr.vecs();
boolean[] toEnum = new boolean[frvecs.length];
if(!exact) for(int i = 0; i < n;++i)
if(_domains[i] != null && !frvecs[i].isEnum()) {// if model expects domain but input frame does not have domain => switch vector to enum
frvecs[i] = frvecs[i].toEnum();
toEnum[i] = true;
}
int[][][] map = adapt(names,vfr.domains(),exact);
    assert map.length == names.length; // Be sure that the adapt call above does not skip any column
ArrayList<Vec> avecs = new ArrayList<Vec>(); // adapted vectors
ArrayList<String> anames = new ArrayList<String>(); // names for adapted vector
for( int c=0; c<map.length; c++ ) // Iterate over columns
if(map[c] != null) { // Column needs adaptation
Vec adaptedVec;
        if (toEnum[c]) { // Vector was already flipped to enum above; compose the transformations
adaptedVec = TransfVec.compose( (TransfVec) frvecs[c], map[c], vfr.domains()[c], false);
} else adaptedVec = frvecs[c].makeTransf(map[c], vfr.domains()[c]);
avecs.add(frvecs[c] = adaptedVec);
anames.add(names[c]); // Collect right names
      } else if (toEnum[c]) { // Vector was transformed to enum domain but does not need adaptation; we still need to record it
avecs.add(frvecs[c]);
anames.add(names[c]);
}
// Fill trash bin by vectors which need to be deleted later by the caller.
Frame vecTrash = new Frame(anames.toArray(new String[anames.size()]), avecs.toArray(new Vec[avecs.size()]));
if (subVfr[1]!=null) vecTrash.add(subVfr[1], true);
return new Frame[] { new Frame(names,frvecs), vecTrash };
}
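  // Illustrative sketch (not part of the original source): using the two-element
  // result, mirroring what score(Frame,boolean) does above. `model` and `test`
  // are hypothetical.
  //
  //   Frame[] adapted   = model.adapt(test, false);
  //   Frame adaptedFrm  = adapted[0];    // all columns, adapted to the model
  //   Frame onlyAdapted = adapted[1];    // just the newly created adaptor vectors
  //   try { /* ... score against adaptedFrm ... */ }
  //   finally { onlyAdapted.delete(); }  // delete only the vectors adapt() created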
/** Returns a mapping between values of model domains (<code>modelDom</code>) and given column domain.
* @see #getDomainMapping(String, String[], String[], boolean) */
public static int[][] getDomainMapping(String[] modelDom, String[] colDom, boolean exact) {
return getDomainMapping(null, modelDom, colDom, exact);
}
  /**
   * Returns a mapping for the given column according to the given <code>modelDom</code>.
   * In this case, <code>modelDom</code> is the target domain which the model was trained on.
   *
   * @param colName name of the column which is mapped, can be null.
   * @param modelDom target (model) domain
   * @param colDom domain of the column in the scoring data
   * @param logNonExactMapping if true, log factors which cannot be mapped between the two domains
   * @return packed mapping between the column domain and the model domain, sorted to support
   *         binary search in TransfVec
   */
public static int[][] getDomainMapping(String colName, String[] modelDom, String[] colDom, boolean logNonExactMapping) {
int emap[] = new int[modelDom.length];
boolean bmap[] = new boolean[modelDom.length];
HashMap<String,Integer> md = new HashMap<String, Integer>((int) ((colDom.length/0.75f)+1));
for( int i = 0; i < colDom.length; i++) md.put(colDom[i], i);
for( int i = 0; i < modelDom.length; i++) {
Integer I = md.get(modelDom[i]);
if (I == null && logNonExactMapping)
Log.warn(Sys.SCORM, "Domain mapping: target domain contains the factor '"+modelDom[i]+"' which DOES NOT appear in input domain " + (colName!=null?"(column: " + colName+")":""));
if (I!=null) {
emap[i] = I;
bmap[i] = true;
}
}
if (logNonExactMapping) { // Inform about additional values in column domain which do not appear in model domain
for (int i=0; i<colDom.length; i++) {
boolean found = false;
for (int j=0; j<emap.length; j++)
if (emap[j]==i) { found=true; break; }
if (!found)
Log.warn(Sys.SCORM, "Domain mapping: target domain DOES NOT contain the factor '"+colDom[i]+"' which appears in input domain "+ (colName!=null?"(column: " + colName+")":""));
}
}
// produce packed values
int[][] res = Utils.pack(emap, bmap);
// Sort values in numeric order to support binary search in TransfVec
Utils.sortWith(res[0], res[1]);
return res;
}
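  // Illustrative example (not part of the original source) of the mapping above,
  // with made-up domains:
  //
  //   String[] modelDom = { "cat", "dog", "mouse" };  // levels the model was trained on
  //   String[] colDom   = { "dog", "mouse", "pig" };  // levels present in the scoring data
  //   int[][] map = getDomainMapping(modelDom, colDom, true);
  //   // "dog" and "mouse" are matched to their positions in colDom; "cat" has no
  //   // match in the data and "pig" is an unseen level, both of which are logged
  //   // because the flag is true. The packed, sorted result is what TransfVec
  //   // uses for its binary-search lookups.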
/** Bulk scoring API for one row. Chunks are all compatible with the model,
   * and the last Chunks are expected to hold the final distribution and prediction.
* Default method is to just load the data into the tmp array, then call
* subclass scoring logic. */
protected float[] score0( Chunk chks[], int row_in_chunk, double[] tmp, float[] preds ) {
assert chks.length>=_names.length; // Last chunk is for the response
    for( int i=0; i<nfeatures(); i++ ) // Do not include the last value since it can contain a response
tmp[i] = chks[i].at0(row_in_chunk);
float[] scored = score0(tmp,preds);
// Correct probabilities obtained from training on oversampled data back to original distribution
// C.f. http://gking.harvard.edu/files/0s.pdf Eq.(27)
if (isClassifier() && _priorClassDist != null && _modelClassDist != null) {
assert(scored.length == nclasses()+1); //1 label + nclasses probs
ModelUtils.correctProbabilities(scored, _priorClassDist, _modelClassDist);
//set label based on corrected probabilities (max value wins, with deterministic tie-breaking)
scored[0] = ModelUtils.getPrediction(scored, tmp);
}
return scored;
}
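  // Illustrative note (not part of the original source): the prior correction
  // referenced above is assumed to rescale each class probability as
  //   p'_c  ~  p_c * (_priorClassDist[c] / _modelClassDist[c])
  // followed by renormalization so the probabilities sum to 1, after which the
  // label in scored[0] is recomputed from the corrected probabilities.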
/**
* Compute the model error for a given test data set
* For multi-class classification, this is the classification error based on assigning labels for the highest predicted per-class probability.
* For binary classification, this is the classification error based on assigning labels using the optimal threshold for maximizing the F1 score.
* For regression, this is the mean squared error (MSE).
* @param ftest Frame containing test data
* @param vactual The response column Vec
* @param fpreds Frame containing ADAPTED (domain labels from train+test data) predicted data (classification: label + per-class probabilities, regression: target)
* @param hitratio_fpreds Frame containing predicted data (domain labels from test data) (classification: label + per-class probabilities, regression: target)
* @param label Name for the scored data set to be printed
* @param printMe Whether to print the scoring results to Log.info
* @param max_conf_mat_size Largest size of Confusion Matrix (#classes) for it to be printed to Log.info
* @param cm Confusion Matrix object to populate for multi-class classification (also used for regression)
* @param auc AUC object to populate for binary classification
* @param hr HitRatio object to populate for classification
* @return model error, see description above
*/
public double calcError(final Frame ftest, final Vec vactual,
final Frame fpreds, final Frame hitratio_fpreds,
final String label, final boolean printMe,
final int max_conf_mat_size, final water.api.ConfusionMatrix cm,
final AUC auc,
final HitRatio hr)
{
StringBuilder sb = new StringBuilder();
double error = Double.POSITIVE_INFINITY;
// populate AUC
if (auc != null) {
assert(isClassifier());
assert(nclasses() == 2);
auc.actual = ftest;
auc.vactual = vactual;
auc.predict = fpreds;
auc.vpredict = fpreds.vecs()[2]; //binary classifier (label, prob0, prob1 (THIS ONE), adaptedlabel)
auc.invoke();
auc.toASCII(sb);
error = auc.data().err(); //using optimal threshold for F1
}
// populate CM
if (cm != null) {
cm.actual = ftest;
cm.vactual = vactual;
cm.predict = fpreds;
cm.vpredict = fpreds.vecs()[0]; // prediction (either label or regression target)
cm.invoke();
if (isClassifier()) {
if (auc != null) {
AUCData aucd = auc.data();
//override the CM with the one computed by AUC (using optimal threshold)
//Note: must still call invoke above to set the domains etc.
cm.cm = new long[3][3]; // 1 extra layer for NaNs (not populated here, since AUC skips them)
cm.cm[0][0] = aucd.cm()[0][0];
cm.cm[1][0] = aucd.cm()[1][0];
cm.cm[0][1] = aucd.cm()[0][1];
cm.cm[1][1] = aucd.cm()[1][1];
double cm_err = new hex.ConfusionMatrix(cm.cm).err();
double auc_err = aucd.err();
if (! (Double.isNaN(cm_err) && Double.isNaN(auc_err))) // NOTE: NaN != NaN
assert(cm_err == auc_err); //check consistency with AUC-computed error
} else {
error = new hex.ConfusionMatrix(cm.cm).err(); //only set error if AUC didn't already set the error
}
if (cm.cm.length <= max_conf_mat_size+1) cm.toASCII(sb);
} else {
assert(auc == null);
error = cm.mse;
cm.toASCII(sb);
}
}
// populate HitRatio
if (hr != null) {
assert(isClassifier());
hr.actual = ftest;
hr.vactual = vactual;
hr.predict = hitratio_fpreds;
hr.invoke();
hr.toASCII(sb);
}
if (printMe && sb.length() > 0) {
Log.info("Scoring on " + label + " data:");
for (String s : sb.toString().split("\n")) Log.info(s);
}
return error;
}
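  // Illustrative sketch (not part of the original source): invoking calcError for
  // a binomial classifier, mirroring the call in scoreCrossValidation() below.
  // `model`, `test`, `response` (the actual-response Vec) and `preds` are hypothetical.
  //
  //   AUC auc = new AUC();
  //   water.api.ConfusionMatrix cm = new water.api.ConfusionMatrix();
  //   HitRatio hr = new HitRatio();
  //   double err = model.calcError(test, response, preds, preds,
  //                                "test", true, 10, cm, auc, hr);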
/** Subclasses implement the scoring logic. The data is pre-loaded into a
* re-used temp array, in the order the model expects. The predictions are
* loaded into the re-used temp array, which is also returned. */
protected abstract float[] score0(double data[/*ncols*/], float preds[/*nclasses+1*/]);
// Version where the user has just ponied-up an array of data to be scored.
// Data must be in proper order. Handy for JUnit tests.
public double score(double [] data){ return Utils.maxIndex(score0(data,new float[nclasses()])); }
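  // Illustrative sketch (not part of the original source): single-row scoring in
  // a unit test, assuming the feature values are already in model column order.
  //
  //   double[] row = { 5.1, 3.5, 1.4, 0.2 };  // hypothetical feature values
  //   double cls = model.score(row);          // index of the largest scored value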
  /** Debug flag to generate benchmark code */
protected static final boolean GEN_BENCHMARK_CODE = false;
/** Return a String which is a valid Java program representing a class that
* implements the Model. The Java is of the form:
* <pre>
* class UUIDxxxxModel {
* public static final String NAMES[] = { ....column names... }
* public static final String DOMAINS[][] = { ....domain names... }
* // Pass in data in a double[], pre-aligned to the Model's requirements.
* // Jam predictions into the preds[] array; preds[0] is reserved for the
* // main prediction (class for classifiers or value for regression),
* // and remaining columns hold a probability distribution for classifiers.
* float[] predict( double data[], float preds[] );
* double[] map( HashMap < String,Double > row, double data[] );
* // Does the mapping lookup for every row, no allocation
* float[] predict( HashMap < String,Double > row, double data[], float preds[] );
* // Allocates a double[] for every row
* float[] predict( HashMap < String,Double > row, float preds[] );
* // Allocates a double[] and a float[] for every row
* float[] predict( HashMap < String,Double > row );
* }
* </pre>
*/
public String toJava() { return toJava(new SB()).toString(); }
public SB toJava( SB sb ) {
SB fileContextSB = new SB(); // preserve file context
String modelName = JCodeGen.toJavaId(_key.toString());
// HEADER
sb.p("import java.util.Map;").nl();
sb.p("import water.genmodel.GenUtils.*;").nl().nl();
sb.p("// AUTOGENERATED BY H2O at ").p(new Date().toString()).nl();
sb.p("// ").p(H2O.getBuildVersion().toString()).nl();
sb.p("//").nl();
sb.p("// Standalone prediction code with sample test data for ").p(this.getClass().getSimpleName()).p(" named ").p(modelName).nl();
sb.p("//").nl();
sb.p("// How to download, compile and execute:").nl();
sb.p("// mkdir tmpdir").nl();
sb.p("// cd tmpdir").nl();
sb.p("// curl http:/").p(H2O.SELF.toString()).p("/h2o-model.jar > h2o-model.jar").nl();
sb.p("// curl http:/").p(H2O.SELF.toString()).p("/2/").p(this.getClass().getSimpleName()).p("View.java?_modelKey=").pobj(_key).p(" > ").p(modelName).p(".java").nl();
sb.p("// javac -cp h2o-model.jar -J-Xmx2g -J-XX:MaxPermSize=128m ").p(modelName).p(".java").nl();
if (GEN_BENCHMARK_CODE)
sb.p("// java -cp h2o-model.jar:. -Xmx2g -XX:MaxPermSize=256m -XX:ReservedCodeCacheSize=256m ").p(modelName).nl();
sb.p("//").nl();
sb.p("// (Note: Try java argument -XX:+PrintCompilation to show runtime JIT compiler behavior.)").nl();
sb.nl();
sb.p("public class ").p(modelName).p(" extends water.genmodel.GeneratedModel {").nl(); // or extends GenerateModel
toJavaInit(sb, fileContextSB).nl();
toJavaNAMES(sb);
toJavaNCLASSES(sb);
toJavaDOMAINS(sb, fileContextSB);
toJavaPROB(sb);
toJavaSuper(sb); //
toJavaPredict(sb, fileContextSB);
sb.p("}").nl();
sb.p(fileContextSB).nl(); // Append file
return sb;
}
// Same thing as toJava, but as a Javassist CtClass
private CtClass makeCtClass() throws CannotCompileException {
CtClass clz = ClassPool.getDefault().makeClass(JCodeGen.toJavaId(_key.toString()));
clz.addField(CtField.make(toJavaNAMES (new SB()).toString(),clz));
clz.addField(CtField.make(toJavaNCLASSES(new SB()).toString(),clz));
toJavaInit(clz); // Model-specific top-level goodness
clz.addMethod(CtMethod.make(toJavaPredict(new SB(), new SB()).toString(),clz)); // FIX ME
return clz;
}
/** Generate implementation for super class. */
protected SB toJavaSuper( SB sb ) {
sb.nl();
sb.ii(1);
sb.i().p("public String[] getNames() { return NAMES; } ").nl();
sb.i().p("public String[][] getDomainValues() { return DOMAINS; }").nl();
return sb;
}
private SB toJavaNAMES( SB sb ) { return JCodeGen.toStaticVar(sb, "NAMES", _names, "Names of columns used by model."); }
private SB toJavaNCLASSES( SB sb ) { return isClassifier() ? JCodeGen.toStaticVar(sb, "NCLASSES", nclasses(), "Number of output classes included in training data response column.") : sb; }
private SB toJavaDOMAINS( SB sb, SB fileContextSB ) {
sb.nl();
sb.ii(1);
sb.i().p("// Column domains. The last array contains domain of response column.").nl();
sb.i().p("public static final String[][] DOMAINS = new String[][] {").nl();
for (int i=0; i<_domains.length; i++) {
String[] dom = _domains[i];
String colInfoClazz = "ColInfo_"+i;
sb.i(1).p("/* ").p(_names[i]).p(" */ ");
sb.p(colInfoClazz).p(".VALUES");
if (i!=_domains.length-1) sb.p(',');
sb.nl();
fileContextSB.i().p("// The class representing column ").p(_names[i]).nl();
JCodeGen.toClassWithArray(fileContextSB, null, colInfoClazz, dom);
}
return sb.i().p("};").nl();
}
private SB toJavaPROB( SB sb) {
sb.di(1);
toStaticVar(sb, "PRIOR_CLASS_DISTRIB", _priorClassDist, "Prior class distribution");
toStaticVar(sb, "MODEL_CLASS_DISTRIB", _modelClassDist, "Class distribution used for model building");
return sb;
}
// Override in subclasses to provide some top-level model-specific goodness
protected SB toJavaInit(SB sb, SB fileContextSB) { return sb; }
protected void toJavaInit(CtClass ct) { }
// Override in subclasses to provide some inside 'predict' call goodness
// Method returns code which should be appended into generated top level class after
// predict method.
protected void toJavaPredictBody(SB bodySb, SB classCtxSb, SB fileCtxSb) {
throw new IllegalArgumentException("This model type does not support conversion to Java");
}
// Wrapper around the main predict call, including the signature and return value
private SB toJavaPredict(SB ccsb, SB fileCtxSb) { // ccsb = classContext
ccsb.nl();
ccsb.p(" // Pass in data in a double[], pre-aligned to the Model's requirements.").nl();
ccsb.p(" // Jam predictions into the preds[] array; preds[0] is reserved for the").nl();
ccsb.p(" // main prediction (class for classifiers or value for regression),").nl();
ccsb.p(" // and remaining columns hold a probability distribution for classifiers.").nl();
ccsb.p(" public final float[] predict( double[] data, float[] preds) { preds = predict( data, preds, "+toJavaDefaultMaxIters()+"); return preds; }").nl();
// ccsb.p(" public final float[] predict( double[] data, float[] preds) { return predict( data, preds, "+toJavaDefaultMaxIters()+"); }").nl();
ccsb.p(" public final float[] predict( double[] data, float[] preds, int maxIters ) {").nl();
SB classCtxSb = new SB();
toJavaPredictBody(ccsb.ii(1), classCtxSb, fileCtxSb); ccsb.di(1);
ccsb.p(" return preds;").nl();
ccsb.p(" }").nl();
ccsb.p(classCtxSb);
return ccsb;
}
protected String toJavaDefaultMaxIters() { return "-1"; }
// Convenience method for testing: build Java, convert it to a class &
// execute it: compare the results of the new class's (JIT'd) scoring with
// the built-in (interpreted) scoring on this dataset. Throws if there
// is any error (typically an AssertionError).
public void testJavaScoring( Frame fr ) {
try {
//System.out.println(toJava());
Class clz = ClassPool.getDefault().toClass(makeCtClass());
Object modelo = clz.newInstance();
}
catch( CannotCompileException cce ) { throw new Error(cce); }
catch( InstantiationException cce ) { throw new Error(cce); }
catch( IllegalAccessException cce ) { throw new Error(cce); }
}
  /** Generates code which unifies preds[1..NCLASSES] */
protected void toJavaUnifyPreds(SB bodySb) {
}
/** Fill preds[0] based on already filled and unified preds[1,..NCLASSES]. */
protected void toJavaFillPreds0(SB bodySb) {
// Pick max index as a prediction
if (isClassifier()) {
if (_priorClassDist!=null && _modelClassDist!=null) {
bodySb.i().p("water.util.ModelUtils.correctProbabilities(preds, PRIOR_CLASS_DISTRIB, MODEL_CLASS_DISTRIB);").nl();
}
bodySb.i().p("preds[0] = water.util.ModelUtils.getPrediction(preds,data);").nl();
} else {
bodySb.i().p("preds[0] = preds[1];").nl();
}
}
/**
* Compute the cross validation error from an array of predictions for N folds.
* Also stores the results in the model for display/query.
* @param source Full training data
* @param response Full response
* @param cv_preds N Frames containing predictions made by N-fold CV runs on disjoint contiguous holdout pieces of the training data
* @param offsets Starting row numbers for the N CV pieces (length = N+1, first element: 0, last element: #rows)
*/
public final void scoreCrossValidation(Job.ValidatedJob job, Frame source, Vec response, Frame[] cv_preds, long[] offsets) {
assert(offsets[0] == 0);
assert(offsets[offsets.length-1] == source.numRows());
//Hack to make a frame with the correct dimensions and vector group
Frame cv_pred = score(source);
// Stitch together the content of cv_pred from cv_preds
for (int i=0; i<cv_preds.length; ++i) {
// stitch probabilities (or regression values)
for (int c=(isClassifier() ? 1 : 0); c<cv_preds[i].numCols(); ++c) {
Vec.Writer vw = cv_pred.vec(c).open();
try {
for (long r=0; r < cv_preds[i].numRows(); ++r) {
vw.set(offsets[i] + r, cv_preds[i].vec(c).at(r));
}
} finally {
vw.close();
}
}
if (isClassifier()) {
// make labels
float[] probs = new float[cv_preds[i].numCols()];
Vec.Writer vw = cv_pred.vec(0).open();
try {
for (long r = 0; r < cv_preds[i].numRows(); ++r) {
//probs[0] stays 0, is not used in getPrediction
for (int c = 1; c < cv_preds[i].numCols(); ++c) {
probs[c] = (float) cv_preds[i].vec(c).at(r);
}
final int label = ModelUtils.getPrediction(probs, (int)r);
vw.set(offsets[i] + r, label);
}
} finally {
vw.close();
}
}
}
// Now score the model on the N folds
try {
AUC auc = nclasses() == 2 ? new AUC() : null;
water.api.ConfusionMatrix cm = new water.api.ConfusionMatrix();
HitRatio hr = isClassifier() ? new HitRatio() : null;
double cv_error = calcError(source, response, cv_pred, cv_pred, "cross-validated", true, 10, cm, auc, hr);
setCrossValidationError(job, cv_error, cm, auc == null ? null : auc.data(), hr);
} finally {
      // cleanup temporary frame with predictions
cv_pred.delete();
}
}
protected void setCrossValidationError(Job.ValidatedJob job, double cv_error, water.api.ConfusionMatrix cm, AUCData auc, HitRatio hr) { throw H2O.unimpl(); }
protected void printCrossValidationModelsHTML(StringBuilder sb) {
if (job() == null) return;
Job.ValidatedJob job = (Job.ValidatedJob)job();
if (job.xval_models != null && job.xval_models.length > 0) {
sb.append("<h4>Cross Validation Models</h4>");
sb.append("<table class='table table-bordered table-condensed'>");
sb.append("<tr><th>Model</th></tr>");
for (Key k : job.xval_models) {
Model m = UKV.get(k);
Job j = m != null ? (Job)m.job() : null;
sb.append("<tr>");
sb.append("<td>" + (m != null ? Inspector.link(k.toString(), k.toString()) : "Pending") + (j != null ? ", Progress: " + Utils.formatPct(j.progress()) : "") + "</td>");
sb.append("</tr>");
}
sb.append("</table>");
}
}
/** Helper type for serialization */
protected static class ModelAutobufferSerializer extends AutoBufferSerializer<Model> { }
/** Returns a model serializer into AutoBuffer. */
public AutoBufferSerializer<Model> getModelSerializer() {
return new ModelAutobufferSerializer();
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/ModelMetrics.java
|
package water;
import dontweave.gson.JsonObject;
import dontweave.gson.JsonParser;
import water.Model.ModelCategory;
import water.api.AUCData;
import water.api.ConfusionMatrix;
import water.api.DocGen;
import water.api.Request.API;
import water.api.Request.Default;
import water.fvec.Frame;
import water.util.Log;
/**
* Container to hold the metric for a model as scored on a specific frame.
*/
public final class ModelMetrics extends Iced {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
@API(help="The unique ID (key / uuid / creation timestamp) for the model used for this scoring run.", required=false, filter=Default.class, json=true)
public UniqueId model = null;
@API(help="The category (e.g., Clustering) for the model used for this scoring run.", required=false, filter=Default.class, json=true)
public Model.ModelCategory model_category = null;
@API(help="The unique ID (key / uuid / creation timestamp) for the frame used for this scoring run.", required=false, filter=Default.class, json=true)
public UniqueId frame = null;
@API(help="The duration in mS for this scoring run.", required=false, filter=Default.class, json=true)
public long duration_in_ms =-1L;
@API(help="The time in mS since the epoch for the start of this scoring run.", required=false, filter=Default.class, json=true)
public long scoring_time = -1L;
@API(help="The AUC object for this scoring run.", required=false, filter=Default.class, json=true)
public AUCData auc = null;
@API(help="The ConfusionMatrix object for this scoring run.", required=false, filter=Default.class, json=true)
public ConfusionMatrix cm = null;
public ModelMetrics(UniqueId model, ModelCategory model_category, UniqueId frame, long duration_in_ms, long scoring_time, AUCData auc, ConfusionMatrix cm) {
this.model = model;
this.model_category = model_category;
this.frame = frame;
this.duration_in_ms = duration_in_ms;
this.scoring_time = scoring_time;
this.auc = auc;
this.cm = cm;
}
public static Key buildKey(Model model, Frame frame) {
return Key.makeSystem("modelmetrics_" + model.getUniqueId().getId() + "_on_" + frame.getUniqueId().getId());
}
public static Key buildKey(UniqueId model, UniqueId frame) {
return Key.makeSystem("modelmetrics_" + model.getId() + "_on_" + frame.getId());
}
public Key buildKey() {
return Key.makeSystem("modelmetrics_" + this.model.getId() + "_on_" + this.frame.getId());
}
public void putInDKV() {
Key metricsKey = this.buildKey();
Log.debug("Putting ModelMetrics: " + metricsKey.toString());
DKV.put(metricsKey, this);
}
public static ModelMetrics getFromDKV(Model model, Frame frame) {
Key metricsKey = buildKey(model, frame);
Log.debug("Getting ModelMetrics: " + metricsKey.toString());
Value v = DKV.get(metricsKey);
if (null == v)
return null;
return (ModelMetrics)v.get();
}
public static ModelMetrics getFromDKV(UniqueId model, UniqueId frame) {
Key metricsKey = buildKey(model, frame);
Log.debug("Getting ModelMetrics: " + metricsKey.toString());
Value v = DKV.get(metricsKey);
if (null == v)
return null;
return (ModelMetrics)v.get();
}
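  // Illustrative sketch (not part of the original source): storing metrics and
  // reading them back via the key scheme above. `model`, `frame`, `durationMs`,
  // `startMs`, `aucData` and `cm` are hypothetical, already-computed values.
  //
  //   ModelMetrics mm = new ModelMetrics(model.getUniqueId(), model.getModelCategory(),
  //                                      frame.getUniqueId(), durationMs, startMs,
  //                                      aucData, cm);
  //   mm.putInDKV();                                              // keyed by model id + frame id
  //   ModelMetrics back = ModelMetrics.getFromDKV(model, frame);  // same key, same metrics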
public JsonObject toJSON() {
final String json = new String(writeJSON(new AutoBuffer()).buf());
if (json.length() == 0) return new JsonObject();
JsonObject jo = (JsonObject)new JsonParser().parse(json);
if (jo.has("model"))
jo.getAsJsonObject("model").addProperty("model_category", this.model_category.toString());
return jo;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/MultiReceiverThread.java
|
package water;
import java.net.*;
import water.util.Log;
/**
* The Thread that looks for Multicast UDP Cloud requests.
*
* This thread just spins on reading multicast UDP packets from the kernel and
* either dispatching on them directly itself (if the request is known short)
 * or queuing them up for worker threads. Multicast *Channels* are available in
 * Java 7, but we are writing to Java 6 JDKs, so back to the old-school
* MulticastSocket.
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class MultiReceiverThread extends Thread {
public MultiReceiverThread() { super("Multi-UDP-R"); }
// The Run Method.
// ---
// Started by main() on a single thread, this code manages reading UDP packets
@SuppressWarnings("resource")
public void run() {
// No multicast? Then do not bother with listening for them
if( H2O.STATIC_H2OS != null ) return;
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
MulticastSocket sock = null, errsock = null;
InetAddress group = null, errgroup = null;
boolean saw_error = false;
// Loop forever accepting Cloud Management requests
while( true ) {
try {
// ---
// Cleanup from any prior socket failures. Rare unless we're really sick.
if( errsock != null && errgroup != null ) { // socket error AND group present
final InetAddress tmp = errgroup; errgroup = null;
errsock.leaveGroup(tmp); // Could throw, but errgroup cleared for next pass
}
if( errsock != null ) { // One time attempt a socket close
final MulticastSocket tmp2 = errsock; errsock = null;
tmp2.close(); // Could throw, but errsock cleared for next pass
}
        if( saw_error ) Thread.sleep(1000); // prevent denial-of-service via endless socket creation
saw_error = false;
// ---
// Actually do the common-case setup of Inet multicast group
if( group == null ) group = H2O.CLOUD_MULTICAST_GROUP;
// More common-case setup of a MultiCast socket
if( sock == null ) {
sock = new MulticastSocket(H2O.CLOUD_MULTICAST_PORT);
if( H2O.CLOUD_MULTICAST_IF != null )
sock.setNetworkInterface(H2O.CLOUD_MULTICAST_IF);
sock.joinGroup(group);
}
// Receive a packet & handle it
byte[] buf = new byte[AutoBuffer.MTU];
DatagramPacket pack = new DatagramPacket(buf,buf.length);
sock.receive(pack);
UDPReceiverThread.basic_packet_handling(new AutoBuffer(pack));
} catch( Exception e ) {
// On any error from anybody, close all sockets & re-open
Log.err("Multicast "+H2O.CLOUD_MULTICAST_GROUP+":"+H2O.CLOUD_MULTICAST_PORT, e);
saw_error = true;
errsock = sock ; sock = null; // Signal error recovery on the next loop
errgroup = group; group = null;
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/NOPTask.java
|
package water;
import water.DTask;
public class NOPTask extends DTask<NOPTask> {
public void compute2() { throw H2O.unimpl(); }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/NanoHTTPD.java
|
package water;
import water.fvec.UploadFileVec;
import water.util.Log;
import water.util.Log.Tag.Sys;
import water.util.Utils;
import java.io.*;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketException;
import java.net.URLEncoder;
import java.util.*;
import java.util.regex.Pattern;
/**
* A simple, tiny, nicely embeddable HTTP 1.0 (partially 1.1) server in Java
*
* <p> NanoHTTPD version 1.25,
* Copyright © 2001,2005-2012 Jarno Elonen (elonen@iki.fi, http://iki.fi/elonen/)
* and Copyright © 2010 Konstantinos Togias (info@ktogias.gr, http://ktogias.gr)
*
* <p><b>Features + limitations: </b><ul>
*
* <li> Only one Java file </li>
* <li> Java 1.1 compatible </li>
* <li> Released as open source, Modified BSD licence </li>
* <li> No fixed config files, logging, authorization etc. (Implement yourself if you need them.) </li>
* <li> Supports parameter parsing of GET and POST methods (+ rudimentary PUT support in 1.25) </li>
* <li> Supports both dynamic content and file serving </li>
* <li> Supports file upload (since version 1.2, 2010) </li>
* <li> Supports partial content (streaming)</li>
* <li> Supports ETags</li>
* <li> Never caches anything </li>
* <li> Doesn't limit bandwidth, request time or simultaneous connections </li>
* <li> Default code serves files and shows all HTTP parameters and headers</li>
* <li> File server supports directory listing, index.html and index.htm</li>
* <li> File server supports partial content (streaming)</li>
* <li> File server supports ETags</li>
* <li> File server does the 301 redirection trick for directories without '/'</li>
* <li> File server supports simple skipping for files (continue download) </li>
* <li> File server serves also very long files without memory overhead </li>
* <li> Contains a built-in list of most common mime types </li>
* <li> All header names are converted lowercase so they don't vary between browsers/clients </li>
*
* </ul>
*
* <p><b>Ways to use: </b><ul>
*
* <li> Run as a standalone app, serves files and shows requests</li>
* <li> Subclass serve() and embed to your own program </li>
* <li> Call serveFile() from serve() with your own base directory </li>
*
* </ul>
*
* See the end of the source file for distribution license
* (Modified BSD licence)
*/
public class NanoHTTPD
{
// ==================================================
// API parts
// ==================================================
/**
* Override this to customize the server.<p>
*
* (By default, this delegates to serveFile() and allows directory listing.)
*
* @param uri Percent-decoded URI without parameters, for example "/index.cgi"
* @param method "GET", "POST" etc.
* @param parms Parsed, percent decoded parameters from URI and, in case of POST, data.
* @param header Header entries, percent decoded
* @return HTTP response, see class Response for details
*/
public Response serve( String uri, String method, Properties header, Properties parms )
{
myOut.println( method + " '" + uri + "' " );
Enumeration e = header.propertyNames();
while ( e.hasMoreElements())
{
String value = (String)e.nextElement();
myOut.println( " HDR: '" + value + "' = '" +
header.getProperty( value ) + "'" );
}
e = parms.propertyNames();
while ( e.hasMoreElements())
{
String value = (String)e.nextElement();
myOut.println( " PRM: '" + value + "' = '" +
parms.getProperty( value ) + "'" );
}
return serveFile( uri, header, myRootDir, true );
}
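  // Illustrative sketch (not part of the original source): embedding the server
  // by subclassing and overriding serve(), per "Ways to use" in the class comment.
  // The port and response text are arbitrary example values; the constructor
  // throws IOException, so run this inside a method that handles it.
  //
  //   NanoHTTPD httpd = new NanoHTTPD( new ServerSocket(54321), new File(".") ) {
  //     @Override public Response serve( String uri, String method,
  //                                      Properties header, Properties parms ) {
  //       return new Response( HTTP_OK, MIME_PLAINTEXT, "Hello from " + uri );
  //     }
  //   };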
/**
* HTTP response.
* Return one of these from serve().
*/
public class Response
{
/**
* Default constructor: response = HTTP_OK, data = mime = 'null'
*/
public Response()
{
this.status = HTTP_OK;
}
/**
* Basic constructor.
*/
public Response( String status, String mimeType, InputStream data )
{
this.status = status;
this.mimeType = mimeType;
this.data = data;
}
/**
* Convenience method that makes an InputStream out of
* given text.
*/
public Response( String status, String mimeType, String txt )
{
this.status = status;
this.mimeType = mimeType;
try
{
this.data = new ByteArrayInputStream( txt.getBytes("UTF-8"));
}
catch ( java.io.UnsupportedEncodingException e ) { Log.err(e); }
}
/**
* Adds given line to the header.
*/
public void addHeader( String name, String value )
{
header.put( name, value );
}
/**
* HTTP status code after processing, e.g. "200 OK", HTTP_OK
*/
public String status;
/**
* MIME type of content, e.g. "text/html"
*/
public String mimeType;
/**
* Data of the response, may be null.
*/
public InputStream data;
/**
* Headers for the HTTP response. Use addHeader()
* to add lines.
*/
public Properties header = new Properties();
}
/**
* Some HTTP response status codes
*/
public static final String
HTTP_OK = "200 OK",
HTTP_PARTIALCONTENT = "206 Partial Content",
HTTP_RANGE_NOT_SATISFIABLE = "416 Requested Range Not Satisfiable",
HTTP_REDIRECT = "301 Moved Permanently",
HTTP_NOTMODIFIED = "304 Not Modified",
HTTP_FORBIDDEN = "403 Forbidden",
HTTP_UNAUTHORIZED = "401 Unauthorized",
HTTP_NOTFOUND = "404 Not Found",
HTTP_BADREQUEST = "400 Bad Request",
HTTP_TOOLONGREQUEST = "414 Request-URI Too Long",
HTTP_INTERNALERROR = "500 Internal Server Error",
HTTP_NOTIMPLEMENTED = "501 Not Implemented";
/**
* Common mime types for dynamic content
*/
public static final String
MIME_PLAINTEXT = "text/plain",
MIME_HTML = "text/html",
MIME_JSON = "application/json",
MIME_DEFAULT_BINARY = "application/octet-stream",
MIME_XML = "text/xml";
// ==================================================
// Socket & server code
// ==================================================
/**
   * Starts an HTTP server on the given port.<p>
* Throws an IOException if the socket is already in use
*/
public NanoHTTPD( ServerSocket socket, File wwwroot ) throws IOException {
myRootDir = wwwroot;
myServerSocket = socket;
myServerSocket.setReuseAddress(true);
myThread = new Thread(new Runnable() {
public void run() {
try {
while( true )
new HTTPSession( myServerSocket.accept());
} catch ( IOException e ) { }
}
}, "NanoHTTPD Thread");
myThread.setDaemon( true );
myThread.start();
}
/**
* Stops the server.
*/
public void stop() {
try {
myServerSocket.close();
myThread.join();
} catch ( IOException e ) {
} catch ( InterruptedException e ) { }
}
/**
* Starts as a standalone file server and waits for Enter.
*/
public static void main( String[] args ) {
myOut.println( "NanoHTTPD 1.25 (C) 2001,2005-2011 Jarno Elonen and (C) 2010 Konstantinos Togias\n" +
"(Command line options: [-p port] [-d root-dir] [--licence])\n" );
// Defaults
int port = 80;
File wwwroot = new File(".").getAbsoluteFile();
    // Parse command-line options; show licence if requested
for ( int i=0; i<args.length; ++i )
if(args[i].equalsIgnoreCase("-p"))
port = Integer.parseInt( args[i+1] );
else if(args[i].equalsIgnoreCase("-d"))
wwwroot = new File( args[i+1] ).getAbsoluteFile();
else if ( args[i].toLowerCase().endsWith( "licence" ))
{
myOut.println( LICENCE + "\n" );
break;
}
try {
new NanoHTTPD( new ServerSocket(port), wwwroot );
} catch( IOException ioe ) {
Log.err(Sys.HTTPD, "Couldn't start server:\n", ioe );
H2O.exit( -1 );
}
myOut.println( "Now serving files in port " + port + " from \"" + wwwroot + "\"" );
myOut.println( "Hit Enter to stop.\n" );
try { System.in.read(); } catch( Throwable t ) { Log.err(t); }
}
/**
* Handles one session, i.e. parses the HTTP request
* and returns the response.
*/
private class HTTPSession implements Runnable {
public HTTPSession( Socket s ) {
mySocket = s;
Thread t = new Thread( this, "NanoHTTPD Session" );
t.setDaemon( true );
t.setPriority(Thread.MAX_PRIORITY-1);
t.start();
}
    /** Maximal supported header size. */
static final int MAX_HEADER_BUFFER_SIZE = 1 << 16; // 64k
public void run() {
try {
long startMillis = System.currentTimeMillis();
InputStream is = new BufferedInputStream(mySocket.getInputStream());
is.mark(MAX_HEADER_BUFFER_SIZE);
// Read the first 8192 bytes.
// The full header should fit in here.
// Apache's default header limit is 8KB.
int bufsize = 8192;
byte[] buf = new byte[bufsize];
boolean nl = false; // Saw a nl
int rlen=0;
while( rlen < MAX_HEADER_BUFFER_SIZE ) {
int b = is.read();
if( b == -1 ) return;
buf[rlen++] = (byte)b;
if( b == '\n' ) {
if( nl == true ) break; // 2nd nl in a row ==> done with header
nl = true;
} else if( b != '\r' ) nl = false;
if (rlen == buf.length) buf = Arrays.copyOf(buf, 2*buf.length);
}
if (rlen == MAX_HEADER_BUFFER_SIZE)
sendError(HTTP_TOOLONGREQUEST, "Requested URL is too long!");
// Create a BufferedReader for parsing the header.
ByteArrayInputStream hbis = new ByteArrayInputStream(buf, 0, rlen);
BufferedReader hin = new BufferedReader( new InputStreamReader( hbis ));
Properties pre = new Properties();
Properties parms = new Properties();
Properties header = new Properties();
// Decode the header into parms and header java properties
decodeHeader(hin, pre, parms, header);
String method = pre.getProperty("method");
String uri = pre.getProperty("uri");
long size = 0x7FFFFFFFFFFFFFFFl;
String contentLength = header.getProperty("content-length");
if (contentLength != null) {
try { size = Integer.parseInt(contentLength); }
catch (NumberFormatException ex) {}
}
// We are looking for the byte separating header from body.
// It must be the last byte of the first two sequential new lines.
int splitbyte = 0;
boolean sbfound = false;
while (splitbyte < rlen) {
if (buf[splitbyte] == '\r' && buf[++splitbyte] == '\n' && buf[++splitbyte] == '\r' && buf[++splitbyte] == '\n') {
sbfound = true;
break;
}
splitbyte++;
}
splitbyte++;
is.reset();
is.skip(splitbyte);
        // While Firefox sends on the first read all the data fitting
        // our buffer, Chrome and Opera send only the headers even if
        // there is data for the body. So we do some magic here to find
        // out whether we have already consumed part of the body, whether we
        // have reached the end of the data to be sent, or whether we should
        // expect the first byte of the body at the next read.
if (splitbyte < rlen)
size -= rlen - splitbyte +1;
else if (!sbfound || size == 0x7FFFFFFFFFFFFFFFl)
size = 0;
// If the method is POST, there may be parameters
// in data section, too, read it:
BufferedReader in = new BufferedReader( new InputStreamReader(is));
if ( method.equalsIgnoreCase( "POST" ))
{
String contentType = "";
String contentTypeHeader = header.getProperty("content-type");
if (contentTypeHeader == null)
contentTypeHeader = "";
StringTokenizer st = new StringTokenizer( contentTypeHeader , "; " );
if ( st.hasMoreTokens()) {
contentType = st.nextToken();
}
if (contentType.equalsIgnoreCase("multipart/form-data"))
{
// Handle multipart/form-data
if ( !st.hasMoreTokens())
sendError( HTTP_BADREQUEST, "BAD REQUEST: Content type is multipart/form-data but boundary missing. Usage: GET /example/file.html" );
String boundaryExp = st.nextToken();
st = new StringTokenizer( boundaryExp , "=" );
if (st.countTokens() != 2)
sendError( HTTP_BADREQUEST, "BAD REQUEST: Content type is multipart/form-data but boundary syntax error. Usage: GET /example/file.html" );
st.nextToken();
String boundary = st.nextToken();
fileUpload(boundary,is,parms);
} else {
// Handle application/x-www-form-urlencoded
String postLine = "";
if (size >= 0) {
//
// content-length is specified. Use it.
//
char pbuf[] = new char[4096];
long bytesRead = 0;
long bytesToRead = size;
StringBuffer sb = new StringBuffer();
while (bytesRead < bytesToRead) {
int n = in.read(pbuf);
if (n < 0) {
break;
}
else if (n == 0) {
                // this is supposed to be blocking, so we don't know what this means,
                // but it isn't good.
assert(false);
break;
}
bytesRead += n;
sb.append(pbuf, 0, n);
}
postLine = sb.toString();
}
else {
//
// The original path for x-www-form-urlencoded.
// Don't have content-length. Look for \r\n to stop the input.
//
char pbuf[] = new char[512];
int read = in.read(pbuf);
while ( read >= 0 && !postLine.endsWith("\r\n") )
{
postLine += String.valueOf(pbuf, 0, read);
read = in.read(pbuf);
}
postLine = postLine.trim();
}
decodeParms( postLine, parms );
}
}
// Ok, now do the serve()
Response r = serve( uri, method, header, parms );
if ( r == null )
sendError( HTTP_INTERNALERROR, "SERVER INTERNAL ERROR: Serve() returned a null response." );
else
sendResponse( startMillis, r.status, r.mimeType, r.header, r.data );
in.close();
is.close();
} catch ( IOException ioe ) {
try {
sendError( HTTP_INTERNALERROR, "SERVER INTERNAL ERROR: IOException: " + ioe.getMessage());
} catch ( Throwable t ) { Log.err(t); }
} catch ( InterruptedException e ) {
// Thrown by sendError, ignore and exit the thread.
} finally {
Utils.close(mySocket);
}
}
/**
* Decodes the sent headers and loads the data into
* java Properties' key - value pairs
**/
private void decodeHeader(BufferedReader in, Properties pre, Properties parms, Properties header)
throws InterruptedException
{
try {
// Read the request line
String inLine = in.readLine();
if (inLine == null) return;
StringTokenizer st = new StringTokenizer( inLine );
if ( !st.hasMoreTokens())
sendError( HTTP_BADREQUEST, "BAD REQUEST: Syntax error. Usage: GET /example/file.html" );
String method = st.nextToken();
pre.put("method", method);
if ( !st.hasMoreTokens())
sendError( HTTP_BADREQUEST, "BAD REQUEST: Missing URI. Usage: GET /example/file.html" );
String uri = st.nextToken();
// Decode parameters from the URI
int qmi = uri.indexOf( '?' );
if ( qmi >= 0 )
{
decodeParms( uri.substring( qmi+1 ), parms );
uri = decodePercent( uri.substring( 0, qmi ));
}
else uri = decodePercent(uri);
// If there's another token, it's protocol version,
// followed by HTTP headers. Ignore version but parse headers.
// NOTE: this now forces header names lowercase since they are
// case insensitive and vary by client.
if ( st.hasMoreTokens())
{
String line = in.readLine();
while ( line != null && line.trim().length() > 0 )
{
int p = line.indexOf( ':' );
if ( p >= 0 )
header.put( line.substring(0,p).trim().toLowerCase(), line.substring(p+1).trim());
line = in.readLine();
}
}
pre.put("uri", uri);
} catch ( IOException ioe ) {
sendError( HTTP_INTERNALERROR, "SERVER INTERNAL ERROR: IOException: " + ioe.getMessage());
}
}
public String readLine(InputStream in) throws IOException {
StringBuilder sb = new StringBuilder();
byte[] mem = new byte[1024];
while (true) {
int sz = readBufOrLine(in,mem);
sb.append(new String(mem,0,sz));
if (sz < mem.length)
break;
if (mem[sz-1]=='\n')
break;
}
if (sb.length()==0)
return null;
String line = sb.toString();
if (line.endsWith("\r\n"))
line = line.substring(0,line.length()-2);
else if (line.endsWith("\n"))
line = line.substring(0,line.length()-1);
return line;
}
private int readBufOrLine(InputStream in, byte[] mem) throws IOException {
byte[] bb = new byte[1];
int sz = 0;
while (true) {
byte b;
byte b2;
if (sz==mem.length)
break;
try {
in.read(bb,0,1);
b = bb[0];
mem[sz++] = b;
} catch (EOFException e) {
break;
}
if (b == '\n')
break;
if (sz==mem.length)
break;
if (b == '\r') {
try {
in.read(bb,0,1);
b2 = bb[0];
mem[sz++] = b2;
} catch (EOFException e) {
break;
}
if (b2 == '\n')
break;
}
}
return sz;
}
private void fileUpload(String boundary, InputStream in, Properties parms) throws InterruptedException {
try {
String line = readLine(in);
int i = line.indexOf(boundary);
if (i!=2)
sendError( HTTP_BADREQUEST, "BAD REQUEST: Content type is multipart/form-data but next chunk does not start with boundary. Usage: GET /example/file.html" );
if (line.substring(i+boundary.length()).startsWith("--"))
return;
// read the header
Properties item = new Properties();
line = readLine(in);
while ((line != null) && (line.trim().length()>0)) {
int p = line.indexOf(':');
if (p != -1)
item.put( line.substring(0,p).trim().toLowerCase(), line.substring(p+1).trim());
line = readLine(in);
}
// analyze the header
if (line!=null) {
String contentDisposition = item.getProperty("content-disposition");
if (contentDisposition == null) {
sendError( HTTP_BADREQUEST, "BAD REQUEST: Content type is multipart/form-data but no content-disposition info found. Usage: GET /example/file.html" );
}
String key = parms.getProperty("key");
UploadFileVec.readPut(key, new InputStreamWrapper(in, boundary.getBytes()));
}
}
catch (Exception e) {
sendError( HTTP_INTERNALERROR, "SERVER INTERNAL ERROR: Exception: " + e.getMessage());
}
}
/**
* Decodes the percent encoding scheme. <br/>
* For example: "an+example%20string" -> "an example string"
*/
private String decodePercent( String str ) throws InterruptedException
{
try
{
StringBuffer sb = new StringBuffer();
for( int i=0; i<str.length(); i++ )
{
char c = str.charAt( i );
switch ( c )
{
case '+':
sb.append( ' ' );
break;
case '%':
sb.append((char)Integer.parseInt( str.substring(i+1,i+3), 16 ));
i += 2;
break;
default:
sb.append( c );
break;
}
}
return sb.toString();
}
catch( Exception e ) {
sendError( HTTP_BADREQUEST, "BAD REQUEST: Bad percent-encoding." );
return null;
}
}
/**
* Decodes parameters in percent-encoded URI-format
* ( e.g. "name=Jack%20Daniels&pass=Single%20Malt" ) and
* adds them to given Properties. NOTE: this doesn't support multiple
* identical keys due to the simplicity of Properties -- if you need multiples,
* you might want to replace the Properties with a Hashtable of Vectors or such.
*/
private void decodeParms( String parms, Properties p )
throws InterruptedException
{
if ( parms == null )
return;
StringTokenizer st = new StringTokenizer( parms, "&" );
while ( st.hasMoreTokens())
{
String e = st.nextToken();
int sep = e.indexOf( '=' );
if ( sep >= 0 ) {
String key = decodePercent( e.substring( 0, sep ) ).trim();
String value = decodePercent( e.substring( sep+1 ) );
String old = p.getProperty(key, null);
p.put(key, old == null ? value : (old+","+value));
}
}
}
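    // Illustrative example (not part of the original source):
    //   decodeParms( "name=Jack%20Daniels&pass=Single%20Malt&x=1&x=2", p );
    //   // leaves p with { name="Jack Daniels", pass="Single Malt", x="1,2" }
    //   // (the repeated key "x" is comma-joined, per the note above)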
/**
     * Returns an error message as an HTTP response and
* throws InterruptedException to stop further request processing.
*/
private void sendError( String status, String msg ) throws InterruptedException
{
String s = " HTTP_status: " + status;
Log.info_no_stdout(Sys.HTLOG, s);
sendResponse( status, MIME_PLAINTEXT, null, new ByteArrayInputStream( msg.getBytes()));
throw new InterruptedException();
}
private void sendResponse( long startMillis, String status, String mime, Properties header, InputStream data ) {
long deltaMillis = System.currentTimeMillis() - startMillis;
String s = " HTTP_status: " + status + ", millis: " + deltaMillis;
Log.info_no_stdout(Sys.HTLOG, s);
sendResponse(status, mime, header, data);
}
/**
* Sends given response to the socket.
*/
private void sendResponse( String status, String mime, Properties header, InputStream data )
{
try
{
if ( status == null )
throw new RuntimeException( "sendResponse(): Status can't be null." );
OutputStream out = mySocket.getOutputStream();
PrintWriter pw = new PrintWriter( out );
pw.print("HTTP/1.0 " + status + " \r\n");
if ( mime != null )
pw.print("Content-Type: " + mime + "\r\n");
if ( header == null || header.getProperty( "Date" ) == null )
pw.print( "Date: " + gmtFrmt.format( new Date()) + "\r\n");
if ( header != null )
{
Enumeration e = header.keys();
while ( e.hasMoreElements())
{
String key = (String)e.nextElement();
String value = header.getProperty( key );
pw.print( key + ": " + value + "\r\n");
}
}
pw.print("\r\n");
pw.flush();
if ( data != null )
{
int pending = data.available(); // This is to support partial sends, see serveFile()
byte[] buff = new byte[theBufferSize];
while (pending>0)
{
int read = data.read( buff, 0, ( (pending>theBufferSize) ? theBufferSize : pending ));
if (read <= 0) break;
try {
out.write(buff, 0, read);
} catch (SocketException ex) {
// don't print exceptions from NanoHTTPD
}
//pending -= read;
pending = data.available();
}
}
out.flush();
out.close();
if ( data != null )
data.close();
}
catch( IOException e ) {
Log.err(e);
// Couldn't write? No can do.
Utils.close(mySocket);
}
}
private Socket mySocket;
}
private static final class InputStreamWrapper extends InputStream {
static final byte[] BOUNDARY_PREFIX = { '\r', '\n', '-', '-' };
final InputStream _wrapped;
final byte[] _boundary;
final byte[] _lookAheadBuf;
int _lookAheadLen;
public InputStreamWrapper(InputStream is, byte[] boundary) {
_wrapped = is;
_boundary = Arrays.copyOf(BOUNDARY_PREFIX, BOUNDARY_PREFIX.length + boundary.length);
System.arraycopy(boundary, 0, _boundary, BOUNDARY_PREFIX.length, boundary.length);
_lookAheadBuf = new byte[_boundary.length];
_lookAheadLen = 0;
}
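// Note (added): the wrapper scans the wrapped stream for BOUNDARY_PREFIX
// ("\r\n--") followed by the multipart boundary and reports end-of-stream at
// the byte just before that marker, so the uploaded body excludes the trailing
// boundary line. For example, with boundary "XYZ" the scan target is the byte
// sequence '\r','\n','-','-','X','Y','Z'.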
@Override public void close() throws IOException { _wrapped.close(); }
@Override public int available() throws IOException { return _wrapped.available(); }
@Override public long skip(long n) throws IOException { return _wrapped.skip(n); }
@Override public void mark(int readlimit) { _wrapped.mark(readlimit); }
@Override public void reset() throws IOException { _wrapped.reset(); }
@Override public boolean markSupported() { return _wrapped.markSupported(); }
@Override public int read() throws IOException { throw new UnsupportedOperationException(); }
@Override public int read(byte[] b) throws IOException { return read(b, 0, b.length); }
@Override public int read(byte[] b, int off, int len) throws IOException {
if(_lookAheadLen == -1)
return -1;
int readLen = readInternal(b, off, len);
if (readLen != -1) {
int pos = findBoundary(b, off, readLen);
if (pos != -1) {
_lookAheadLen = -1;
return pos - off;
}
}
return readLen;
}
private int readInternal(byte b[], int off, int len) throws IOException {
if (len < _lookAheadLen ) {
System.arraycopy(_lookAheadBuf, 0, b, off, len);
_lookAheadLen -= len;
System.arraycopy(_lookAheadBuf, len, _lookAheadBuf, 0, _lookAheadLen);
return len;
}
if (_lookAheadLen > 0) {
System.arraycopy(_lookAheadBuf, 0, b, off, _lookAheadLen);
off += _lookAheadLen;
len -= _lookAheadLen;
int r = Math.max(_wrapped.read(b, off, len), 0) + _lookAheadLen;
_lookAheadLen = 0;
return r;
} else {
return _wrapped.read(b, off, len);
}
}
private int findBoundary(byte[] b, int off, int len) throws IOException {
int bidx = -1; // start index of boundary
int idx = 0; // actual index in boundary[]
for(int i = off; i < off+len; i++) {
if (_boundary[idx] != b[i]) { // reset
idx = 0;
bidx = -1;
}
if (_boundary[idx] == b[i]) {
if (idx == 0) bidx = i;
if (++idx == _boundary.length) return bidx; // boundary found
}
}
if (bidx != -1) { // a possible boundary start was found, but the full boundary did not fit in this buffer
assert _lookAheadLen == 0; // There should be no unread look-ahead data at this point
_lookAheadLen = _boundary.length - idx;
int readLen = _wrapped.read(_lookAheadBuf, 0, _lookAheadLen);
if (readLen < _boundary.length - idx) { // There is not enough data to match boundary
_lookAheadLen = readLen;
return -1;
}
for (int i = 0; i < _boundary.length - idx; i++)
if (_boundary[i+idx] != _lookAheadBuf[i])
return -1; // Not a boundary => preserve the look-ahead buffer
// Boundary found => the look-ahead buffer no longer matters since all remaining data is ignored
}
return bidx;
}
}
/**
* URL-encodes everything between "/"-characters.
* Encodes spaces as '%20' instead of '+'.
* @throws UnsupportedEncodingException
*/
private String encodeUri( String uri ) {
String newUri = "";
StringTokenizer st = new StringTokenizer( uri, "/ ", true );
while ( st.hasMoreTokens()) {
String tok = st.nextToken();
if ( tok.equals( "/" ))
newUri += "/";
else if ( tok.equals( " " ))
newUri += "%20";
else {
try {
newUri += URLEncoder.encode( tok, "UTF-8" );
} catch( UnsupportedEncodingException e ) {
throw Log.errRTExcept(e);
}
}
}
return newUri;
}
private final ServerSocket myServerSocket;
private Thread myThread;
private File myRootDir;
// ==================================================
// File server code
// ==================================================
/**
* Serves files from homeDir and its subdirectories (only).
* Uses only URI, ignores all headers and HTTP parameters.
*/
public Response serveFile( String uri, Properties header, File homeDir,
boolean allowDirectoryListing )
{
Response res = null;
// Make sure we won't die of an exception later
if ( !homeDir.isDirectory())
res = new Response( HTTP_INTERNALERROR, MIME_PLAINTEXT,
"INTERNAL ERRROR: serveFile(): given homeDir is not a directory." );
if ( res == null )
{
// Remove URL arguments
uri = uri.trim().replace( File.separatorChar, '/' );
if ( uri.indexOf( '?' ) >= 0 )
uri = uri.substring(0, uri.indexOf( '?' ));
// Prohibit getting out of current directory
if ( uri.startsWith( ".." ) || uri.endsWith( ".." ) || uri.indexOf( "../" ) >= 0 )
res = new Response( HTTP_FORBIDDEN, MIME_PLAINTEXT,
"FORBIDDEN: Won't serve ../ for security reasons." );
}
File f = new File( homeDir, uri );
if ( res == null && !f.exists())
res = new Response( HTTP_NOTFOUND, MIME_PLAINTEXT,
"Error 404, file not found." );
// List the directory, if necessary
if ( res == null && f.isDirectory())
{
// Browsers get confused without '/' after the
// directory, send a redirect.
if ( !uri.endsWith( "/" ))
{
uri += "/";
res = new Response( HTTP_REDIRECT, MIME_HTML,
"<html><body>Redirected: <a href=\"" + uri + "\">" +
uri + "</a></body></html>");
res.addHeader( "Location", uri );
}
if ( res == null )
{
// First try index.html and index.htm
if ( new File( f, "index.html" ).exists())
f = new File( homeDir, uri + "/index.html" );
else if ( new File( f, "index.htm" ).exists())
f = new File( homeDir, uri + "/index.htm" );
// No index file, list the directory if it is readable
else if ( allowDirectoryListing && f.canRead() )
{
String[] files = f.list();
String msg = "<html><body><h1>Directory " + uri + "</h1><br/>";
if ( uri.length() > 1 )
{
String u = uri.substring( 0, uri.length()-1 );
int slash = u.lastIndexOf( '/' );
if ( slash >= 0 && slash < u.length())
msg += "<b><a href=\"" + uri.substring(0, slash+1) + "\">..</a></b><br/>";
}
if (files!=null)
{
for ( int i=0; i<files.length; ++i )
{
File curFile = new File( f, files[i] );
boolean dir = curFile.isDirectory();
if ( dir )
{
msg += "<b>";
files[i] += "/";
}
msg += "<a href=\"" + encodeUri( uri + files[i] ) + "\">" +
files[i] + "</a>";
// Show file size
if ( curFile.isFile())
{
long len = curFile.length();
msg += " <font size=2>(";
if ( len < 1024 )
msg += len + " bytes";
else if ( len < 1024 * 1024 )
msg += len/1024 + "." + (len%1024/10%100) + " KB";
else
msg += len/(1024*1024) + "." + len%(1024*1024)/10%100 + " MB";
msg += ")</font>";
}
msg += "<br/>";
if ( dir ) msg += "</b>";
}
}
msg += "</body></html>";
res = new Response( HTTP_OK, MIME_HTML, msg );
}
else
{
res = new Response( HTTP_FORBIDDEN, MIME_PLAINTEXT,
"FORBIDDEN: No directory listing." );
}
}
}
try
{
if ( res == null )
{
// Get MIME type from file name extension, if possible
String mime = null;
int dot = f.getCanonicalPath().lastIndexOf( '.' );
if ( dot >= 0 )
mime = (String)theMimeTypes.get( f.getCanonicalPath().substring( dot + 1 ).toLowerCase());
if ( mime == null )
mime = MIME_DEFAULT_BINARY;
// Calculate etag
String etag = Integer.toHexString((f.getAbsolutePath() + f.lastModified() + "" + f.length()).hashCode());
// Support (simple) skipping:
long startFrom = 0;
long endAt = -1;
String range = header.getProperty( "range" );
if ( range != null )
{
if ( range.startsWith( "bytes=" ))
{
range = range.substring( "bytes=".length());
int minus = range.indexOf( '-' );
try {
if ( minus > 0 )
{
startFrom = Long.parseLong( range.substring( 0, minus ));
endAt = Long.parseLong( range.substring( minus+1 ));
}
}
catch ( NumberFormatException e ) { Log.err(e); }
}
}
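// Example (added sketch): a request header "Range: bytes=100-199" parses to
// startFrom=100 and endAt=199 and is answered below with HTTP 206 Partial
// Content plus "Content-Range: bytes 100-199/<fileLen>"; an open-ended range
// like "bytes=100-" leaves endAt at -1, which is clamped to fileLen-1 below.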
// Change return code and add Content-Range header when skipping is requested
long fileLen = f.length();
if (range != null && startFrom >= 0)
{
if ( startFrom >= fileLen)
{
res = new Response( HTTP_RANGE_NOT_SATISFIABLE, MIME_PLAINTEXT, "" );
res.addHeader( "Content-Range", "bytes 0-0/" + fileLen);
res.addHeader( "ETag", etag);
}
else
{
if ( endAt < 0 )
endAt = fileLen-1;
long newLen = endAt - startFrom + 1;
if ( newLen < 0 ) newLen = 0;
final long dataLen = newLen;
FileInputStream fis = new FileInputStream( f ) {
public int available() throws IOException { return (int)dataLen; }
};
try {
fis.skip( startFrom );
res = new Response( HTTP_PARTIALCONTENT, mime, fis );
res.addHeader( "Content-Length", "" + dataLen);
res.addHeader( "Content-Range", "bytes " + startFrom + "-" + endAt + "/" + fileLen);
res.addHeader( "ETag", etag);
} finally { fis.close(); }
}
}
else
{
if (etag.equals(header.getProperty("if-none-match")))
res = new Response( HTTP_NOTMODIFIED, mime, "");
else
{
res = new Response( HTTP_OK, mime, new FileInputStream( f ));
res.addHeader( "Content-Length", "" + fileLen);
res.addHeader( "ETag", etag);
}
}
}
}
catch( IOException e ) {
Log.err(e);
res = new Response( HTTP_FORBIDDEN, MIME_PLAINTEXT, "FORBIDDEN: Reading file failed." );
}
res.addHeader( "Accept-Ranges", "bytes"); // Announce that the file server accepts partial content requestes
return res;
}
/**
* Hashtable mapping (String)FILENAME_EXTENSION -> (String)MIME_TYPE
*/
private static Hashtable theMimeTypes = new Hashtable();
static
{
StringTokenizer st = new StringTokenizer(
"css text/css "+
"htm text/html "+
"html text/html "+
"xml text/xml "+
"txt text/plain "+
"asc text/plain "+
"gif image/gif "+
"jpg image/jpeg "+
"jpeg image/jpeg "+
"png image/png "+
"mp3 audio/mpeg "+
"m3u audio/mpeg-url " +
"mp4 video/mp4 " +
"ogv video/ogg " +
"flv video/x-flv " +
"mov video/quicktime " +
"swf application/x-shockwave-flash " +
"js application/javascript "+
"pdf application/pdf "+
"doc application/msword "+
"ogg application/x-ogg "+
"zip application/octet-stream "+
"exe application/octet-stream "+
"class application/octet-stream " );
while ( st.hasMoreTokens())
theMimeTypes.put( st.nextToken(), st.nextToken());
}
private static int theBufferSize = 16 * 1024;
// Change this if you want to log to somewhere else than stdout
protected static final PrintStream myOut = System.out;
/**
* GMT date formatter
*/
private static java.text.SimpleDateFormat gmtFrmt;
static
{
gmtFrmt = new java.text.SimpleDateFormat( "E, d MMM yyyy HH:mm:ss 'GMT'", Locale.US);
gmtFrmt.setTimeZone(TimeZone.getTimeZone("GMT"));
}
/**
* The distribution licence
*/
private static final String LICENCE =
"Copyright (C) 2001,2005-2011 by Jarno Elonen <elonen@iki.fi>\n"+
"and Copyright (C) 2010 by Konstantinos Togias <info@ktogias.gr>\n"+
"\n"+
"Redistribution and use in source and binary forms, with or without\n"+
"modification, are permitted provided that the following conditions\n"+
"are met:\n"+
"\n"+
"Redistributions of source code must retain the above copyright notice,\n"+
"this list of conditions and the following disclaimer. Redistributions in\n"+
"binary form must reproduce the above copyright notice, this list of\n"+
"conditions and the following disclaimer in the documentation and/or other\n"+
"materials provided with the distribution. The name of the author may not\n"+
"be used to endorse or promote products derived from this software without\n"+
"specific prior written permission. \n"+
" \n"+
"THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n"+
"IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n"+
"OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n"+
"IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n"+
"INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n"+
"NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n"+
"DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n"+
"THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"+
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n"+
"OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.";
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/NetworkTest.java
|
package water;
import water.api.DocGen;
import water.fvec.Vec;
import water.util.Log;
import water.util.Utils;
import java.util.Random;
public class NetworkTest extends Func {
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help = "Message sizes", filter = Default.class, json=true)
public int[] msg_sizes = new int[]{1,1<<10,1<<20}; //INPUT
@API(help = "Repeats", filter = Default.class, json=true)
public int repeats = 10; //INPUT
@API(help = "Do collective test", filter = Default.class, json=true)
public boolean collective = true;
@API(help = "Do serial test", filter = Default.class, json=true)
public boolean serial = true;
@API(help = "Collective broadcast/reduce times in microseconds (for each message size)", json=true)
public double[] microseconds_collective; //OUTPUT
@API(help = "Collective bandwidths in Bytes/sec (for each message size, for each node)", json=true)
public double[] bandwidths_collective; //OUTPUT
@API(help = "Round-trip times in microseconds (for each message size, for each node)", json=true)
public double[][] microseconds; //OUTPUT
@API(help = "Bi-directional bandwidths in Bytes/sec (for each message size, for each node)", json=true)
public double[][] bandwidths; //OUTPUT
@API(help = "Nodes", json=true)
public String[] nodes; //OUTPUT
@Override protected void execImpl() {
logStart();
microseconds = new double[msg_sizes.length][];
microseconds_collective = new double[msg_sizes.length];
NetworkTester nt = new NetworkTester(msg_sizes, microseconds, microseconds_collective, repeats, serial, collective);
Log.debug("Starting top-level NetworkTester...");
H2O.submitTask(nt);
nt.join();
Log.debug("NetworkTester top-level after join");
// compute bandwidths from timing results
bandwidths = new double[msg_sizes.length][];
for (int i=0; i<bandwidths.length; ++i) {
bandwidths[i] = new double[microseconds[i].length];
for (int j=0; j< microseconds[i].length; ++j) {
//send and receive the same message -> 2x
bandwidths[i][j] = ( 2*msg_sizes[i] /*Bytes*/) / (microseconds[i][j] / 1e6 /*Seconds*/) ;
}
}
bandwidths_collective = new double[msg_sizes.length];
for (int i=0; i<bandwidths_collective.length; ++i) {
//broadcast and reduce the message to all nodes -> 2 x nodes
bandwidths_collective[i] = ( 2*H2O.CLOUD.size()*msg_sizes[i] /*Bytes*/) / (microseconds_collective[i] / 1e6 /*Seconds*/) ;
}
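// Worked example (added sketch): a 1 MB (1<<20 byte) message with a measured
// 2000 microsecond round-trip on one node gives a point-to-point bandwidth of
// 2*1048576 / (2000/1e6) ~= 1.05e9 Bytes/sec; the collective figure above
// additionally multiplies by the cloud size, since every node sees the payload.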
Log.debug("NetworkTest calculated bandwidths");
// populate node names
nodes = new String[H2O.CLOUD.size()];
for (int i=0; i<nodes.length; ++i)
nodes[i] = H2O.CLOUD._memary[i]._key.toString();
StringBuilder sb = new StringBuilder();
toASCII(sb);
Log.info(sb);
Log.debug("NetworkTester top-level completed");
}
// Helper class to run the actual test
public static class NetworkTester extends H2O.H2OCountedCompleter {
double[][] microseconds;
double[] microseconds_collective;
int[] msg_sizes;
public int repeats = 10;
boolean serial;
boolean collective;
public NetworkTester(int[] msg, double[][] res, double[] res_collective, int rep, boolean serial, boolean collective) {
microseconds = res;
microseconds_collective = res_collective;
msg_sizes = msg;
repeats = rep;
this.serial = serial;
this.collective = collective;
}
@Override
public void compute2() {
Log.debug("NetworkTester compute2 starting...");
// serial comm
if (serial) {
for (int i = 0; i < microseconds.length; ++i) {
microseconds[i] = send_recv_all(msg_sizes[i], repeats);
Utils.div(microseconds[i], 1e3f); //microseconds
}
}
// collective comm
if (collective) {
for (int i = 0; i < microseconds_collective.length; ++i) {
microseconds_collective[i] = send_recv_collective(msg_sizes[i], repeats);
}
Utils.div(microseconds_collective, 1e3f); //microseconds
}
tryComplete();
Log.debug("NetworkTester compute2 completed");
}
}
/**
* Helper class that contains a payload and has an empty compute2().
* If it is remotely executed, it will just send the payload over the wire.
*/
private static class PingPongTask extends DTask<PingPongTask> {
private final byte[] _payload;
public PingPongTask(byte[] payload) {
_payload = payload;
}
@Override public void compute2() {
tryComplete();
}
@Override public byte priority() {
return H2O.MIN_HI_PRIORITY;
}
}
/**
* Send a message from this node to all nodes in serial (including self), and receive it back
* @param msg_size message size in bytes
* @return Time in nanoseconds that it took to send and receive the message (one per node)
*/
private static double[] send_recv_all(int msg_size, int repeats) {
byte[] payload = new byte[msg_size];
new Random().nextBytes(payload);
final int siz = H2O.CLOUD.size();
double[] times = new double[siz];
for (int i = 0; i < siz; ++i) { //loop over compute nodes
Log.debug("NetworkTest send_recv_all starting PingPong to node " + i + "...");
H2ONode node = H2O.CLOUD._memary[i];
Timer t = new Timer();
for (int l = 0; l < repeats; ++l) {
Log.debug("NetworkTest send_recv_all starting msg_size " + msg_size + " bytes, iteration "+ l +" of "+ repeats + " ...");
PingPongTask ppt = new PingPongTask(payload); //same payload for all nodes
new RPC<PingPongTask>(node, ppt).call().get(); //blocking send
Log.debug("NetworkTest send_recv_all completed iteration "+ l +" of "+ repeats);
}
times[i] = (double) t.nanos() / repeats;
Log.debug("NetworkTest send_recv_all completed PingPong to node " + i);
}
return times;
}
/**
* Helper class that contains a payload and has an empty map/reduce.
* If it is remotely executed, it will just send the payload over the wire.
*/
private static class CollectiveTask extends MRTask2<CollectiveTask> {
private final byte[] _payload; //will be sent over the wire (broadcast/reduce)
public CollectiveTask(byte[] payload){
_payload = payload;
}
}
/**
* Broadcast a message from this node to all nodes and reduce it back
* @param msg_size message size in bytes
* @return Time in nanoseconds that it took
*/
private static double send_recv_collective(int msg_size, int repeats) {
Log.debug("NetworkTest send_recv_collective starting...");
byte[] payload = new byte[msg_size];
new Random().nextBytes(payload);
Vec v = Vec.makeConSeq(0., 1); //trivial Vec: 1 element with value 0.
Timer t = new Timer();
for (int l = 0; l < repeats; ++l) {
Log.debug("NetworkTest send_recv_collective starting msg_size " + msg_size + " bytes, iteration "+ l +" of "+ repeats + " ...");
new CollectiveTask(payload).doAll(v); //same payload for all nodes
Log.debug("NetworkTest send_recv_collective completed iteration "+ l +" of "+ repeats);
}
v.remove(new Futures()).blockForPending();
Log.debug("NetworkTest send_recv_collective completed");
return (double) t.nanos() / repeats;
}
@Override
public boolean toHTML(StringBuilder sb) {
try {
DocGen.HTML.section(sb, "Origin: " + H2O.SELF._key);
sb.append("<table cellpadding='10'>");
sb.append("<tr>");
sb.append("<th>Destination / Message Size</th>");
for (int msg_size : msg_sizes) {
sb.append("<th>");
sb.append(PrettyPrint.bytes(msg_size));
sb.append("</th>");
}
sb.append("</tr>");
sb.append("<tr>");
sb.append("<td>");
sb.append("All (broadcast & reduce)");
sb.append("</td>");
for (int m = 0; m < msg_sizes.length; ++m) {
sb.append("<td>");
sb.append(PrettyPrint.usecs((long) microseconds_collective[m])).append(", ").
append(PrettyPrint.bytesPerSecond((long)bandwidths_collective[m]));
sb.append("</td>");
}
sb.append("</tr>");
for (int n = 0; n < H2O.CLOUD._memary.length; ++n) {
sb.append("</tr>");
sb.append("<tr>");
sb.append("<td>");
sb.append(H2O.CLOUD._memary[n]._key);
sb.append("</td>");
for (int m = 0; m < msg_sizes.length; ++m) {
sb.append("<td>");
sb.append(PrettyPrint.usecs((long) microseconds[m][n])).append(", ").
append(PrettyPrint.bytesPerSecond((long)bandwidths[m][n]));
sb.append("</td>");
}
}
sb.append("</tr>");
sb.append("</table>");
} catch (Throwable t) {
return false;
}
return true;
}
public boolean toASCII(StringBuilder sb) {
try {
sb.append("Origin: " + H2O.SELF._key);
sb.append("\n");
sb.append("Destination / Message Size\t");
for (int msg_size : msg_sizes) {
sb.append(" ").append(PrettyPrint.bytes(msg_size)).append(" ");
}
sb.append("\n");
sb.append("All (broadcast & reduce)");
sb.append("\t");
for (int m = 0; m < msg_sizes.length; ++m) {
sb.append(" ").append(PrettyPrint.usecs((long) microseconds_collective[m])).append(", ").
append(PrettyPrint.bytesPerSecond((long)bandwidths_collective[m])).append(" ");
sb.append("\t");
}
for (int n = 0; n < H2O.CLOUD._memary.length; ++n) {
sb.append("\n");
sb.append(H2O.CLOUD._memary[n]._key);
sb.append(" \t");
for (int m = 0; m < msg_sizes.length; ++m) {
sb.append(" ").append(PrettyPrint.usecs((long) microseconds[m][n])).append(", ").
append(PrettyPrint.bytesPerSecond((long)bandwidths[m][n])).append(" ");
sb.append("\t");
}
}
} catch (Throwable t) {
return false;
}
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Paxos.java
|
package water;
import java.util.Arrays;
import water.H2ONode.H2Okey;
import water.nbhm.NonBlockingHashMap;
import water.util.Log;
/**
* (Not The) Paxos
*
* Used to define Cloud membership. See:
* http://en.wikipedia.org/wiki/Paxos_%28computer_science%29
*
* Detects and builds a "cloud" - a cooperating group of nodes, with mutual
* knowledge of each other. Basically tracks all the nodes that *this* node
* has ever heard of, and when *all* of the other nodes have all heard of each
* other, declares the situation as "commonKnowledge", and a Cloud. This
* algorithm differs from Paxos in a number of obvious ways:
* - it is not robust against failing nodes
* - it requires true global consensus (a Quorum of All)
* - it is vastly simpler than Paxos
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public abstract class Paxos {
// Whether or not we have common knowledge
public static volatile boolean _commonKnowledge = false;
// Whether or not we're allowing distributed-writes. The cloud is not
// allowed to change shape once we begin writing.
public static volatile boolean _cloudLocked = false;
public static NonBlockingHashMap<H2Okey,H2ONode> PROPOSED = new NonBlockingHashMap();
// ---
// This is a packet announcing what Cloud this Node thinks is the current
// Cloud, plus other status bits
static synchronized int doHeartbeat( H2ONode h2o ) {
// Kill somebody if the jar files mismatch. Do not attempt to deal with
// mismatched jars.
if( !H2O.OPT_ARGS.md5skip && !h2o._heartbeat.check_jar_md5() ) {
if( H2O.CLOUD.size() > 1 ) {
Log.warn("Killing "+h2o+" because of H2O version mismatch (md5 differs).");
UDPRebooted.T.mismatch.send(h2o);
} else {
Log.err("Attempting to join "+h2o+" with an H2O version mismatch (md5 differs). (Is H2O already running?) Exiting.");
H2O.exit(-1);
}
return 0;
}
// Never heard of this dude? See if we want to kill him off for being cloud-locked
if( !PROPOSED.contains(h2o) ) {
if( _cloudLocked ) {
Log.warn("Killing "+h2o+" because the cloud is no longer accepting new H2O nodes.");
UDPRebooted.T.locked.send(h2o);
return 0;
}
if( _commonKnowledge ) {
_commonKnowledge = false; // No longer sure about things
H2O.SELF._heartbeat._common_knowledge = false;
Log.debug("Cloud voting in progress");
}
// Add to proposed set, update cloud hash
H2ONode res = PROPOSED.putIfAbsent(h2o._key,h2o);
assert res==null;
H2O.SELF._heartbeat._cloud_hash += h2o.hashCode();
} else if( _commonKnowledge ) {
return 0; // Already know about you, nothing more to do
}
int chash = H2O.SELF._heartbeat._cloud_hash, dummy = 0;
assert chash == (dummy=doHash()) : "mismatched hash4, HB="+chash+" full="+dummy;
assert _commonKnowledge==false;
// Do we have consensus now?
H2ONode h2os[] = PROPOSED.values().toArray(new H2ONode[0]);
for( H2ONode h2o2 : h2os )
if( chash != h2o2._heartbeat._cloud_hash )
return print("Heartbeat hashes differ, self=0x"+Integer.toHexString(chash)+" "+h2o2+"=0x"+Integer.toHexString(h2o2._heartbeat._cloud_hash)+" ",PROPOSED);
// Hashes are same, so accept the new larger cloud-size
H2O.CLOUD.set_next_Cloud(h2os,chash);
// Demand everybody has rolled forward to same size before consensus
boolean same_size=true;
for( H2ONode h2o2 : h2os )
same_size &= (h2o2._heartbeat._cloud_size == H2O.CLOUD.size());
if( !same_size ) return 0;
H2O.SELF._heartbeat._common_knowledge = true;
for( H2ONode h2o2 : h2os )
if( !h2o2._heartbeat._common_knowledge ) {
return print("Missing common knowledge from all nodes!" ,PROPOSED);
}
_commonKnowledge = true; // Yup! Have global consensus
Paxos.class.notifyAll(); // Also, wake up a worker thread stuck in DKV.put
Paxos.print("Announcing new Cloud Membership: ", H2O.CLOUD._memary);
Log.info("Cloud of size ", H2O.CLOUD.size(), " formed ", H2O.CLOUD.toPrettyString());
H2O.notifyAboutCloudSize(H2O.SELF_ADDRESS, H2O.API_PORT, H2O.CLOUD.size());
return 0;
}
static private int doHash() {
int hash = 0;
for( H2ONode h2o : PROPOSED.values() )
hash += h2o.hashCode();
assert hash != 0;
return hash;
}
// Before we start doing distributed writes... block until the cloud
// stabilizes. After we start doing distributed writes, it is an error to
// change cloud shape - the distributed writes will be in the wrong place.
static void lockCloud() {
if( _cloudLocked ) return; // Fast-path cutout
synchronized(Paxos.class) {
while( !_commonKnowledge )
try { Paxos.class.wait(); } catch( InterruptedException ie ) { }
_cloudLocked = true;
}
}
static int print( String msg, NonBlockingHashMap<H2Okey,H2ONode> p ) {
return print(msg,p.values().toArray(new H2ONode[0]));
}
static int print( String msg, H2ONode h2os[] ) { return print(msg,h2os,""); }
static int print( String msg, H2ONode h2os[], String msg2 ) {
Log.debug(msg,Arrays.toString(h2os),msg2);
return 0; // handy flow-coding return
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/PrettyPrint.java
|
package water;
import java.util.concurrent.TimeUnit;
public class PrettyPrint {
public static String msecs(long msecs, boolean truncate) {
final long hr = TimeUnit.MILLISECONDS.toHours (msecs); msecs -= TimeUnit.HOURS .toMillis(hr);
final long min = TimeUnit.MILLISECONDS.toMinutes(msecs); msecs -= TimeUnit.MINUTES.toMillis(min);
final long sec = TimeUnit.MILLISECONDS.toSeconds(msecs); msecs -= TimeUnit.SECONDS.toMillis(sec);
final long ms = TimeUnit.MILLISECONDS.toMillis (msecs);
if( !truncate ) return String.format("%02d:%02d:%02d.%03d", hr, min, sec, ms);
if( hr != 0 ) return String.format("%2d:%02d:%02d.%03d", hr, min, sec, ms);
if( min != 0 ) return String.format("%2d min %2d.%03d sec", min, sec, ms);
return String.format("%2d.%03d sec", sec, ms);
}
public static String usecs(long usecs) {
final long hr = TimeUnit.MICROSECONDS.toHours (usecs); usecs -= TimeUnit.HOURS .toMicros(hr);
final long min = TimeUnit.MICROSECONDS.toMinutes(usecs); usecs -= TimeUnit.MINUTES.toMicros(min);
final long sec = TimeUnit.MICROSECONDS.toSeconds(usecs); usecs -= TimeUnit.SECONDS.toMicros(sec);
final long ms = TimeUnit.MICROSECONDS.toMillis(usecs); usecs -= TimeUnit.MILLISECONDS.toMicros(ms);
if( hr != 0 ) return String.format("%2d:%02d:%02d.%03d", hr, min, sec, ms);
if( min != 0 ) return String.format("%2d min %2d.%03d sec", min, sec, ms);
if( sec != 0 ) return String.format("%2d.%03d sec", sec, ms);
if( ms != 0 ) return String.format("%3d.%03d msec", ms, usecs);
return String.format("%3d usec", usecs);
}
// Return X such that (bytes < 1L<<(X*10))
public static int byteScale(long bytes) {
for( int i=0; i<6; i++ )
if( bytes < 1L<<(i*10) )
return i;
return 6;
}
public static double bytesScaled(long bytes, int scale) {
if( scale == 0 ) return bytes;
return bytes / (double)(1L<<((scale-1)*10));
}
public static final String[] SCALE = new String[] {"N/A","%4.0f B","%.1f KB","%.1f MB","%.2f GB","%.3f TB","%.3f PB"};
public static String bytes(long bytes) { return bytes(bytes,byteScale(bytes)); }
public static String bytes(long bytes, int scale) { return String.format(SCALE[scale],bytesScaled(bytes,scale)); }
public static String bytesPerSecond(long bytes) {
if( bytes < 0 ) return "N/A";
return bytes(bytes)+"/S";
}
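// Example (added sketch): bytes(1536) picks scale 2 (KB) because 1536 is at
// least 1<<10 but below 1<<20, and bytesScaled(1536,2) = 1536/1024.0 = 1.5,
// so the result is "1.5 KB"; bytesPerSecond(1536) then renders as "1.5 KB/S".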
// About as clumsy and random as a blaster...
public static String UUID( long lo, long hi ) {
long lo0 = (lo>>32)&0xFFFFFFFFL;
long lo1 = (lo>>16)&0xFFFFL;
long lo2 = (lo>> 0)&0xFFFFL;
long hi0 = (hi>>48)&0xFFFFL;
long hi1 = (hi>> 0)&0xFFFFFFFFFFFFL;
return String.format("%08X-%04X-%04X-%04X-%012X",lo0,lo1,lo2,hi0,hi1);
}
static double [] powers10 = new double[]{
0.0000000001,
0.000000001,
0.00000001,
0.0000001,
0.000001,
0.00001,
0.0001,
0.001,
0.01,
0.1,
1.0,
10.0,
100.0,
1000.0,
10000.0,
100000.0,
1000000.0,
10000000.0,
100000000.0,
1000000000.0,
10000000000.0,
};
static public long [] powers10i = new long[]{
1l,
10l,
100l,
1000l,
10000l,
100000l,
1000000l,
10000000l,
100000000l,
1000000000l,
10000000000l,
100000000000l,
1000000000000l,
10000000000000l,
100000000000000l,
1000000000000000l,
10000000000000000l,
100000000000000000l,
1000000000000000000l,
};
public static double pow10(int exp){
return ((exp >= -10 && exp <= 10)?powers10[exp+10]:Math.pow(10, exp));
}
public static long pow10i(int exp){
return powers10i[exp];
}
public static final boolean fitsIntoInt(double d){
return Math.abs((int)d - d) < 1e-8;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/RPC.java
|
package water;
import jsr166y.CountedCompleter;
import jsr166y.ForkJoinPool;
import water.H2O.FJWThr;
import water.H2O.H2OCountedCompleter;
import water.util.Log;
import java.io.IOException;
import java.util.ArrayList;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
/**
* A remotely executed FutureTask. Flow is:
*
* 1- Build a DTask (or subclass). This object will be replicated remotely.
* 2- Make a RPC object, naming the target Node. Call (re)call(). Call get()
* to block for result, or cancel() or isDone(), etc. Caller can also arrange
* for caller.tryComplete() to be called in a F/J thread, to support completion
* style execution (i.e. Continuation Passing Style).
* 3- DTask will be serialized and sent to the target; small objects via UDP
* and large via TCP (using AutoBuffer and auto-gen serializers).
* 4- An RPC UDP control packet will be sent to target; this will also contain
* the DTask if its small enough.
* 4.5- The network may replicate (or drop) the UDP packet. Dups may arrive.
* 4.5- Sender may timeout, and send dup control UDP packets.
* 5- Target will capture a UDP packet, and begin filtering dups (via task#).
* 6- Target will deserialize the DTask, and call DTask.invoke() in a F/J thread.
* 6.5- Target continues to filter (and drop) dup UDP sends (and timeout resends)
* 7- Target finishes call, and puts result in DTask.
* 8- Target serializes result and sends to back to sender.
* 9- Target sends an ACK back (may be combined with the result if small enough)
* 10- Target puts the ACK in H2ONode.TASKS for later filtering.
* 10.5- Target receives dup UDP request, then replies with ACK back.
* 11- Sender receives ACK result; deserializes; notifies waiters
* 12- Sender sends ACKACK back
* 12.5- Sender receives dup ACK's, sends dup ACKACK's back
* 13- Target receives ACKACK, removes TASKS tracking
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class RPC<V extends DTask> implements Future<V>, Delayed, ForkJoinPool.ManagedBlocker {
// The target remote node to pester for a response. NULL'd out if the target
// disappears or we cancel things (hence not final).
H2ONode _target;
// The distributed Task to execute. Think: code-object+args while this RPC
// is a call-in-progress (i.e. has an 'execution stack')
final V _dt;
// True if _dt contains the final answer
volatile boolean _done;
// A locally-unique task number; a "cookie" handed to the remote process that
// they hand back with the response packet. These *never* repeat, so that we
// can tell when a reply-packet points to e.g. a dead&gone task.
int _tasknum;
// Time we started this sucker up. Controls re-send behavior.
final long _started;
long _retry; // When we should attempt a retry
// A list of CountedCompleters we will call tryComplete on when the RPC
// finally completes. Frequently null/zero.
ArrayList<H2OCountedCompleter> _fjtasks;
// We only send non-failing TCP info once; also if we used TCP it was large
// so duplications are expensive. However, we DO need to keep resending some
// kind of "are you done yet?" UDP packet, incase the reply packet got dropped
// (but also in case the main call was a single UDP packet and it got dropped).
// Not volatile because read & written under lock.
boolean _sentTcp;
// To help with asserts, record the size of the sent DTask - if we resend
// it should remain the same size.
int _size;
int _size_rez; // Size of received results
// Magic Cookies
static final byte SERVER_UDP_SEND = 10;
static final byte SERVER_TCP_SEND = 11;
static final byte CLIENT_UDP_SEND = 12;
static final byte CLIENT_TCP_SEND = 13;
static final private String[] COOKIES = new String[] {
"SERVER_UDP","SERVER_TCP","CLIENT_UDP","CLIENT_TCP" };
public static <DT extends DTask> RPC<DT> call(H2ONode target, DT dtask) {
return new RPC(target,dtask).call();
}
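// Usage sketch (added; MyDTask is a hypothetical DTask subclass): following
// the flow in the class javadoc, a caller builds the task, fires it at the
// target node, and either blocks for the answer or registers a completer:
//
//   MyDTask work = new MyDTask(args);                  // step 1: code + args
//   MyDTask done = RPC.call(targetNode, work).get();   // steps 2-13: send, block
//   // or, continuation style:
//   //   new RPC<MyDTask>(targetNode, work).addCompleter(myCompleter).call();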
// Make a remotely executed FutureTask. Must name the remote target as well
// as the remote function. This function is expected to be subclassed.
public RPC( H2ONode target, V dtask ) {
this(target,dtask,1.0f);
setTaskNum();
}
// Only used for people who optimistically make RPCs that get thrown away and
// never sent over the wire. Split out task# generation from RPC <init> -
// every task# MUST be sent over the wires, because the far end tracks the
// task#'s in a dense list (no holes).
RPC( H2ONode target, V dtask, float ignore ) {
_target = target;
_dt = dtask;
_started = System.currentTimeMillis();
_retry = RETRY_MS;
}
RPC<V> setTaskNum() {
assert _tasknum == 0;
_tasknum = _target.nextTaskNum();
return this;
}
// Make an initial RPC, or re-send a packet. Always called on 1st send; also
// called on a timeout.
public synchronized RPC<V> call() {
++_callCnt;
// completer will not be carried over to remote
// add it to the RPC call.
if(_dt.getCompleter() != null){
CountedCompleter cc = _dt.getCompleter();
assert cc instanceof H2OCountedCompleter;
boolean alreadyIn = false;
if(_fjtasks != null)
for( H2OCountedCompleter hcc : _fjtasks )
if( hcc == cc) alreadyIn = true;
if( !alreadyIn ) addCompleter((H2OCountedCompleter)cc);
_dt.setCompleter(null);
}
// If running on self, just submit to queues & do locally
if( _target==H2O.SELF ) {
assert _dt.getCompleter()==null;
_dt.setCompleter(new H2O.H2OCallback<DTask>() {
@Override public void callback(DTask dt){
assert dt==_dt;
synchronized(RPC.this) {
assert !_done; // F/J guarantees called once
_done = true;
RPC.this.notifyAll();
}
doAllCompletions();
}
@Override public boolean onExceptionalCompletion(Throwable ex, CountedCompleter dt){
assert dt==_dt;
synchronized(RPC.this) { // Might be called several times
if( _done ) return true; // Filter down to 1st exceptional completion
_dt.setException(ex);
_done = true; // must be set as the last thing before notify; the waiting thread can wake up at any time!
RPC.this.notifyAll();
}
doAllCompletions();
return true;
}
});
H2O.submitTask(_dt);
return this;
}
// Keep a global record, for awhile
if( _target != null ) _target.taskPut(_tasknum,this);
try {
// We could be racing timeouts-vs-replies. Blow off timeout if we have an answer.
if( isDone() ) {
if( _target != null ) _target.taskRemove(_tasknum);
return this;
}
// Default strategy: (re)fire the packet and (re)start the timeout. We
// "count" exactly 1 failure: just whether or not we shipped via TCP ever
// once. After that we fearlessly (re)send UDP-sized packets until the
// server replies.
// Pack classloader/class & the instance data into the outgoing
// AutoBuffer. If it fits in a single UDP packet, ship it. If not,
// finish off the current AutoBuffer (which is now going TCP style), and
// make a new UDP-sized packet. On a re-send of a TCP-sized hunk, just
// send the basic UDP control packet.
if( !_sentTcp ) {
// Ship the UDP packet!
while( true ) { // Retry loop for broken TCP sends
AutoBuffer ab = new AutoBuffer(_target);
try {
ab.putTask(UDP.udp.exec,_tasknum).put1(CLIENT_UDP_SEND).put(_dt);
boolean t = ab.hasTCP();
assert sz_check(ab) : "Resend of "+_dt.getClass()+" changes size from "+_size+" to "+ab.size()+" for task#"+_tasknum;
ab.close(); // Then close; send final byte
_sentTcp = t; // Set after close (and any other possible fail)
break; // Break out of retry loop
} catch( AutoBuffer.AutoBufferException e ) {
Log.info_no_DKV(Log.Tag.Sys.WATER, "IOException during RPC call: " + e._ioe.getMessage() + ", AB=" + ab + ", for task#" + _tasknum + ", waiting and retrying...");
ab.drainClose();
try { Thread.sleep(500); } catch (InterruptedException ignore) {}
}
} // end of while(true)
} else {
// Else it was sent via TCP in a prior attempt, and we've timed out.
// This means the caller's ACK/answer probably got dropped and we need
// him to resend it (or else the caller is still processing our
// request). Send a UDP reminder - but with the CLIENT_TCP_SEND flag
// instead of the UDP send, and no DTask (since it previously went via
// TCP, no need to resend it).
AutoBuffer ab = new AutoBuffer(_target).putTask(UDP.udp.exec,_tasknum);
ab.put1(CLIENT_TCP_SEND).close();
}
// Double retry until we exceed existing age. This is the time to delay
// until we try again. Note that we come here immediately on creation,
// so the first doubling happens before anybody does any waiting. Also
// note the generous 5sec cap: ping at least every 5 sec.
_retry += (_retry < 5000 ) ? _retry : 5000;
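// Example (added sketch): _retry is the offset from _started at which the
// next resend fires, so the gap between resends roughly doubles from the
// initial RETRY_MS of 200 msec (400, 800, 1600, 3200, ...) and is then
// capped at 5 sec by the line above.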
// Put self on the "TBD" list of tasks awaiting Timeout.
// So: don't really 'forget' but remember me in a little bit.
UDPTimeOutThread.PENDING.add(this);
return this;
} catch(Error t) {
throw Log.err(t);
}
}
private V result(){
DException.DistributedException t = _dt.getDException();
if( t != null ) throw t;
return _dt;
}
// Similar to FutureTask.get() but does not throw any exceptions. Returns
// null for canceled tasks, including those where the target dies.
@Override public V get() {
// check priorities - FJ task can only block on a task with higher priority!
Thread cThr = Thread.currentThread();
int priority = (cThr instanceof FJWThr) ? ((FJWThr)cThr)._priority : -1;
// was hitting this (priority=1 but _dt.priority()=0 for DRemoteTask) - not clear who increased priority of FJWThr to 1...
// assert _dt.priority() > priority || (_dt.priority() == priority && (_dt instanceof DRemoteTask || _dt instanceof MRTask2))
assert _dt.priority() > priority || ((_dt instanceof DRemoteTask || _dt instanceof MRTask2))
: "*** Attempting to block on task (" + _dt.getClass() + ") with equal or lower priority. Can lead to deadlock! " + _dt.priority() + " <= " + priority;
if( _done ) return result(); // Fast-path shortcut
// Use FJP ManagedBlock for this blocking-wait - so the FJP can spawn
// another thread if needed.
try {
try {
ForkJoinPool.managedBlock(this);
} catch (InterruptedException e) {
}
} catch(Throwable t){
// catch and rethrow to preserve the stack trace!
throw new RuntimeException(t);
}
if( _done ) return result(); // Fast-path shortcut
assert isCancelled();
return null;
}
// Return true if blocking is unnecessary, which is true if the Task isDone.
@Override public boolean isReleasable() { return isDone(); }
// Possibly blocks the current thread. Returns true if isReleasable would
// return true. Used by the FJ Pool management to spawn threads to prevent
// deadlock is otherwise all threads would block on waits.
@Override public synchronized boolean block() throws InterruptedException {
while( !isDone() ) { wait(1000); }
return true;
}
@Override public final V get(long timeout, TimeUnit unit) {
if( _done ) return _dt; // Fast-path shortcut
throw H2O.unimpl();
}
// Done if target is dead or canceled, or we have a result.
@Override public final boolean isDone() { return _target==null || _done; }
// Done if target is dead or canceled
@Override public final boolean isCancelled() { return _target==null; }
// Attempt to cancel job
@Override public final boolean cancel( boolean mayInterruptIfRunning ) {
boolean did = false;
synchronized(this) { // Install the answer under lock
if( !isCancelled() ) {
did = true; // Did cancel (was not cancelled already)
_target.taskRemove(_tasknum);
_target = null; // Flag as canceled
UDPTimeOutThread.PENDING.remove(this);
}
notifyAll(); // notify in any case
}
return did;
}
// ---
// Handle the remote-side incoming UDP packet. This is called on the REMOTE
// Node, not local. Wrong thread, wrong JVM.
static class RemoteHandler extends UDP {
@Override AutoBuffer call(AutoBuffer ab) { throw H2O.fail(); }
// Pretty-print bytes 1-15; byte 0 is the udp_type enum
@Override public String print16( AutoBuffer ab ) {
int flag = ab.getFlag();
String clazz = (flag == CLIENT_UDP_SEND) ? TypeMap.className(ab.get2()) : "";
return "task# "+ab.getTask()+" "+ clazz+" "+COOKIES[flag-SERVER_UDP_SEND];
}
}
public static class RPCCall extends H2OCountedCompleter implements Delayed {
volatile DTask _dt; // Set on construction, atomically set to null onAckAck
final H2ONode _client;
final int _tsknum;
long _started; // Retry fields for the ackack
int _callCnt;
int _ackResendCnt;
long _cmpStarted;
long _retry;
volatile boolean _computedAndReplied; // One time transition from false to true
volatile boolean _computed; // One time transition from false to true
transient AtomicBoolean _firstException = new AtomicBoolean(false);
// To help with asserts, record the size of the sent DTask - if we resend
// it should remain the same size. Also used for profiling.
int _size;
RPCCall(DTask dt, H2ONode client, int tsknum) {
_dt = dt;
_client = client;
_tsknum = tsknum;
if( _dt == null ) _computedAndReplied = true; // Only for Golden Completed Tasks (see H2ONode.java)
}
@Override public void compute2() {
// First set self as the completer, so this RPCCall completes when the DTask does
assert _dt.getCompleter() == null;
_dt.setCompleter(this);
// Run the remote task on this server...
_cmpStarted = System.currentTimeMillis();
_dt.dinvoke(_client);
}
// When the task completes, ship results back to client. F/J guarantees
// that this is called only once with no onExceptionalCompletion calls - or
// 1-or-more onExceptionalCompletion calls.
@Override public void onCompletion( CountedCompleter caller ) {
synchronized(this) {
assert !_computed;
_computed = true;
}
sendAck();
}
// Exception occurred when processing this task locally; set the exception and
// send it back to the caller. Can be called lots of times (e.g., once per
// MRTask2.map call that throws).
@Override public boolean onExceptionalCompletion( Throwable ex, CountedCompleter caller ) {
if( _computed ) return false;
synchronized(this) { // Filter dup calls to onExCompletion
if( _computed ) return false;
_computed = true;
}
_dt.setException(ex);
sendAck();
return false;
}
private void sendAck() {
// Send results back
DTask dt, origDt = _dt; // _dt can go null the instant it is sent over the wire
assert origDt!=null; // Freed after completion
while((dt = _dt) != null) { // Retry loop for broken TCP sends
AutoBuffer ab = null;
try {
// Start the ACK with results back to client. If the client is
// asking for a class/id mapping (or any job running at FETCH_ACK
// priority) then return a udp.fetchack byte instead of a udp.ack.
// The receiver thread then knows to handle the mapping at the higher
// priority.
UDP.udp udp = dt.priority()==H2O.FETCH_ACK_PRIORITY ? UDP.udp.fetchack : UDP.udp.ack;
ab = new AutoBuffer(_client).putTask(udp,_tsknum).put1(SERVER_UDP_SEND);
dt.write(ab); // Write the DTask - could be very large write
dt._repliedTcp = ab.hasTCP(); // Resends do not need to repeat TCP result
ab.close(); // Then close; send final byte
_computedAndReplied = true; // After the final handshake, set computed+replied bit
break; // Break out of retry loop
} catch( AutoBuffer.AutoBufferException e ) {
Log.info("IOException during ACK, "+e._ioe.getMessage()+", t#"+_tsknum+" AB="+ab+", waiting and retrying...");
ab.drainClose();
try { Thread.sleep(100); } catch (InterruptedException ignore) {}
} catch( Exception e ) { // Custom serializer just barfed?
Log.err(e); // Log custom serializer exception
ab.drainClose();
}
} // end of while(true)
if( dt == null )
Log.info("Cancelled remote task#"+_tsknum+" "+origDt.getClass()+" to "+_client + " has been cancelled by remote");
else {
if( (dt instanceof DRemoteTask || dt instanceof MRTask2) && dt.logVerbose() )
Log.debug("Done remote task#"+_tsknum+" "+dt.getClass()+" to "+_client);
_client.record_task_answer(this); // Setup for retrying Ack & AckAck, if not canceled
}
}
// Re-send strictly the ack, because we're missing an AckAck
final void resend_ack() {
assert _computedAndReplied : "Found RPCCall not computed "+_tsknum;
DTask dt = _dt;
if( dt == null ) return; // Received ACKACK already
UDP.udp udp = dt.priority()==H2O.FETCH_ACK_PRIORITY ? UDP.udp.fetchack : UDP.udp.ack;
AutoBuffer rab = new AutoBuffer(_client).putTask(udp,_tsknum);
boolean wasTCP = dt._repliedTcp;
if( wasTCP ) rab.put1(RPC.SERVER_TCP_SEND) ; // Original reply sent via TCP
else dt.write(rab.put1(RPC.SERVER_UDP_SEND)); // Original reply sent via UDP
assert sz_check(rab) : "Resend of "+_dt.getClass()+" changes size from "+_size+" to "+rab.size();
assert dt._repliedTcp==wasTCP;
rab.close();
// Double retry until we exceed existing age. This is the time to delay
// until we try again. Note that we come here immediately on creation,
// so the first doubling happens before anybody does any waiting. Also
// note the generous 5sec cap: ping at least every 5 sec.
_retry += (_retry < 5000 ) ? _retry : 5000;
}
@Override public byte priority() { return _dt.priority(); }
// How long until we should do the "timeout" action?
@Override public final long getDelay( TimeUnit unit ) {
long delay = (_started+_retry)-System.currentTimeMillis();
return unit.convert( delay, TimeUnit.MILLISECONDS );
}
// Needed for the DelayQueue API
@Override public final int compareTo( Delayed t ) {
RPCCall r = (RPCCall)t;
long nextTime = _started+_retry, rNextTime = r._started+r._retry;
return nextTime == rNextTime ? 0 : (nextTime > rNextTime ? 1 : -1);
}
static AtomicReferenceFieldUpdater<RPCCall,DTask> CAS_DT =
AtomicReferenceFieldUpdater.newUpdater(RPCCall.class, DTask.class,"_dt");
// Assertion check that size is not changing between resends,
// i.e., resends sent identical data.
private boolean sz_check(AutoBuffer ab) {
final int absize = ab.size();
if( _size == 0 ) { _size = absize; return true; }
return _size==absize;
}
public int size() { return _size; }
}
// Handle traffic, from a client to this server asking for work to be done.
// Called from either a F/J thread (generally with a UDP packet) or from the
// TCPReceiver thread.
static void remote_exec( final AutoBuffer ab ) {
long lo = ab.get8(0), hi = ab.get8(8); // for dbg
final int task = ab.getTask();
final int flag = ab.getFlag();
assert flag==CLIENT_UDP_SEND || flag==CLIENT_TCP_SEND; // Client-side send
// Atomically record an instance of this task, one-time-only replacing a
// null with an RPCCall, a placeholder while we work on a proper response -
// and it serves to let us discard dup UDP requests.
RPCCall old = ab._h2o.has_task(task);
// This is a UDP packet requesting an answer back for a request sent via
// TCP but the UDP packet has arrived ahead of the TCP. Just drop the UDP
// and wait for the TCP to appear.
if( old == null && flag == CLIENT_TCP_SEND ) {
if(ab.hasTCP())TimeLine.printMyTimeLine();
assert !ab.hasTCP():"ERROR: got tcp with existing task #, FROM " + ab._h2o.toString() + " AB: " + UDP.printx16(lo,hi); // All the resends should be UDP only
// DROP PACKET
} else if( old == null ) { // New task?
RPCCall rpc;
try {
// Read the DTask Right Now. If we are the TCPReceiver thread, then we
// are reading in that thread... and thus TCP reads are single-threaded.
rpc = new RPCCall(ab.get(water.DTask.class),ab._h2o,task);
} catch( AutoBuffer.AutoBufferException e ) {
// Here we assume it's a TCP fail on read - and ignore the remote_exec
// request. The caller will send it again. NOTE: this case is
// indistinguishable from a broken short-writer/long-reader bug, except
// that we'll re-send endlessly and fail endlessly.
Log.info("Network congestion OR short-writer/long-reader: TCP "+e._ioe.getMessage()+", AB="+ab+", ignoring partial send");
ab.drainClose();
return;
}
RPCCall rpc2 = ab._h2o.record_task(rpc);
if( rpc2==null ) { // Atomically insert (to avoid double-work)
if( (rpc._dt instanceof DRemoteTask || rpc._dt instanceof MRTask2) && rpc._dt.logVerbose() )
Log.debug("Start remote task#"+task+" "+rpc._dt.getClass()+" from "+ab._h2o);
H2O.submitTask(rpc); // And execute!
} else { // Else lost the task-insertion race
if(ab.hasTCP())TimeLine.printMyTimeLine();
assert !ab.hasTCP():"ERROR: got tcp with existing task #, FROM " + ab._h2o.toString() + " AB: " + UDP.printx16(lo,hi); // All the resends should be UDP only
// DROP PACKET
}
} else if( !old._computedAndReplied) {
++old._callCnt;
// This packet has not been fully computed. Hence it's still a work-in-
// progress locally. We have no answer to reply but we do not want to
// re-offer the packet for repeated work. Just ignore the packet.
if(ab.hasTCP())TimeLine.printMyTimeLine();
assert !ab.hasTCP():"ERROR: got tcp resend with existing in-progress task #, FROM " + ab._h2o.toString() + " AB: " + UDP.printx16(lo,hi); // All the resends should be UDP only
// DROP PACKET
} else {
++old._ackResendCnt;
if(old._ackResendCnt % 50 == 0)
Log.err("Possibly broken network, can not send ack through, got " + old._ackResendCnt + " resends.");
// This is an old re-send of the same thing we've answered to before.
// Send back the same old answer ACK. If we sent via TCP before, then
// we know the answer got there so just send a control-ACK back. If we
// sent via UDP, resend the whole answer.
if(ab.hasTCP())TimeLine.printMyTimeLine();
assert !ab.hasTCP():"ERROR: got tcp with existing task #, FROM " + ab._h2o.toString() + " AB: " + UDP.printx16(lo,hi); // All the resends should be UDP only
old.resend_ack();
}
ab.close();
}
// TCP large RECEIVE of results. Note that 'this' is NOT the RPC object
// that is hoping to get the received object, nor is the current thread the
// RPC thread blocking for the object. The current thread is the TCP
// reader thread.
static void tcp_ack( final AutoBuffer ab ) throws IOException {
// Get the RPC we're waiting on
int task = ab.getTask();
RPC rpc = ab._h2o.taskGet(task);
// Race with canceling a large RPC fetch: Task is already dead. Do not
// bother reading from the TCP socket, just bail out & close socket.
if( rpc == null ) {
ab.drainClose();
} else {
assert rpc._tasknum == task;
assert !rpc._done;
// Here we have the result, and we're on the correct Node but wrong
// Thread. If we just return, the TCP reader thread will close the
// remote, the remote will UDP ACK the RPC back, and back on the current
// Node but in the correct Thread, we'd wake up and realize we received a
// large result.
try {
rpc.response(ab);
} catch( AutoBuffer.AutoBufferException e ) {
// If TCP fails, we will have done a short-read crushing the original
// _dt object, and be unable to resend. This is fatal right now.
// Really: an unimplemented feature; fix is to notice that a partial
// TCP read means that the server (1) got our remote_exec request, (2)
// has computed an answer and was trying to send it to us, (3) failed
// sending via TCP hence the server knows it failed and will send again
// without any further work from us. We need to disable all the resend
// & retry logic, and wait for the server to re-send our result.
// Meanwhile the _dt object is crushed with half-read crap, and cannot
// be trusted except in the base fields.
throw Log.err("Network congestion OR short-writer/long-reader, AB="+ab,e._ioe);
}
}
// ACKACK the remote, telling him "we got the answer"
new AutoBuffer(ab._h2o).putTask(UDP.udp.ackack.ordinal(),task).close();
}
// Got a response UDP packet, or completed a large TCP answer-receive.
// Install it as The Answer packet and wake up anybody waiting on an answer.
protected int response( AutoBuffer ab ) {
try{
assert _tasknum==ab.getTask();
if( _done ) return ab.close(); // Ignore duplicate response packet
int flag = ab.getFlag(); // Must read flag also, to advance ab
if( flag == SERVER_TCP_SEND ) return ab.close(); // Ignore UDP packet for a TCP reply
assert flag == SERVER_UDP_SEND;
synchronized(this) { // Install the answer under lock
if( _done ) return ab.close(); // Ignore duplicate response packet
UDPTimeOutThread.PENDING.remove(this);
_dt.read(ab); // Read the answer (under lock?)
_size_rez = ab.size(); // Record received size
ab.close(); // Also finish the read (under lock?)
_dt.onAck(); // One time only execute (before sending ACKACK)
_done = true; // Only read one (of many) response packets
ab._h2o.taskRemove(_tasknum); // Flag as task-completed, even if the result is null
notifyAll(); // And notify in any case
}
doAllCompletions(); // Send all tasks needing completion to the work queues
}catch(Throwable t){
t.printStackTrace();
}
return 0;
}
private void doAllCompletions() {
final Exception e = _dt.getDException();
// Also notify any and all pending completion-style tasks
if( _fjtasks != null )
for( final H2OCountedCompleter task : _fjtasks )
H2O.submitTask(new H2OCountedCompleter() {
@Override public void compute2() {
if(e != null) // re-throw exception on this side as if it happened locally
task.completeExceptionally(e);
else try {
task.tryComplete();
} catch(Throwable e) {
task.completeExceptionally(e);
}
}
@Override public byte priority() { return task.priority(); }
});
}
// ---
public synchronized RPC<V> addCompleter( H2OCountedCompleter task ) {
if( _fjtasks == null ) _fjtasks = new ArrayList(2);
_fjtasks.add(task);
return this;
}
// Assertion check that size is not changing between resends,
// i.e., resends sent identical data.
private boolean sz_check(AutoBuffer ab) {
final int absize = ab.size();
if( _size == 0 ) { _size = absize; return true; }
return _size==absize;
}
// Size of received results
int size_rez() { return _size_rez; }
// ---
static final long RETRY_MS = 200; // Initial UDP packet retry in msec
// How long until we should do the "timeout" action?
@Override public final long getDelay( TimeUnit unit ) {
long delay = (_started+_retry)-System.currentTimeMillis();
return unit.convert( delay, TimeUnit.MILLISECONDS );
}
// Needed for the DelayQueue API
@Override public final int compareTo( Delayed t ) {
RPC<?> dt = (RPC<?>)t;
long nextTime = _started+_retry, dtNextTime = dt._started+dt._retry;
return nextTime == dtNextTime ? 0 : (nextTime > dtNextTime ? 1 : -1);
}
public final DTask task(){return _dt;}
public final int taskNum(){return _tasknum;}
public final H2ONode target(){return _target;}
public transient int _callCnt;
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Request2.java
|
package water;
import dontweave.gson.JsonElement;
import dontweave.gson.JsonObject;
import dontweave.gson.JsonParser;
import hex.GridSearch;
import water.api.DocGen;
import water.api.Request;
import water.api.RequestArguments;
import water.api.RequestServer.API_VERSION;
import water.fvec.Vec;
import water.util.Log;
import water.util.Utils;
import java.lang.annotation.Annotation;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.*;
public abstract class Request2 extends Request {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
protected transient Properties _parms;
@API(help = "Response stats and info.")
public ResponseInfo response_info;
public String input(String fieldName) {
return _parms == null ? null : _parms.getProperty(fieldName);
}
public class TypeaheadKey extends TypeaheadInputText<Key> {
transient Key _defaultValue;
transient Class _type;
public TypeaheadKey() {
this(null, true);
}
public TypeaheadKey(Class type, boolean required) {
super(mapTypeahead(type), "", required);
_type = type;
setRefreshOnChange();
}
public void setValue(Key key) {
record()._value = key;
record()._originalValue = key.toString();
}
@Override protected Key parse(String input) {
if (_validator!=null) _validator.validateRaw(input);
Key k = Key.make(input);
Value v = DKV.get(k);
if( v == null && _mustExist )
throw new H2OIllegalArgumentException(this, "Key '" + input + "' does not exist!");
if( _type != null ) {
if( v == null && _required )
throw new H2OIllegalArgumentException(this, "Key '" + input + "' does not exist!");
}
return k;
}
@Override protected Key defaultValue() {
return _defaultValue;
}
@Override protected String queryDescription() {
return "A key" + (_type != null ? " of type " + _type.getSimpleName() : "");
}
@Override protected String[] errors() {
if( _type != null )
return new String[] { "Key is not a " + _type.getSimpleName() };
return super.errors();
}
}
/**
* Fields that depend on another, e.g. selecting a Vec from a Frame.
*/
public class Dependent implements Filter {
public final String _ref;
protected Dependent(String name) {
_ref = name;
}
@Override public boolean run(Object value) {
return true;
}
}
public class ColumnSelect extends Dependent {
protected ColumnSelect(String key) {
super(key);
}
}
public class VecSelect extends Dependent {
protected VecSelect(String key) {
super(key);
}
}
public class SpecialVecSelect extends VecSelect {
public boolean optional = false;
protected SpecialVecSelect(String key) { this(key,false);}
protected SpecialVecSelect(String key, boolean optional) {
super(key);
this.optional = optional;
}
}
public class VecClassSelect extends Dependent {
protected VecClassSelect(String key) {
super(key);
}
}
/**
* Specify how a column specifier field is parsed.
*/
public enum MultiVecSelectType {
/**
* Treat a token as a column name first; otherwise, if it looks like a positive integer,
* treat it as a 0-based index.
*/
NAMES_THEN_INDEXES,
/**
* Treat a token as a column name no matter what (even if it looks like it is an integer). This
* is used by the Web UI, which blindly specifies column names.
*/
NAMES_ONLY
}
public class MultiVecSelect extends Dependent {
boolean _namesOnly;
private void init(MultiVecSelectType selectType) {
_namesOnly = false;
switch( selectType ) {
case NAMES_THEN_INDEXES:
_namesOnly = false;
break;
case NAMES_ONLY:
_namesOnly = true;
break;
}
}
protected MultiVecSelect(String key) {
super(key);
init(MultiVecSelectType.NAMES_THEN_INDEXES);
}
protected MultiVecSelect(String key, MultiVecSelectType selectType) {
super(key);
init(selectType);
}
}
public class DoClassBoolean extends Dependent {
protected DoClassBoolean(String key) {
super(key);
}
}
public class DRFCopyDataBoolean extends Dependent {
protected DRFCopyDataBoolean(String key) { super(key); }
}
/**
* Iterates over fields and their annotations, and creates argument handlers.
*/
@Override protected void registered(API_VERSION version) {
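// A sketch of what this machinery does (the field name and annotation values below
// are hypothetical, for illustration only): a subclass field declared as
//   @API(help = "Learning rate", filter = Default.class, dmin = 0, dmax = 1)
//   public double learn_rate = 0.1;
// is picked up by the float/double branch below and exposed as a Real argument
// bounded by [0,1] with 0.1 as its default.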
try {
ArrayList<Class> classes = new ArrayList<Class>();
{
Class c = getClass();
while( c != null ) {
classes.add(c);
c = c.getSuperclass();
}
}
// Fields from parent classes first
Collections.reverse(classes);
ArrayList<Field> fields = new ArrayList<Field>();
for( Class c : classes )
for( Field field : c.getDeclaredFields() )
if( !Modifier.isStatic(field.getModifiers()) )
fields.add(field);
// TODO remove map, response field already processed specifically
HashMap<String, FrameClassVec> classVecs = new HashMap<String, FrameClassVec>();
for( Field f : fields ) {
Annotation[] as = f.getAnnotations();
API api = find(as, API.class);
if( api != null && Helper.isInput(api) ) {
f.setAccessible(true);
Object defaultValue = f.get(this);
// Create an Argument instance to reuse existing Web framework for now
Argument arg = null;
// Simplest case, filter is an Argument
if( Argument.class.isAssignableFrom(api.filter()) ) {
arg = (Argument) newInstance(api);
}
//
else if( ColumnSelect.class.isAssignableFrom(api.filter()) ) {
ColumnSelect name = (ColumnSelect) newInstance(api);
throw H2O.fail();
//H2OHexKey key = null;
//for( Argument a : _arguments )
// if( a instanceof H2OHexKey && name._ref.equals(((H2OHexKey) a)._name) )
// key = (H2OHexKey) a;
//arg = new HexAllColumnSelect(f.getName(), key);
}
//
else if( Dependent.class.isAssignableFrom(api.filter()) ) {
Dependent d = (Dependent) newInstance(api);
Argument ref = find(d._ref);
if( d instanceof VecSelect )
arg = new FrameKeyVec(f.getName(), (TypeaheadKey) ref, api.help(), api.required());
else if( d instanceof VecClassSelect ) {
arg = new FrameClassVec(f.getName(), (TypeaheadKey) ref);
classVecs.put(d._ref, (FrameClassVec) arg);
} else if( d instanceof MultiVecSelect ) {
FrameClassVec response = classVecs.get(d._ref);
boolean names = ((MultiVecSelect) d)._namesOnly;
arg = new FrameKeyMultiVec(f.getName(), (TypeaheadKey) ref, response, api.help(), names,filterNaCols());
} else if( d instanceof DoClassBoolean ) {
FrameClassVec response = classVecs.get(d._ref);
arg = new ClassifyBool(f.getName(), response);
} else if( d instanceof DRFCopyDataBoolean ) {
arg = new DRFCopyDataBool(f.getName(), (TypeaheadKey)ref);
}
}
// String
else if( f.getType() == String.class )
arg = new Str(f.getName(), (String) defaultValue);
// Real
else if( f.getType() == float.class || f.getType() == double.class ) {
double val = ((Number) defaultValue).doubleValue();
arg = new Real(f.getName(), api.required(), val, api.dmin(), api.dmax(), api.help());
}
// LongInt
else if( f.getType() == int.class || f.getType() == long.class ) {
long val = ((Number) defaultValue).longValue();
arg = new LongInt(f.getName(), api.required(), val, api.lmin(), api.lmax(), api.help());
}
// RSeq
else if( f.getType() == int[].class ) {
int[] val = (int[]) defaultValue;
double[] ds = null;
if( val != null ) {
ds = new double[val.length];
for( int i = 0; i < ds.length; i++ )
ds[i] = val[i];
}
arg = new RSeq(f.getName(), api.required(), new NumberSequence(ds, null, true), false, api.help());
}
// RSeq
else if( f.getType() == double[].class ) {
double[] val = (double[]) defaultValue;
arg = new RSeq(f.getName(), api.required(), new NumberSequence(val, null, false), false, api.help());
}
// RSeq float
else if( f.getType() == float[].class ) {
float[] val = (float[]) defaultValue;
arg = new RSeqFloat(f.getName(), api.required(), new NumberSequenceFloat(val, null, false), false, api.help());
}
// Bool
else if( f.getType() == boolean.class && api.filter() == Default.class ) {
boolean val = (Boolean) defaultValue;
arg = new Bool(f.getName(), val, api.help());
}
// Enum
else if( Enum.class.isAssignableFrom(f.getType()) ) {
Enum val = (Enum) defaultValue;
arg = new EnumArgument(f.getName(), val);
}
// Key
else if( f.getType() == Key.class ) {
TypeaheadKey t = new TypeaheadKey();
t._defaultValue = (Key) defaultValue;
arg = t;
}
// Generic Freezable field
else if( Freezable.class.isAssignableFrom(f.getType()) )
arg = new TypeaheadKey(f.getType(), api.required());
if( arg != null ) {
arg._name = f.getName();
arg._displayName = api.displayName().length() > 0 ? api.displayName() : null;
arg._required = api.required();
arg._field = f;
arg._hideInQuery = api.hide();
arg._gridable = api.gridable();
arg._mustExist = api.mustExist();
arg._validator = newValidator(api);
}
}
}
} catch( Exception e ) {
throw new RuntimeException(e);
}
}
final protected Argument find(String name) {
for( Argument a : _arguments )
if( name.equals(a._name) )
return a;
return null;
}
// Extracted in separate class as Weaver cannot load Request during boot
static final class Helper {
static boolean isInput(API api) {
return api.filter() != Filter.class || api.filters().length != 0;
}
}
private static <T> T find(Annotation[] as, Class<T> c) {
for( Annotation a : as )
if( a.annotationType() == c )
return (T) a;
return null;
}
private Filter newInstance(API api) throws Exception {
for( Constructor c : api.filter().getDeclaredConstructors() ) {
c.setAccessible(true);
Class[] ps = c.getParameterTypes();
if( ps.length == 1 && RequestArguments.class.isAssignableFrom(ps[0]) )
return (Filter) c.newInstance(this);
}
for( Constructor c : api.filter().getDeclaredConstructors() ) {
Class[] ps = c.getParameterTypes();
if( ps.length == 0 )
return (Filter) c.newInstance();
}
throw new Exception("Class " + api.filter().getName() + " must have an empty constructor");
}
private Validator newValidator(API api) throws Exception {
for( Constructor c : api.validator().getDeclaredConstructors() ) {
c.setAccessible(true);
Class[] ps = c.getParameterTypes();
return (Validator) c.newInstance();
}
return null;
}
// Create an instance per call instead of ThreadLocals
@Override protected Request create(Properties parms) {
Request2 request;
try {
request = getClass().newInstance();
request._arguments = _arguments;
request._parms = parms;
} catch( Exception e ) {
throw new RuntimeException(e);
}
return request;
}
public Response servePublic() {
return serve();
}
// Expand grid search related argument sets
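// For example (a sketch with hypothetical parameter names): submitting ntrees=10,20
// together with max_depth=5 expands into the cartesian product of all gridable
// values - two jobs here, (ntrees=10, max_depth=5) and (ntrees=20, max_depth=5) -
// all wrapped in a single GridSearch job.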
@Override protected NanoHTTPD.Response serveGrid(NanoHTTPD server, Properties parms, RequestType type) {
String[][] values = new String[_arguments.size()][];
boolean gridSearch = false;
for( int i = 0; i < _arguments.size(); i++ ) {
Argument arg = _arguments.get(i);
if( arg._gridable ) {
String value = _parms.getProperty(arg._name);
if( value != null ) {
// Skip grid expansion if the argument is an array, except when the value is an
// imbricated (nested) expression. A little hackish; waiting for a real parser.
boolean imbricated = value.contains("(");
if( !arg._field.getType().isArray() || imbricated ) {
values[i] = split(value);
if( values[i] != null && values[i].length > 1 )
gridSearch = true;
} else if (arg._field.getType().isArray() && !imbricated) { // Copy values which are arrays
values[i] = new String[] { value };
}
}
}
}
if( !gridSearch )
return superServeGrid(server, parms, type);
// Ignore destination key so that each job gets its own
_parms.remove("destination_key");
for( int i = 0; i < _arguments.size(); i++ )
if( _arguments.get(i)._name.equals("destination_key") )
values[i] = null;
// Iterate over all argument combinations
int[] counters = new int[values.length];
ArrayList<Job> jobs = new ArrayList<Job>();
for( ;; ) {
Job job = (Job) create(_parms);
Properties combination = new Properties();
for( int i = 0; i < values.length; i++ ) {
if( values[i] != null ) {
String value = values[i][counters[i]];
value = value.trim();
combination.setProperty(_arguments.get(i)._name, value);
_arguments.get(i).reset();
_arguments.get(i).check(job, value);
}
}
job._parms = combination;
jobs.add(job);
if( !increment(counters, values) )
break;
}
GridSearch grid = new GridSearch();
grid.jobs = jobs.toArray(new Job[jobs.size()]);
return grid.superServeGrid(server, parms, type);
}
// Splits one-level imbricated expressions like 4, 5, (2, 3), 7
// TODO: switch to real parser for unified imbricated argument sets, expressions etc.
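// For example (a sketch): split("4, 5, (2, 3), 7") yields {"4", "5", "2,3", "7"};
// tokens containing ':' are further expanded into explicit values by
// NumberSequence.parseGenerator inside addSplit below.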
public static String[] split(String value) {
String[] values = null;
value = value.trim();
StringTokenizer st = new StringTokenizer(value, ",()", true);
String s, current = "";
while( (s = getNextToken(st)) != null ) {
if( ",".equals(s) ) {
values = addSplit(values, current);
current = "";
} else if( "(".equals(s) ) {
while( !(")".equals((s = getNextToken(st)))) ) {
if( s == null )
throw new IllegalArgumentException("Missing closing parenthesis");
current += s;
}
values = addSplit(values, current);
current = "";
} else
current += s;
}
values = addSplit(values, current);
return values;
}
private static String[] addSplit(String[] values, String value) {
if( value.contains(":") ) {
double[] gen = NumberSequence.parseGenerator(value, false, 1);
for( double d : gen )
values = Utils.append(values, "" + d);
} else if( value.length() > 0 )
values = Utils.append(values, value);
return values;
}
private static String getNextToken(StringTokenizer st) {
while( st.hasMoreTokens() ) {
String tok = st.nextToken().trim();
if( tok.length() > 0 )
return tok;
}
return null;
}
public final NanoHTTPD.Response superServeGrid(NanoHTTPD server, Properties parms, RequestType type) {
return super.serveGrid(server, parms, type);
}
private static boolean increment(int[] counters, String[][] values) {
for( int i = 0; i < counters.length; i++ ) {
if( values[i] != null && counters[i] < values[i].length - 1 ) {
counters[i]++;
return true;
} else
counters[i] = 0;
}
return false;
}
/*
 * Cast incoming Argument values onto the corresponding typed fields.
 */
public void set(Argument arg, String input, Object value) {
if( arg._field.getType() != Key.class && value instanceof Key )
value = UKV.get((Key) value);
try {
//
if( arg._field.getType() == int.class && value instanceof Long )
value = ((Long) value).intValue();
//
else if( arg._field.getType() == float.class && value instanceof Double )
value = ((Double) value).floatValue();
//
else if( value instanceof NumberSequence ) {
double[] ds = ((NumberSequence) value)._arr;
if( arg._field.getType() == int[].class ) {
int[] is = new int[ds.length];
for( int i = 0; i < is.length; i++ )
is[i] = (int) ds[i];
value = is;
} else
value = ds;
}
else if( value instanceof NumberSequenceFloat ) {
float[] fs = ((NumberSequenceFloat) value)._arr;
if( arg._field.getType() == int[].class ) {
int[] is = new int[fs.length];
for( int i = 0; i < is.length; i++ )
is[i] = (int) fs[i];
value = is;
} else
value = fs;
}
arg._field.set(this, value);
} catch( Exception e ) {
throw new RuntimeException(e);
}
}
@Override public API_VERSION[] supportedVersions() {
return SUPPORTS_ONLY_V2;
}
public void fillResponseInfo(Response response) {
this.response_info = response.extractInfo();
}
public JsonObject toJSON() {
final String json = new String(writeJSON(new AutoBuffer()).buf());
if (json.length() == 0) return new JsonObject();
JsonObject jo = (JsonObject)new JsonParser().parse(json);
jo.remove("Request2");
jo.remove("response_info");
return jo;
}
public JsonObject toJSON(Set<String> whitelist) {
JsonObject jo = toJSON();
// Collect the keys to drop first; removing entries while iterating jo.entrySet()
// risks a ConcurrentModificationException.
ArrayList<String> drop = new ArrayList<String>();
for (Map.Entry<String , JsonElement> entry : jo.entrySet())
if (! whitelist.contains(entry.getKey()))
drop.add(entry.getKey());
for (String key : drop)
jo.remove(key);
return jo;
}
@Override
public String toString() {
return GSON_BUILDER.toJson(toJSON());
}
protected void logStart() {
Log.info("Building H2O " + this.getClass().getSimpleName() + " model with these parameters:");
for (String s : toString().split("\n")) Log.info(s);
}
public boolean makeJsonBox(StringBuilder sb) {
sb.append("<div class='pull-right'><a href='#' onclick='$(\"#params\").toggleClass(\"hide\");'"
+ " class='btn btn-inverse btn-mini'>Model Parameters</a></div><div class='hide' id='params'>"
+ "<pre><code class=\"language-json\">");
sb.append(toString());
sb.append("</code></pre></div>");
return true;
}
protected boolean filterNaCols(){return false;}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Scope.java
|
package water;
import java.util.HashSet;
import java.util.Stack;
// A "scope" for tracking Key lifetimes.
//
// A Scope defines a *SINGLE THREADED* local lifetime management context,
// stored in Thread Local Storage. Scopes can be explicitly entered or exited.
// User keys created by this thread are tracked, and deleted when the scope is
// exited. Since enter & exit are explicit, failure to exit means the Keys
// leak (there is no reliable thread-on-exit cleanup action). You must call
// Scope.exit() at some point. Only user keys & Vec keys are tracked.
//
// Scopes support nesting. Scopes support partial cleanup: you can list Keys
// you'd like to keep in the exit() call. These will be "bumped up" to the
// higher nested scope - or escaped and become untracked at the top-level.
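//
// A minimal usage sketch (the work in the middle is hypothetical):
//   Scope.enter();
//   try {
//     ... create temporary user keys / Vecs; they are tracked by this scope ...
//   } finally {
//     Scope.exit();   // deletes every key tracked since the matching enter()
//   }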
public class Scope {
// Thread-based Key lifetime tracking
static private final ThreadLocal<Scope> _scope = new ThreadLocal<Scope>() {
@Override protected Scope initialValue() { return new Scope(); }
};
private final Stack<HashSet<Key>> _keys = new Stack<HashSet<Key>>();
static public void enter() { _scope.get()._keys.push(new HashSet<Key>()); }
static public void exit () {
Stack<HashSet<Key>> keys = _scope.get()._keys;
if( keys.size()==0 ) return;
for( Key key : keys.pop() )
Lockable.delete(key);
}
static public Key exit(Key key) { throw H2O.unimpl(); }
static public Key[] exit(Key... key) { throw H2O.unimpl(); }
static public void track( Key k ) {
if( !k.user_allowed() && !k.isVec() ) return; // Not tracked
Scope scope = _scope.get(); // Pay the price of T.L.S. lookup
if( scope == null ) return; // Not tracking this thread
if( scope._keys.size() == 0 ) return; // Tracked in the past, but no scope now
scope._keys.peek().add(k); // Track key
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/TAtomic.java
|
package water;
import water.H2O.H2OCountedCompleter;
/**
* A typed atomic update.
*/
public abstract class TAtomic<T extends Iced> extends Atomic<TAtomic<T>> {
/** Atomically update an old value to a new one.
* @param old The old value; it may be null. It is a defensive copy.
* @return The new value; null if this atomic update no longer needs to be run
*/
public abstract T atomic(T old);
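// A minimal subclass sketch (MyCounter is a hypothetical Iced type with an int field
// _n; invoke(key) is assumed to run the update at the key's home node):
//   new TAtomic<MyCounter>() {
//     @Override public MyCounter atomic(MyCounter old) {
//       if( old == null ) old = new MyCounter();
//       old._n++;         // bump the counter
//       return old;       // returning null abandons the update
//     }
//   }.invoke(key);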
public TAtomic(){}
public TAtomic(H2OCountedCompleter completer){super(completer);}
@Override public Value atomic(Value val) {
T old = val == null ? null : (T)(val.get().clone());
T nnn = atomic(old);
// Atomic operation changes the data, so it cannot be performed over values persisted on a read-only data source
// as we would not be able to write those changes back.
assert val == null || val.onICE() || !val.isPersisted();
return nnn == null ? null : new Value(_key,nnn,val==null?Value.ICE:(byte)(val._persist&Value.BACKEND_MASK));
}
@Override public void onSuccess( Value old ) { onSuccess(old==null?null:(T)old.get()); }
// Upcast the old value to T
public void onSuccess( T old ) { }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/TCPReceiverThread.java
|
package water;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import water.util.Log;
/**
* The Thread that looks for TCP Cloud requests.
*
* This thread just spins on reading TCP requests from other Nodes.
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class TCPReceiverThread extends Thread {
public static ServerSocketChannel SOCK;
public TCPReceiverThread() { super("TCP-Accept"); }
// The Run Method.
// Started by main() on a single thread, this code manages reading TCP requests
@SuppressWarnings("resource")
public void run() {
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
ServerSocketChannel errsock = null;
boolean saw_error = false;
while( true ) {
try {
// Cleanup from any prior socket failures. Rare unless we're really sick.
if( errsock != null ) { // One time attempt a socket close
final ServerSocketChannel tmp2 = errsock; errsock = null;
tmp2.close(); // Could throw, but errsock cleared for next pass
}
if( saw_error ) Thread.sleep(100); // prevent denial-of-service endless socket-creates
saw_error = false;
// ---
// More common-case setup of a ServerSocket
if( SOCK == null ) {
SOCK = ServerSocketChannel.open();
SOCK.socket().setReceiveBufferSize(AutoBuffer.BBSIZE);
SOCK.socket().bind(H2O.SELF._key);
}
// Block for TCP connection and setup to read from it.
SocketChannel sock = SOCK.accept();
// Pass off the TCP connection to a separate reader thread
new TCPReaderThread(sock,new AutoBuffer(sock)).start();
} catch( java.nio.channels.AsynchronousCloseException ex ) {
break; // Socket closed for shutdown
} catch( Exception e ) {
// On any error from anybody, close all sockets & re-open
Log.err("Retrying after IO error on TCP port "+H2O.UDP_PORT+": ",e);
saw_error = true;
errsock = SOCK ; SOCK = null; // Signal error recovery on the next loop
}
}
}
// A private thread for reading from this open socket.
public static class TCPReaderThread extends Thread {
public SocketChannel _sock;
public AutoBuffer _ab;
public TCPReaderThread(SocketChannel sock, AutoBuffer ab) {
super("TCP-"+ab._h2o+"-"+(ab._h2o._tcp_readers++));
_sock = sock;
_ab = ab;
setPriority(MAX_PRIORITY-1);
}
public void run() {
while( true ) { // Loop, reading fresh TCP requests until the sender closes
try {
// Record the last time we heard from any given Node
_ab._h2o._last_heard_from = System.currentTimeMillis();
TimeLine.record_recv(_ab, true,0);
// Hand off the TCP connection to the proper handler
int ctrl = _ab.getCtrl();
int x = ctrl;
if( ctrl < 0 || ctrl >= UDP.udp.UDPS.length ) x = 0;
switch( UDP.udp.UDPS[x] ) {
case exec: RPC.remote_exec (_ab); break;
case ack: RPC.tcp_ack (_ab); break;
case timeline: TimeLine.tcp_call(_ab); break;
default: throw new RuntimeException("Unknown TCP Type: " + ctrl+" "+_ab._h2o);
}
} catch( java.nio.channels.AsynchronousCloseException ex ) {
break; // Socket closed for shutdown
} catch( Exception e ) {
// On any error from anybody, close everything
System.err.println("IO error");
e.printStackTrace();
Log.err("IO error on TCP port "+H2O.UDP_PORT+": ",e);
break;
}
// Reuse open sockets for the next task
try {
if( !_sock.isOpen() ) break;
_ab = new AutoBuffer(_sock);
} catch( Exception e ) {
// Exceptions here are *normal*, this is an idle TCP connection and
// either the OS can time it out, or the cloud might shutdown. We
// don't care what happens to this socket.
break; // Ignore all errors; silently die if socket is closed
}
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/TaskGetKey.java
|
package water;
import water.DTask;
import water.nbhm.NonBlockingHashMap;
/**
* Get the given key from the remote node
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class TaskGetKey extends DTask<TaskGetKey> {
Key _key; // Set by client/sender JVM, cleared by server JVM
Value _val; // Set by server JVM, read by client JVM
transient Key _xkey; // Set by client, read by client
transient H2ONode _h2o; // Set by server JVM, read by server JVM on ACKACK
final byte _priority;
// Unify multiple Key/Value fetches for the same Key from the same Node at
// the "same time". Large key fetches are slow, and we'll get multiple
// requests close in time. Batch them up.
public static final NonBlockingHashMap<Key,RPC<TaskGetKey>> TGKS = new NonBlockingHashMap();
// Get a value from a named remote node
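// A usage sketch (homeNode and priority are supplied by the caller; the key is
// expected to live on homeNode, per the assert in dinvoke below):
//   Value v = TaskGetKey.get(homeNode, key, priority);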
public static Value get( H2ONode target, Key key, int priority ) {
RPC<TaskGetKey> rpc, old;
while( true ) { // Repeat until we get a unique TGK installed per key
// Do we have an old TaskGetKey in-progress?
rpc = TGKS.get(key);
if( rpc != null && rpc._dt._priority >= priority )
break;
old = rpc;
// Make a new TGK.
rpc = new RPC(target,new TaskGetKey(key,priority),1.0f);
if( TGKS.putIfMatchUnlocked(key,rpc,old) == old ) {
rpc.setTaskNum().call(); // Start the op
break; // Successful install of a fresh RPC
}
}
Value val = rpc.get()._val; // Block for, then fetch out the result
TGKS.putIfMatchUnlocked(key,null,rpc); // Clear from cache
return val;
}
private TaskGetKey( Key key, int priority ) { _key = _xkey = key; _priority = (byte)priority; }
// Top-level non-recursive invoke
@Override public void dinvoke( H2ONode sender ) {
_h2o = sender;
Key k = _key;
_key = null; // Not part of the return result
assert k.home(); // Gets are always from home (unless we do replication)
// Shipping a result? Track replicas so we can invalidate. There's a
// narrow race on a moving K/V mapping tracking this Value just as it gets
// deleted - in which case, simply retry for another Value.
do _val = H2O.get(k); // The return result
while( _val != null && !_val.setReplica(sender) );
tryComplete();
}
@Override public void compute2() { throw H2O.unimpl(); }
// Received an ACK; executes on the node asking&receiving the Value
@Override public void onAck() {
if( _val != null ) { // Set transient fields after deserializing
assert !_xkey.home() && _val._key == null;
_val._key = _xkey;
}
// Now update the local store, caching the result.
// We only started down the TGK path because we missed locally, so we only
// expect to find a NULL in the local store. If somebody else installed
// another value (e.g. a racing TGK, or racing local Put) this value must
// be more recent than our NULL - but is UNORDERED relative to the Value
// returned from the Home. We'll take the local Value to preserve ordering
// and rely on invalidates from Home to force refreshes as needed.
// Hence we can do a blind putIfMatch here over a null or empty Value
// If it fails, what is there is also the TGK result.
Value old = H2O.raw_get(_xkey);
if( old != null && !old.isEmpty() ) old=null;
Value res = H2O.putIfMatch(_xkey,_val,old);
if( res != old ) _val = res;
}
// Received an ACKACK; executes on the node sending the Value
@Override public void onAckAck() {
if( _val != null ) _val.lowerActiveGetCount(_h2o);
}
@Override public byte priority() { return _priority; }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/TaskInvalidateKey.java
|
package water;
import java.util.concurrent.Future;
public class TaskInvalidateKey extends TaskPutKey {
private TaskInvalidateKey(Key key){super(key,null);}
@Override public byte priority(){return H2O.INVALIDATE_PRIORITY;}
static void invalidate( H2ONode h2o, Key key, Futures fs ) {
Future f = RPC.call(h2o,new TaskInvalidateKey(key));
if( fs != null ) fs.add(f);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/TaskPutKey.java
|
package water;
import java.util.concurrent.Future;
import water.DTask;
/**
* Push the given key to the remote node
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class TaskPutKey extends DTask<TaskPutKey> {
Key _key;
Value _val;
boolean _dontCache; // delete cached value on the sender's side?
transient Value _xval;
transient Key _xkey;
static void put( H2ONode h2o, Key key, Value val, Futures fs, boolean dontCache) {
Future f = RPC.call(h2o,new TaskPutKey(key,val,dontCache));
if( fs != null ) fs.add(f);
}
protected TaskPutKey( Key key, Value val ) { this(key,val,false);}
protected TaskPutKey( Key key, Value val, boolean removeCache ) { _xkey = _key = key; _xval = _val = val; _dontCache = removeCache;}
@Override public void dinvoke( H2ONode sender ) {
assert _key.home() || _val==null; // Only PUT to home for keys, or remote invalidation from home
Paxos.lockCloud();
// Initialize Value for having a single known replica (the sender)
if( _val != null ) _val.initReplicaHome(sender,_key);
// Spin, until we update something.
Value old = H2O.raw_get(_key); // Raw-get: do not lazy-manifest if overwriting
while( H2O.putIfMatch(_key,_val,old) != old )
old = H2O.raw_get(_key); // Repeat until we update something.
// Invalidate remote caches. Block, so that all invalidates are done
// before we return to the remote caller.
if( _key.home() && old != null )
old.lockAndInvalidate(sender,new Futures()).blockForPending();
// No return result
_key = null;
_val = null;
tryComplete();
}
@Override public void compute2() { throw H2O.unimpl(); }
// Received an ACK
@Override public void onAck() {
// remove local cache but NOT in case it is already on disk
// (ie memory can be reclaimed and we assume we have plenty of disk space)
if( _dontCache && !_xval.isPersisted() ) H2O.putIfMatch(_xkey, null, _xval);
if( _xval != null ) _xval.completeRemotePut();
}
@Override public byte priority() {
return H2O.PUT_KEY_PRIORITY;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/TaskSendKey.java
|
package water;
import water.DTask;
import water.nbhm.NonBlockingHashMap;
/**
* Send a Key from its home node to some remote node via a "push"
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class TaskSendKey extends DTask<TaskSendKey> {
Key _key; // Set by client/sender JVM, cleared by server JVM
final int _max;
final short _type;
final byte _be;
protected TaskSendKey( Key key, Value val ) { _key = key; _max = val._max; _type = (short)val.type(); _be = val.backend(); }
@Override public void dinvoke( H2ONode sender ) {
assert !_key.home(); // No point in sending Keys to home
// Update ONLY if there is not something there already.
// Update only a bare Value, with no backing data.
// Real data can be fetched on demand.
Value val = new Value(_key,_max,null,_type,_be);
Value old = H2O.raw_get(_key);
while( old == null && H2O.putIfMatch(_key,val,null) != null )
old = H2O.raw_get(_key);
_key = null; // No return result
tryComplete();
}
@Override public void compute2() { throw H2O.unimpl(); }
@Override public byte priority() { return H2O.GUI_PRIORITY; }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/TimeLine.java
|
package water;
import java.net.InetAddress;
import java.net.UnknownHostException;
import sun.misc.Unsafe;
import water.nbhm.UtilUnsafe;
/**
* Maintain a VERY efficient list of events in the system. This must be VERY
* cheap to call, as it will get called a lot. On demand, we can snapshot this
* list, gather all other lists from all other (responsive) Nodes, and build a
* whole-Cloud timeline for dumping.
*
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class TimeLine extends UDP {
private static final Unsafe _unsafe = UtilUnsafe.getUnsafe();
// The TimeLine buffer.
// The TimeLine buffer is full of Events; each event has a timestamp and some
// event bytes. The buffer is a classic ring buffer; we toss away older
// events. We snapshot the buffer by replacing it with a fresh array. The
// index of the next free slot is kept in the 1st long of the array, and
// there are MAX_EVENTS (a power of 2) more slots.
// A TimeLine event is:
// - Milliseconds since JVM boot; 4 bytes
// - IP4 of send/recv
// - Sys.Nano, 8 bytes (the low 3 bits are reused as flags)
// - Nano low bit is 1 if the packet was dropped, next bit is 0 for send, 1 for recv, next bit is 0 for UDP, 1 for TCP
// - 16 bytes of payload; 1st byte is a udp_type opcode, next 4 bytes are typically task#
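// A minimal decoding sketch using the public accessors below (printMyTimeLine at the
// bottom of this class gives a fuller example):
//   long[] tl = TimeLine.snapshot();
//   for( int i = 0; i < TimeLine.length(); i++ )
//     if( !TimeLine.isEmpty(tl,i) )
//       System.out.println(TimeLine.ms(tl,i)+"ms "+TimeLine.inet(tl,i)
//                          +(TimeLine.send_recv(tl,i)==0 ? " SEND" : " RECV"));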
public static final int MAX_EVENTS=2048; // Power-of-2, please
static final int WORDS_PER_EVENT=4;
static final long[] TIMELINE = new long[MAX_EVENTS*WORDS_PER_EVENT+1];
static long JVM_BOOT_MSEC = System.currentTimeMillis();
// Snapshot and return the current TIMELINE array
public static long[] snapshot() { return TIMELINE.clone(); }
// CAS access to the TIMELINE array
private static final int _Lbase = _unsafe.arrayBaseOffset(long[].class);
private static final int _Lscale = _unsafe.arrayIndexScale(long[].class);
private static long rawIndex(long[] ary, int i) {
assert i >= 0 && i < ary.length;
return _Lbase + i * _Lscale;
}
private final static boolean CAS( long[] A, int idx, long old, long nnn ) {
return _unsafe.compareAndSwapLong( A, rawIndex(A,idx), old, nnn );
}
// Return the next index into the TIMELINE array
private final static int next_idx( long [] tl ) {
// Spin until we can CAS-acquire a fresh index
while( true ) {
int oldidx = (int)tl[0];
int newidx = (oldidx+1)&(MAX_EVENTS-1);
if( CAS( tl, 0, oldidx, newidx ) )
return oldidx;
}
}
// Record 1 event, the first 16 bytes of this buffer. This is expected to be
// a high-volume multi-thread operation so needs to be fast. "sr" is send-
// receive and must be either 0 or 1. "drop" is whether or not the UDP
// packet is dropped as-if a network drop, and must be either 0 (kept) or 2
// (dropped).
private static void record2( H2ONode h2o, long ns, boolean tcp, int sr, int drop, long b0, long b8 ) {
final long ms = System.currentTimeMillis(); // Read first, in case we're slow storing values
long deltams = ms-JVM_BOOT_MSEC;
assert deltams < 0x0FFFFFFFFL; // No daily overflow
final long[] tl = TIMELINE; // Read once, in case the whole array shifts out from under us
final int idx = next_idx(tl); // Next free index
tl[idx*WORDS_PER_EVENT+0+1] = (deltams)<<32 | (h2o.ip4()&0x0FFFFFFFFL);
tl[idx*WORDS_PER_EVENT+1+1] = (ns&~7)| (tcp?4:0)|sr|drop;
// More complexities: record the *receiver* port in the timeline - but not
// in the outgoing UDP packet! The outgoing packet always has the sender's
// port (that's us!) - which means the recorded timeline packet normally
// never carries the *receiver* port - meaning the sender's timeline does
// not record who he sent to! With this hack the Timeline record always
// contains the info about "the other guy": inet+port for the receiver in
// the sender's Timeline, and vice-versa for the receiver's Timeline.
if( sr==0 ) b0 = (b0 & ~0xFFFF00) | (h2o._key.udp_port()<<8);
tl[idx*WORDS_PER_EVENT+2+1] = b0;
tl[idx*WORDS_PER_EVENT+3+1] = b8;
}
private static void record1( AutoBuffer b, boolean tcp, int sr, int drop) {
if( b.position() < 16 ) b.position(16);
final long ns = System.nanoTime();
record2(b._h2o, ns, tcp,sr,drop,b.get8(0),b.get8(8));
}
public static void record_send( AutoBuffer b, boolean tcp) { record1(b,tcp,0, 0); }
public static void record_recv( AutoBuffer b, boolean tcp, int drop) { record1(b,tcp,1,drop); }
// Record a completed I/O event. The nanosecond time slot is actually nano's-blocked-on-io
public static void record_IOclose( AutoBuffer b, int flavor ) {
H2ONode h2o = b._h2o==null ? H2O.SELF : b._h2o;
// First long word going out has sender-port and a 'bad' control packet
long b0 = UDP.udp.i_o.ordinal(); // Special flag to indicate io-record and not a rpc-record
b0 |= H2O.SELF._key.udp_port()<<8;
b0 |= flavor<<24; // I/O flavor; one of the Value.persist backends
long iotime = b._time_start_ms > 0 ? (b._time_close_ms - b._time_start_ms) : 0;
b0 |= iotime<<32; // msec from start-to-finish, including non-i/o overheads
long b8 = b._size; // bytes transferred in this I/O
long ns = b._time_io_ns; // nano's blocked doing I/O
record2(h2o,ns,true,b.readMode()?1:0,0,b0,b8);
}
/* Record an I/O call without using an AutoBuffer / NIO.
 * Used by e.g. HDFS & S3.
 *
 * @param start_ns - nanosecond timestamp taken when the blocking I/O call started
 * @param start_io_ms - millisecond timestamp taken when the overall I/O started
 * @param r_w - 1 for read, 0 for write
 * @param size - bytes read/written
 * @param flavor - Value.HDFS or Value.S3
 */
public static void record_IOclose( long start_ns, long start_io_ms, int r_w, long size, int flavor ) {
long block_ns = System.nanoTime() - start_ns;
long io_ms = System.currentTimeMillis() - start_io_ms;
// First long word going out has sender-port and a 'bad' control packet
long b0 = UDP.udp.i_o.ordinal(); // Special flag to indicate io-record and not a rpc-record
b0 |= H2O.SELF._key.udp_port()<<8;
b0 |= flavor<<24; // I/O flavor; one of the Value.persist backends
b0 |= io_ms<<32; // msec from start-to-finish, including non-i/o overheads
record2(H2O.SELF,block_ns,true,r_w,0,b0,size);
}
// Accessors, for TimeLines that come from all over the system
public static int length( ) { return MAX_EVENTS; }
// Internal array math so we can keep layout private
private static int idx(long[] tl, int i ) { return (((int)tl[0]+i)&(MAX_EVENTS-1))*WORDS_PER_EVENT+1; }
// That first long is complex: compressed CTM and IP4
private static long x0( long[] tl, int idx ) { return tl[idx(tl,idx)+0]; }
// ms since boot of JVM
public static long ms( long[] tl, int idx ) { return x0(tl,idx)>>>32; }
public static InetAddress inet( long[] tl, int idx ) {
int adr = (int)x0(tl,idx);
byte[] ip4 = new byte[4];
ip4[0] = (byte)(adr>> 0);
ip4[1] = (byte)(adr>> 8);
ip4[2] = (byte)(adr>>16);
ip4[3] = (byte)(adr>>24);
try { return InetAddress.getByAddress(ip4); }
catch( UnknownHostException e ) { }
return null;
}
// That 2nd long is nanosec, plus the low bit is send/recv & 2nd low is drop
public static long ns( long[] tl, int idx ) { return tl[idx(tl,idx)+1]; }
// Returns zero for send, 1 for recv
public static int send_recv( long[] tl, int idx ) { return (int)(ns(tl,idx)&1); }
// Returns zero for kept, 2 for dropped
public static int dropped ( long[] tl, int idx ) { return (int)(ns(tl,idx)&2); }
// 16 bytes of payload
public static long l0( long[] tl, int idx ) { return tl[idx(tl,idx)+2]; }
public static long l8( long[] tl, int idx ) { return tl[idx(tl,idx)+3]; }
public static boolean isEmpty( long[] tl, int idx ) { return tl[idx(tl,idx)+0]==0; }
// Take a system-wide snapshot. Return an array, indexed by H2ONode _idx,
// containing that Node's snapshot. Try to get all the snapshots as close as
// possible to the same point in time.
static long[][] SNAPSHOT;
static long TIME_LAST_SNAPSHOT = 1;
static public H2O CLOUD; // Cloud instance being snapshotted
static public long[][] system_snapshot() {
// Now spin-wait until we see all snapshots check in.
// Be atomic about it.
synchronized( TimeLine.class ) {
// First see if we have a recent snapshot already.
long now = System.currentTimeMillis();
if( now - TIME_LAST_SNAPSHOT < 3*1000 )
return SNAPSHOT; // Use the recent snapshot
// A new snapshot is being built?
if( TIME_LAST_SNAPSHOT != 0 ) {
TIME_LAST_SNAPSHOT = 0; // Only fire off the UDP packet once; flag it
// Make a new empty snapshot
CLOUD = H2O.CLOUD;
SNAPSHOT = new long[CLOUD.size()][];
// Broadcast a UDP packet, with the hopes of getting all SnapShots as close
// as possible to the same point in time.
new AutoBuffer(H2O.SELF).putUdp(udp.timeline).close();
}
// Spin until all snapshots appear
while( true ) {
boolean done = true;
for( int i=0; i<CLOUD._memary.length; i++ )
if( SNAPSHOT[i] == null )
done = false;
if( done ) break;
try { TimeLine.class.wait(); } catch( InterruptedException e ) {}
}
TIME_LAST_SNAPSHOT = System.currentTimeMillis();
return SNAPSHOT;
}
}
// Send our most recent timeline to the remote via TCP
@Override public AutoBuffer call( AutoBuffer ab ) {
long[] a = snapshot();
if( ab._h2o == H2O.SELF ) {
synchronized(TimeLine.class) {
for( int i=0; i<CLOUD._memary.length; i++ )
if( CLOUD._memary[i]==H2O.SELF )
SNAPSHOT[i] = a;
TimeLine.class.notify();
}
return null; // No I/O needed for my own snapshot
}
// Send timeline to remote
while( true ) {
AutoBuffer tab = new AutoBuffer(ab._h2o);
try {
tab.putUdp(UDP.udp.timeline).putA8(a).close();
return null;
} catch( AutoBuffer.AutoBufferException tue ) {
tab.close();
}
}
}
// Receive a remote timeline
static void tcp_call( final AutoBuffer ab ) {
int port = ab.getPort();
long[] snap = ab.getA8();
int idx = CLOUD.nidx(ab._h2o);
if( idx >= 0 && idx < SNAPSHOT.length )
SNAPSHOT[idx] = snap; // Ignore out-of-cloud timelines
ab.close();
synchronized(TimeLine.class) { TimeLine.class.notify(); }
}
public String print16( AutoBuffer ab ) { return ""; } // no extra info in a timeline packet
/**
* Only for debugging.
* Prints the local timeline to stderr.
*
* To be used in case of an error, when the global timeline cannot be relied upon as we might not be able to talk to other nodes.
*/
public static void printMyTimeLine(){
long [] s = TimeLine.snapshot();
System.err.println("===================================<TIMELINE>==============================================");
for(int i = 0; i < TimeLine.length(); ++i) {
long lo = TimeLine.l0(s, i),hi = TimeLine.l8(s, i);
int port = (int)((lo >> 8) & 0xFFFF);
String op = TimeLine.send_recv(s,i) == 0?"SEND":"RECV";
if(!TimeLine.isEmpty(s, i) && (lo & 0xFF) == UDP.udp.exec.ordinal())
System.err.println(TimeLine.ms(s, i) + ": " + op + " " + (((TimeLine.ns(s, i) & 4) != 0)?"TCP":"UDP") + TimeLine.inet(s, i) + ":" + port + " | " + UDP.printx16(lo, hi));
}
System.err.println("===========================================================================================");
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Timer.java
|
package water;
import java.text.SimpleDateFormat;
import java.util.Date;
/**
* Simple Timer class.
**/
public class Timer {
/** SimpleDateFormat is not thread-safe. To avoid constructing instances repeatedly we store them in
 * thread-local variables. */
private static final ThreadLocal<SimpleDateFormat> dateFormat = new ThreadLocal<SimpleDateFormat>() {
@Override protected SimpleDateFormat initialValue() {
SimpleDateFormat format = new SimpleDateFormat("dd-MMM HH:mm:ss.SSS");
return format;
}
};
private static final ThreadLocal<SimpleDateFormat> shortFormat = new ThreadLocal<SimpleDateFormat>() {
@Override protected SimpleDateFormat initialValue() {
SimpleDateFormat format = new SimpleDateFormat("HH:mm:ss.SSS");
return format;
}
};
public final long _start = System.currentTimeMillis();
public final long _nanos = System.nanoTime();
/**Return the difference between when the timer was created and the current time. */
public long time() { return System.currentTimeMillis() - _start; }
public long nanos(){ return System.nanoTime() - _nanos; }
/**Return the difference between when the timer was created and the current time as a
* string along with the time of creation in date format. */
@Override public String toString() {
final long now = System.currentTimeMillis();
return PrettyPrint.msecs(now - _start, false) + " (Wall: " + dateFormat.get().format(new Date(now)) + ") ";
}
/** return the start time of this timer.**/
public String startAsString() { return dateFormat.get().format(new Date(_start)); }
/** return the start time of this timer.**/
public String startAsShortString() { return shortFormat.get().format(new Date(_start)); }
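// A typical usage sketch (doWork() is a placeholder for the code being timed):
//   Timer t = new Timer();
//   doWork();
//   Log.info("work took " + t);   // elapsed time plus the current wall-clock time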
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/TypeMap.java
|
package water;
import java.util.Arrays;
import water.nbhm.NonBlockingHashMap;
import water.util.Log;
import water.H2O;
public class TypeMap {
static public final short NULL = (short) -1;
static public final short PRIM_B = 1;
static public final short C1NCHUNK;
static public final short FRAME;
static final public String BOOTSTRAP_CLASSES[] = {
" BAD",
"[B",
"water.FetchClazz", // used to fetch IDs from leader
"water.FetchId", // used to fetch IDs from leader
"water.ValueArray", // used in TypeaheadKeys
"water.fvec.C1NChunk",// used as constant in parser
"water.fvec.Frame", // used in TypeaheadKeys & Exec2
"water.TaskPutKey", // Needed to write that first Key
"water.Key", // Needed to write that first Key
"water.Value", // Needed to write that first Key
"water.TaskGetKey", // Read that first Key
"water.Job$List", // First Key which locks the cloud for all JUnit tests
"water.DException", // Do not fetch clazz during distributed exception reporting
};
// String -> ID mapping
static private final NonBlockingHashMap<String, Integer> MAP = new NonBlockingHashMap();
// ID -> String mapping
static private String[] CLAZZES;
// ID -> pre-allocated Golden Instance of class
static private Freezable[] GOLD;
// Unique ids
static private int IDS;
static {
CLAZZES = BOOTSTRAP_CLASSES;
GOLD = new Freezable[BOOTSTRAP_CLASSES.length];
int id=0;
for( String s : CLAZZES )
MAP.put(s,id++);
IDS = id;
C1NCHUNK = (short)onIce("water.fvec.C1NChunk");
FRAME = (short)onIce("water.fvec.Frame");
}
// During first Icing, get a globally unique class ID for a className
static public int onIce(String className) {
Integer I = MAP.get(className);
if( I != null ) return I;
// Need to install a new cloud-wide type ID for className
assert H2O.CLOUD.size() > 0 : "No cloud when getting type id for "+className;
int id = -1;
if( H2O.CLOUD.leader() != H2O.SELF ) // Not leader?
id = FetchId.fetchId(className);
return install(className,id);
}
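// A round-trip sketch (assumes a formed cloud, since fresh ids are negotiated through
// the leader):
//   int id = TypeMap.onIce("water.Value");   // same id on every node
//   assert TypeMap.className(id).equals("water.Value");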
// Install the type mapping under lock, and grow all the arrays as needed.
// The grow-step is not obviously race-safe: readers of all the arrays will
// get either the old or new arrays. However readers only use smaller,
// already-installed type ids, and these will work fine in either old or new arrays.
synchronized static private int install( String className, int id ) {
Paxos.lockCloud();
if( id == -1 ) id = IDS++; // Leader will get an ID under lock
MAP.put(className,id); // No race on insert, since under lock
// Expand lists to handle new ID, as needed
if( id >= CLAZZES.length ) CLAZZES = Arrays.copyOf(CLAZZES,Math.max(CLAZZES.length<<1,id+1));
if( id >= GOLD .length ) GOLD = Arrays.copyOf(GOLD ,Math.max(CLAZZES.length<<1,id+1));
CLAZZES[id] = className;
return id;
}
// During deserialization, figure out the mapping from a type ID to a type
// String (and Class). Mostly forced into another class to avoid circular
// class-loading issues.
static public void loadId(int id) {
assert H2O.CLOUD.leader() != H2O.SELF; // Leaders always have the latest mapping already
install( FetchClazz.fetchClazz(id), id );
}
static public Iced newInstance(int id) {
if( id >= CLAZZES.length || CLAZZES[id] == null ) loadId(id);
Iced f = (Iced) GOLD[id];
if( f == null ) {
try { GOLD[id] = f = (Iced) Class.forName(CLAZZES[id]).newInstance(); }
catch( Exception e ) { Log.err("Failed newinstance for class "+className(id)); throw Log.errRTExcept(e); }
}
return f.newInstance();
}
static public Freezable newFreezable(int id) {
assert id >= 0 : "Bad type id "+id;
if( id >= CLAZZES.length || CLAZZES[id] == null ) loadId(id);
Freezable f = GOLD[id];
if( f == null ) {
try { GOLD[id] = f = (Freezable) Class.forName(CLAZZES[id]).newInstance(); }
catch( Exception e ) {
throw Log.errRTExcept(e);
}
}
return f.newInstance();
}
static public String className(int id) {
if( id >= CLAZZES.length || CLAZZES[id] == null ) loadId(id);
assert CLAZZES[id] != null : "No class matching id "+id;
return CLAZZES[id];
}
static public Class clazz(int id) {
if( id >= CLAZZES.length || CLAZZES[id] == null ) loadId(id);
if( GOLD[id] == null ) newInstance(id);
return GOLD[id].getClass();
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/UDP.java
|
package water;
import sun.misc.Unsafe;
import water.nbhm.UtilUnsafe;
/**
* Do Something with an incoming UDP packet
*
* Classic Single Abstract Method pattern.
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public abstract class UDP {
// Types of UDP packets I grok
public static enum udp {
bad(false,null), // Do not use the zero packet, too easy to make mistakes
// Some health-related packet types. These packets are all stateless, in
// that we do not need to send any replies back.
heartbeat ( true, new UDPHeartbeat()),
rebooted ( true, new UDPRebooted()), // This node has rebooted recently
timeline (false, new TimeLine()), // Get timeline dumps from across the Cloud
// All my *reliable* tasks (below), are sent to remote nodes who then ACK
// back an answer. To be reliable, I might send the TASK multiple times.
// To get a reliable answer, the remote might send me multiple ACKs with
// the same answer every time. When does the remote know it can quit
// tracking reply ACKs? When it receives an ACKACK.
ackack(false,new UDPAckAck()), // a generic ACKACK for a UDP async task
// In order to unpack an ACK (which contains an arbitrary returned POJO)
// the receiver might need to fetch an id/class mapping from the leader -
// while inside an ACK-priority thread holding onto lots of resources
// (e.g. TCP channel). Allow the fetch to complete on a higher priority
// thread.
fetchack(false,new UDPFetchAck()), // a class/id fetch ACK
ack (false,new UDPAck ()), // a generic ACK for a UDP async task
// These packets all imply some sort of request/response handshake.
// We'll hang on to these packets; filter out dup sends and auto-reply
// identical result ACK packets.
exec(false,new RPC.RemoteHandler()), // Remote hi-q execution request
i_o (false,new UDP.IO_record()); // Only used to profile I/O
final UDP _udp; // The Callable S.A.M. instance
final boolean _paxos; // Ignore (or not) packets from outside the Cloud
udp( boolean paxos, UDP udp ) { _paxos = paxos; _udp = udp; }
static udp[] UDPS = values();
}
public static udp getUdp(int id){return udp.UDPS[id];}
// Handle an incoming I/O transaction, probably from a UDP packet. The
// returned AutoBuffer will be closed(). If the returned buffer is not the
// passed-in buffer, the call() method must close its AutoBuffer arg.
abstract AutoBuffer call(AutoBuffer ab);
// Pretty-print bytes 1-15; byte 0 is the udp_type enum
static final char[] cs = new char[32];
static char hex(int x) { x &= 0xf; return (char)(x+((x<10)?'0':('a'-10))); }
public String print16( AutoBuffer ab ) {
for( int i=0; i<16; i++ ) {
int b = ab.get1();
cs[(i<<1)+0 ] = hex(b>>4);
cs[(i<<1)+1 ] = hex(b );
}
return new String(cs);
}
// Dispatch on the enum opcode and return a pretty string
static private final byte[] pbuf = new byte[16];
static public String printx16( long lo, long hi ) {
set8(pbuf,0,lo);
set8(pbuf,8,hi);
return udp.UDPS[(int)(lo&0xFF)]._udp.print16(new AutoBuffer(pbuf));
}
// ---
private static final Unsafe _unsafe = UtilUnsafe.getUnsafe();
private static final long _Bbase = _unsafe.arrayBaseOffset(byte[].class);
public static int get2 ( byte[] buf, int off ) { return _unsafe.getShort (buf, _Bbase+off); }
public static int get2u( byte[] buf, int off ) { return _unsafe.getChar (buf, _Bbase+off); }
public static int get4 ( byte[] buf, int off ) { return _unsafe.getInt (buf, _Bbase+off); }
public static long get8 ( byte[] buf, int off ) { return _unsafe.getLong (buf, _Bbase+off); }
public static float get4f( byte[] buf, int off ) { return _unsafe.getFloat (buf, _Bbase+off); }
public static double get8d( byte[] buf, int off ) { return _unsafe.getDouble(buf, _Bbase+off); }
public static int set2 (byte[] buf, int off, short x ) {_unsafe.putShort (buf, _Bbase+off, x); return 2;}
public static int set4 (byte[] buf, int off, int x ) {_unsafe.putInt (buf, _Bbase+off, x); return 4;}
public static int set4f(byte[] buf, int off, float f ) {_unsafe.putFloat (buf, _Bbase+off, f); return 4;}
public static int set8 (byte[] buf, int off, long x ) {_unsafe.putLong (buf, _Bbase+off, x); return 8;}
public static int set8d(byte[] buf, int off, double x) {_unsafe.putDouble(buf, _Bbase+off, x); return 8;}
private static class IO_record extends UDP {
public AutoBuffer call(AutoBuffer ab) { throw H2O.unimpl(); }
public String print16( AutoBuffer ab ) {
int flavor = ab.get1(3);
int iotime = ab.get4(4);
int size = ab.get4(8);
return "I/O "+Value.nameOfPersist(flavor)+" "+iotime+"ms "+size+"b";
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/UDPAck.java
|
package water;
/**
* A remote task request has just returned an ACK with answer
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class UDPAck extends UDP {
// Received an ACK for a remote Task. Ping the task.
AutoBuffer call(AutoBuffer ab) {
int tnum = ab.getTask();
RPC<?> t = ab._h2o.taskGet(tnum);
assert t== null || t._tasknum == tnum;
if( t != null ) t.response(ab); // Do the 2nd half of this task, includes ACKACK
else ab.close();
// Else forgotten task, but still must ACKACK
return new AutoBuffer(ab._h2o).putTask(UDP.udp.ackack.ordinal(),tnum);
}
// Pretty-print bytes 1-15; byte 0 is the udp_type enum
public String print16( AutoBuffer b ) { return "task# "+b.getTask(); }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/UDPAckAck.java
|
package water;
/**
* A task initiator has his response; we can quit sending him ACKs.
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class UDPAckAck extends UDP {
// Received an ACKACK for a remote Task. Drop the task tracking
@Override AutoBuffer call(AutoBuffer ab) {
ab._h2o.remove_task_tracking(ab.getTask());
return ab;
}
// Pretty-print bytes 1-15; byte 0 is the udp_type enum
@Override public String print16( AutoBuffer ab ) { return "task# "+ab.getTask(); }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/UDPBrokenPacket.java
|
package water;
/**
* An unexpected UDP packet
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class UDPBrokenPacket extends UDP {
@Override AutoBuffer call(AutoBuffer ab) {
throw new RuntimeException("I really should complain more about this broken packet "+ab);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/UDPDropTest.java
|
package water;
import jsr166y.CountedCompleter;
import jsr166y.ForkJoinPool;
import jsr166y.ForkJoinTask;
import jsr166y.RecursiveAction;
import water.H2O.H2OCountedCompleter;
import water.api.DocGen;
import water.fvec.Vec;
import water.util.Log;
import water.util.Utils;
import java.util.Arrays;
import java.util.Random;
import java.util.concurrent.atomic.AtomicBoolean;
public class UDPDropTest extends Func {
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help = "Message sizes", filter = Default.class, json=true)
public int[] msg_sizes = new int[]{1,32,64,128,256,512,1024,AutoBuffer.MTU-100}; //INPUT
@API(help = "Nodes", json=true)
public String[] nodes; //OUTPUT
@API(help = "Drop rates between each (ordered) pair of nodes for different message sizes", json = true)
public UDPDropMatrix [] dropRates;
private static class UDPPing extends DTask<UDPPing>{
boolean _done;
int _retries = -1;
final long _t1;
long _t2;
byte [] _payload;
public UDPPing(){_t1 = -1;}
public UDPPing(int sz){
assert sz <= AutoBuffer.MTU:"msg size does not fit into UDP";
_payload = MemoryManager.malloc1(sz);
Random rnd = new Random();
for(int i = 0; i < _payload.length; ++i)
_payload[i] = (byte)rnd.nextInt();
_t1 = System.currentTimeMillis();
}
@Override
public void compute2() { tryComplete();}
@Override public synchronized UDPPing read(AutoBuffer ab){
if(_done)return this;
_done = true;
_t2 = System.currentTimeMillis();
_retries = ab.get4();
byte [] bs = ab.getA1();
_payload = bs;
return this;
}
@Override public synchronized AutoBuffer write(AutoBuffer ab){
if(!_done) ++_retries;
ab.put4(_retries); // count the number of retries as number of serialization calls
ab.putA1(_payload);
return ab;
}
@Override public void copyOver(Freezable f){
UDPPing u = (UDPPing)f;
_retries = u._retries;
_payload = u._payload;
}
}
private static class TCPTester extends DTask<TCPTester> {
public final int _srcId;
public final int _tgtId;
public final int _N;
private final int[] _msgSzs;
private transient RPC<UDPPing>[][] _pings;
double[] _dropRates;
int[] _droppedPackets;
public TCPTester(H2ONode src, H2ONode tgt, int[] msgSzs, int ntests) {
_srcId = src.index();
_tgtId = tgt.index();
_msgSzs = msgSzs;
_N = ntests;
}
private transient boolean _done;
private final void doTest() {
_droppedPackets = new int[_N];
Arrays.fill(_droppedPackets, -1);
_pings = new RPC[_msgSzs.length][_N];
// addToPendingCount(_msgSzs.length*_N - 1);
for (int i = 0; i < _msgSzs.length; ++i)
for (int j = 0; j < _N; ++j) // instead of synchronization, just wait for a predetermined amount of time
_pings[i][j] = new RPC(H2O.CLOUD._memary[_tgtId], new UDPPing(_msgSzs[i]))/*.addCompleter(this)*/.call();
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
}
// if not done yet, finish no matter what (racy but we don't care here - only a debug tool, does not have to be precise)
// setPendingCount(0);
}
@Override
public synchronized void onCompletion(CountedCompleter caller) {
if (!_done) { // only one completion
_done = true;
_dropRates = MemoryManager.malloc8d(_msgSzs.length);
// compute the drop rates
for (int i = 0; i < _msgSzs.length; ++i) {
double sum = 0;
for (int j = 0; j < _N; ++j) {
RPC<UDPPing> rpc = _pings[i][j];
sum += (rpc._dt._retries == -1 ? Double.POSITIVE_INFINITY : rpc._dt._retries);
}
_dropRates[i] = 1 - _N / (_N + sum);
}
}
}
@Override
public void compute2() {
}
}
private static class UDPDropTester extends DTask<UDPDropTester> {
public final int _srcId;
public final int _tgtId;
public final int _N;
private final int [] _msgSzs;
private transient RPC<UDPPing> [][] _pings;
double [] _dropRates;
int [] _droppedPackets;
public UDPDropTester(H2ONode src, H2ONode tgt, int [] msgSzs, int ntests){
_srcId = src.index();
_tgtId = tgt.index();
_msgSzs = msgSzs;
_N = ntests;
}
private transient boolean _done;
private final void doTest(){
_droppedPackets = new int[_N];
Arrays.fill(_droppedPackets,-1);
_pings = new RPC[_msgSzs.length][_N];
// addToPendingCount(_msgSzs.length*_N - 1);
for(int i = 0; i < _msgSzs.length; ++i)
for(int j = 0; j < _N; ++j) // instead of synchronization, just wait for a predetermined amount of time
_pings[i][j] = new RPC(H2O.CLOUD._memary[_tgtId],new UDPPing(_msgSzs[i]))/*.addCompleter(this)*/.call();
try { Thread.sleep(5000); } catch (InterruptedException e) {}
// if not done yet, finish no matter what (racy but we don't care here - only a debug tool, does not have to be precise)
// setPendingCount(0);
}
@Override public synchronized void onCompletion(CountedCompleter caller){
if(!_done){ // only one completion
_done = true;
_dropRates = MemoryManager.malloc8d(_msgSzs.length);
// compute the drop rates
for(int i = 0; i < _msgSzs.length; ++i) {
double sum = 0;
for (int j = 0; j < _N; ++j) {
RPC<UDPPing> rpc = _pings[i][j];
sum += (rpc._dt._retries == -1 ? Double.POSITIVE_INFINITY : rpc._dt._retries);
}
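        // Sketch of the estimator (added comment): each ping serializes once plus
        // once per retry, so 'sum' is the total number of retries across the _N pings.
        // Treating _N/(_N+sum) as the fraction of sends that got through, the drop
        // rate is 1 minus that; a ping that never completed (_retries == -1) pushes
        // the rate to 1.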
_dropRates[i] = 1 - _N/(_N+sum);
}
}
}
@Override
public void compute2() {
if(_srcId == H2O.SELF.index()) {
doTest();
tryComplete();
} else {
_done = true;
final UDPDropTester t = (UDPDropTester) clone();
new RPC(H2O.CLOUD._memary[_srcId], t).addCompleter(new H2OCountedCompleter(this) {
@Override
public void compute2() {
}
@Override
public void onCompletion(CountedCompleter cc) {
copyOver(t);
}
}).call();
}
}
}
private static class UDPDropMatrix extends Iced {
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="message size")
public final int messageSz;
@API(help="meassured drop rates")
public final double [][] dropRates;
public UDPDropMatrix(int msgSz, double [][] dropRates){
messageSz = msgSz;
this.dropRates = dropRates;
}
@Override
public String toString(){
return " drop rates at " + messageSz + " bytes\n" + Utils.pprint(dropRates);
}
}
@Override protected void execImpl() {
logStart();
Log.debug("NetworkTester testing udp drops");
final UDPDropTester [] dropTests = new UDPDropTester[H2O.CLOUD.size()*H2O.CLOUD.size()-H2O.CLOUD.size()];
H2O.submitTask(new H2OCountedCompleter() {
@Override
public void compute2() {
int k = 0;
for(int i = 0; i < H2O.CLOUD.size(); ++i)
for(int j = 0; j < H2O.CLOUD.size(); ++j){
if(i == j) continue;
dropTests[k++] = new UDPDropTester(H2O.CLOUD._memary[i],H2O.CLOUD._memary[j],msg_sizes,10);
}
ForkJoinTask.invokeAll(dropTests);
tryComplete();
}
}).join();
dropRates = new UDPDropMatrix[msg_sizes.length];
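    // (added comment) Re-walk the same skip-diagonal (i,j) order used when the
    // tests were created, so dropTests[k] lines up with the (src,tgt) pair
    // whose rate is recorded into ds[i][j].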
for(int m = 0; m < msg_sizes.length; ++m){
double [][] ds = new double[H2O.CLOUD.size()][H2O.CLOUD.size()];
int k = 0;
for(int i = 0; i < H2O.CLOUD.size(); ++i)
for(int j = 0; j < H2O.CLOUD.size(); ++j){
if(i == j) continue;
ds[i][j] = dropTests[k++]._dropRates[m];
}
dropRates[m] = new UDPDropMatrix(msg_sizes[m],ds);
}
Log.debug("Network test udp drop rates: ");
for(UDPDropMatrix m:dropRates)
Log.debug(m.toString());
    // now do the tcp bandwidth test
// print out
}
@Override
public boolean toHTML(StringBuilder sb) {
try {
DocGen.HTML.section(sb, "UDP Drop rates");
for(int i = 0; i < msg_sizes.length; ++i){
sb.append("<h4>" + "Message size = " + msg_sizes[i] + " bytes</h4>");
sb.append("<div>");
UDPDropMatrix d = dropRates[i];
sb.append("<table class='table table-bordered table-condensed'>\n");
sb.append("<tr>");
sb.append("<th></th>");
for(int j = 0 ; j < H2O.CLOUD.size(); ++j)
sb.append("<th>" + j + "</th>");
sb.append("</tr>\n");
for(int j = 0 ; j < H2O.CLOUD.size(); ++j){
sb.append("<tr><td>" + j + "</td>");
for(int k = 0; k < d.dropRates[j].length; ++k){
sb.append("<td>" + (int)(100*d.dropRates[j][k]) + "%</td>");
}
sb.append("</tr>\n");
}
sb.append("</table>");
sb.append("</div>");
}
} catch(Throwable t){
t.printStackTrace();
}
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/UDPFetchAck.java
|
package water;
/**
* A remote task request has just returned an ACK with answer
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
// Same as a UDPAck, but running at higher priority and only handling class/id mappings
class UDPFetchAck extends UDPAck {
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/UDPHeartbeat.java
|
package water;
/**
* A UDP Heartbeat packet.
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class UDPHeartbeat extends UDP {
@Override AutoBuffer call(AutoBuffer ab) {
if( ab._h2o != H2O.SELF ) { // Do not update self-heartbeat object
      // The self-heartbeat is the sole holder of racy cloud-consensus hashes
// and if we update it here we risk dropping an update.
ab._h2o._heartbeat = new HeartBeat().read(ab);
Paxos.doHeartbeat(ab._h2o);
}
return ab;
}
static void build_and_multicast( H2O cloud, HeartBeat hb ) {
// Paxos.print_debug("send: heartbeat ",cloud._memset);
assert hb._cloud_hash != 0; // Set before send, please
H2O.SELF._heartbeat = hb;
hb.write(new AutoBuffer(H2O.SELF).putUdp(UDP.udp.heartbeat)).close();
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/UDPRebooted.java
|
package water;
import java.io.IOException;
import water.util.Log;
/**
* A UDP Rebooted packet: this node recently rebooted
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class UDPRebooted extends UDP {
public static enum T {
none,
reboot,
shutdown,
oom,
error,
locked,
mismatch;
public void send(H2ONode target) {
assert this != none;
new AutoBuffer(target).putUdp(udp.rebooted).put1(ordinal()).close();
}
public void broadcast() { send(H2O.SELF); }
}
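  // Illustrative sketch (comment only, not part of the original file): a node
  // asking the whole cloud to shut down cleanly would broadcast the cause, e.g.
  //   UDPRebooted.T.shutdown.broadcast();
  // every receiver then lands in suicide(T.shutdown, sender) via checkForSuicide().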
public static void checkForSuicide(int first_byte, AutoBuffer ab) {
if( first_byte != UDP.udp.rebooted.ordinal() ) return;
int type = ab.get1();
suicide( T.values()[type], ab._h2o);
}
public static void suicide( T cause, H2ONode killer ) {
String m;
switch( cause ) {
case none: return;
case reboot: return;
case shutdown:
closeAll();
Log.info("Orderly shutdown command from "+killer);
H2O.exit(0);
return;
case oom: m = "Out of Memory and no swap space left"; break;
case error: m = "Error leading to a cloud kill"; break;
case locked: m = "Attempting to join an H2O cloud that is no longer accepting new H2O nodes"; break;
case mismatch: m = "Attempting to join an H2O cloud with a different H2O version (is H2O already running?)"; break;
default: m = "Received kill " + cause; break;
}
closeAll();
Log.err(m+" from "+killer);
Log.err("Exiting.");
H2O.exit(-1);
}
AutoBuffer call(AutoBuffer ab) {
if( ab._h2o != null ) ab._h2o.rebooted();
return ab;
}
// Try to gracefully close/shutdown all i/o channels.
public static void closeAll() {
try { H2O._udpSocket.close(); } catch( IOException e ) { }
try { H2O._apiSocket.close(); } catch( IOException e ) { }
try { TCPReceiverThread.SOCK.close(); } catch( IOException e ) { }
}
// Pretty-print bytes 1-15; byte 0 is the udp_type enum
public String print16( AutoBuffer ab ) {
ab.getPort();
return T.values()[ab.get1()].toString();
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/UDPReceiverThread.java
|
package water;
import java.nio.channels.DatagramChannel;
import java.util.Date;
import java.util.Random;
import water.util.Log;
/**
* The Thread that looks for UDP Cloud requests.
*
* This thread just spins on reading UDP packets from the kernel and either
* dispatching on them directly itself (if the request is known short) or
* queuing them up for worker threads.
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class UDPReceiverThread extends Thread {
static private int _unknown_packets_per_sec = 0;
static private long _unknown_packet_time = 0;
static final Random RANDOM_UDP_DROP = new Random();
public UDPReceiverThread() {
super("D-UDP-Recv");
}
// ---
// Started by main() on a single thread, this code manages reading UDP packets
@SuppressWarnings("resource")
public void run() {
Thread.currentThread().setPriority(Thread.MAX_PRIORITY-1);
DatagramChannel sock = H2O._udpSocket, errsock = null;
boolean saw_error = false;
while( true ) {
try {
// Cleanup from any prior socket failures. Rare unless we're really sick.
if( errsock != null ) { // One time attempt a socket close
final DatagramChannel tmp2 = errsock; errsock = null;
tmp2.close(); // Could throw, but errsock cleared for next pass
}
        if( saw_error ) Thread.sleep(1000); // prevent denial-of-service endless socket-creates
saw_error = false;
// ---
// Common-case setup of a socket
if( sock == null ) {
sock = DatagramChannel.open();
sock.socket().bind(H2O.SELF._key);
}
// Receive a packet & handle it
basic_packet_handling(new AutoBuffer(sock));
} catch( java.nio.channels.AsynchronousCloseException ex ) {
break; // Socket closed for shutdown
} catch( java.nio.channels.ClosedChannelException ex ) {
break; // Socket closed for shutdown
} catch( Exception e ) {
// On any error from anybody, close all sockets & re-open
Log.err("UDP Receiver error on port "+H2O.UDP_PORT,e);
saw_error = true;
errsock = sock ; sock = null; // Signal error recovery on the next loop
}
}
}
// Basic packet handling:
// - Timeline record it
static public void basic_packet_handling( AutoBuffer ab ) throws java.io.IOException {
    // Randomly drop a fraction of the packets (1 in 5 when -random_udp_drop is
    // set), as-if broken network.  Dropped packets are timeline recorded before
    // dropping - and we still will respond to timelines and suicide packets.
int drop = H2O.OPT_ARGS.random_udp_drop!= null &&
RANDOM_UDP_DROP.nextInt(5) == 0 ? 2 : 0;
// Record the last time we heard from any given Node
TimeLine.record_recv(ab,false,drop);
ab._h2o._last_heard_from = System.currentTimeMillis();
// Snapshots are handled *IN THIS THREAD*, to prevent more UDP packets from
// being handled during the dump. Also works for packets from outside the
// Cloud... because we use Timelines to diagnose Paxos failures.
int ctrl = ab.getCtrl();
ab.getPort(); // skip the port bytes
if( ctrl == UDP.udp.timeline.ordinal() ) {
UDP.udp.timeline._udp.call(ab);
return;
}
// Suicide packet? Short-n-sweet...
if( ctrl == UDP.udp.rebooted.ordinal())
UDPRebooted.checkForSuicide(ctrl, ab);
// Drop the packet.
if( drop != 0 ) return;
// Get the Cloud we are operating under for this packet
H2O cloud = H2O.CLOUD;
// Check cloud membership; stale ex-members are "fail-stop" - we mostly
// ignore packets from them (except paxos packets).
boolean is_member = cloud.contains(ab._h2o);
// Paxos stateless packets & ACKs just fire immediately in a worker
// thread. Dups are handled by these packet handlers directly. No
// current membership check required for Paxos packets
if( UDP.udp.UDPS[ctrl]._paxos || is_member ) {
H2O.submitTask(new FJPacket(ab,ctrl));
return;
}
// Some non-Paxos packet from a non-member. Probably should record & complain.
// Filter unknown-packet-reports. In bad situations of poisoned Paxos
// voting we can get a LOT of these packets/sec, flooding the console.
_unknown_packets_per_sec++;
long timediff = ab._h2o._last_heard_from - _unknown_packet_time;
if( timediff > 1000 ) {
Log.warn("UDP packets from outside the cloud: "+_unknown_packets_per_sec+"/sec, last one from "+ab._h2o+ " @ "+new Date());
_unknown_packets_per_sec = 0;
_unknown_packet_time = ab._h2o._last_heard_from;
}
ab.close();
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/UDPTimeOutThread.java
|
package water;
import java.util.concurrent.DelayQueue;
/**
* The Thread that looks for UDPAsyncTasks that are timing out
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public class UDPTimeOutThread extends Thread {
public UDPTimeOutThread() { super("UDPTimeout"); }
// List of "in progress" tasks. When they time-out we do the time-out action
// which is possibly a re-send if we suspect a dropped UDP packet, or a
// fail-out if the target has died.
static DelayQueue<RPC> PENDING = new DelayQueue<RPC>();
// The Run Method.
// Started by main() on a single thread, handle timing-out UDP packets
public void run() {
Thread.currentThread().setPriority(Thread.NORM_PRIORITY);
while( true ) {
try {
RPC t = PENDING.take();
// One-shot timeout effect. Retries need to re-insert back in the queue
if( H2O.CLOUD.contains(t._target) ) {
if( !t.isDone() ) t.call();
} else t.cancel(true);
} catch( InterruptedException e ) {
// Interrupted while waiting for a packet?
// Blow it off and go wait again...
}
}
}
public static final RPC[] pendingRPCs(){
return PENDING.toArray(new RPC[0]);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/UKV.java
|
package water;
import water.fvec.Vec;
/**
* User-View Key/Value Store
*
* This class handles user-view keys, and hides ArrayLets from the end user.
*
*
* @author <a href="mailto:cliffc@0xdata.com"></a>
* @version 1.0
*/
public abstract class UKV {
// This put is a top-level user-update, and not a reflected or retried
// update. i.e., The User has initiated a change against the K/V store.
static public void put( Key key, Value val ) {
Futures fs = new Futures();
put(key,val,fs);
fs.blockForPending(); // Block for remote-put to complete
}
static public void put( Key key, Iced val, Futures fs ) { put(key,new Value(key, val),fs); }
// Do the DKV.put. DISALLOW this interface for Lockables. Lockables all
// have to use the Lockable interface for all updates.
static public void put( Key key, Value val, Futures fs ) {
assert !val.isLockable();
Value res = DKV.put(key,val,fs);
assert res == null || !res.isLockable();
}
// Recursively remove, gathering all the pending remote key-deletes
static public void remove( Key key ) { remove(key,new Futures()).blockForPending(); }
static public Futures remove( Key key, Futures fs ) {
if( key.isVec() ) {
Value val = DKV.get(key);
if (val == null) return fs;
((Vec)val.get()).remove(fs);
}
DKV.remove(key,fs);
return fs;
}
// User-Weak-Get a Key from the distributed cloud.
// Right now, just gets chunk#0 from a ValueArray, or a normal Value otherwise.
static public Value getValue( Key key ) {
Value val = DKV.get(key);
return val;
}
static public void put(String s, Value v) { put(Key.make(s), v); }
static public void remove(String s) { remove(Key.make(s)); }
// Also, allow auto-serialization
static public void put( Key key, Freezable fr ) {
if( fr == null ) UKV.remove(key);
else UKV.put(key,new Value(key, fr));
}
static public void put( Key key, Iced fr ) {
if( fr == null ) UKV.remove(key);
else UKV.put(key,new Value(key, fr));
}
public static <T extends Iced> T get(Key k) {
Value v = DKV.get(k);
return (v == null) ? null : (T)v.get();
}
public static <T extends Freezable> T get(Key k, Class<T> C) {
Value v = DKV.get(k);
return (v == null) ? null : v.get(C);
}
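  // Illustrative usage sketch (comment only; the key and POJO names are hypothetical):
  //   Key k = Key.make("my.pojo");
  //   UKV.put(k, somePojo);        // serialize an Iced POJO and publish it cloud-wide
  //   MyPojo p = UKV.get(k);       // typed fetch, or null if the key is absent
  //   UKV.remove(k);               // delete, recursing through Vec chunks if needed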
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/UniqueFrameId.java
|
package water;
import dontweave.gson.JsonObject;
import dontweave.gson.JsonParser;
import org.apache.commons.codec.binary.Hex;
import water.api.DocGen;
import water.api.Request.API;
import water.api.Request.Default;
import water.fvec.Frame;
/**
* Frames are mutable, so we can't create a unique id at creation time to distinguish
* between Frames. In addition, we'll want to know that two Frames parsed from the same
* data are equivalent. Therefore, we store the Frame here, and generate a good hash of
* the contents of the Vecs when we are asked for the id.
*/
public final class UniqueFrameId extends UniqueId {
private Key frame = null;
public UniqueFrameId(Key key, Frame frame) {
super(key);
this.frame = frame._key;
}
public UniqueFrameId(String key, long creation_epoch_time_millis, String id, Frame frame) {
super(key, creation_epoch_time_millis, id);
this.frame = frame._key;
}
@Override
public String getId() {
return Long.toHexString(((Frame)DKV.get(frame).get()).checksum());
}
public JsonObject toJSON() {
final String json = new String(writeJSON(new AutoBuffer()).buf());
if (json.length() == 0) return new JsonObject();
JsonObject jo = (JsonObject)new JsonParser().parse(json);
jo.addProperty("id", this.getId());
return jo;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/UniqueId.java
|
package water;
import dontweave.gson.JsonObject;
import water.api.DocGen;
import water.api.Request.API;
import water.api.Request.Default;
import java.util.UUID;
/**
* Some properties to mix in to Frame, Model and such to make them uniquely identifiable.
* That is, we want to distinguish between different instances of a Model that have the
* same key over time.
*/
public class UniqueId extends Iced {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
@API(help="The keycreation timestamp for the object (if it's not null).", required=false, filter=Default.class, json=true)
private String key = null;
@API(help="The creation timestamp for the object.", required=false, filter=Default.class, json=true)
private long creation_epoch_time_millis = -1L;
@API(help="The id for the object.", required=false, filter=Default.class, json=true)
private String id = null;
public UniqueId(Key key) {
if (null != key)
this.key = key.toString();
this.creation_epoch_time_millis = System.currentTimeMillis();
this.id = UUID.randomUUID().toString();
}
/**
* ONLY to be used to deserializing persisted instances.
*/
public UniqueId(String key, long creation_epoch_time_millis, String id) {
this.key = key;
this.creation_epoch_time_millis = creation_epoch_time_millis;
this.id = id;
}
public String getKey() {
return this.key;
}
public long getCreationEpochTimeMillis() {
return this.creation_epoch_time_millis;
}
public String getId() {
return this.id;
}
public JsonObject toJSON() {
JsonObject result = new JsonObject();
result.addProperty("key", this.getKey());
result.addProperty("creation_epoch_time_millis", this.getCreationEpochTimeMillis());
result.addProperty("id", this.getId());
return result;
}
public boolean equals(Object o) {
if (!(o instanceof UniqueId))
return false;
UniqueId other = (UniqueId)o;
// NOTE: we must call this.getId() because subclasses can define the id in a way that's dynamic.
return
(this.creation_epoch_time_millis == other.creation_epoch_time_millis) &&
(this.getId() != null) &&
(this.getId().equals(other.getId()));
}
public int hashCode() {
return 17 +
37 * Long.valueOf(this.creation_epoch_time_millis).hashCode() +
37 * this.getId().hashCode();
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Value.java
|
package water;
import java.io.*;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicInteger;
import jsr166y.ForkJoinPool;
import water.Job.ProgressMonitor;
import water.fvec.*;
import water.nbhm.NonBlockingSetInt;
import water.persist.*;
import water.util.Utils;
/**
* The core Value stored in the distributed K/V store. It contains an
* underlying byte[] which may be spilled to disk and freed by the
* {@link MemoryManager}.
*/
public class
Value extends Iced implements ForkJoinPool.ManagedBlocker {
// ---
// Type-id of serialized object; see TypeMap for the list.
// Might be a primitive array type, or a Iced POJO
private short _type;
public int type() { return _type; }
public String className() { return TypeMap.className(_type); }
// Max size of Values before we start asserting.
// Sizes around this big, or larger are probably true errors.
// In any case, they will cause issues with both GC (giant pause times on
// many collectors) and I/O (long term blocking of TCP I/O channels to
// service a single request, causing starvation of other requests).
public static final int MAX = 20*1024*1024;
// ---
// Values are wads of bits; known small enough to 'chunk' politely on disk,
// or fit in a Java heap (larger Values are built via arraylets) but (much)
// larger than a UDP packet. Values can point to either the disk or ram
// version or both. There's no caching smarts, nor compression nor de-dup
// smarts. This is just a local placeholder for some user bits being held at
// this local Node.
public int _max; // Max length of Value bytes
// ---
// A array of this Value when cached in DRAM, or NULL if not cached. The
// contents of _mem are immutable (Key/Value mappings can be changed by an
// explicit PUT action). Cleared to null asynchronously by the memory
// manager (but only if persisted to some disk or in a POJO). Can be filled
// in by reloading from disk, or by serializing a POJO.
private volatile byte[] _mem;
public final byte[] rawMem() { return _mem; }
// ---
// A POJO version of the _mem array, or null if the _mem has not been
// serialized or if _mem is primitive data and not a POJO. Cleared to null
// asynchronously by the memory manager (but only if persisted to some disk,
// or in the _mem array). Can be filled in by deserializing the _mem array.
// NOTE THAT IF YOU MODIFY any fields of a POJO that is part of a Value,
// - this is NOT the recommended programming style,
// - those changes are visible to all CPUs on the writing node,
// - but not to other nodes, and
// - the POJO might be dropped by the MemoryManager and reconstituted from
  //   disk and/or the byte array back to its original form, losing your changes.
private volatile Freezable _pojo;
public Freezable rawPOJO() { return _pojo; }
// Free array (but always be able to rebuild the array)
public final void freeMem() {
assert isPersisted() || _pojo != null || _key._kb[0]==Key.DVEC;
_mem = null;
}
// Free POJO (but always be able to rebuild the POJO)
public final void freePOJO() {
assert isPersisted() || _mem != null;
_pojo = null;
}
// The FAST path get-byte-array - final method for speed.
// Will (re)build the mem array from either the POJO or disk.
// Never returns NULL.
public final byte[] memOrLoad() {
byte[] mem = _mem; // Read once!
if( mem != null ) return mem;
Freezable pojo = _pojo; // Read once!
if( pojo != null )
if( pojo instanceof Chunk ) return (_mem = ((Chunk)pojo).getBytes());
else return (_mem = pojo.write(new AutoBuffer()).buf());
if( _max == 0 ) return (_mem = new byte[0]);
return (_mem = loadPersist());
}
// Just an empty shell of a Value, no local data but the Value is "real".
// Any attempt to look at the Value will require a remote fetch.
public final boolean isEmpty() { return _max > 0 && _mem==null && _pojo == null && !isPersisted(); }
public final byte[] getBytes() {
assert _type==TypeMap.PRIM_B && _pojo == null;
byte[] mem = _mem; // Read once!
return mem != null ? mem : (_mem = loadPersist());
}
// The FAST path get-POJO - final method for speed.
// Will (re)build the POJO from the _mem array.
// Never returns NULL.
public <T extends Iced> T get() {
touch();
Iced pojo = (Iced)_pojo; // Read once!
if( pojo != null ) return (T)pojo;
pojo = TypeMap.newInstance(_type);
pojo.read(new AutoBuffer(memOrLoad()));
pojo.init(_key);
return (T)(_pojo = pojo);
}
public <T extends Freezable> T get(Class<T> fc) {
T pojo = getFreezable();
assert fc.isAssignableFrom(pojo.getClass());
return pojo;
}
public <T extends Freezable> T getFreezable() {
touch();
Freezable pojo = _pojo; // Read once!
if( pojo != null ) return (T)pojo;
pojo = TypeMap.newFreezable(_type);
pojo.read(new AutoBuffer(memOrLoad()));
if( pojo instanceof Iced ) ((Iced)pojo).init(_key);
return (T)(_pojo = pojo);
}
// ---
// Time of last access to this value.
transient long _lastAccessedTime = System.currentTimeMillis();
public final void touch() {_lastAccessedTime = System.currentTimeMillis();}
// ---
// A Value is persisted. The Key is used to define the filename.
public transient Key _key;
// ---
// Backend persistence info. 3 bits are reserved for 8 different flavors of
// backend storage. 1 bit for whether or not the latest _mem field is
// entirely persisted on the backend storage, or not. Note that with only 1
// bit here there is an unclosable datarace: one thread could be trying to
// change _mem (e.g. to null for deletion) while another is trying to write
// the existing _mem to disk (for persistence). This datarace only happens
// if we have racing deletes of an existing key, along with racing persist
// attempts. There are other races that are stopped higher up the stack: we
// do not attempt to write to disk, unless we have *all* of a Value, so
// extending _mem (from a remote read) should not conflict with writing _mem
// to disk.
//
// The low 3 bits are final.
// The on/off disk bit is strictly cleared by the higher layers (e.g. Value.java)
// and strictly set by the persistence layers (e.g. PersistIce.java).
public volatile byte _persist; // 3 bits of backend flavor; 1 bit of disk/notdisk
public final static byte ICE = 1<<0; // ICE: distributed local disks
public final static byte HDFS= 2<<0; // HDFS: backed by hadoop cluster
public final static byte S3 = 3<<0; // Amazon S3
public final static byte NFS = 4<<0; // NFS: Standard file system
public final static byte TACHYON = 5<<0; // Support for tachyon FS
public final static byte TCP = 7<<0; // TCP: For profile purposes, not a storage system
public final static byte BACKEND_MASK = (8-1);
public final static byte NOTdsk = 0<<3; // latest _mem is persisted or not
public final static byte ON_dsk = 1<<3;
final public void clrdsk() { _persist &= ~ON_dsk; } // note: not atomic
final public void setdsk() { _persist |= ON_dsk; } // note: not atomic
final public boolean isPersisted() { return (_persist&ON_dsk)!=0; }
final public byte backend() { return (byte)(_persist&BACKEND_MASK); }
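  // Example reading of the _persist byte (added comment): a value of
  // (HDFS | ON_dsk) means the low 3 bits select the HDFS backend and bit 3 says
  // the current _mem is already safely stored on that backend; clrdsk()/setdsk()
  // toggle only that bit.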
// ---
// Interface for using the persistence layer(s).
public boolean onICE () { return (backend()) == ICE; }
public boolean onHDFS () { return (backend()) == HDFS; }
public boolean onNFS () { return (backend()) == NFS; }
public boolean onS3 () { return (backend()) == S3; }
public boolean onTachyon() { return (backend()) == TACHYON; }
/** Store complete Values to disk */
void storePersist() throws IOException {
if( isPersisted() ) return;
Persist.I[backend()].store(this);
}
/** Remove dead Values from disk */
void removeIce() {
// do not yank memory, as we could have a racing get hold on to this
// free_mem();
if( !isPersisted() || !onICE() ) return; // Never hit disk?
clrdsk(); // Not persisted now
Persist.I[backend()].delete(this);
}
/** Load some or all of completely persisted Values */
byte[] loadPersist() {
assert isPersisted();
return Persist.I[backend()].load(this);
}
public String nameOfPersist() { return nameOfPersist(backend()); }
public static String nameOfPersist(int x) {
switch( x ) {
case ICE : return "ICE";
case HDFS: return "HDFS";
case S3 : return "S3";
case NFS : return "NFS";
case TCP : return "TCP";
default : return "UNKNOWN(" + x + ")";
}
}
/** Set persistence to HDFS from ICE */
public void setHdfs() {
assert onICE();
byte[] mem = memOrLoad(); // Get into stable memory
_persist = Value.HDFS|Value.NOTdsk;
Persist.I[Value.HDFS].store(this);
removeIce(); // Remove from ICE disk
assert onHDFS(); // Flip to HDFS
_mem = mem; // Close a race with the H2O cleaner zapping _mem while removing from ice
}
public StringBuilder getString( int len, StringBuilder sb ) {
int newlines=0;
byte[] b = memOrLoad();
final int LEN=Math.min(len,b.length);
for( int i=0; i<LEN; i++ ) {
byte c = b[i];
if( c == '&' ) sb.append("&");
else if( c == '<' ) sb.append("<");
else if( c == '>' ) sb.append(">");
else if( c == '\n' ) { sb.append("<br>"); if( newlines++ > 5 ) break; }
else if( c == ',' && i+1<LEN && b[i+1]!=' ' )
sb.append(", ");
else sb.append((char)c);
}
if( b.length > LEN ) sb.append("...");
return sb;
}
public boolean isLockable(){ return _type != TypeMap.PRIM_B && (TypeMap.newInstance(_type) instanceof Lockable); }
public boolean isFrame() { return _type == TypeMap.FRAME; }
public boolean isVec() { return _type != TypeMap.PRIM_B && (TypeMap.newInstance(_type) instanceof Vec); }
public boolean isByteVec() { return _type != TypeMap.PRIM_B && (TypeMap.newInstance(_type) instanceof ByteVec); }
public boolean isRawData() {
if(isFrame()){
Frame fr = get();
return fr.vecs().length == 1 && (fr.vecs()[0] instanceof ByteVec);
}
// either simple value with bytearray, un-parsed value array or byte vec
return _type == TypeMap.PRIM_B || isByteVec();
}
public byte[] getFirstBytes() {
Value v = this;
if(isByteVec()){
ByteVec vec = get();
return vec.chunkForChunkIdx(0).getBytes();
} else if(isFrame()){
Frame fr = get();
return ((ByteVec)fr.vecs()[0]).chunkForChunkIdx(0).getBytes();
}
// Return empty array if key has been deleted
return v != null ? v.memOrLoad() : new byte[0];
}
// For plain Values, just the length in bytes.
// For ValueArrays, the length of all chunks.
// For Frames, the compressed size of all vecs within the frame.
public long length() {
if (isFrame()) {
return ((Frame)get()).byteSize();
}
return _max;
}
public InputStream openStream() throws IOException {
return openStream(null);
}
/** Creates a Stream for reading bytes */
public InputStream openStream(ProgressMonitor p) throws IOException {
if(onNFS() ) return PersistNFS .openStream(_key );
if(onHDFS()) return PersistHdfs.openStream(_key,p);
if(onS3() ) return PersistS3 .openStream(_key,p);
if(onTachyon()) return PersistTachyon.openStream(_key,p);
    if( isFrame() ) throw new IllegalArgumentException("Tried to pass a Frame to openStream (maybe tried to parse an (already-parsed) Frame?)");
assert _type==TypeMap.PRIM_B : "Expected byte[] type but got "+TypeMap.className(_type);
return new ByteArrayInputStream(memOrLoad());
}
public boolean isBitIdentical( Value v ) {
if( this == v ) return true;
if( !isFrame() && !v.isFrame() )
return Arrays.equals(getBytes(), v.getBytes());
Frame fr0 = get();
Frame fr1 = v.get();
if( fr0.numRows() != fr1.numRows() ) return false;
if( fr0.numCols() != fr1.numCols() ) return false;
return new BitCmp(fr1).doAll(fr0)._eq;
}
private static class BitCmp extends MRTask2<BitCmp> {
final Frame _fr;
BitCmp( Frame fr ) { _fr = fr; }
boolean _eq;
@Override public void map( Chunk[] chks ) {
int cols = chks.length;
int rows = chks[0]._len;
long start = chks[0]._start;
for( int c=0; c<cols; c++ ) {
Chunk c0 = chks[c ];
Vec v1 = _fr.vecs()[c];
if( c0._vec.isUUID() ) {
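          // (added comment) UUIDs are stored as two 64-bit halves; rows compare
          // equal only when both are NA, or neither is NA and both the low
          // (at16l) and high (at16h) halves match.  Any mismatch returns early,
          // leaving _eq at its default false.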
for( int r=0; r<rows; r++ )
if( !( c0.isNA0(r) && v1. isNA(r+start)) &&
(( c0. isNA0(r)&&!v1. isNA(r+start)) ||
(!c0. isNA0(r)&& v1. isNA(r+start)) ||
( c0.at16l0(r)!= v1.at16l(r+start))||
( c0.at16h0(r)!= v1.at16h(r+start))) )
return;
} else {
for( int r=0; r<rows; r++ )
if( !Utils.compareDoubles(c0.at0(r),v1.at(r+start)) )
return;
}
}
_eq = true;
}
@Override public void reduce( BitCmp bc ) { _eq &= bc._eq; }
}
// --------------------------------------------------------------------------
// Set just the initial fields
public Value(Key k, int max, byte[] mem, short type, byte be ) {
assert mem==null || mem.length==max;
assert max < MAX : "Value size=0x"+Integer.toHexString(max);
_key = k;
_max = max;
_mem = mem;
_type = type;
_pojo = null;
// For the ICE backend, assume new values are not-yet-written.
// For HDFS & NFS backends, assume we from global data and preserve the
// passed-in persist bits
byte p = (byte)(be&BACKEND_MASK);
_persist = (p==ICE) ? p : be;
_rwlock = new AtomicInteger(0);
_replicas = k.home() ? new NonBlockingSetInt() : null;
}
public Value(Key k, byte[] mem ) { this(k, mem.length, mem, TypeMap.PRIM_B, ICE); }
public Value(Key k, int max ) { this(k, max, new byte[max], TypeMap.PRIM_B, ICE); }
public Value(Key k, int max, byte be ) { this(k, max, null, TypeMap.PRIM_B, be); }
public Value(Key k, String s ) { this(k, s.getBytes()); }
public Value(Key k, Iced pojo ) { this(k,pojo,ICE); }
public Value(Key k, Iced pojo, byte be ) {
_key = k;
_pojo = pojo;
_type = (short)pojo.frozenType();
_mem = (pojo instanceof Chunk)?((Chunk)pojo).getBytes():pojo.write(new AutoBuffer()).buf();
_max = _mem.length;
// For the ICE backend, assume new values are not-yet-written.
// For HDFS & NFS backends, assume we from global data and preserve the
// passed-in persist bits
byte p = (byte)(be&BACKEND_MASK);
_persist = (p==ICE) ? p : be;
_rwlock = new AtomicInteger(0);
_replicas = k.home() ? new NonBlockingSetInt() : null;
}
public Value(Key k, Freezable pojo) {
_key = k;
_pojo = pojo;
_type = (short)pojo.frozenType();
_mem = pojo.write(new AutoBuffer()).buf();
_max = _mem.length;
_persist = ICE;
_rwlock = new AtomicInteger(0);
_replicas = k.home() ? new NonBlockingSetInt() : null;
}
// Nullary constructor for weaving
public Value() {
_rwlock = new AtomicInteger(0);
_replicas = new NonBlockingSetInt();
}
// Custom serializers: the _mem field is racily cleared by the MemoryManager
// and the normal serializer then might ship over a null instead of the
  // intended byte[].  Also, the value is NOT on the deserializing machine's disk.
public AutoBuffer write(AutoBuffer bb) {
byte p = _persist;
if( onICE() ) p &= ~ON_dsk; // Not on the remote disk
return bb.put1(p).put2(_type).putA1(memOrLoad());
}
// Custom serializer: set _max from _mem length; set replicas & timestamp.
public Value read(AutoBuffer bb) {
assert _key == null; // Not set yet
_persist = (byte) bb.get1();
_type = (short) bb.get2();
_mem = bb.getA1();
_max = _mem.length;
_pojo = null;
// On remote nodes _rwlock is initialized to 0 (signaling a remote PUT is
// in progress) flips to -1 when the remote PUT is done, or +1 if a notify
// needs to happen.
_rwlock.set(-1); // Set as 'remote put is done'
touch();
return this;
}
// ---------------------
// Ordering of K/V's! This field tracks a bunch of things used in ordering
// updates to the same Key. Ordering Rules:
// - Program Order. You see your own writes. All writes in a single thread
// strongly ordered (writes never roll back). In particular can:
// PUT(v1), GET, PUT(null) and The Right Thing happens.
// - Unrelated writes can race (unless fencing).
// - Writes are not atomic: some people can see a write ahead of others.
// - Last-write-wins: if we do a zillion writes to the same Key then wait "a
// long time", then do reads all reads will see the same last value.
// - Blocking on a PUT stalls until the PUT is cloud-wide visible
//
// For comparison to H2O get/put MM
// IA Memory Ordering, 8 principles from Rich Hudson, Intel
// 1. Loads are not reordered with other loads
// 2. Stores are not reordered with other stores
// 3. Stores are not reordered with older loads
// 4. Loads may be reordered with older stores to different locations but not
// with older stores to the same location
// 5. In a multiprocessor system, memory ordering obeys causality (memory
// ordering respects transitive visibility).
// 6. In a multiprocessor system, stores to the same location have a total order
// 7. In a multiprocessor system, locked instructions have a total order
// 8. Loads and stores are not reordered with locked instructions.
//
// My (KN, CNC) interpretation of H2O MM from today:
// 1. Gets are not reordered with other Gets
  //  2. Puts may be reordered with Puts to different Keys.
// 3. Puts may be reordered with older Gets to different Keys, but not with
// older Gets to the same Key.
// 4. Gets may be reordered with older Puts to different Keys but not with
// older Puts to the same Key.
// 5. Get/Put amongst threads doesn't obey causality
// 6. Puts to the same Key have a total order.
// 7. no such thing. although RMW operation exists with Put-like constraints.
// 8. Gets and Puts may be reordered with RMW operations
// 9. A write barrier exists that creates Sequential Consistency. Same-key
// ordering (3-4) can't be used to create the effect.
//
// A Reader/Writer lock for the home node to control racing Gets and Puts.
// - 0 for unlocked
// - +N for locked by N concurrent GETs-in-flight
// - -1 for write-locked
//
// An ACKACK from the client GET lowers the reader lock count.
//
// Home node PUTs alter which Value is mapped to a Key, then they block until
// there are no active GETs, then atomically set the write-lock, then send
// out invalidates to all the replicas. PUTs return when all invalidates
// have reported back.
//
// An initial remote PUT will default the value to 0. A 2nd PUT attempt will
// block until the 1st one completes (multiple writes to the same Key from
// the same JVM block, so there is at most 1 outstanding write to the same
// Key from the same JVM). The 2nd PUT will CAS the value to 1, indicating
// the need for the finishing 1st PUT to call notify().
//
// Note that this sequence involves a lot of blocking on repeated writes with
// cached readers, but not the readers - i.e., writes are slow to complete.
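  // Example home-node lifecycle (added sketch): a remote GET makes setReplica()
  // CAS the count 0->1 and record the replica; the client's ACKACK makes
  // lowerActiveGetCount() CAS 1->0 and notify any blocked PUT; a PUT then runs
  // lockAndInvalidate(), CASing 0->-1 and shipping TaskInvalidateKey to every
  // recorded replica before the PUT is allowed to complete.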
private transient final AtomicInteger _rwlock;
private boolean RW_CAS( int old, int nnn, String msg ) {
if( !_rwlock.compareAndSet(old,nnn) ) return false;
//System.out.println(_key+", "+old+" -> "+nnn+", "+msg);
return true;
}
// List of who is replicated where
private transient final NonBlockingSetInt _replicas;
public int numReplicas() { return _replicas.size(); }
/** True if h2o has a copy of this Value */
boolean isReplicatedTo( H2ONode h2o ) { return _replicas.contains(h2o._unique_idx); }
/** Atomically insert h2o into the replica list; reports false if the Value
   * is flagged against future replication with a -1.  Also bumps the active
* Get count, which remains until the Get completes (we receive an ACKACK). */
boolean setReplica( H2ONode h2o ) {
assert _key.home(); // Only the HOME node for a key tracks replicas
assert h2o != H2O.SELF; // Do not track self as a replica
while( true ) { // Repeat, in case racing GETs are bumping the counter
int old = _rwlock.get();
if( old == -1 ) return false; // Write-locked; no new replications. Read fails to read *this* value
assert old >= 0; // Not negative
if( RW_CAS(old,old+1,"rlock+") ) break;
}
// Narrow non-race here. Here is a time window where the rwlock count went
// up, but the replica list does not account for the new replica. However,
// the rwlock cannot go down until an ACKACK is received, and the ACK
// (hence ACKACK) doesn't go out until after this function returns.
_replicas.add(h2o._unique_idx);
// Both rwlock taken, and replica count is up now.
return true;
}
/** Atomically lower active GET count */
void lowerActiveGetCount( H2ONode h2o ) {
assert _key.home(); // Only the HOME node for a key tracks replicas
assert h2o != H2O.SELF;// Do not track self as a replica
while( true ) { // Repeat, in case racing GETs are bumping the counter
int old = _rwlock.get(); // Read the lock-word
assert old > 0; // Since lowering, must be at least 1
assert old != -1; // Not write-locked, because we are an active reader
assert _replicas.contains(h2o._unique_idx); // Self-bit is set
if( RW_CAS(old,old-1,"rlock-") ) {
if( old-1 == 0 ) // GET count fell to zero?
synchronized( this ) { notifyAll(); } // Notify any pending blocked PUTs
        return;                 // Count lowered; done
}
}
}
/** This value was atomically extracted from the local STORE by a successful
* TaskPutKey attempt (only 1 thread can ever extract and thus call here).
* No future lookups will find this Value, but there may be existing uses.
* Atomically set the rwlock count to -1 locking it from further GETs and
* ship out invalidates to caching replicas. May need to block on active
* GETs. Updates a set of Future invalidates that can be blocked against. */
Futures lockAndInvalidate( H2ONode sender, Futures fs ) {
assert _key.home(); // Only the HOME node for a key tracks replicas
// Write-Lock against further GETs
while( true ) { // Repeat, in case racing GETs are bumping the counter
int old = _rwlock.get();
assert old >= 0 : _key+", rwlock="+old; // Count does not go negative
assert old != -1; // Only the thread doing a PUT ever locks
if( old !=0 ) { // has readers?
// Active readers: need to block until the GETs (of this very Value!)
// all complete, before we can invalidate this Value - lest a racing
// Invalidate bypass a GET.
try { ForkJoinPool.managedBlock(this); } catch( InterruptedException e ) { }
} else if( RW_CAS(0,-1,"wlock") )
break; // Got the write-lock!
}
// We have the set of Nodes with replicas now. Ship out invalidates.
int max = _replicas.length();
for( int i=0; i<max; i++ )
if( _replicas.contains(i) && H2ONode.IDX[i] != sender )
TaskInvalidateKey.invalidate(H2ONode.IDX[i],_key,fs);
return fs;
}
/** Initialize the _replicas field for a PUT. On the Home node (for remote
* PUTs), it is initialized to the one replica we know about, and not
* read-locked. Used on a new Value about to be PUT on the Home node. */
void initReplicaHome( H2ONode h2o, Key key ) {
assert key.home();
assert _key == null; // This is THE initializing key write for serialized Values
assert h2o != H2O.SELF; // Do not track self as a replica
_key = key;
// Set the replica bit for the one node we know about, and leave the
// rest clear.
_replicas.add(h2o._unique_idx);
_rwlock.set(0); // No GETs are in-flight at this time.
//System.out.println(key+", init "+_rwlock.get());
}
/** Block this thread until all prior remote PUTs complete - to force
* remote-PUT ordering on the home node. */
void startRemotePut() {
assert !_key.home();
int x = 0;
// assert I am waiting on threads with higher priority?
while( (x=_rwlock.get()) != -1 ) // Spin until rwlock==-1
if( x == 1 || RW_CAS(0,1,"remote_need_notify") )
try { ForkJoinPool.managedBlock(this); } catch( InterruptedException e ) { }
}
/** The PUT for this Value has completed. Wakeup any blocked later PUTs. */
void completeRemotePut() {
assert !_key.home();
// Attempt an eager blind attempt, assuming no blocked pending notifies
if( RW_CAS(0, -1,"remote_complete") ) return;
synchronized(this) {
boolean res = RW_CAS(1, -1,"remote_do_notify");
assert res; // Must succeed
notifyAll(); // Wake up pending blocked PUTs
}
}
/** Return true if blocking is unnecessary.
* Alas, used in TWO places and the blocking API forces them to share here. */
@Override public boolean isReleasable() {
int r = _rwlock.get();
if( _key.home() ) { // Called from lock_and_invalidate
// Home-key blocking: wait for active-GET count to fall to zero
return r == 0;
} else { // Called from start_put
// Remote-key blocking: wait for active-PUT lock to hit -1
assert r == 1 || r == -1; // Either waiting (1) or done (-1) but not started(0)
return r == -1; // done!
}
}
/** Possibly blocks the current thread. Returns true if isReleasable would
* return true. Used by the FJ Pool management to spawn threads to prevent
   * deadlock if otherwise all threads would block on waits. */
@Override public synchronized boolean block() {
while( !isReleasable() ) { try { wait(); } catch( InterruptedException e ) { } }
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8
|
java-sources/ai/h2o/h2o-classic/2.8/water/Weaver.java
|
package water;
import java.util.*;
import javassist.*;
import javassist.bytecode.*;
import javassist.bytecode.SignatureAttribute.ClassSignature;
import javassist.bytecode.SignatureAttribute.TypeArgument;
import water.api.Request.API;
import water.util.Log;
import water.util.Log.Tag.Sys;
public class Weaver {
private final ClassPool _pool;
private final CtClass _dtask, _iced, _enum, _freezable;
private final CtClass[] _serBases;
private final CtClass _fielddoc;
private final CtClass _arg;
// Versioning
// private final CtClass _apiSchema;
// private final CtClass _apiAdaptor;
// private final CtClass _apiHandler;
// ---
public static Class _typeMap;
public static volatile String[] _packages = new String[] { "water", "hex", "org.junit", "com.oxdata", "ai.h2o" };
Weaver() {
try {
_pool = ClassPool.getDefault();
_pool.insertClassPath(new ClassClassPath(Weaver.class));
_iced = _pool.get("water.Iced"); // Needs serialization
_dtask= _pool.get("water.DTask");// Needs serialization and remote execution
_enum = _pool.get("java.lang.Enum"); // Needs serialization
_freezable = _pool.get("water.Freezable"); // Needs serialization
// _apiSchema = _pool.get("water.api.rest.schemas.ApiSchema");
// _apiAdaptor = _pool.get("water.api.rest.ApiAdaptor");
// _apiHandler = _pool.get("water.api.rest.handlers.AbstractHandler");
//_versioned = _pool.get("water.api.rest.REST$Versioned");
_serBases = new CtClass[] { _iced, _dtask, _enum, _freezable };
for( CtClass c : _serBases ) c.freeze();
_fielddoc = _pool.get("water.api.DocGen$FieldDoc");// Is auto-documentation result
_arg = _pool.get("water.api.RequestArguments$Argument"); // Needs auto-documentation
} catch( NotFoundException e ) {
throw new RuntimeException(e);
}
}
public static void registerPackage(String name) {
synchronized( Weaver.class ) {
String[] a = _packages;
if(Arrays.asList(a).indexOf(name) < 0) {
String[] t = Arrays.copyOf(a, a.length + 1);
t[t.length-1] = name;
_packages = t;
}
}
}
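  // Illustrative sketch (the package name below is hypothetical): 3rd-party code
  // shipping its own Iced/Freezable subclasses would register its package before
  // those classes are first woven, e.g.
  //   Weaver.registerPackage("com.example.myext");
  // so inPackages() lets javassistLoadClass() instrument them.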
public Class weaveAndLoad(String name, ClassLoader cl) {
try {
CtClass w = javassistLoadClass(name);
if( w == null ) return null;
return w.toClass(cl, null);
} catch( CannotCompileException e ) {
throw new RuntimeException(e);
}
}
// See if javaassist can find this class; if so then check to see if it is a
// subclass of water.DTask, and if so - alter the class before returning it.
private synchronized CtClass javassistLoadClass(String name) {
// Always use this weaver's classloader to preserve correct top-level classloader
// for loading H2O's classes.
// The point is to load all the time weaved classes by the same classloader
// and do not let JavaAssist to use thread context classloader.
// For normal H2O execution it will be always the same classloader
// but for running from 3rd party code, we preserve Boot's parent loader
// for all H2O internal classes.
final ClassLoader ccl = Thread.currentThread().getContextClassLoader();
Thread.currentThread().setContextClassLoader(this.getClass().getClassLoader());
try {
if( name.equals("water.Boot") ) return null;
CtClass cc = _pool.get(name); // Full Name Lookup
if( cc == null ) return null; // Oops? Try the system loader, but expected to work
if( !inPackages(cc.getPackageName()) ) return null;
for( CtClass base : _serBases )
if( cc.subclassOf(base) )
return javassistLoadClass(cc);
// Subtype of an alternative freezable?
if( cc.subtypeOf( _freezable ) ) {
// Find the alternative freezable base
CtClass xcc = cc;
CtClass ycc = null;
while( xcc.subtypeOf(_freezable) ) { ycc = xcc; xcc = xcc.getSuperclass(); }
if( !ycc.isFrozen() ) ycc.freeze(); // Freeze the alternative base
return cc == ycc ? cc : javassistLoadClass(cc); // And weave the subclass
}
return cc;
} catch( NotFoundException nfe ) {
return null; // Not found? Use the normal loader then
} catch( CannotCompileException e ) { // Expected to compile
throw new RuntimeException(e);
} catch (BadBytecode e) {
throw new RuntimeException(e);
} finally {
// Do not forget to configure classloader back to original value
Thread.currentThread().setContextClassLoader(ccl);
}
}
private static boolean inPackages(String pack) {
if( pack==null ) return false;
String[] p = _packages;
for( int i = 0; i < p.length; i++ )
if( pack.startsWith(p[i]) )
return true;
return false;
}
private synchronized CtClass javassistLoadClass( CtClass cc ) throws NotFoundException, CannotCompileException, BadBytecode {
if( cc.isFrozen() ) return cc;
// serialize parent
javassistLoadClass(cc.getSuperclass());
// Serialize enums first, since we need the raw_enum function for this class
for( CtField ctf : cc.getDeclaredFields() ) {
CtClass base = ctf.getType();
while( base.isArray() ) base = base.getComponentType();
if( base.subclassOf(_enum) && base != cc )
javassistLoadClass(base);
}
CtClass ccr = addSerializationMethods(cc);
ccr.freeze();
return ccr;
}
// Returns true if this method pre-exists *in the local class*.
// Returns false otherwise, which requires a local method to be injected
private static boolean hasExisting( String methname, String methsig, CtBehavior ccms[] ) throws NotFoundException {
for( CtBehavior cm : ccms )
if( cm.getName ().equals(methname) &&
cm.getSignature().equals(methsig ) )
return true;
return false;
}
// This method is handed a CtClass which is known to be a subclass of
// water.DTask. Add any missing serialization methods.
CtClass addSerializationMethods( CtClass cc ) throws CannotCompileException, NotFoundException {
if( cc.subclassOf(_enum) ) exposeRawEnumArray(cc);
if( cc.subclassOf(_iced) ) ensureAPImethods(cc);
if( cc.subclassOf(_iced) ||
cc.subclassOf(_dtask)||
cc.subtypeOf(_freezable)) {
cc.setModifiers(javassist.Modifier.setPublic(cc.getModifiers()));
ensureSerMethods(cc);
ensureNullaryCtor(cc);
ensureNewInstance(cc);
ensureType(cc);
}
return cc;
}
// Expose the raw enum array that all Enums have, so we can directly convert
// ordinal values to enum instances.
private void exposeRawEnumArray(CtClass cc) throws NotFoundException, CannotCompileException {
CtField field;
try {
field = cc.getField("$VALUES");
} catch( NotFoundException nfe ) {
// Eclipse apparently stores this in a different place.
field = cc.getField("ENUM$VALUES");
}
String body = "public static "+cc.getName()+" raw_enum(int i) { return i==255?null:"+field.getName()+"[i]; } ";
try {
cc.addMethod(CtNewMethod.make(body,cc));
} catch( CannotCompileException ce ) {
Log.warn(Sys.WATER,"--- Compilation failure while compiler raw_enum for "+cc.getName()+"\n"+body+"\n------",ce);
throw ce;
}
}
// Create a newInstance call which will rapidly make a new object of a
// particular type *without* Reflection's overheads.
private void ensureNewInstance(CtClass cc) throws NotFoundException, CannotCompileException {
CtMethod ccms[] = cc.getDeclaredMethods();
if( !javassist.Modifier.isAbstract(cc.getModifiers()) &&
!hasExisting("newInstance", "()Lwater/Freezable;", ccms) ) {
cc.addMethod(CtNewMethod.make(
"public water.Freezable newInstance() {\n" +
" return new " +cc.getName()+"();\n" +
"}", cc));
}
}
// Serialized types support a unique dense integer per-class, so we can do
// simple array lookups to get class info. The integer is cluster-wide
// unique and determined lazily.
private void ensureType(CtClass cc) throws NotFoundException, CannotCompileException {
CtMethod ccms[] = cc.getDeclaredMethods();
if( !javassist.Modifier.isAbstract(cc.getModifiers()) &&
!hasExisting("frozenType", "()I", ccms) ) {
// Build a simple field & method returning the type token
cc.addField(new CtField(CtClass.intType, "_frozen$type", cc));
cc.addMethod(CtNewMethod.make("public int frozenType() {" +
" return _frozen$type == 0 ? (_frozen$type=water.TypeMap.onIce(\""+cc.getName()+"\")) : _frozen$type;" +
"}",cc));
}
}
private void ensureVersion(CtClass cc) throws NotFoundException, CannotCompileException, BadBytecode {
CtMethod ccms[] = cc.getDeclaredMethods();
if (!javassist.Modifier.isAbstract(cc.getModifiers())) {
String gsig = cc.getGenericSignature();
ClassSignature csig = SignatureAttribute.toClassSignature(gsig);
// Warning: this is not doing proper parent (superclass/interfaces) traversal
TypeArgument ta = getTypeArg(csig.getSuperClass().getTypeArguments(), "Lwater/api/rest/Version");
if (ta!=null && !hasExisting("getVersion", "()"+ta.getType().encode(), ccms) ) {
String typeName = ta.toString();
String valueName = getValueFromType(typeName);
//cc.addMethod(CtNewMethod.make("public "+typeName+" getVersion() {" +
cc.addMethod(CtNewMethod.make("public water.api.rest.Version getVersion() {" +
" return "+valueName+";" +
"}",cc));
}
}
}
private String getValueFromType(String typeName) {
int idx = typeName.indexOf('$');
String t = typeName.substring(0, idx);
String v = typeName.substring(idx+1).toLowerCase();
return t+"."+v;
}
private TypeArgument getTypeArg(TypeArgument[] args, String prefix) {
for (TypeArgument ta : args)
if (ta.getType().encode().startsWith(prefix)) return ta;
return null;
}
// --------------------------------------------------------------------------
private static abstract class FieldFilter {
abstract boolean filter( CtField ctf ) throws NotFoundException;
}
private void ensureAPImethods(CtClass cc) throws NotFoundException, CannotCompileException {
CtField ctfs[] = cc.getDeclaredFields();
boolean api = false;
for( CtField ctf : ctfs )
if( ctf.getName().equals("API_WEAVER") ) {
api = true; break;
}
if( api == false ) return;
CtField fielddoc=null;
CtField getdoc=null;
boolean callsuper = true;
for( CtClass base : _serBases )
if( cc.getSuperclass() == base ) callsuper = false;
// ---
// Auto-gen JSON output to AutoBuffers
make_body(cc,ctfs,callsuper,
"public water.AutoBuffer writeJSONFields(water.AutoBuffer ab) {\n",
" super.writeJSONFields(ab)",
" ab.putJSON%z(\"%s\",%s)",
" ab.putEnumJSON(\"%s\",%s)",
" ab.putJSON%z(\"%s\",%s)",
".put1(',');\n",
";\n return ab;\n}",
new FieldFilter() {
@Override boolean filter(CtField ctf) throws NotFoundException {
API api = null;
try {
api = (API) ctf.getAnnotation(API.class);
} catch( ClassNotFoundException ex) { throw new NotFoundException("getAnnotations throws ", ex); }
return api != null && (api.json() || !isInput(ctf.getType(), api));
}
});
// ---
// Auto-gen JSON & Args doc method. Requires a structured java object.
    // Every @API annotated field is either a JSON field, an Argument, or both,
    // and has some associated fields.
//
// H2OHexKey someField2; // Anything derived from RequestArguments$Argument
// static final String someField2Help = "some help text";
// static final int someField2MinVar = 1, someField2MaxVar = 1;
//
// String[] someField; // Anything NOT derived from Argument is a JSON field
// static final String someFieldHelp = "some help text";
// static final int someFieldMinVar = 1, someFieldMaxVar = 1;
// xxxMinVar and xxxMaxVar are optional; if xxxMinVar is missing it
    // defaults to 1, and if xxxMaxVar is missing it defaults to "till now".
StringBuilder sb = new StringBuilder();
sb.append("new water.api.DocGen$FieldDoc[] {");
// Get classes in the hierarchy with marker field
ArrayList<CtClass> classes = new ArrayList<CtClass>();
CtClass current = cc;
while( true ) { // For all self & superclasses
classes.add(current);
current = current.getSuperclass();
api = false;
for( CtField ctf : current.getDeclaredFields() )
if( ctf.getName().equals("API_WEAVER") )
api = true;
if( api == false ) break;
}
// Start with parent classes to get fields in order
Collections.reverse(classes);
boolean first = true;
for(CtClass c : classes) {
for( CtField ctf : c.getDeclaredFields() ) {
int mods = ctf.getModifiers();
if( javassist.Modifier.isStatic(mods) ) {
if( c == cc ) { // Capture the DOC_* fields for self only
if( ctf.getName().equals("DOC_FIELDS") ) fielddoc = ctf;
if( ctf.getName().equals("DOC_GET") ) getdoc = ctf;
}
continue; // Only auto-doc instance fields (not static)
}
first = addDocIfAPI(sb,ctf,cc,first);
}
}
sb.append("}");
if( fielddoc == null ) throw new CannotCompileException("Did not find static final DocGen.FieldDoc[] DOC_FIELDS field;");
if( !fielddoc.getType().isArray() ||
fielddoc.getType().getComponentType() != _fielddoc )
throw new CannotCompileException("DOC_FIELDS not declared static final DocGen.FieldDoc[];");
cc.removeField(fielddoc); // Remove the old one
cc.addField(fielddoc,CtField.Initializer.byExpr(sb.toString()));
cc.addMethod(CtNewMethod.make(" public water.api.DocGen$FieldDoc[] toDocField() { return DOC_FIELDS; }",cc));
if( getdoc != null )
cc.addMethod(CtNewMethod.make(" public String toDocGET() { return DOC_GET; }",cc));
}
private boolean addDocIfAPI( StringBuilder sb, CtField ctf, CtClass cc, boolean first ) throws NotFoundException, CannotCompileException {
String name = ctf.getName();
Object[] as;
try { as = ctf.getAnnotations(); }
catch( ClassNotFoundException ex) { throw new NotFoundException("getAnnotations throws ", ex); }
API api = null;
for(Object o : as) if(o instanceof API) api = (API) o;
if( api != null ) {
String help = api.help();
int min = api.since();
int max = api.until();
if( min < 1 || min > 1000000 ) throw new CannotCompileException("Found field '"+name+"' but 'since' < 1 or 'since' > 1000000");
if( max < min || (max > 1000000 && max != Integer.MAX_VALUE) )
throw new CannotCompileException("Found field '"+name+"' but 'until' < "+min+" or 'until' > 1000000");
if( first ) first = false;
else sb.append(",");
boolean input = isInput(ctf.getType(), api);
sb.append("new water.api.DocGen$FieldDoc(\""+name+"\",\""+help+"\","+min+","+max+","+ctf.getType().getName()+".class,"+input+","+api.required()+",water.api.ParamImportance."+api.importance()+",water.api.Direction."+api.direction()+",\""+api.path()+"\","+ api.type().getName()+".class,\""+api.valid()+"\", \""+api.enabled()+"\",\""+api.visible()+"\")");
}
return first;
}
private final boolean isInput(CtClass fieldType, API api) {
return Request2.Helper.isInput(api) || //
// Legacy
fieldType.subclassOf(_arg);
}
// --------------------------------------------------------------------------
// Support for a nullary constructor, for deserialization.
private void ensureNullaryCtor(CtClass cc) throws NotFoundException, CannotCompileException {
// Build a null-ary constructor if needed
String clzname = cc.getSimpleName();
if( !hasExisting(clzname,"()V",cc.getDeclaredConstructors()) ) {
String body = "public "+clzname+"() { }";
cc.addConstructor(CtNewConstructor.make(body,cc));
} else {
CtConstructor ctor = cc.getConstructor("()V");
ctor.setModifiers(javassist.Modifier.setPublic(ctor.getModifiers()));
}
}
// Serialization methods: read, write & copyOver.
private void ensureSerMethods(CtClass cc) throws NotFoundException, CannotCompileException {
// Check for having "read" and "write". Either All or None of read & write
// must be defined. Note that I use getDeclaredMethods which returns only
// the local methods. The singular getDeclaredMethod searches for a
// specific method *up into superclasses*, which will trigger premature
// loading of those superclasses.
CtMethod ccms[] = cc.getDeclaredMethods();
boolean w = hasExisting("write", "(Lwater/AutoBuffer;)Lwater/AutoBuffer;", ccms);
boolean r = hasExisting("read" , "(Lwater/AutoBuffer;)Lwater/Freezable;" , ccms);
boolean d = cc.subclassOf(_dtask); // Subclass of DTask?
boolean c = hasExisting("copyOver" , "(Lwater/Freezable;)V" , ccms);
if( w && r && (!d || c) ) return;
if( w || r || c )
throw new RuntimeException(cc.getName() +" must implement all of " +
"read(AutoBuffer) and write(AutoBuffer) and copyOver(Freezable) or none");
// Add the serialization methods: read, write.
CtField ctfs[] = cc.getDeclaredFields();
// We cannot call Iced.xxx, as these methods always throw a
// RuntimeException (to make sure we noisily fail instead of silently
// fail). But we DO need to call the super-chain of serialization methods
// - stopping at DTask.
boolean callsuper = true;
// for( CtClass base : _serBases )
// if( cc.getSuperclass() == base ) callsuper = false;
// Running example is:
// class Crunk extends DTask {
// int _x; int _xs[]; double _d;
// }
// Build a write method that looks something like this:
// public AutoBuffer write( AutoBuffer s ) {
// s.put4(_x);
// s.putA4(_xs);
// s.put8d(_d);
// }
// TODO use Freezable.write instead of AutoBuffer.put for final classes
make_body(cc,ctfs,callsuper,
"public water.AutoBuffer write(water.AutoBuffer ab) {\n",
" super.write(ab);\n",
" ab.put%z(%s);\n",
" ab.putEnum(%s);\n",
" ab.put%z(%s);\n",
"",
" return ab;\n" +
"}", null);
// Build a read method that looks something like this:
// public T read( AutoBuffer s ) {
// _x = s.get4();
// _xs = s.getA4();
// _d = s.get8d();
// }
make_body(cc,ctfs,callsuper,
"public water.Freezable read(water.AutoBuffer s) {\n",
" super.read(s);\n",
" %s = s.get%z();\n",
" %s = %c.raw_enum(s.get1());\n",
" %s = (%C)s.get%z(%c.class);\n",
"",
" return this;\n" +
"}", null);
// Build a copyOver method that looks something like this:
// public void copyOver( T s ) {
// _x = s._x;
// _xs = s._xs;
// _d = s._d;
// }
if( d ) make_body(cc,ctfs,callsuper,
"public void copyOver(water.Freezable i) {\n"+
" "+cc.getName()+" s = ("+cc.getName()+")i;\n",
" super.copyOver(s);\n",
" %s = s.%s;\n",
" %s = s.%s;\n",
" %s = s.%s;\n",
"",
"}", null);
}
// Produce a code body with all these fill-ins.
private final void make_body(CtClass cc, CtField[] ctfs, boolean callsuper,
String header,
String supers,
String prims,
String enums,
String freezables,
String field_sep,
String trailer,
FieldFilter ff
) throws CannotCompileException, NotFoundException {
StringBuilder sb = new StringBuilder();
sb.append(header);
if( callsuper ) sb.append(supers);
boolean debug_print = false;
boolean first = !callsuper;
for( CtField ctf : ctfs ) {
int mods = ctf.getModifiers();
if( javassist.Modifier.isTransient(mods) || javassist.Modifier.isStatic(mods) ) {
debug_print |= ctf.getName().equals("DEBUG_WEAVER");
continue; // Only serialize not-transient instance fields (not static)
}
if( ff != null && !ff.filter(ctf) ) continue; // Fails the filter
if( first ) first = false;
else sb.append(field_sep);
CtClass base = ctf.getType();
while( base.isArray() ) base = base.getComponentType();
int ftype = ftype(ctf.getSignature(), cc, ctf ); // Field type encoding
if( ftype%20 == 9 ) {
sb.append(freezables);
} else if( ftype%20 == 10 ) { // Enums
sb.append(enums);
} else {
sb.append(prims);
}
String z = FLDSZ1[ftype % 20];
for(int i = 0; i < ftype / 20; ++i ) z = 'A'+z;
subsub(sb, "%z", z); // %z ==> short type name
subsub(sb, "%s", ctf.getName()); // %s ==> field name
subsub(sb, "%c", base.getName().replace('$', '.')); // %c ==> base class name
subsub(sb, "%C", ctf.getType().getName().replace('$', '.')); // %C ==> full class name
}
sb.append(trailer);
String body = sb.toString();
if( debug_print ) {
System.err.println(cc.getName()+" "+body);
}
try {
cc.addMethod(CtNewMethod.make(body,cc));
} catch( CannotCompileException e ) {
throw Log.err("--- Compilation failure while compiling serializers for "+cc.getName()+"\n"+body+"\n-----",e);
}
}
static private final String[] FLDSZ1 = {
"Z","1","2","2","4","4f","8","8d","Str","","Enum" // prims, String, Freezable, Enum
};
// Field types:
// 0-7: primitives
// 8,9, 10: String, Freezable, Enum
// 20-27: array-of-prim
// 28,29, 30: array-of-String, Freezable, Enum
// Barfs on all others (eg Values or array-of-Frob, etc)
private int ftype( String sig, CtClass ct, CtField fld ) throws NotFoundException {
switch( sig.charAt(0) ) {
case 'Z': return 0; // Booleans: I could compress these more
case 'B': return 1; // Primitives
case 'C': return 2;
case 'S': return 3;
case 'I': return 4;
case 'F': return 5;
case 'J': return 6;
case 'D': return 7;
case 'L': // Handled classes
if( sig.equals("Ljava/lang/String;") ) return 8;
String clz = sig.substring(1,sig.length()-1).replace('/', '.');
CtClass argClass = _pool.get(clz);
if( argClass.subtypeOf(_pool.get("water.Freezable")) ) return 9;
if( argClass.subtypeOf(_pool.get("java.lang.Enum")) ) return 10;
break;
case '[': // Arrays
return ftype(sig.substring(1), ct, fld)+20; // Same as prims, plus 20
}
throw barf(ct, fld);
}
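// Worked example (illustrative): a field "int _xs[]" has signature "[I", so
// ftype() returns 4+20 = 24. FLDSZ1[24%20] is "4", the array depth 24/20 = 1
// prepends one 'A' giving "A4", and the write template " ab.put%z(%s);"
// expands to " ab.putA4(_xs);".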
// Replace 2-byte strings like "%s" with s2.
static private void subsub( StringBuilder sb, String s1, String s2 ) {
int idx;
while( (idx=sb.indexOf(s1)) != -1 ) sb.replace(idx,idx+2,s2);
}
private static RuntimeException barf( CtClass ct, CtField fld ) throws NotFoundException {
return new RuntimeException(ct.getSimpleName()+"."+fld.getName()+" of type "+(fld.getType().getSimpleName())+": Serialization not implemented; does not extend Iced or DTask");
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/AUC.java
|
package water.api;
import static java.util.Arrays.sort;
import hex.ConfusionMatrix;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.Utils;
import java.util.HashSet;
public class AUC extends Func {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
public static final String DOC_GET = "AUC";
@API(help = "", required = true, filter = Default.class, json=true)
public Frame actual;
@API(help="Column of the actual results (will display vertically)", required=true, filter=actualVecSelect.class, json=true)
public Vec vactual;
class actualVecSelect extends VecClassSelect { actualVecSelect() { super("actual"); } }
@API(help = "", required = true, filter = Default.class, json=true)
public Frame predict;
@API(help="Column of the predicted results (will display horizontally)", required=true, filter=predictVecSelect.class, json=true)
public Vec vpredict;
class predictVecSelect extends VecClassSelect { predictVecSelect() { super("predict"); } }
@API(help = "Thresholds (optional, e.g. 0:1:0.01 or 0.0,0.2,0.4,0.6,0.8,1.0).", required = false, filter = Default.class, json = true)
public float[] thresholds;
@API(help = "Threshold criterion", filter = Default.class, json = true)
public ThresholdCriterion threshold_criterion = ThresholdCriterion.maximum_F1;
public enum ThresholdCriterion {
maximum_F1,
maximum_F2,
maximum_F0point5,
maximum_Accuracy,
maximum_Precision,
maximum_Recall,
maximum_Specificity,
maximum_absolute_MCC,
minimizing_max_per_class_Error
}
@API(help = "AUC Data", json = true)
AUCData aucdata;
public AUCData data() { return aucdata; }
public AUC() {}
/**
* Constructor for algos that make their own CMs
* @param cms ConfusionMatrices
* @param thresh Thresholds
*/
public AUC(hex.ConfusionMatrix[] cms, float[] thresh) {
this(cms, thresh, null);
}
/**
* Constructor for algos that make their own CMs
* @param cms ConfusionMatrices
* @param thresh Thresholds
* @param domain Domain
*/
public AUC(hex.ConfusionMatrix[] cms, float[] thresh, String[] domain) {
aucdata = new AUCData().compute(cms, thresh, domain, threshold_criterion);
}
@Override protected void init() throws IllegalArgumentException {
// Input handling
if( vactual==null || vpredict==null )
throw new IllegalArgumentException("Missing vactual or vpredict!");
if (vactual.length() != vpredict.length())
throw new IllegalArgumentException("Both arguments must have the same length ("+vactual.length()+"!="+vpredict.length()+")!");
if (!vactual.isInt())
throw new IllegalArgumentException("Actual column must be integer class labels!");
if (vactual.cardinality() != -1 && vactual.cardinality() != 2)
throw new IllegalArgumentException("Actual column must contain binary class labels, but found cardinality " + vactual.cardinality() + "!");
if (vpredict.isEnum())
throw new IllegalArgumentException("vpredict cannot be class labels, expect probabilities.");
}
@Override protected void execImpl() {
Vec va = null, vp;
try {
va = vactual.toEnum(); // always returns TransfVec
vp = vpredict;
// The vectors are from different groups => align them, and delete the aligned copy after computation
if (!va.group().equals(vp.group())) {
vp = va.align(vp);
}
// compute thresholds, if not user-given
if (thresholds != null) {
sort(thresholds);
if (Utils.minValue(thresholds) < 0) throw new IllegalArgumentException("Minimum threshold cannot be negative.");
if (Utils.maxValue(thresholds) > 1) throw new IllegalArgumentException("Maximum threshold cannot be greater than 1.");
} else {
HashSet<Float> hs = new HashSet<Float>();
final int bins = (int)Math.min(vpredict.length(), 200L);
final long stride = Math.max(vpredict.length() / bins, 1);
for( int i=0; i<bins; ++i) hs.add(new Float(vpredict.at(i*stride))); //data-driven thresholds TODO: use percentiles (from Summary2?)
for (int i=0;i<51;++i) hs.add(new Float(i/50.)); //always add 0.02-spaced thresholds from 0 to 1
// create a sorted vector of unique thresholds
thresholds = new float[hs.size()];
int i=0;
for (Float h : hs) thresholds[i++] = h;
sort(thresholds);
}
// compute CMs
aucdata = new AUCData().compute(new AUCTask(thresholds,va.mean()).doAll(va,vp).getCMs(), thresholds, va._domain, threshold_criterion);
}
catch(Throwable t) {
t.printStackTrace();
throw new RuntimeException(t);
}
finally { // Delete adaptation vectors
if (va!=null) UKV.remove(va._key);
}
}
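// Example of the data-driven threshold path above (illustrative): for a predicted
// vector of length 1,000,000, bins = 200 and stride = 5,000, so the probabilities at
// rows 0, 5000, 10000, ... are collected, the 51 fixed values 0.00, 0.02, ..., 1.00 are
// added, and the de-duplicated set is sorted into 'thresholds'.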
/* return true if a is better than b with respect to criterion criter */
static boolean isBetter(ConfusionMatrix a, ConfusionMatrix b, ThresholdCriterion criter) {
if (criter == ThresholdCriterion.maximum_F1) {
return (!Double.isNaN(a.F1()) &&
(Double.isNaN(b.F1()) || a.F1() > b.F1()));
} if (criter == ThresholdCriterion.maximum_F2) {
return (!Double.isNaN(a.F2()) &&
(Double.isNaN(b.F2()) || a.F2() > b.F2()));
} if (criter == ThresholdCriterion.maximum_F0point5) {
return (!Double.isNaN(a.F0point5()) &&
(Double.isNaN(b.F0point5()) || a.F0point5() > b.F0point5()));
} else if (criter == ThresholdCriterion.maximum_Recall) {
return (!Double.isNaN(a.recall()) &&
(Double.isNaN(b.recall()) || a.recall() > b.recall()));
} else if (criter == ThresholdCriterion.maximum_Precision) {
return (!Double.isNaN(a.precision()) &&
(Double.isNaN(b.precision()) || a.precision() > b.precision()));
} else if (criter == ThresholdCriterion.maximum_Accuracy) {
return a.accuracy() > b.accuracy();
} else if (criter == ThresholdCriterion.minimizing_max_per_class_Error) {
return a.max_per_class_error() < b.max_per_class_error();
} else if (criter == ThresholdCriterion.maximum_Specificity) {
return (!Double.isNaN(a.specificity()) &&
(Double.isNaN(b.specificity()) || a.specificity() > b.specificity()));
} else if (criter == ThresholdCriterion.maximum_absolute_MCC) {
return (!Double.isNaN(a.mcc()) &&
(Double.isNaN(b.mcc()) || Math.abs(a.mcc()) > Math.abs(b.mcc())));
}
else {
throw new IllegalArgumentException("Unknown threshold criterion.");
}
}
@Override public boolean toHTML( StringBuilder sb ) { return aucdata.toHTML(sb); }
public void toASCII( StringBuilder sb ) { aucdata.toASCII(sb); }
// Compute CMs for different thresholds via MRTask2
private static class AUCTask extends MRTask2<AUCTask> {
/* @OUT CMs */ public final hex.ConfusionMatrix[] getCMs() { return _cms; }
private hex.ConfusionMatrix[] _cms;
double nullDev;
double resDev;
final double ymu;
/* IN thresholds */ final private float[] _thresh;
AUCTask(float[] thresh, double mu) {
_thresh = thresh.clone();
ymu = mu;
}
static final double y_log_y(double y, double mu) {
if(y == 0)return 0;
if(mu < Double.MIN_NORMAL) mu = Double.MIN_NORMAL;
return y * Math.log(y / mu);
}
public static double binomial_deviance(double yreal, double ymodel){
return 2 * ((y_log_y(yreal, ymodel)) + y_log_y(1 - yreal, 1 - ymodel));
}
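// Example (illustrative): binomial_deviance(1.0, 0.8)
//   = 2 * ( y_log_y(1.0, 0.8) + y_log_y(0.0, 0.2) ) // the second term is 0 by the y==0 case
//   = 2 * ln(1/0.8) ~= 0.446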
@Override public void map( Chunk ca, Chunk cp ) {
_cms = new hex.ConfusionMatrix[_thresh.length];
for (int i=0;i<_cms.length;++i)
_cms[i] = new hex.ConfusionMatrix(2);
final int len = Math.min(ca._len, cp._len);
for( int i=0; i < len; i++ ) {
if (ca.isNA0(i)) continue;
// throw new UnsupportedOperationException("Actual class label cannot be a missing value!");
final int a = (int)ca.at80(i); //would be a 0 if double was NaN
assert (a == 0 || a == 1) : "Invalid values in vactual: must be binary (0 or 1).";
if (cp.isNA0(i)) {
// Log.warn("Skipping predicted NaN."); //some models predict NaN!
continue;
}
final double pr = cp.at0(i);
for( int t=0; t < _cms.length; t++ ) {
final int p = pr >= _thresh[t]?1:0;
_cms[t].add(a, p);
}
}
}
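// Example (illustrative): with thresholds {0.3, 0.5, 0.7}, a row with actual a=1 and
// predicted pr=0.6 increments cell [1][1] of the CMs for 0.3 and 0.5 (pr >= t) and
// cell [1][0] of the CM for 0.7 (pr < t).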
@Override public void reduce( AUCTask other ) {
for( int i=0; i<_cms.length; ++i) {
_cms[i].add(other._cms[i]);
}
nullDev += other.nullDev;
resDev += other.resDev;
}
@Override public void postGlobal(){
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/AUCData.java
|
package water.api;
import hex.ConfusionMatrix;
import org.apache.commons.lang.StringEscapeUtils;
import static water.api.AUC.ThresholdCriterion;
import static water.api.AUC.isBetter;
import water.api.Request.*;
import water.*;
import java.util.HashSet;
public class AUCData extends Iced {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help = "Thresholds (optional, e.g. 0:1:0.01 or 0.0,0.2,0.4,0.6,0.8,1.0).", json = true)
public float[] thresholds;
@API(help = "Threshold criterion", json = true)
public ThresholdCriterion threshold_criterion = ThresholdCriterion.maximum_F1;
@API(help="domain of the actual response", json=true)
private String [] actual_domain;
@API(help="AUC (ROC)", json=true)
public double AUC;
@API(help="Gini", json=true)
public double Gini;
@API(help = "Confusion Matrices for all thresholds", json=true)
public long[][][] confusion_matrices;
@API(help = "F1 for all thresholds", json=true)
public float[] F1;
@API(help = "F2 for all thresholds", json=true)
public float[] F2;
@API(help = "F0point5 for all thresholds", json=true)
public float[] F0point5;
@API(help = "Accuracy for all thresholds", json=true)
public float[] accuracy;
@API(help = "Error for all thresholds", json=true)
public float[] errorr;
@API(help = "Precision for all thresholds", json=true)
public float[] precision;
@API(help = "Recall for all thresholds", json=true)
public float[] recall;
@API(help = "Specificity for all thresholds", json=true)
public float[] specificity;
@API(help = "MCC for all thresholds", json=true)
public float[] mcc;
@API(help = "Max per class error for all thresholds", json=true)
public float[] max_per_class_error;
@API(help="Threshold criteria", json=true)
String[] threshold_criteria;
@API(help="Optimal thresholds for criteria", json=true)
private float[] threshold_for_criteria;
@API(help="F1 for threshold criteria", json=true)
private float[] F1_for_criteria;
@API(help="F2 for threshold criteria", json=true)
private float[] F2_for_criteria;
@API(help="F0point5 for threshold criteria", json=true)
private float[] F0point5_for_criteria;
@API(help="Accuracy for threshold criteria", json=true)
private float[] accuracy_for_criteria;
@API(help="Error for threshold criteria", json=true)
private float[] error_for_criteria;
@API(help="Precision for threshold criteria", json=true)
private float[] precision_for_criteria;
@API(help="Recall for threshold criteria", json=true)
private float[] recall_for_criteria;
@API(help="Specificity for threshold criteria", json=true)
private float[] specificity_for_criteria;
@API(help="MCC for threshold criteria", json=true)
private float[] mcc_for_criteria;
@API(help="Maximum per class Error for threshold criteria", json=true)
private float[] max_per_class_error_for_criteria;
@API(help="Confusion Matrices for threshold criteria", json=true)
private long[][][] confusion_matrix_for_criteria;
/* Independent on thresholds */
public double AUC() { return AUC; }
public double Gini() { return Gini; }
/* Return the metrics for given criterion */
public double F1(ThresholdCriterion criter) { return _cms[idxCriter[criter.ordinal()]].F1(); }
public double F2(ThresholdCriterion criter) { return _cms[idxCriter[criter.ordinal()]].F2(); }
public double F0point5(ThresholdCriterion criter) { return _cms[idxCriter[criter.ordinal()]].F0point5(); }
public double precision(ThresholdCriterion criter) { return _cms[idxCriter[criter.ordinal()]].precision(); }
public double recall(ThresholdCriterion criter) { return _cms[idxCriter[criter.ordinal()]].recall(); }
public double specificity(ThresholdCriterion criter) { return _cms[idxCriter[criter.ordinal()]].specificity(); }
public double mcc(ThresholdCriterion criter) { return _cms[idxCriter[criter.ordinal()]].mcc(); }
public double accuracy(ThresholdCriterion criter) { return _cms[idxCriter[criter.ordinal()]].accuracy(); }
public double err(ThresholdCriterion criter) { return _cms[idxCriter[criter.ordinal()]].err(); }
public double max_per_class_error(ThresholdCriterion criter) { return _cms[idxCriter[criter.ordinal()]].max_per_class_error(); }
public float threshold(ThresholdCriterion criter) { return threshold_for_criteria[criter.ordinal()]; }
public long[][] cm(ThresholdCriterion criter) { return confusion_matrix_for_criteria[criter.ordinal()]; }
/* Return the metrics for chosen threshold criterion */
public double F1() { return F1(threshold_criterion); }
public double F2() { return F2(threshold_criterion); }
public double F0point5() { return F0point5(threshold_criterion); }
public double err() { return err(threshold_criterion); }
public double precision() { return precision(threshold_criterion); }
public double recall() { return recall(threshold_criterion); }
public double specificity() { return specificity(threshold_criterion); }
public double mcc() { return mcc(threshold_criterion); }
public double accuracy() { return accuracy(threshold_criterion); }
public double max_per_class_error() { return max_per_class_error(threshold_criterion); }
public float threshold() { return threshold(threshold_criterion); }
public long[][] cm() { return cm(threshold_criterion); }
public ConfusionMatrix CM() { return _cms[idxCriter[threshold_criterion.ordinal()]]; }
/* Return the best possible metrics */
public double bestF1() { return F1(ThresholdCriterion.maximum_F1); }
public double bestErr() { return err(ThresholdCriterion.maximum_Accuracy); }
/* Helpers */
private int[] idxCriter;
private double[] _tprs;
private double[] _fprs;
private hex.ConfusionMatrix[] _cms;
private static double trapezoid_area(double x1, double x2, double y1, double y2) { return Math.abs(x1-x2)*(y1+y2)/2.; }
public AUCData compute(hex.ConfusionMatrix[] cms, float[] thresh, String[] domain, ThresholdCriterion criter) {
_cms = cms;
thresholds = thresh;
threshold_criterion = criter;
actual_domain = domain;
assert(_cms.length == thresholds.length):("incompatible lengths of thresholds and confusion matrices: " + _cms.length + " != " + thresholds.length);
// compute AUC and best thresholds
computeAUC();
findBestThresholds(thresh);
computeMetrics();
return this;
}
private void computeAUC() {
_tprs = new double[_cms.length];
_fprs = new double[_cms.length];
double TPR_pre = 1;
double FPR_pre = 1;
AUC = 0;
for( int t = 0; t < _cms.length; ++t ) {
double TPR = 1 - _cms[t].classErr(1); // =TP/(TP+FN) = true-positive-rate
double FPR = _cms[t].classErr(0); // =FP/(FP+TN) = false-positive-rate
AUC += trapezoid_area(FPR_pre, FPR, TPR_pre, TPR);
TPR_pre = TPR;
FPR_pre = FPR;
_tprs[t] = TPR;
_fprs[t] = FPR;
}
AUC += trapezoid_area(FPR_pre, 0, TPR_pre, 0);
assert(AUC > -1e-5 && AUC < 1.+1e-5); //check numerical sanity
AUC = Math.max(0., Math.min(AUC, 1.)); //clamp to 0...1
Gini = 2*AUC-1;
}
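// Worked example (illustrative): with a single confusion matrix giving
// (FPR, TPR) = (0.5, 0.9), the sweep starts from (1,1), so
//   AUC = |1-0.5|*(1+0.9)/2 + |0.5-0|*(0.9+0)/2 = 0.475 + 0.225 = 0.7
// i.e. the ROC curve is integrated by trapezoids from (1,1) down to (0,0).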
private void findBestThresholds(float[] thresholds) {
threshold_criteria = new String[ThresholdCriterion.values().length];
int i=0;
HashSet<ThresholdCriterion> hs = new HashSet<ThresholdCriterion>();
for (ThresholdCriterion criter : ThresholdCriterion.values()) {
hs.add(criter);
threshold_criteria[i++] = criter.toString().replace("_", " ");
}
confusion_matrix_for_criteria = new long[hs.size()][][];
idxCriter = new int[hs.size()];
threshold_for_criteria = new float[hs.size()];
F1_for_criteria = new float[hs.size()];
F2_for_criteria = new float[hs.size()];
F0point5_for_criteria = new float[hs.size()];
accuracy_for_criteria = new float[hs.size()];
error_for_criteria = new float[hs.size()];
precision_for_criteria = new float[hs.size()];
recall_for_criteria = new float[hs.size()];
specificity_for_criteria = new float[hs.size()];
mcc_for_criteria = new float[hs.size()];
max_per_class_error_for_criteria = new float[hs.size()];
for (ThresholdCriterion criter : hs) {
final int id = criter.ordinal();
idxCriter[id] = 0;
threshold_for_criteria[id] = thresholds[0];
for(i = 1; i < _cms.length; ++i) {
if (isBetter(_cms[i], _cms[idxCriter[id]], criter)) {
idxCriter[id] = i;
threshold_for_criteria[id] = thresholds[i];
}
}
// Set members for JSON, float to save space
confusion_matrix_for_criteria[id] = _cms[idxCriter[id]]._arr;
F1_for_criteria[id] = (float)_cms[idxCriter[id]].F1();
F2_for_criteria[id] = (float)_cms[idxCriter[id]].F2();
F0point5_for_criteria[id] = (float)_cms[idxCriter[id]].F0point5();
accuracy_for_criteria[id] = (float)_cms[idxCriter[id]].accuracy();
error_for_criteria[id] = (float)_cms[idxCriter[id]].err();
precision_for_criteria[id] = (float)_cms[idxCriter[id]].precision();
recall_for_criteria[id] = (float)_cms[idxCriter[id]].recall();
specificity_for_criteria[id] = (float)_cms[idxCriter[id]].specificity();
mcc_for_criteria[id] = (float)_cms[idxCriter[id]].mcc();
max_per_class_error_for_criteria[id] = (float)_cms[idxCriter[id]].max_per_class_error();
}
}
/**
* Populate requested JSON fields
*/
private void computeMetrics() {
confusion_matrices = new long[_cms.length][][];
F1 = new float[_cms.length];
F2 = new float[_cms.length];
F0point5 = new float[_cms.length];
accuracy = new float[_cms.length];
errorr = new float[_cms.length];
precision = new float[_cms.length];
recall = new float[_cms.length];
specificity = new float[_cms.length];
mcc = new float[_cms.length];
max_per_class_error = new float[_cms.length];
for(int i=0;i<_cms.length;++i) {
confusion_matrices[i] = _cms[i]._arr;
F1[i] = (float)_cms[i].F1();
F2[i] = (float)_cms[i].F2();
F0point5[i] = (float)_cms[i].F0point5();
accuracy[i] = (float)_cms[i].accuracy();
errorr[i] = (float)_cms[i].err();
precision[i] = (float)_cms[i].precision();
recall[i] = (float)_cms[i].recall();
specificity[i] = (float)_cms[i].specificity();
mcc[i] = (float)_cms[i].mcc();
max_per_class_error[i] = (float)_cms[i].max_per_class_error();
}
}
public boolean toHTML( StringBuilder sb ) {
try {
if (actual_domain == null) actual_domain = new String[]{"false","true"};
// make local copies to avoid getting clear()'ed out in the middle of printing (can happen for DeepLearning, for example)
if (thresholds == null || threshold_criteria == null || _cms == null || idxCriter == null) return false;
String[] my_actual_domain = actual_domain.clone();
String[] my_threshold_criteria = threshold_criteria.clone();
float[] my_threshold_for_criteria = threshold_for_criteria.clone();
float[] my_thresholds = thresholds.clone();
hex.ConfusionMatrix[] my_cms = _cms.clone();
sb.append("<div>");
DocGen.HTML.section(sb, "<a href=\"http://en.wikipedia.org/wiki/Receiver_operating_characteristic\">Scoring for Binary Classification</a>");
// data for JS
sb.append("\n<script type=\"text/javascript\">");//</script>");
sb.append("var cms = [\n");
for (hex.ConfusionMatrix cm : my_cms) {
StringBuilder tmp = new StringBuilder();
cm.toHTML(tmp, my_actual_domain);
sb.append("\t'" + StringEscapeUtils.escapeJavaScript(tmp.toString()) + "',\n");
}
sb.append("];\n");
sb.append("var criterion = " + threshold_criterion.ordinal() + ";\n"); //which one
sb.append("var criteria = [");
for (String c : my_threshold_criteria) sb.append("\"" + c + "\",");
sb.append(" ];\n");
sb.append("var thresholds = [");
for (double t : my_threshold_for_criteria) sb.append((float) t + ",");
sb.append(" ];\n");
sb.append("var F1_values = [");
for (int i = 0; i < my_cms.length; ++i) sb.append((float) my_cms[i].F1() + ",");
sb.append(" ];\n");
sb.append("var F2_values = [");
for (int i = 0; i < my_cms.length; ++i) sb.append((float) my_cms[i].F2() + ",");
sb.append(" ];\n");
sb.append("var F0point5_values = [");
for (int i = 0; i < my_cms.length; ++i) sb.append((float) my_cms[i].F0point5() + ",");
sb.append(" ];\n");
sb.append("var accuracy = [");
for (int i = 0; i < my_cms.length; ++i) sb.append((float) my_cms[i].accuracy() + ",");
sb.append(" ];\n");
sb.append("var error = [");
for (int i = 0; i < my_cms.length; ++i) sb.append((float) my_cms[i].err() + ",");
sb.append(" ];\n");
sb.append("var precision = [");
for (int i = 0; i < my_cms.length; ++i) sb.append((float) my_cms[i].precision() + ",");
sb.append(" ];\n");
sb.append("var recall = [");
for (int i = 0; i < my_cms.length; ++i) sb.append((float) my_cms[i].recall() + ",");
sb.append(" ];\n");
sb.append("var specificity = [");
for (int i = 0; i < my_cms.length; ++i) sb.append((float) my_cms[i].specificity() + ",");
sb.append(" ];\n");
sb.append("var mcc = [");
for (int i = 0; i < my_cms.length; ++i) sb.append((float) my_cms[i].mcc() + ",");
sb.append(" ];\n");
sb.append("var max_per_class_error = [");
for (int i = 0; i < my_cms.length; ++i) sb.append((float) my_cms[i].max_per_class_error() + ",");
sb.append(" ];\n");
sb.append("var idxCriter = [");
for (int i : idxCriter) sb.append(i + ",");
sb.append(" ];\n");
sb.append("</script>\n");
// Selection of threshold criterion
sb.append("\n<div><b>Threshold criterion:</b></div><select id='threshold_select' onchange='set_criterion(this.value, idxCriter[this.value])'>\n");
for (int i = 0; i < my_threshold_criteria.length; ++i)
sb.append("\t<option value='" + i + "'" + (i == threshold_criterion.ordinal() ? "selected='selected'" : "") + ">" + my_threshold_criteria[i] + "</option>\n");
sb.append("</select>\n");
sb.append("</div>");
DocGen.HTML.arrayHead(sb);
sb.append("<th>AUC</th>");
sb.append("<th>Gini</th>");
sb.append("<th id='threshold_criterion'>Threshold for " + threshold_criterion.toString().replace("_", " ") + "</th>");
sb.append("<th>F1 </th>");
sb.append("<th>Accuracy </th>");
sb.append("<th>Error </th>");
sb.append("<th>Precision </th>");
sb.append("<th>Recall </th>");
sb.append("<th>Specificity</th>");
sb.append("<th>MCC</th>");
sb.append("<th>Max per class Error</th>");
sb.append("<tr class='warning'>");
sb.append("<td>" + String.format("%.5f", AUC()) + "</td>"
+ "<td>" + String.format("%.5f", Gini()) + "</td>"
+ "<td id='threshold'>" + String.format("%g", threshold()) + "</td>"
+ "<td id='F1_value'>" + String.format("%.7f", F1()) + "</td>"
+ "<td id='accuracy'>" + String.format("%.7f", accuracy()) + "</td>"
+ "<td id='error'>" + String.format("%.7f", err()) + "</td>"
+ "<td id='precision'>" + String.format("%.7f", precision()) + "</td>"
+ "<td id='recall'>" + String.format("%.7f", recall()) + "</td>"
+ "<td id='specificity'>" + String.format("%.7f", specificity()) + "</td>"
+ "<td id='mcc'>" + String.format("%.7f", mcc()) + "</td>"
+ "<td id='max_per_class_error'>" + String.format("%.7f", max_per_class_error()) + "</td>"
);
DocGen.HTML.arrayTail(sb);
// sb.append("<div id='BestConfusionMatrix'>");
// CM().toHTML(sb, actual_domain);
// sb.append("</div>");
sb.append("<table><tr><td>");
plotROC(sb);
sb.append("</td><td id='ConfusionMatrix'>");
CM().toHTML(sb, my_actual_domain);
sb.append("</td></tr>");
sb.append("<tr><td><h5>Threshold:</h5></div><select id=\"select\" onchange='show_cm(this.value)'>\n");
for (int i = 0; i < my_cms.length; ++i)
sb.append("\t<option value='" + i + "'" + (my_thresholds[i] == threshold() ? "selected='selected'" : "") + ">" + my_thresholds[i] + "</option>\n");
sb.append("</select></td></tr>");
sb.append("</td>");
sb.append("</table>");
sb.append("\n<script type=\"text/javascript\">");
sb.append("function show_cm(i){\n");
sb.append("\t" + "document.getElementById('ConfusionMatrix').innerHTML = cms[i];\n");
sb.append("\t" + "document.getElementById('F1_value').innerHTML = F1_values[i];\n");
sb.append("\t" + "document.getElementById('accuracy').innerHTML = accuracy[i];\n");
sb.append("\t" + "document.getElementById('error').innerHTML = error[i];\n");
sb.append("\t" + "document.getElementById('precision').innerHTML = precision[i];\n");
sb.append("\t" + "document.getElementById('recall').innerHTML = recall[i];\n");
sb.append("\t" + "document.getElementById('specificity').innerHTML = specificity[i];\n");
sb.append("\t" + "document.getElementById('mcc').innerHTML = mcc[i];\n");
sb.append("\t" + "document.getElementById('max_per_class_error').innerHTML = max_per_class_error[i];\n");
sb.append("\t" + "update(dataset);\n");
sb.append("}\n");
sb.append("function set_criterion(i, idx){\n");
sb.append("\t" + "criterion = i;\n");
// sb.append("\t" + "document.getElementById('BestConfusionMatrix').innerHTML = cms[idx];\n");
sb.append("\t" + "document.getElementById('threshold_criterion').innerHTML = \" Threshold for \" + criteria[i];\n");
sb.append("\t" + "document.getElementById('threshold').innerHTML = thresholds[i];\n");
sb.append("\t" + "show_cm(idx);\n");
sb.append("\t" + "document.getElementById(\"select\").selectedIndex = idx;\n");
sb.append("\t" + "update(dataset);\n");
sb.append("}\n");
sb.append("</script>\n");
return true;
} catch (Exception ex) {
return false;
}
}
public void toASCII( StringBuilder sb ) {
sb.append(CM().toString());
sb.append("AUC: " + String.format("%.5f", AUC()));
sb.append(", Gini: " + String.format("%.5f", Gini()));
sb.append(", F1: " + String.format("%.5f", F1()));
sb.append(", F2: " + String.format("%.5f", F2()));
sb.append(", F0point5: " + String.format("%.5f", F0point5()));
sb.append(", Accuracy: " + String.format("%.5f", accuracy()));
sb.append(", Error: " + String.format("%.5f", err()));
sb.append(", Precision: " + String.format("%.5f", precision()));
sb.append(", Recall: " + String.format("%.5f", recall()));
sb.append(", Specificity: " + String.format("%.5f", specificity()));
sb.append(", MCC: " + String.format("%.5f", mcc()));
sb.append(", Threshold for " + threshold_criterion.toString().replace("_", " ") + ": " + String.format("%g", threshold()));
sb.append("\n");
}
void plotROC(StringBuilder sb) {
sb.append("<script type=\"text/javascript\" src='/h2o/js/d3.v3.min.js'></script>");
sb.append("<div id=\"ROC\">");
sb.append("<style type=\"text/css\">");
sb.append(".axis path," +
".axis line {\n" +
"fill: none;\n" +
"stroke: black;\n" +
"shape-rendering: crispEdges;\n" +
"}\n" +
".axis text {\n" +
"font-family: sans-serif;\n" +
"font-size: 11px;\n" +
"}\n");
sb.append("</style>");
sb.append("<div id=\"rocCurve\" style=\"display:inline;\">");
sb.append("<script type=\"text/javascript\">");
sb.append("//Width and height\n");
sb.append("var w = 500;\n"+
"var h = 300;\n"+
"var padding = 40;\n"
);
sb.append("var dataset = [");
for(int c = 0; c < _fprs.length; c++) {
assert(_tprs.length == _fprs.length);
if (c == 0) {
sb.append("["+String.valueOf(_fprs[c])+",").append(String.valueOf(_tprs[c])).append("]");
}
sb.append(", ["+String.valueOf(_fprs[c])+",").append(String.valueOf(_tprs[c])).append("]");
}
//diagonal
for(int c = 0; c < 200; c++) {
sb.append(", ["+String.valueOf(c/200.)+",").append(String.valueOf(c/200.)).append("]");
}
sb.append("];\n");
sb.append(
"//Create scale functions\n"+
"var xScale = d3.scale.linear()\n"+
".domain([0, d3.max(dataset, function(d) { return d[0]; })])\n"+
".range([padding, w - padding * 2]);\n"+
"var yScale = d3.scale.linear()"+
".domain([0, d3.max(dataset, function(d) { return d[1]; })])\n"+
".range([h - padding, padding]);\n"+
"var rScale = d3.scale.linear()"+
".domain([0, d3.max(dataset, function(d) { return d[1]; })])\n"+
".range([2, 5]);\n"+
"//Define X axis\n"+
"var xAxis = d3.svg.axis()\n"+
".scale(xScale)\n"+
".orient(\"bottom\")\n"+
".ticks(5);\n"+
"//Define Y axis\n"+
"var yAxis = d3.svg.axis()\n"+
".scale(yScale)\n"+
".orient(\"left\")\n"+
".ticks(5);\n"+
"//Create SVG element\n"+
"var svg = d3.select(\"#rocCurve\")\n"+
".append(\"svg\")\n"+
".attr(\"width\", w)\n"+
".attr(\"height\", h);\n"+
"/*"+
"//Create labels\n"+
"svg.selectAll(\"text\")"+
".data(dataset)"+
".enter()"+
".append(\"text\")"+
".text(function(d) {"+
"return d[0] + \",\" + d[1];"+
"})"+
".attr(\"x\", function(d) {"+
"return xScale(d[0]);"+
"})"+
".attr(\"y\", function(d) {"+
"return yScale(d[1]);"+
"})"+
".attr(\"font-family\", \"sans-serif\")"+
".attr(\"font-size\", \"11px\")"+
".attr(\"fill\", \"red\");"+
"*/\n"+
"//Create X axis\n"+
"svg.append(\"g\")"+
".attr(\"class\", \"axis\")"+
".attr(\"transform\", \"translate(0,\" + (h - padding) + \")\")"+
".call(xAxis);\n"+
"//X axis label\n"+
"d3.select('#rocCurve svg')"+
".append(\"text\")"+
".attr(\"x\",w/2)"+
".attr(\"y\",h - 5)"+
".attr(\"text-anchor\", \"middle\")"+
".text(\"False Positive Rate\");\n"+
"//Create Y axis\n"+
"svg.append(\"g\")"+
".attr(\"class\", \"axis\")"+
".attr(\"transform\", \"translate(\" + padding + \",0)\")"+
".call(yAxis);\n"+
"//Y axis label\n"+
"d3.select('#rocCurve svg')"+
".append(\"text\")"+
".attr(\"x\",150)"+
".attr(\"y\",-5)"+
".attr(\"transform\", \"rotate(90)\")"+
//".attr(\"transform\", \"translate(0,\" + (h - padding) + \")\")"+
".attr(\"text-anchor\", \"middle\")"+
".text(\"True Positive Rate\");\n"+
"//Title\n"+
"d3.select('#rocCurve svg')"+
".append(\"text\")"+
".attr(\"x\",w/2)"+
".attr(\"y\",padding - 20)"+
".attr(\"text-anchor\", \"middle\")"+
".text(\"ROC\");\n" +
"function update(dataset) {" +
"svg.selectAll(\"circle\").remove();" +
"//Create circles\n"+
"var data = svg.selectAll(\"circle\")"+
".data(dataset);\n"+
"var activeIdx = idxCriter[criterion];\n" +
"data.enter()\n"+
".append(\"circle\")\n"+
".attr(\"cx\", function(d) {\n"+
"return xScale(d[0]);\n"+
"})\n"+
".attr(\"cy\", function(d) {\n"+
"return yScale(d[1]);\n"+
"})\n"+
".attr(\"fill\", function(d,i) {\n"+
" if (document.getElementById(\"select\") != null && i == document.getElementById(\"select\").selectedIndex && i != activeIdx) {\n" +
" return \"blue\"\n" +
" }\n" +
" else if (i == activeIdx) {\n"+
" return \"green\"\n"+
" }\n" +
" else if (d[0] != d[1] || d[0] == 0 || d[1] == 0) {\n"+
" return \"blue\"\n"+
" }\n" +
" else {\n"+
" return \"red\"\n"+
" }\n"+
"})\n"+
".attr(\"r\", function(d,i) {\n"+
" if (document.getElementById(\"select\") != null && i == document.getElementById(\"select\").selectedIndex && i != activeIdx) {\n" +
" return 4\n" +
" }\n" +
" else if (i == activeIdx) {\n"+
" return 6\n"+
" }\n" +
" else if (d[0] != d[1] || d[0] == 0 || d[1] == 0) {\n"+
" return 1.5\n"+
" }\n"+
" else {\n"+
" return 1\n"+
" }\n" +
"})\n" +
".on(\"mouseover\", function(d,i){\n" +
" if(i < " + _fprs.length + ") {" +
" document.getElementById(\"select\").selectedIndex = i\n" +
" show_cm(i)\n" +
" }\n" +
"});\n"+
"data.exit().remove();" +
"}\n" +
"update(dataset);");
sb.append("</script>");
sb.append("</div>");
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/AboutH2O.java
|
package water.api;
import water.AbstractBuildVersion;
import water.H2O;
/**
* Print some information about H2O.
*/
public class AboutH2O extends HTMLOnlyRequest {
@Override
protected String build(Response response) {
AbstractBuildVersion abv = H2O.getBuildVersion();
String build_branch = abv.branchName();
String build_hash = abv.lastCommitHash();
String build_describe = abv.describe();
String build_project_version = abv.projectVersion();
String build_by = abv.compiledBy();
String build_on = abv.compiledOn();
StringBuffer sb = new StringBuffer();
sb.append("<div class=\"container\">");
//sb.append("<div class=\"hero-unit\">");
sb.append("<h1 class=\"text-center\"><u>About H<sub>2</sub>O</u></h1><br />");
sb.append("<div class=\"row\">");
sb.append("<div class=\"well span6 offset3\">");
row(sb, "Build git branch", build_branch);
row(sb, "Build git hash", build_hash);
row(sb, "Build git describe",build_describe);
row(sb, "Build project version", build_project_version);
row(sb, "Built by", build_by);
row(sb, "Built on", build_on);
sb.append("</div>");
sb.append("</div>");
sb.append("<br />");
sb.append("<div>");
sb.append("<p class=\"lead text-center\">Join <a href=\"https://groups.google.com/forum/#!forum/h2ostream\" target=\"_blank\">h2ostream</a>, our google group community</p>");
sb.append("<p class=\"lead text-center\">Follow us on Twitter, <a href=\"https://twitter.com/hexadata\" target=\"_blank\">@hexadata</a></p>");
sb.append("<p class=\"lead text-center\">Email us at <a href=\"mailto:support@0xdata.com\" target=\"_top\">support@0xdata.com</a></p>");
sb.append("</div>");
sb.append("</div>");
//sb.append("</div>");
sb.append("</div>");
return sb.toString();
}
private StringBuffer row(StringBuffer sb, String c1, String c2) {
sb.append("<div class=\"row\">");
sb.append("<div class=\"span2\"><p class=\"text-right\"><small>").append(c1).append("</small></p></div>");
sb.append("<div class=\"span4\"><p>").append(c2).append("</p></div>");
return sb.append("</div>");
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/Cancel.java
|
package water.api;
import water.Job;
import water.Key;
import water.util.RString;
import dontweave.gson.JsonObject;
public class Cancel extends Request {
// TODO use ExistingJobKey (check other places)
protected final Str _key = new Str(KEY);
public static String link(Key k, String content) {
RString rs = new RString("<a href='Cancel.html?key=%key'>%content</a>");
rs.replace("key", k.toString());
rs.replace("content",content);
return rs.toString();
}
@Override
protected Response serve() {
String key = _key.value();
try {
Job.findJob(Key.make(key)).cancel();
} catch( Throwable e ) {
return Response.error(e);
}
JsonObject response = new JsonObject();
return Response.redirect(response, Jobs.class, null);
}
@Override
public RequestServer.API_VERSION[] supportedVersions() {
return SUPPORTS_V1_V2;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/Cloud.java
|
package water.api;
import dontweave.gson.*;
import water.*;
import water.util.Log;
import java.util.concurrent.ConcurrentHashMap;
public class Cloud extends Request2 {
@API(help="quiet", required=false, filter=Default.class)
protected boolean quiet = false;
@API(help="skip_ticks", required=false, filter=Default.class)
protected boolean skip_ticks = false;
/**
* Data structure to store last tick counts from a given node.
*/
private class LastTicksEntry {
final public long _system_idle_ticks;
final public long _system_total_ticks;
final public long _process_total_ticks;
LastTicksEntry(HeartBeat hb) {
_system_idle_ticks = hb._system_idle_ticks;
_system_total_ticks = hb._system_total_ticks;
_process_total_ticks = hb._process_total_ticks;
}
}
@Override
public RequestServer.API_VERSION[] supportedVersions() {
return SUPPORTS_V1_V2;
}
/**
* Store last tick counts for each node.
*
* This is local to a node and doesn't need to be Iced, so make it transient.
* Access this each time the Cloud status page is called on this node.
*
* The window of tick aggregation is between calls to this page (which might come from the browser or from REST
* API clients).
*
* Note there is no attempt to distinguish between REST API sessions. Every call updates the last tick count info.
*/
private static transient ConcurrentHashMap<String,LastTicksEntry> ticksHashMap = new ConcurrentHashMap<String, LastTicksEntry>();
private static volatile boolean lastCloudHealthy = false;
public Cloud() {
_requestHelp = "Displays the information about the current cloud. For each"
+ " node displays its heartbeat information.";
}
@Override public Response serve() {
JsonObject response = new JsonObject();
final H2O cloud = H2O.CLOUD;
final H2ONode self = H2O.SELF;
response.addProperty(VERSION, H2O.VERSION);
response.addProperty(CLOUD_NAME, H2O.NAME);
response.addProperty(NODE_NAME, self.toString());
response.addProperty(CLOUD_SIZE, cloud._memary.length);
long now = System.currentTimeMillis();
response.addProperty(CLOUD_UPTIME_MILLIS, now - H2O.START_TIME_MILLIS);
boolean cloudHealthy = true;
JsonArray nodes = new JsonArray();
for (H2ONode h2o : cloud._memary) {
HeartBeat hb = h2o._heartbeat;
JsonObject node = new JsonObject();
node.addProperty(NAME,h2o.toString());
node.addProperty(NUM_KEYS, hb._keys);
node.addProperty(VALUE_SIZE, hb.get_valsz());
node.addProperty(FREE_MEM, hb.get_free_mem());
node.addProperty(TOT_MEM, hb.get_tot_mem());
node.addProperty(MAX_MEM, hb.get_max_mem());
node.addProperty(MEM_BW, hb._membw);
node.addProperty(FREE_DISK, hb.get_free_disk());
node.addProperty(MAX_DISK, hb.get_max_disk());
node.addProperty(NUM_CPUS, (int)hb._num_cpus);
node.addProperty(GFLOPS, hb._gflops);
node.addProperty(SYSTEM_LOAD, hb._system_load_average);
Long elapsed = System.currentTimeMillis() - h2o._last_heard_from;
node.addProperty(ELAPSED, elapsed);
h2o._node_healthy = elapsed <= HeartBeatThread.TIMEOUT;
node.addProperty(NODE_HEALTH, h2o._node_healthy);
if (! h2o._node_healthy) {
cloudHealthy = false;
}
node.addProperty("cpus_allowed", hb._cpus_allowed);
node.addProperty("nthreads", hb._nthreads);
node.addProperty("PID", hb._pid);
JsonArray fjth = new JsonArray();
JsonArray fjqh = new JsonArray();
JsonArray fjtl = new JsonArray();
JsonArray fjql = new JsonArray();
if( hb._fjthrds != null ) {
for( int i=0; i<H2O.MIN_HI_PRIORITY; i++ ) {
if( hb._fjthrds[i]==-1 ) break;
fjtl.add(new JsonPrimitive(hb._fjthrds[i]));
fjql.add(new JsonPrimitive(hb._fjqueue[i]));
}
node.add(FJ_THREADS_LO, fjtl);
node.add(FJ_QUEUE_LO , fjql);
for( int i=H2O.MIN_HI_PRIORITY; i<H2O.MAX_PRIORITY; i++ ) {
fjth.add(new JsonPrimitive(hb._fjthrds[i]));
fjqh.add(new JsonPrimitive(hb._fjqueue[i]));
}
node.add(FJ_THREADS_HI, fjth);
node.add(FJ_QUEUE_HI , fjqh);
}
node.addProperty(RPCS, (int) hb._rpcs);
node.addProperty(TCPS_ACTIVE, (int) hb._tcps_active);
if (hb._process_num_open_fds >= 0) { node.addProperty("open_fds", hb._process_num_open_fds); } else { node.addProperty("open_fds", "N/A"); }
// Use tick information to calculate CPU usage percentage for the entire system and
// for the specific H2O node.
//
// Note that 100% here means "the entire box". This is different from 'top' 100%,
// which usually means one core.
int my_cpu_pct = -1;
int sys_cpu_pct = -1;
if (!skip_ticks) {
LastTicksEntry lte = ticksHashMap.get(h2o.toString());
if (lte != null) {
long system_total_ticks_delta = hb._system_total_ticks - lte._system_total_ticks;
// Avoid divide by 0 errors.
if (system_total_ticks_delta > 0) {
long system_idle_ticks_delta = hb._system_idle_ticks - lte._system_idle_ticks;
double sys_cpu_frac_double = 1 - ((double)(system_idle_ticks_delta) / (double)system_total_ticks_delta);
if (sys_cpu_frac_double < 0) sys_cpu_frac_double = 0; // Clamp at 0.
else if (sys_cpu_frac_double > 1) sys_cpu_frac_double = 1; // Clamp at 1.
sys_cpu_pct = (int)(sys_cpu_frac_double * 100);
long process_total_ticks_delta = hb._process_total_ticks - lte._process_total_ticks;
double process_cpu_frac_double = ((double)(process_total_ticks_delta) / (double)system_total_ticks_delta);
// Saturate at 0 and 1.
if (process_cpu_frac_double < 0) process_cpu_frac_double = 0; // Clamp at 0.
else if (process_cpu_frac_double > 1) process_cpu_frac_double = 1; // Clamp at 1.
my_cpu_pct = (int)(process_cpu_frac_double * 100);
}
}
LastTicksEntry newLte = new LastTicksEntry(hb);
ticksHashMap.put(h2o.toString(), newLte);
}
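// Example (illustrative): if, since the previous call to this page, the system
// accumulated 1000 total ticks of which 250 were idle and this H2O process used
// 300, then sys_cpu_pct = (1 - 250/1000)*100 = 75 and my_cpu_pct = 30.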
if (my_cpu_pct >= 0) { node.addProperty("my_cpu_%", my_cpu_pct); } else { node.addProperty("my_cpu_%", "N/A"); }
if (sys_cpu_pct >= 0) { node.addProperty("sys_cpu_%", sys_cpu_pct); } else { node.addProperty("sys_cpu_%", "N/A"); }
node.addProperty(LAST_CONTACT, h2o._last_heard_from);
nodes.add(node);
}
response.addProperty(CLOUD_HEALTH, cloudHealthy);
response.add(NODES,nodes);
response.addProperty(CONSENSUS, Paxos._commonKnowledge); // Cloud is globally accepted
response.addProperty(LOCKED, Paxos._cloudLocked); // Cloud is locked against changes
boolean logCloudStatus = (!cloudHealthy) || (cloudHealthy != lastCloudHealthy) || !quiet;
lastCloudHealthy = cloudHealthy;
if (logCloudStatus) {
Log.info("H2O Cloud Status:");
for (String s : response.toString().split("[{}]"))
if (!s.equals(",") && s.length() > 0) Log.info(s); // Log the cloud status to stdout
}
Response r = Response.done(response);
r.setBuilder(CONSENSUS, new BooleanStringBuilder("","Voting new members"));
r.setBuilder(LOCKED, new BooleanStringBuilder("Locked","Accepting new members"));
r.setBuilder(NODES, new MyAryBuilder());
r.setBuilder(NODES+"."+NAME, new NodeCellBuilder());
r.setBuilder(NODES+"."+LAST_CONTACT, new LastContactBuilder());
return r;
}
public static String pos_neg(double d) {
return d >= 0 ? String.valueOf(d) : "n/a";
}
// Just the Node as a link
private static class NodeCellBuilder extends ArrayRowElementBuilder {
@Override public String elementToString(JsonElement element, String contextName) {
String str = element.getAsString();
if( str.equals(H2O.SELF.toString()) ) {
return "<a href='StoreView.html'>"+str+"</a>";
}
String str2 = str.startsWith("/") ? str.substring(1) : str;
String str3 = "<a href='http://" + str2 + "/StoreView.html'>" + str + "</a>";
return str3;
}
}
// Highlight sick nodes
private static class MyAryBuilder extends ArrayBuilder {
static ArrayRowBuilder MY_ARRAY_ROW = new MyRowBuilder();
@Override public Builder defaultBuilder(JsonElement element) { return MY_ARRAY_ROW; }
}
private static class MyRowBuilder extends ArrayRowBuilder {
@Override public String header(JsonObject object, String objectName) {
long then = object.getAsJsonPrimitive(LAST_CONTACT).getAsLong();
long now = System.currentTimeMillis();
return ((now-then) >= HeartBeatThread.TIMEOUT) ? "\n<tr class=\"error\">" : "\n<tr>";
}
}
// Last-heard-from time pretty-printing
private static class LastContactBuilder extends ArrayRowElementBuilder {
@Override public String elementToString(JsonElement element, String contextName) {
long then = element.getAsLong();
long now = System.currentTimeMillis();
return (now-then >= 2*1000) ? ""+((now-then)/1000)+" secs ago" : "now";
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/CloudStatus.java
|
package water.api;
import water.Iced;
public class CloudStatus extends Iced {
public String cloud_name;
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/CollectLinuxInfo.java
|
package water.api;
import water.*;
import water.api.RequestServer.API_VERSION;
import water.util.LinuxProcFileReader;
import water.util.Log;
import java.io.File;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.util.Arrays;
public class CollectLinuxInfo extends Func {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
// This Request supports the HTML 'GET' command, and this is the help text for GET.
static final String DOC_GET = "Collect information available from Linux (does nothing for non-Linux)";
static void collect() throws RuntimeException {
// Sanity check that we're running on Linux.
{
LinuxProcFileReader lpfr = new LinuxProcFileReader();
lpfr.read();
if (!lpfr.valid()) {
Log.info("CollectLinuxInfo couldn't collect anything because we're not running on Linux");
}
}
// Clean up old directory, if one exists.
String linuxInfoDirString = Log.getLogPathFileNameStem() + "-collect-linux-info";
File linuxInfoDir = new File(linuxInfoDirString);
if (linuxInfoDir.exists()) {
try {
String[] cmd = {"/bin/rm", "-r", "-f", linuxInfoDirString};
Log.debug("Running command: " + Arrays.toString(cmd));
Process p = Runtime.getRuntime().exec(cmd);
p.waitFor();
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
if (linuxInfoDir.exists()) {
throw new RuntimeException("Failed to delete directory " + linuxInfoDirString);
}
// Make new directory.
try {
String[] cmd = {"/bin/mkdir", "-p", linuxInfoDirString};
Log.debug("Running command: " + Arrays.toString(cmd));
Process p = Runtime.getRuntime().exec(cmd);
p.waitFor();
}
catch (Exception e) {
throw new RuntimeException(e);
}
if (! linuxInfoDir.exists()) {
throw new RuntimeException("Failed to create directory " + linuxInfoDirString);
}
try {
String[] cmd = {"/bin/chmod", "u+rwx", linuxInfoDirString};
Log.debug("Running command: " + Arrays.toString(cmd));
Process p = Runtime.getRuntime().exec(cmd);
p.waitFor();
}
catch (Exception e) {
throw new RuntimeException(e);
}
// Unpack the script.
String collectFilename = "collect-linux-info.sh";
InputStream is = Boot._init.getResource2("/diagnostics/" + collectFilename);
File collectDirFilename = new File(linuxInfoDir, collectFilename);
try {
FileOutputStream os = new FileOutputStream(collectDirFilename);
byte[] buffer = new byte[1024];
int len = is.read(buffer);
while (len != -1) {
os.write(buffer, 0, len);
len = is.read(buffer);
}
is.close();
os.close();
}
catch (Exception e) {
throw new RuntimeException(e);
}
// Run the unpacked script.
try {
String[] cmd = {"/bin/sh", collectDirFilename.toString(), linuxInfoDirString};
Log.debug("Running command: " + Arrays.toString(cmd));
Process p = Runtime.getRuntime().exec(cmd);
p.waitFor();
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
private static class CollectLinuxInfoTask extends DRemoteTask {
@Override
public void lcompute() {
CollectLinuxInfo.collect();
tryComplete();
}
@Override public void reduce(DRemoteTask drt) {}
}
@Override protected void execImpl() {
CollectLinuxInfoTask task = new CollectLinuxInfoTask();
task.invokeOnAllNodes();
}
@Override public API_VERSION[] supportedVersions() {
return SUPPORTS_ONLY_V2;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/ConfusionMatrix.java
|
package water.api;
import water.*;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.TransfVec;
import water.fvec.Vec;
import water.util.Utils;
import java.util.Arrays;
import static water.util.Utils.printConfusionMatrix;
/**
* Compare two categorical columns, reporting a grid of co-occurrences.
* <br>
* The semantics follow the R approach - see the R code:
* <pre>
* > l = c("A", "B", "C")
* > a = factor(c("A", "B", "C"), levels=l)
* > b = factor(c("A", "B", "A"), levels=l)
* > confusionMatrix(a,b)
*
* Reference
* Prediction A B C
* A 1 0 0
* B 0 1 0
* C 1 0 0
* </pre>
*
* <p>Note: By default we report zero rows and columns.</p>
*
* @author cliffc
*/
public class ConfusionMatrix extends Func {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help = "", required = true, filter = Default.class)
public Frame actual;
@API(help="Column of the actual results (will display vertically)", required=true, filter=actualVecSelect.class)
public Vec vactual;
class actualVecSelect extends VecClassSelect { actualVecSelect() { super("actual"); } }
@API(help = "", required = true, filter = Default.class)
public Frame predict;
@API(help="Column of the predicted results (will display horizontally)", required=true, filter=predictVecSelect.class)
public Vec vpredict;
class predictVecSelect extends VecClassSelect { predictVecSelect() { super("predict"); } }
@API(help="domain of the actual response")
String [] actual_domain;
@API(help="domain of the predicted response")
String [] predicted_domain;
@API(help="union of domains")
public
String [] domain;
@API(help="Confusion Matrix (or co-occurrence matrix)")
public long cm[][];
@API(help="Mean Squared Error")
public double mse = Double.NaN;
private boolean classification;
@Override protected void init() throws IllegalArgumentException {
// Input handling: null-check before touching the vectors to avoid an NPE
if( vactual==null || vpredict==null )
throw new IllegalArgumentException("Missing actual or predict!");
classification = vactual.isInt() && vpredict.isInt();
if (vactual.length() != vpredict.length())
throw new IllegalArgumentException("Both arguments must have the same length!");
// Handle regression kind which is producing CM 1x1 elements
if (!classification && vactual.isEnum())
throw new IllegalArgumentException("Actual vector cannot be categorical for regression scoring.");
if (!classification && vpredict.isEnum())
throw new IllegalArgumentException("Predicted vector cannot be categorical for regression scoring.");
}
@Override protected void execImpl() {
Vec va = null,vp = null, avp = null;
try {
if (classification) {
// Create a new vectors - it is cheap since vector are only adaptation vectors
va = vactual .toEnum(); // always returns TransfVec
actual_domain = va._domain;
vp = vpredict.toEnum(); // always returns TransfVec
predicted_domain = vp._domain;
if (!Arrays.equals(actual_domain, predicted_domain)) {
domain = Utils.domainUnion(actual_domain, predicted_domain);
int[][] vamap = Model.getDomainMapping(domain, actual_domain, true);
va = TransfVec.compose( (TransfVec) va, vamap, domain, false ); // delete original va
int[][] vpmap = Model.getDomainMapping(domain, predicted_domain, true);
vp = TransfVec.compose( (TransfVec) vp, vpmap, domain, false ); // delete original vp
} else domain = actual_domain;
// The vectors are from different groups => align them, and delete the aligned copy after computation
if (!va.group().equals(vp.group())) {
avp = vp;
vp = va.align(vp);
}
cm = new CM(domain.length).doAll(va,vp)._cm;
} else {
mse = new CM(1).doAll(vactual,vpredict).mse();
}
return;
} finally { // Delete adaptation vectors
if (va!=null) UKV.remove(va._key);
if (vp!=null) UKV.remove(vp._key);
if (avp!=null) UKV.remove(avp._key);
}
}
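// Example (illustrative): if the actual domain is {"A","B"} and the predicted domain
// is {"B","C"}, the union (e.g. {"A","B","C"}) is used as 'domain', both vectors are
// remapped onto it, and the resulting CM is a (domain.length+1)^2 table whose extra
// row/column counts NAs.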
// Compute the co-occurrence matrix
private static class CM extends MRTask2<CM> {
/* @IN */ final int _c_len;
/* @OUT Classification */ long _cm[][];
/* @OUT Regression */ public double mse() { return _count > 0 ? _mse/_count : Double.POSITIVE_INFINITY; }
/* @OUT Regression Helper */ private double _mse;
/* @OUT Regression Helper */ private long _count;
CM(int c_len) { _c_len = c_len; }
@Override public void map( Chunk ca, Chunk cp ) {
//classification
if (_c_len > 1) {
_cm = new long[_c_len+1][_c_len+1];
int len = Math.min(ca._len,cp._len); // handle different lengths, but such vectors should have been rejected already
for( int i=0; i < len; i++ ) {
int a=ca.isNA0(i) ? _c_len : (int)ca.at80(i);
int p=cp.isNA0(i) ? _c_len : (int)cp.at80(i);
_cm[a][p]++;
}
if( len < ca._len )
for( int i=len; i < ca._len; i++ )
_cm[ca.isNA0(i) ? _c_len : (int)ca.at80(i)][_c_len]++;
if( len < cp._len )
for( int i=len; i < cp._len; i++ )
_cm[_c_len][cp.isNA0(i) ? _c_len : (int)cp.at80(i)]++;
} else {
_cm = null;
_mse = 0;
assert(ca._len == cp._len);
int len = ca._len;
for( int i=0; i < len; i++ ) {
if (ca.isNA0(i) || cp.isNA0(i)) continue; //TODO: Improve
final double a=ca.at0(i);
final double p=cp.at0(i);
_mse += (p-a)*(p-a);
_count++;
}
}
}
@Override public void reduce( CM cm ) {
if (_cm != null && cm._cm != null) {
Utils.add(_cm,cm._cm);
} else {
assert(!Double.isNaN(_mse) && !Double.isNaN(cm._mse)); // NaN must be tested with isNaN, not !=
assert(_cm == null && cm._cm == null);
_mse += cm._mse;
_count += cm._count;
}
}
}
@Override public boolean toHTML( StringBuilder sb ) {
if (classification) {
DocGen.HTML.section(sb,"Confusion Matrix");
if( cm == null ) return true;
printConfusionMatrix(sb, cm, domain, true);
} else{
DocGen.HTML.section(sb,"Mean Squared Error");
if( Double.isNaN(mse) ) return true; // NaN must be tested with isNaN, not ==
DocGen.HTML.arrayHead(sb);
sb.append("<tr class='warning'><td>" + mse + "</td></tr>");
DocGen.HTML.arrayTail(sb);
}
return true;
}
public void toASCII( StringBuilder sb ) {
if (classification) {
if(cm == null) return;
printConfusionMatrix(sb, cm, domain, false);
} else {
sb.append("MSE: " + mse);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/Console.java
|
package water.api;
import water.Boot;
import water.exec.ASTOp;
import water.util.RString;
public class Console extends HTMLOnlyRequest {
@Override protected String build(Response response) {
RString rs = new RString(Boot._init.loadContent("/h2o/console.html"));
rs.replace("HELP", getHelp());
return rs.toString();
}
private String getHelp() {
StringBuilder sb = new StringBuilder();
sb.append("jqconsole.Write(");
sb.append("'Access keys directly by name (for example `iris.hex`).\\n' +");
sb.append("'Available functions are:'+");
for(String s : ASTOp.UNI_INFIX_OPS.keySet())
sb.append("'\\n\\t").append(s).append("' +");
sb.append("'\\n', 'jqconsole-output');");
return sb.toString();
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/Constants.java
|
package water.api;
import water.Iced;
import water.util.Check;
public class Constants extends Iced {
public static class Suffixes {
// JSON fields with these suffixes will automatically provide some
// pretty printing. Users can still override the fields later
public static final String BYTES = "_bytes";
public static final String BYTES_PER_SECOND = "_bytes_/_sec";
public static final String MILLIS = "_ms";
}
public static class Extensions {
public static final String HEX = ".hex";
public static final String JOB = ".job";
public static final String KMEANS = ".kmeans";
public static final String R = ".r";
public static final String JSON = ".json";
}
public static class Schemes {
public static final String FILE = "file";
public static final String HDFS = "hdfs";
public static final String S3 = "s3";
public static final String NFS = "nfs";
}
public static final String BUILT_IN_KEY_JOBS = "jobs";
public static final String ALPHA = "alpha";
public static final String ARGUMENTS = "arguments";
public static final String AUC = "area_under_curve";
public static final String BASE = "base";
public static final String BEST_THRESHOLD = "best_threshold";
public static final String BETA_EPS = "beta_epsilon";
public static final String BIN_LIMIT = "bin_limit";
public static final String BROWSE = "browse";
public static final String BUCKET = "bucket";
public static final String CANCELLED = "cancelled";
public static final String CARDINALITY = "cardinality";
public static final String CASE = "case";
public static final String CASE_MODE = "case_mode";
public static final String CHUNK = "chunk";
public static final String CLASS = "response_variable";
public static final String CLOUD_HEALTH = "cloud_healthy";
public static final String CLOUD_NAME = "cloud_name";
public static final String CLOUD_SIZE = "cloud_size";
public static final String CLOUD_UPTIME_MILLIS = "cloud_uptime_millis";
public static final String CLUSTERS = "clusters";
public static final String COEFFICIENTS = "coefficients";
public static final String COL_INDEX = "col_index";
public static final String COLS = "cols";
public static final String COLUMN_NAME = "col_name";
public static final String COLUMNS_DISPLAY = "max_column_display";
public static final String CONSENSUS = "consensus";
public static final String CONTENTS = "contents";
public static final String COUNT = "count";
public static final String DATA_KEY = "data_key";
public static final String DEPTH = "depth";
public static final String DESCRIPTION = "description";
public static final String DEST_KEY = "destination_key";
public static final String DTHRESHOLDS = "thresholds";
public static final String ELAPSED = "elapsed_time";
public static final String END_TIME = "end_time";
public static final String ENUM_DOMAIN_SIZE = "enum_domain_size";
public static final String ERROR = "error";
public static final String ESCAPE_NAN = "escape_nan";
public static final String EXCLUSIVE_SPLIT_LIMIT = "exclusive_split_limit";
public static final String EXPRESSION = "expression";
public static final String FAILED = "failed";
public static final String FAMILY = "family";
public static final String FEATURES = "features";
public static final String FILE = "file";
public static final String FILES = "files";
public static final String FILTER = "filter";
public static final String FIRST_CHUNK = "first_chunk";
public static final String FJ_QUEUE_HI = "fj_queue_hi";
public static final String FJ_QUEUE_LO = "fj_queue_lo";
public static final String FJ_THREADS_HI = "fj_threads_hi";
public static final String FJ_THREADS_LO = "fj_threads_lo";
public static final String FREE_DISK = "free_disk" + Suffixes.BYTES;
public static final String FREE_MEM = "free_mem" + Suffixes.BYTES;
public static final String HEADER = "header";
public static final String PREVIEW = "preview";
public static final String HEIGHT = "height";
public static final String HELP = "help";
public static final String IGNORE = "ignore";
public static final String ITEMS = "items";
public static final String ITERATIVE_CM = "iterative_cm";
public static final String JOB = "job";
public static final String JOB_KEY = "job_key";
public static final String JOBS = "jobs";
public static final String JSON_H2O = "h2o";
public static final String KEY = "key";
public static final String KEYS = "keys";
public static final String LAST_CONTACT = "last_contact";
public static final String LAMBDA = "lambda";
public static final String LIMIT = "limit";
public static final String LINK = "link";
public static final String LOCKED = "locked";
public static final String MAX = "max";
public static final String MAX_DISK = "max_disk" + Suffixes.BYTES;
public static final String MAX_ITER = "max_iter";
public static final String MAX_MEM = "max_mem" + Suffixes.BYTES;
public static final String MEM_BW = "mem_bandwidth" + Suffixes.BYTES_PER_SECOND;
public static final String MAX_ROWS = "max_rows";
public static final String MEAN = "mean";
public static final String MIN = "min";
public static final String MODEL_KEY = "model_key";
public static final String MODELS = "models";
public static final String MORE = "more";
public static final String MTRY = "mtry";
public static final String MTRY_NODES = "mtry_nodes";
public static final String NAME = "name";
public static final String NEG_X = "neg_x";
public static final String NO_CM = "no_confusion_matrix";
public static final String NODE = "node";
public static final String NODE_HEALTH = "node_healthy";
public static final String NODE_NAME = "node_name";
public static final String NODES = "nodes";
public static final String NORMALIZE = "normalize";
public static final String NUM_COLS = "num_cols";
public static final String NUM_CPUS = "num_cpus";
public static final String GFLOPS = "linpack_gflops";
public static final String NUM_KEYS = "num_keys";
public static final String NUM_MISSING_VALUES = "num_missing_values";
public static final String NUM_ROWS = "num_rows";
public static final String NUM_TREES = "ntree";
public static final String NUM_SUCCEEDED = "num_succeeded";
public static final String NUM_FAILED = "num_failed";
public static final String OBJECT = "object";
public static final String OFFSET = "offset";
public static final String OOBEE = "out_of_bag_error_estimate";
public static final String PARALLEL = "parallel";
public static final String PATH = "path";
public static final String PREVIOUS_MODEL_KEY = "previous_model_key";
public static final String PROGRESS = "progress";
public static final String PROGRESS_KEY = "progress_key";
public static final String PROGRESS_TOTAL = "progress_total";
public static final String REDIRECT = "redirect_request";
public static final String REDIRECT_ARGS = "redirect_request_args";
public static final String REPLICATION_FACTOR = "replication_factor";
public static final String REQUEST_TIME = "time";
public static final String RESPONSE = "response";
public static final String RHO = "rho";
public static final String ROW = "row";
public static final String ROW_SIZE = "row_size";
public static final String ROWS = "rows";
public static final String RPCS = "rpcs";
public static final String SAMPLE = "sample";
public static final String SCALE = "scale";
public static final String SAMPLING_STRATEGY = "sampling_strategy";
public static final String SEED = "seed";
public static final String SEPARATOR = "separator";
public static final String PARSER_TYPE = "parser_type";
public static final String SENT_ROWS = "sent_rows";
public static final String SIZE = "size";
public static final String SOURCE_KEY = "source_key";
public static final String STACK_TRACES = "stack_traces";
public static final String START_TIME = "start_time";
public static final String STATUS = "status";
public static final String STEP = "step";
public static final String STAT_TYPE = "stat_type";
public static final String STRATA_SAMPLES = "strata_samples";
public static final String SUCCEEDED = "succeeded";
public static final String SYSTEM_LOAD = "system_load";
public static final String TASK_KEY = "task_key";
public static final String TCPS_ACTIVE = "tcps_active";
public static final String TCPS_DUTY = "tcp_duty_cycle";
public static final String TIME = "time";
public static final String TOT_MEM = "tot_mem" + Suffixes.BYTES;
public static final String TO_ENUM = "to_enum";
public static final String TREE_COUNT = "number_built";
public static final String TREE_DEPTH = "depth";
public static final String TREE_LEAVES = "leaves";
public static final String TREE_NUM = "tree_number";
public static final String TREES = "trees";
public static final String TWEEDIE_POWER = "tweedie_power";
public static final String TYPE = "type";
public static final String URL = "url";
public static final String USE_NON_LOCAL_DATA = "use_non_local_data";
public static final String VALUE = "value";
public static final String VALUE_SIZE = "value_size" + Suffixes.BYTES;
public static final String VALUE_TYPE = "type";
public static final String VARIANCE = "variance";
public static final String VERSION = "version";
public static final String VIEW = "view";
public static final String WARNINGS = "warnings";
public static final String WEIGHT = "weight";
public static final String PRIOR = "prior";
public static final String WEIGHTS = "class_weights";
public static final String WIDTH = "width";
public static final String X = "x";
public static final String XVAL = "n_folds";
public static final String Y = "y";
static { assert Check.staticFinalStrings(Constants.class); }
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/CoxPHModelView.java
|
package water.api;
import hex.CoxPH.CoxPHModel;
import water.Key;
import water.Request2;
import water.UKV;
public class CoxPHModelView extends Request2 {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="Cox Proportional Hazards Model Key", required=true, filter=CoxPHModelKeyFilter.class)
Key _modelKey;
class CoxPHModelKeyFilter extends H2OKey { public CoxPHModelKeyFilter() { super("model_key",true); } }
@API(help="Cox Proportional Hazards Model")
public CoxPHModel coxph_model;
public static String link(String txt, Key model) {
return "<a href='CoxPHModelView.html?_modelKey=" + model + "'>" + txt + "</a>";
}
public static Response redirect(Request req, Key modelKey) {
return Response.redirect(req, "/2/CoxPHModelView", "_modelKey", modelKey);
}
@Override public boolean toHTML(StringBuilder sb){
coxph_model.get_params().makeJsonBox(sb);
coxph_model.generateHTML("Cox Proportional Hazards Model", sb);
return true;
}
@Override protected Response serve() {
coxph_model = UKV.get(_modelKey);
if (coxph_model == null)
return Response.error("Model '" + _modelKey + "' not found!");
else
return Response.done(this);
}
@Override public void toJava(StringBuilder sb) {
coxph_model.toJavaHtml(sb);
}
@Override protected String serveJava() {
CoxPHModel m = UKV.get(_modelKey);
if (m != null)
return m.toJava();
else
return "";
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/CoxPHProgressPage.java
|
package water.api;
import hex.CoxPH.CoxPHModel;
import water.Key;
import water.UKV;
public class CoxPHProgressPage extends Progress2 {
/** Return {@link water.api.RequestBuilders.Response} for finished job. */
@Override protected Response jobDone(final Key dest) {
Response resp;
if (UKV.get(dest) == null)
resp = Response.error("start times must be strictly less than stop times");
else
resp = CoxPHModelView.redirect(this, dest);
return resp;
}
public static Response redirect(Request req, Key jobkey, Key dest) {
return Response.redirect(req, "/2/CoxPHProgressPage", JOB_KEY, jobkey, DEST_KEY, dest);
}
@Override public boolean toHTML(StringBuilder sb) {
CoxPHModel m = UKV.get(destination_key);
if (m != null)
m.generateHTML("Cox Proportional Hazards Model", sb);
else
DocGen.HTML.paragraph(sb, "Pending...");
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/CoxPHSurvfit.java
|
package water.api;
import hex.CoxPH.CoxPHModel;
import water.*;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.RString;
public class CoxPHSurvfit extends Request2 {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help = "Model", required = true, filter = Default.class)
public Key model;
@API(help="New X Value", required=false, filter=Default.class)
double x_new = Double.NaN;
@API(help = "Survival Curve", filter = Default.class)
public Key survfit;
public static String link(Key k, double x_new, String content) {
RString rs = new RString("<a href='CoxPHSurvfit.query?model=%$key&x_new=%x_new'>%content</a>");
rs.replace("key", k.toString());
rs.replace("x_new", x_new);
rs.replace("content", content);
return rs.toString();
}
@Override protected Response serve() {
try {
if (model == null)
throw new IllegalArgumentException("Model is required to perform validation!");
CoxPHModel m = DKV.get(model).get();
if (survfit == null)
survfit = Key.make("__Survfit_" + Key.make());
m.makeSurvfit(survfit, x_new);
return Inspect2.redirect(this, survfit.toString());
} catch (Throwable t) {
return Response.error(t);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/DRFModelView.java
|
package water.api;
import hex.drf.DRF.DRFModel;
import water.*;
public class DRFModelView extends Request2 {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="DRF Model Key", required=true, filter=DRFModelKeyFilter.class)
Key _modelKey;
class DRFModelKeyFilter extends H2OKey { public DRFModelKeyFilter() { super("model_key",true); } }
@API(help="DRF Model")
public DRFModel drf_model;
public static String link(String txt, Key model) {
return "<a href='DRFModelView.html?_modelKey=" + model + "'>" + txt + "</a>";
}
public static Response redirect(Request req, Key modelKey) {
return Response.redirect(req, "/2/DRFModelView", "_modelKey", modelKey);
}
@Override public boolean toHTML(StringBuilder sb){
drf_model.get_params().makeJsonBox(sb);
drf_model.generateHTML("DRF Model", sb);
return true;
}
@Override protected Response serve() {
drf_model = UKV.get(_modelKey);
if (drf_model == null) return Response.error("Model '" + _modelKey + "' not found!");
else return Response.done(this);
}
@Override public void toJava(StringBuilder sb) { drf_model.toJavaHtml(sb); }
@Override protected String serveJava() {
DRFModel m = UKV.get(_modelKey);
if (m!=null)
return m.toJava();
else
return "";
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/DRFProgressPage.java
|
package water.api;
import hex.drf.DRF.DRFModel;
import water.Job;
import water.Key;
import water.UKV;
public class DRFProgressPage extends Progress2 {
/** Return {@link Response} for finished job. */
@Override protected Response jobDone(final Key dst) {
return DRFModelView.redirect(this, dst);
}
public static Response redirect(Request req, Key jobkey, Key dest) {
return Response.redirect(req, "/2/DRFProgressPage", JOB_KEY, jobkey, DEST_KEY, dest);
}
@Override public boolean toHTML( StringBuilder sb ) {
Job jjob = Job.findJob(job_key);
if (jjob ==null) return true;
DRFModel m = UKV.get(jjob.dest());
if (m!=null) m.generateHTML("DRF Model", sb);
else DocGen.HTML.paragraph(sb, "Pending...");
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/Debug.java
|
package water.api;
import water.*;
import water.util.Log;
public class Debug extends Request {
@Override protected Response serve() {
int kcnt=0;
for( Key key : H2O.localKeySet() ) {
kcnt++;
Value v = H2O.raw_get(key);
Log.debug("K: ",key," V:",(v==null?"null":""+v._max));
}
return Response.error("Dumped "+kcnt+" keys");
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/DeepLearningModelView.java
|
package water.api;
import hex.deeplearning.DeepLearning;
import hex.deeplearning.DeepLearningModel;
import hex.deeplearning.Neurons;
import water.Key;
import water.Request2;
import water.UKV;
public class DeepLearningModelView extends Request2 {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="Deep Learning Model Key", required=true, filter=DeepLearningModelKeyFilter.class)
Key _modelKey;
class DeepLearningModelKeyFilter extends H2OKey { public DeepLearningModelKeyFilter() { super("model_key",true); } }
@API(help="Deep Learning Model")
DeepLearningModel deeplearning_model;
public static String link(String txt, Key model) {
return "<a href='DeepLearningModelView.html?_modelKey=" + model + "'>" + txt + "</a>";
}
public static Response redirect(Request req, Key modelKey) {
return Response.redirect(req, "/2/DeepLearningModelView", "_modelKey", modelKey);
}
@Override public boolean toHTML(StringBuilder sb){
if (deeplearning_model != null)
deeplearning_model.generateHTML("Deep Learning Model", sb);
return true;
}
@Override protected Response serve() {
deeplearning_model = UKV.get(_modelKey);
if (deeplearning_model == null) return Response.error("Model '" + _modelKey + "' not found!");
else return Response.done(this);
}
@Override public void toJava(StringBuilder sb) {
deeplearning_model.toJavaHtml(sb);
}
@Override protected String serveJava() {
deeplearning_model = UKV.get(_modelKey);
if (deeplearning_model!=null
&& !deeplearning_model.get_params().autoencoder) //not yet implemented
return deeplearning_model.toJava();
else
return "";
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/DeepLearningProgressPage.java
|
package water.api;
import hex.deeplearning.DeepLearningModel;
import water.Job;
import water.Key;
import water.UKV;
public class DeepLearningProgressPage extends Progress2 {
/** Return {@link water.api.RequestBuilders.Response} for finished job. */
@Override protected Response jobDone(final Key dst) {
return DeepLearningModelView.redirect(this, dst);
}
public static Response redirect(Request req, Key jobkey, Key dest) {
return Response.redirect(req, "/2/DeepLearningProgressPage", JOB_KEY, jobkey, DEST_KEY, dest);
}
@Override public boolean toHTML( StringBuilder sb ) {
Job jjob = Job.findJob(job_key);
if (jjob ==null) return true;
DeepLearningModel m = UKV.get(jjob.dest());
if (m!=null) m.generateHTML("Deep Learning Model", sb);
else DocGen.HTML.paragraph(sb, "Pending...");
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/Direction.java
|
package water.api;
public enum Direction {
IN, OUT, INOUT;
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/DocGen.java
|
package water.api;
import hex.KMeans2;
import hex.drf.DRF;
import hex.gbm.GBM;
import hex.glm.GLM2;
import java.io.*;
import java.lang.reflect.Field;
import java.util.Properties;
import water.*;
import water.api.*;
import water.api.RequestArguments.Argument;
import water.util.Log;
/**
* Auto-gen doc support, for JSON and REST API docs
* @author <a href="mailto:cliffc@0xdata.com"></a>
*/
public abstract class DocGen {
public static final HTML HTML = new HTML();
public static final ReST ReST = new ReST();
public static void createFile (String fileName, String content) {
try {
      FileWriter fstream = new FileWriter(fileName, false); // second argument false: overwrite rather than append
BufferedWriter out = new BufferedWriter(fstream);
out.write(content);
out.close();
} catch( Throwable e ) {
System.err.println("Error: " + e.getMessage());
}
}
public static void createReSTFilesInCwd() {
// ImportFiles2 is spitting out a bunch of HTML junk, which is buggy. Disable for now.
// createFile("ImportFiles2.rst", new ImportFiles2().ReSTHelp());
createFile("Parse2.rst", new Parse2().ReSTHelp());
createFile("GBM.rst", new GBM().ReSTHelp());
createFile("DRF2.rst", new DRF().ReSTHelp());
createFile("GLM2.rst", new GLM2().ReSTHelp());
createFile("KMeans2.rst", new KMeans2().ReSTHelp());
// createFile("Summary2.rst", new Summary2().ReSTHelp());
}
/** The main method launched in the H2O environment and
* generating documentation.
*/
public static void main(String[] args) throws Exception {
    // Boot invokes mainClass water.H2O by default and then calls runClass
H2O.waitForCloudSize(1);
createReSTFilesInCwd();
H2O.exit(0);
}
// Class describing meta-info about H2O queries and results.
public static class FieldDoc {
final String _name; // Field name
final String _help; // Some descriptive text
final int _since, _until; // Min/Max supported-version numbers
final Class _clazz; // Java type: subtypes of Argument are inputs, otherwise outputs
final boolean _input, _required;
final ParamImportance _importance;
final Direction _direction;
final String _path;
final Class _type;
final String _valid;
final String _enabled;
final String _visible;
RequestArguments.Argument _arg; // Lazily filled in, as docs are asked for.
public FieldDoc( String name, String help, int min, int max, Class C, boolean input,
boolean required, ParamImportance importance, Direction direction, String path,
Class type, String valid, String enabled, String visible) {
_name = name; _help = help; _since = min; _until = max; _clazz = C; _input = input; _required = required; _importance = importance;
_direction = direction;
_path = path;
_type = type;
_valid = valid;
_enabled = enabled;
_visible = visible;
}
@Override public String toString() {
return "{"+_name+", from "+_since+" to "+_until+", "+_clazz.getSimpleName()+", "+_help+"}";
}
private final String version() {
return "Since version "+_since+
(_until==Integer.MAX_VALUE?"":", deprecated on version "+_until);
}
public final boolean isInput () {
return _input;
}
public final boolean isJSON() { return !isInput(); }
public final ParamImportance importance() { return _importance; }
public final String name() { return _name; }
// Specific accessors for input arguments. Not valid for JSON output fields.
private RequestArguments.Argument arg(Request R) {
if( _arg != null ) return _arg;
Class clzz = R.getClass();
// An amazing crazy API from the JDK again. Cannot search for protected
// fields without either (1) throwing NoSuchFieldException if you ask in
// a subclass, or (2) sorting through the list of ALL fields and EACH
// level of the hierarchy. Sadly, I catch NSFE & loop.
while( true ) {
try {
Field field = clzz.getDeclaredField(_name);
field.setAccessible(true);
Object o = field.get(R);
return _arg=((RequestArguments.Argument)o);
}
catch( NoSuchFieldException ie ) { clzz = clzz.getSuperclass(); }
catch( IllegalAccessException ie ) { break; }
catch( ClassCastException ie ) { break; }
}
return null;
}
}
// --------------------------------------------------------------------------
// Abstract text generators, for building pretty docs in either HTML or
// ReStructuredText form.
public abstract StringBuilder escape( StringBuilder sb, String s );
public abstract StringBuilder bodyHead( StringBuilder sb );
public abstract StringBuilder bodyTail( StringBuilder sb );
public abstract StringBuilder title( StringBuilder sb, String t );
public abstract StringBuilder section( StringBuilder sb, String t );
public abstract StringBuilder listHead( StringBuilder sb );
public abstract StringBuilder listBullet( StringBuilder sb, String s, String body, int d );
public abstract StringBuilder listTail( StringBuilder sb );
public abstract String bold( String s );
public abstract StringBuilder paraHead( StringBuilder sb );
public abstract StringBuilder paraTail( StringBuilder sb );
public StringBuilder paragraph( StringBuilder sb, String s ) {
return paraTail(paraHead(sb).append(s));
}
public String genHelp(Request R) {
final String name = R.getClass().getSimpleName();
final FieldDoc docs[] = R.toDocField();
final StringBuilder sb = new StringBuilder();
bodyHead(sb);
title(sb,name);
paragraph(sb,"");
section(sb,"Supported HTTP methods and descriptions");
String gs = R.toDocGET();
if( gs != null ) {
paragraph(sb,"GET");
paragraph(sb,gs);
}
section(sb,"URL");
paraTail(escape(paraHead(sb),"http://<h2oHost>:<h2oApiPort>/"+name+".json"));
// Escape out for not-yet-converted auto-doc Requests
if( docs == null ) return bodyTail(sb).toString();
section(sb,"Input parameters");
listHead(sb);
for( FieldDoc doc : docs ) {
if( doc.isInput() ) {
Argument arg = doc.arg(R); // Legacy
String help = doc._help;
boolean required = doc._required;
ParamImportance importance = doc.importance();
String[] errors = null;
if(arg != null) {
String description = arg.queryDescription();
if(description != null && description.length() != 0)
help = description;
required |= arg._required;
errors = arg.errors();
}
listBullet(sb,
bold(doc._name)+", a "+doc._clazz.getSimpleName()+", <i>"+importance.title+"</i>",
help+". "+doc.version(), 0);
if( errors != null || required ) {
paragraph(sb,"");
paragraph(sb,bold("Possible JSON error field returns:"));
listHead(sb);
String argErr = "Argument '"+doc._name+"' error: ";
if( errors != null )
for( String err : errors )
listBullet(sb,argErr+err,"",1);
if( required )
listBullet(sb,argErr+"Argument '"+doc._name+"' is required, but not specified","",1);
listTail(sb);
}
}
}
listTail(sb);
section(sb,"Output JSON elements");
listJSONFields(sb,docs);
section(sb,"HTTP response codes");
paragraph(sb,"200 OK");
paragraph(sb,"Success and error responses are identical.");
String s[] = R.DocExampleSucc();
if( s != null ) {
section(sb,"Success Example");
paraHead(sb);
url(sb,name,s);
paraTail(sb);
paragraph(sb,serve(name,s));
}
String f[] = R.DocExampleFail();
if( f != null ) {
section(sb,"Error Example");
paraHead(sb);
url(sb,name,f);
paraTail(sb);
paragraph(sb,serve(name,f));
}
bodyTail(sb);
return sb.toString();
}
private void listJSONFields( StringBuilder sb, FieldDoc[] docs ) {
listHead(sb);
for( FieldDoc doc : docs )
if( doc.isJSON() ) {
listBullet(sb,
bold(doc._name)+", a "+doc._clazz.getSimpleName(),
doc._help+". "+doc.version()+", "+doc.importance().title,0);
Class c = doc._clazz.getComponentType();
if( c==null ) c = doc._clazz;
if( Iced.class.isAssignableFrom(c) ) {
try {
FieldDoc[] nested = ((Iced)c.newInstance()).toDocField();
if( nested != null ) // Can be empty, e.g. for Key
listJSONFields(sb,nested);
}
catch( InstantiationException ie ) { water.util.Log.errRTExcept(ie); }
catch( IllegalAccessException ie ) { water.util.Log.errRTExcept(ie); }
}
}
listTail(sb);
}
private static StringBuilder url( StringBuilder sb, String name, String[] parms ) {
sb.append("curl -s ").append(name).append(".json");
boolean first = true;
for( int i=0; i<parms.length; i+= 2 ) {
if( first ) { first = false; sb.append("?"); }
else { sb.append("&"); }
sb.append(parms[i]).append('=').append(parms[i+1]);
}
return sb.append('\n');
}
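  // Illustrative note (not part of the original source): for name "Jobs" and
  // parms {"key","abc"}, the line produced above is: curl -s Jobs.json?key=abc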
private static String serve( String name, String[] parms ) {
Properties p = new Properties();
for( int i=0; i<parms.length; i+= 2 )
p.setProperty(parms[i],parms[i+1]);
NanoHTTPD.Response r = RequestServer.SERVER.serve(name+".json",null,null,p);
try {
int l = r.data.available();
byte[] b = new byte[l];
r.data.read(b);
return new String(b);
} catch( IOException ioe ) {
Log.err(ioe);
return null;
}
}
// --------------------------------------------------------------------------
// HTML flavored help text
public static class HTML extends DocGen {
@SuppressWarnings("unused")
@Override public StringBuilder escape(StringBuilder sb, String s ) {
int len=s.length();
for( int i=0; i<len; i++ ) {
char c = s.charAt(i);
if( false ) ;
        else if( c=='<' ) sb.append("&lt;");
        else if( c=='>' ) sb.append("&gt;");
        else if( c=='&' ) sb.append("&amp;");
        else if( c=='"' ) sb.append("&quot;");
else sb.append(c);
}
return sb;
}
public String escape2(String s) {
StringBuilder sb = new StringBuilder(s.length());
escape(sb, s);
return sb.toString();
}
@Override public StringBuilder bodyHead( StringBuilder sb ) {
return sb.append("<div class='container'>"+
"<div class='row-fluid'>"+
"<div class='span12'>");
}
@Override public StringBuilder bodyTail( StringBuilder sb ) { return sb.append("</div></div></div>"); }
@Override public StringBuilder title ( StringBuilder sb, String t ) { return sb.append("<h3>").append(t).append("</h3>\n"); }
@Override public StringBuilder section( StringBuilder sb, String t ) { return sb.append("<h4>").append(t).append("</h4>\n"); }
@Override public StringBuilder paraHead( StringBuilder sb ) { return sb.append("<p>"); }
@Override public StringBuilder paraTail( StringBuilder sb ) { return sb.append("</p>\n"); }
@Override public StringBuilder listHead( StringBuilder sb ) { return sb.append("<ul>"); }
@Override public StringBuilder listBullet( StringBuilder sb, String s, String body, int d ) {
return paragraph(sb.append("<li>").append(s).append("</li>"),body).append('\n');
}
@Override public StringBuilder listTail( StringBuilder sb ) { return sb.append("</ul>\n"); }
@Override public String bold( String s ) { return "<b>"+s+"</b>"; }
public StringBuilder arrayHead( StringBuilder sb ) { return arrayHead(sb,null); }
public StringBuilder progress(float value, StringBuilder sb){
int pct = (int) (value * 100);
String type = "progress-stripped active";
if (pct==-100) { // task is done
pct = 100;
type = "progress-success";
} else if (pct==-200) {
pct = 100;
type = "progress-warning";
}
// @formatter:off
sb.append
("<div style='margin-bottom:0px;padding-bottom:0xp;margin-top:8px;height:5px;width:180px' class='progress "+type+"'>").append //
("<div class='bar' style='width:" + pct + "%;'>").append //
("</div>").append //
("</div>");
// @formatter:on
return sb;
}
public StringBuilder arrayHead( StringBuilder sb, String[] headers ) {
sb.append("<span style='display: inline-block;'>");
sb.append("<table class='table table-striped table-bordered'>\n");
if( headers != null ) {
sb.append("<tr>");
for( String s : headers ) sb.append("<th>").append(s).append("</th>");
sb.append("</tr>\n");
}
return sb;
}
public StringBuilder arrayTail( StringBuilder sb ) { return sb.append("</table></span>\n"); }
public StringBuilder array( StringBuilder sb, String[] ss ) {
arrayHead(sb);
for( String s : ss ) sb.append("<tr><td>").append(s).append("</td></tr>");
return arrayTail(sb);
}
public StringBuilder toJSArray(StringBuilder sb, float[] nums) { return toJSArray(sb, nums, null, nums.length); }
public StringBuilder toJSArray(StringBuilder sb, float[] nums, Integer[] sortOrder, int maxValues) {
sb.append('[');
for (int i=0; i<maxValues; i++) {
if (i>0) sb.append(',');
sb.append(nums[sortOrder!=null ? sortOrder[i] : i]);
}
sb.append(']');
return sb;
}
public StringBuilder toJSArray(StringBuilder sb, String[] ss) { return toJSArray(sb, ss, null, ss.length); }
public StringBuilder toJSArray(StringBuilder sb, String[] ss, Integer[] sortOrder, int maxValues) {
sb.append('[');
for (int i=0; i<maxValues; i++) {
if (i>0) sb.append(',');
sb.append('"').append(ss[sortOrder!=null ? sortOrder[i] : i]).append('"');
}
sb.append(']');
return sb;
}
public <T> StringBuilder tableLine(StringBuilder sb, String title, T[] values, Integer[] sortOrder) {
return tableLine(sb, title, values, sortOrder, values.length);
}
public <T> StringBuilder tableLine(StringBuilder sb, String title, T[] values, Integer[] sortOrder, int maxValues) {
return tableLine(sb, title, values, sortOrder, maxValues, false, null);
}
public <T> StringBuilder tableLine(StringBuilder sb, String title, T[] values, Integer[] sortOrder, int maxValues, boolean checkBoxes, String idName) {
assert sortOrder == null || values.length == sortOrder.length;
sb.append("<tr><th>").append(title).append("</th>");
for( int i=0; i<maxValues; i++ ) {
sb.append("<td>");
T val = values[sortOrder!=null ? sortOrder[i] : i];
if (checkBoxes) sb.append("<input type=\"checkbox\" name=\"").append(idName).append("\" value=\"").append(val).append("\" checked /> ");
sb.append(val);
sb.append("</td>");
}
sb.append("</tr>");
return sb;
}
public StringBuilder tableLine(StringBuilder sb, String title, float[] values, Integer[] sortOrder) {
return tableLine(sb, title, values, sortOrder, values.length);
}
public StringBuilder tableLine(StringBuilder sb, String title, float[] values, Integer[] sortOrder, int maxValues) {
assert sortOrder == null || values.length == sortOrder.length;
sb.append("<tr><th>").append(title).append("</th>");
for( int i=0; i<maxValues; i++ )
sb.append(String.format("<td>%5.4f</td>",values[sortOrder!=null ? sortOrder[i] : i]));
sb.append("</tr>");
return sb;
}
public StringBuilder graph(StringBuilder sb, String gid, String gname, StringBuilder ...gparams) {
sb.append("<style scoped>@import url('/h2o/css/graphs.css')</style>");
sb.append("<script type=\"text/javascript\" src='/h2o/js/d3.v3.min.js'></script>");
sb.append("<script src='/h2o/js/graphs.js'></script>");
sb.append("<div id='").append(gid).append("'>")
.append(" <script>")
.append(gname).append("('").append(gid).append("'");
for (int i=0; i<gparams.length; i++) sb.append(", ").append(gparams[i]);
sb.append(");");
sb.append(" </script>")
.append("</div>");
return sb;
}
}
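  // Illustrative sketch (not part of the original source): composing the HTML helpers
  // above (arrayHead/tableLine/arrayTail) into a tiny two-row table; the titles and
  // values here are made up purely for the example.
  static String exampleTwoRowTable() {
    StringBuilder sb = new StringBuilder();
    HTML.arrayHead(sb, new String[]{"metric", "value"});
    HTML.tableLine(sb, "mse",  new Float[]{0.25f}, null);
    HTML.tableLine(sb, "rows", new Long[]{100L},   null);
    return HTML.arrayTail(sb).toString();
  }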
// --------------------------------------------------------------------------
// ReST flavored help text
static class ReST extends DocGen { // Restructured text
private StringBuilder cr(StringBuilder sb) { return sb.append('\n'); }
private StringBuilder underLine( StringBuilder sb, String s, char c ) {
cr(cr(sb).append(s));
int len = s.length();
for( int i=0; i<len; i++ ) sb.append(c);
return cr(cr(sb));
}
@Override public StringBuilder escape(StringBuilder sb, String s ) { return sb.append(s); }
@Override public StringBuilder bodyHead( StringBuilder sb ) { return sb; }
@Override public StringBuilder bodyTail( StringBuilder sb ) { return sb; }
@Override public StringBuilder title ( StringBuilder sb, String t ) { return underLine(sb,t,'='); }
@Override public StringBuilder section( StringBuilder sb, String t ) { return underLine(sb,t,'-'); }
@Override public StringBuilder listHead( StringBuilder sb ) { return cr(sb); }
@Override public StringBuilder listBullet( StringBuilder sb, String s, String body, int d ) {
if( d > 0 ) sb.append(" ");
cr(sb.append("* ").append(s));
if( body.length() > 0 )
cr(cr(cr(sb).append(" ").append(body)));
return sb;
}
@Override public StringBuilder listTail( StringBuilder sb ) { return cr(sb); }
@Override public String bold( String s ) { return "**"+s+"**"; }
@Override public StringBuilder paraHead( StringBuilder sb ) { return sb.append(" "); }
@Override public StringBuilder paraTail( StringBuilder sb ) { return cr(sb); }
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/Documentation.java
|
package water.api;
import water.AbstractBuildVersion;
import water.H2O;
/**
* Redirect to online documentation page.
*/
public class Documentation extends HTMLOnlyRequest {
protected String build(Response response) {
AbstractBuildVersion abv = H2O.getBuildVersion();
String branchName = abv.branchName();
String buildNumber = abv.buildNumber();
String url = "http://s3.amazonaws.com/h2o-release/h2o/" + branchName + "/" + buildNumber + "/docs-website/index.html";
return "<meta http-equiv=\"refresh\" content=\"0; url=" + url + "\">";
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/DomainMapping.java
|
package water.api;
import water.Request2;
import water.fvec.*;
import java.util.Arrays;
public class DomainMapping extends Request2 {
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
// This Request supports the HTML 'GET' command, and this is the help text
// for GET.
static final String DOC_GET = "Get the domain mapping of String in a Vec";
static final String NA = ""; // not available information
@API(help="An existing H2O Frame key.", required=true, filter=Default.class, gridable=false)
Frame src_key;
@API(help="A string whose domain mapping should be returned.", required=true, filter=Default.class, gridable = false)
String str;
@API(help="The domain mapping") long map;
// Just validate the frame, and fill in the summary bits
@Override protected Response serve() {
if( src_key == null ) return RequestServer._http404.serve();
Vec v = src_key.anyVec();
if (v.isEnum()) {
map = Arrays.asList(v.domain()).indexOf(str);
} else if (v.masterVec() != null && v.masterVec().isEnum()) {
map = Arrays.asList(v.masterVec().domain()).indexOf(str);
} else {
map = -1;
}
return Response.done(this);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/DownloadDataset.java
|
package water.api;
import water.*;
import water.fvec.Frame;
import java.io.InputStream;
import java.util.Properties;
/**
* @author tomasnykodym
*/
public class DownloadDataset extends Request2 {
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
// This Request supports the HTML 'GET' command, and this is the help text
// for GET.
static final String DOC_GET = "Download a Frame as a CSV file";
@API(help="An existing H2O Frame or VA key.", filter=Default.class)
Key src_key;
@API(help="Emit double values in a machine readable lossless format with Double.toHexString().", filter=Default.class)
boolean hex_string = false;
public static String link(Key k, String content){
return "<a href='/2/DownloadDataset?src_key=" + k.toString() + "'>" + content + "</a>";
}
@SuppressWarnings("resource")
@Override final public NanoHTTPD.Response serve(NanoHTTPD server, Properties args, RequestType type) {
    // Needs to be done also for help to initialize our argument records
checkArguments(args, type);
if (DKV.get(src_key) == null) throw new IllegalArgumentException(src_key.toString() + " not found.");
Object value = DKV.get(src_key).get();
InputStream csv = ((Frame) value).toCSV(true, hex_string);
NanoHTTPD.Response res = server.new Response(NanoHTTPD.HTTP_OK,NanoHTTPD.MIME_DEFAULT_BINARY, csv);
// Clean up Key name back to something resembling a file system name. Hope
// the user's browser actually asks for what to do with the suggested
// filename. Without this code, my FireFox would claim something silly
// like "no helper app installed", then fail the download.
String s = src_key.toString();
int x = s.length()-1;
boolean dot=false;
for( ; x >= 0; x-- )
if( !Character.isLetterOrDigit(s.charAt(x)) && s.charAt(x)!='_' )
if( s.charAt(x)=='.' && !dot ) dot=true;
else break;
String suggested_fname = s.substring(x+1).replace(".hex", ".csv");
if( !suggested_fname.endsWith(".csv") )
suggested_fname = suggested_fname+".csv";
res.addHeader("Content-Disposition", "filename=" + suggested_fname);
return res;
}
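  // Illustrative note (not part of the original source): the filename cleanup above turns
  // a key such as "nfs:/tmp/iris.hex" into the suggested download filename "iris.csv".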
@Override protected Response serve() {
return Response.doneEmpty();
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/Exec2.java
|
package water.api;
import water.util.Log;
import water.*;
import water.exec.*;
import water.fvec.*;
import java.util.Arrays;
import java.util.Properties;
public class Exec2 extends Request2 {
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
// This Request supports the HTML 'GET' command, and this is the help text for GET.
static final String DOC_GET = "Executes a string in H2O's R-like language.";
@API(help="String to execute", required=true, filter=Default.class)
String str;
@API(help="Warning message,ifany") String[] warning;
@API(help="Parsing error, if any") String error;
@API(help="Result key" ) Key key;
@API(help="Rows in Frame result" ) long num_rows;
@API(help="Columns in Frame result" ) int num_cols;
@API(help="Scalar result" ) double scalar;
@API(help="Function result" ) String funstr;
// Pretty-print of result. For Frames, first 10 rows. For scalars, just the
// value. For functions, the pretty-printed AST.
@API(help="String result" ) String result;
@API(help="Array of Column Summaries.") Inspect2.ColSummary cols[];
@Override protected Response serve() {
if( str == null ) return RequestServer._http404.serve();
Throwable e;
Env env = null;
try {
      env = water.exec.Exec2.exec(str);
      if( env == null ) throw new IllegalArgumentException("Null return from Exec2?");
      StringBuilder sb = env._sb;
      if( sb.length()!=0 ) sb.append("\n");
if( env.sp() == 0 ) { // Empty stack
} else if( env.isAry() ) {
Frame fr = env.peekAry();
String skey = env.peekKey();
num_rows = fr.numRows();
num_cols = fr.numCols();
cols = new Inspect2.ColSummary[num_cols];
for( int i=0; i<num_cols; i++ )
cols[i] = new Inspect2.ColSummary(fr._names[i],fr.vecs()[i]);
// Now the first few rows.
String[] fs = fr.toStringHdr(sb);
for( int i=0; i<Math.min(6,fr.numRows()); i++ )
fr.toString(sb,fs,i);
// Nuke the result
env.pop();
} else if( env.isFcn() ) {
ASTOp op = env.peekFcn();
funstr = op.toString();
sb.append(op.toString(true)); // Verbose function
env.pop();
} else {
scalar = env.popDbl();
sb.append(Double.toString(scalar));
}
if (env.warnings().length != 0) { sb.append(Arrays.toString(env.warnings())); }
result=sb.toString();
// num_cols = num_rows == 0 ? 0 : num_cols;
return Response.done(this);
}
    catch( IllegalArgumentException pe ) { e=pe;} // No logging of user typos
catch( Throwable e2 ) { Log.err(e=e2); }
finally {
if (env != null) {
try { env.remove_and_unlock(); }
catch (Exception xe) { Log.err("env.remove_and_unlock() failed", xe); }
}
}
return Response.error(e);
}
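  // Illustrative note (not part of the original source): a simple expression such as
  // str="1+2" should leave a scalar on the stack, so the response carries scalar=3.0 and
  // result="3.0"; a key name on its own would instead produce a Frame result with column
  // summaries and the first few rows.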
@Override protected NanoHTTPD.Response serveGrid(NanoHTTPD server, Properties parms, RequestType type) {
return superServeGrid(server, parms, type);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/ExportFiles.java
|
package water.api;
import java.io.*;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import water.*;
import water.api.RequestServer.API_VERSION;
import water.fvec.Frame;
import water.persist.PersistHdfs;
import water.util.FSUtils;
import water.util.Log;
public class ExportFiles extends Request2 {
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
// This Request supports the HTML 'GET' command, and this is the help text
// for GET.
static final String DOC_GET =
"Export a Frame from H2O onto a file system (local disk or HDFS).";
@Override
public API_VERSION[] supportedVersions() { return SUPPORTS_ONLY_V2; }
@API(help="Key to an existing H2O Frame (or ValueArray).", required=true,filter=Default.class)
Key src_key;
@API(help="Path to a file on either local disk of connected node or HDFS.", required=true,filter=GeneralFile.class,gridable=false)
String path;
@API(help="Overwrite existing files.", required=false,filter=Default.class,gridable=false)
boolean force = false;
public static String link(Key k, String content){
return "<a href='/2/ExportFiles.query?src_key=" + k.toString() + "'>" + content + "</a>";
}
/**
* Iterates over fields and their annotations, and creates argument handlers.
*/
boolean _local = false;
@Override protected void registered(API_VERSION version) { super.registered(version); }
@Override protected Response serve() {
try {
// pull everything local
Log.info("ExportFiles processing (" + path + ")");
if (DKV.get(src_key) == null) throw new IllegalArgumentException(src_key.toString() + " not found.");
Object value = DKV.get(src_key).get();
// create a stream to read the entire VA or Frame
if( !(value instanceof Frame) ) throw new UnsupportedOperationException("Can only export Frames.");
InputStream csv = ((Frame) value).toCSV(true);
String p2 = path.toLowerCase();
if( p2.startsWith("hdfs://" ) ) serveHdfs(csv);
else if( p2.startsWith("s3n://" ) ) serveHdfs(csv);
else serveLocalDisk(csv);
return RequestBuilders.Response.done(this);
} catch (Throwable t) {
return RequestBuilders.Response.error(t);
}
}
protected void serveHdfs(InputStream csv) throws IOException {
if (FSUtils.isBareS3NBucketWithoutTrailingSlash(path)) { path += "/"; }
Path p = new Path(path);
org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(p.toUri(), PersistHdfs.CONF);
if( !force && fs.exists(p) ) throw new IllegalArgumentException("File " + path + " already exists.");
fs.mkdirs(p.getParent());
FSDataOutputStream s = fs.create(p);
byte[] buffer = new byte[1024];
try {
int len;
while ((len = csv.read(buffer)) > 0) {
s.write(buffer, 0, len);
}
} finally {
s.close();
Log.info("Key '" + src_key.toString() + "' was written to " + path.toString() + ".");
}
}
private void serveLocalDisk(InputStream csv) throws IOException {
_local = true;
OutputStream output = null;
try {
File f = new File(path);
if( !force && f.exists() ) throw new IllegalArgumentException("File " + path + " already exists.");
output = new FileOutputStream(path.toString());
byte[] buffer = new byte[1024];
int len;
while((len = csv.read(buffer)) > 0) {
output.write(buffer, 0, len);
}
Log.info("Key '" + src_key.toString() + "' was written to " +
(_local && H2O.CLOUD.size() > 1 ? H2O.SELF_ADDRESS + ":" : "") + path.toString() + ".");
} finally {
if (output != null) output.close();
}
}
@Override public boolean toHTML( StringBuilder sb ) {
DocGen.HTML.section(sb, "Export done. Key '" + src_key.toString() +
"' was written to " + (_local && H2O.CLOUD.size() > 1 ? H2O.SELF_ADDRESS + ":" : "") + path.toString());
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/ExportHdfs.java
|
package water.api;
import org.apache.hadoop.fs.Path;
import water.*;
import water.persist.PersistHdfs;
import water.util.Log;
import dontweave.gson.JsonObject;
public class ExportHdfs extends Request {
protected final H2OExistingKey _source = new H2OExistingKey(SOURCE_KEY);
protected final Str _path = new Str(PATH);
public ExportHdfs() {
_requestHelp = "Exports JSON to the given HDFS path. The Web server node "
+ "must have write permission to the HDFS path.";
_path._requestHelp = "HDFS path to export to.";
}
@Override protected Response serve() {
Value value = _source.value();
String path = _path.value();
try {
if( value == null ) throw new IllegalArgumentException("Unknown key: " + _source.record()._originalValue);
byte[] data = null;
Model model = getAsModel(value);
if( model != null ) {
// Add extension, used during import
if( !path.endsWith(Extensions.JSON) ) path += Extensions.JSON;
data = model.writeJSON(new AutoBuffer()).buf();
}
if( data != null ) PersistHdfs.store(new Path(path), data);
else throw new UnsupportedOperationException("Only models can be exported");
} catch( Throwable e ) {
return Response.error(e);
}
JsonObject json = new JsonObject();
Response r = Response.done(json);
return r;
}
private static Model getAsModel(Value v) {
if( v.type() == TypeMap.PRIM_B ) return null;
Iced iced = v.get();
if( iced instanceof Model ) return (Model) iced;
return null;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/ExportModel.java
|
package water.api;
import water.Model;
import water.Request2;
public class ExportModel extends Request2 {
static final int API_WEAVER = 1;
static public DocGen.FieldDoc[] DOC_FIELDS;
static final String DOC_GET = "Exports a model as JSON";
@API(help = "The model to export", json = true, required = true, filter = Default.class)
public Model model;
@Override protected Response serve() {
return Response.done(this);
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/Filter.java
|
package water.api;
public interface Filter {
boolean run(Object value);
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/FrameSplitPage.java
|
package water.api;
import hex.FrameSplitter;
import java.util.Arrays;
import java.util.Random;
import water.*;
import water.fvec.Frame;
import water.util.MRUtils;
import water.util.Utils;
/** Small utility page to split a frame
 *  into n parts based on the given ratios.
 *
 *  <p>The user specifies n split ratios, which define the relative sizes of the
 *  resulting datasets; the splitter produces (n+1) datasets based on a random
 *  selection of rows from the original dataset.</p>
*
* <p>Keep original chunk distribution.</p>
*
* @see FrameSplitter
*/
public class FrameSplitPage extends Func {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help = "Data frame", required = true, filter = Default.class)
public Frame source;
@API(help = "Split ratio - can be an array of split ratios", required = true, filter = Default.class)
public float[] ratios = new float[] {0.75f}; // n-values => n+1 output datasets
@API(help = "Shuffle rows before splitting", required = false, filter = Default.class)
public boolean shuffle = false;
@API(help = "Seed for reproducible shuffling.", required = false, filter = Default.class)
public long seed = new Random().nextLong();
@API(help = "Keys for each split partition.")
public Key[] split_keys;
@API(help = "Holds a number of rows per each output partition.")
public long[] split_rows;
@API(help = "Holds a number of split ratios per partition.")
public float[] split_ratios;
// Check parameters
@Override protected void init() throws IllegalArgumentException {
super.init();
/* Check input parameters */
float sum = 0;
long nrows = source.numRows();
    if (nrows <= ratios.length) throw new IllegalArgumentException("Dataset does not have enough rows to be split!");
for (int i=0; i<ratios.length; i++) {
      if (!(ratios[i] > 0 && ratios[i] < 1)) throw new IllegalArgumentException("Split ratio has to be in the (0,1) interval!");
      if (ratios[i] * nrows <= 1) throw new IllegalArgumentException("Ratio " + ratios[i] + " produces an empty frame since the source frame has only " + nrows + " rows!");
sum += ratios[i];
}
if (!(sum<1f)) throw new IllegalArgumentException("Sum of split ratios has to be less than 1!");
}
// Run the function
@Override protected void execImpl() {
Frame frame = source;
if (shuffle) {
// FIXME: switch to global shuffle
frame = MRUtils.shuffleFramePerChunk(Utils.generateShuffledKey(frame._key), frame, seed);
frame.delete_and_lock(null).unlock(null); // save frame to DKV
// delete frame on the end
gtrash(frame);
}
FrameSplitter fs = new FrameSplitter(frame, ratios);
H2O.submitTask(fs);
Frame[] splits = fs.getResult();
split_keys = new Key [splits.length];
split_rows = new long[splits.length];
float rsum = Utils.sum(ratios);
split_ratios = Arrays.copyOf(ratios, splits.length);
split_ratios[splits.length-1] = 1f-rsum;
long sum = 0;
for(int i=0; i<splits.length; i++) {
sum += splits[i].numRows();
split_keys[i] = splits[i]._key;
split_rows[i] = splits[i].numRows();
}
assert sum == source.numRows() : "Frame split produced wrong number of rows: nrows(source) != sum(nrows(splits))";
}
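  // Illustrative sketch (not part of the original source): expected row counts for the
  // (n+1) partitions implied by n ratios, with the remainder falling into the last split.
  static long[] exampleExpectedRows(long nrows, float[] ratios) {
    long[] rows = new long[ratios.length + 1];
    long used = 0;
    for( int i = 0; i < ratios.length; i++ ) {
      rows[i] = (long) (ratios[i] * nrows);
      used += rows[i];
    }
    rows[ratios.length] = nrows - used;
    return rows;
  }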
@Override public boolean toHTML(StringBuilder sb) {
int nsplits = split_keys.length;
String [] headers = new String[nsplits+2];
headers[0] = "";
for(int i=0; i<nsplits; i++) headers[i+1] = "Split #"+i;
headers[nsplits+1] = "Total";
DocGen.HTML.arrayHead(sb, headers);
// Key table row
sb.append("<tr><td>").append(DocGen.HTML.bold("Keys")).append("</td>");
for (int i=0; i<nsplits; i++) {
Key k = split_keys[i];
sb.append("<td>").append(Inspect2.link(k)).append("</td>");
}
sb.append("<td>").append(Inspect2.link(source._key)).append("</td>");
sb.append("</tr>");
// Number of rows row
sb.append("<tr><td>").append(DocGen.HTML.bold("Rows")).append("</td>");
for (int i=0; i<nsplits; i++) {
long r = split_rows[i];
sb.append("<td>").append(String.format("%,d", r)).append("</td>");
}
sb.append("<td>").append(String.format("%,d", Utils.sum(split_rows))).append("</td>");
sb.append("</tr>");
// Split ratios
sb.append("<tr><td>").append(DocGen.HTML.bold("Ratios")).append("</td>");
for (int i=0; i<nsplits; i++) {
float r = 100*split_ratios[i];
sb.append("<td>").append(String.format("%.2f %%", r)).append("</td>");
}
sb.append("<td>").append(String.format("%.2f %%", 100*Utils.sum(split_ratios))).append("</td>");
sb.append("</tr>");
DocGen.HTML.arrayTail(sb);
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/Frames.java
|
package water.api;
import dontweave.gson.*;
import org.apache.commons.math3.util.Pair;
import water.*;
import water.api.Models.ModelSummary;
import water.fvec.Frame;
import java.util.*;
public class Frames extends Request2 {
///////////////////////
// Request2 boilerplate
///////////////////////
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
// This Request supports the HTML 'GET' command, and this is the help text
// for GET.
static final String DOC_GET = "Return the list of dataframes.";
public static String link(Key k, String content){
return "<a href='/2/Frames'>" + content + "</a>";
}
////////////////
// Query params:
////////////////
@API(help="An existing H2O Frame key.", required=false, filter=Default.class)
Frame key = null;
@API(help="Find Models that are compatible with the Frame.", required=false, filter=Default.class)
boolean find_compatible_models = false;
@API(help="An existing H2O Model key to score with the Frame which is specified by the key parameter.", required=false, filter=Default.class)
Model score_model = null;
/////////////////
// The Code (tm):
/////////////////
public static final Gson gson = new GsonBuilder().serializeSpecialFloatingPointValues().setPrettyPrinting().create();
public static final class FrameSummary {
public String id = null;
public String key = null;
public long creation_epoch_time_millis = -1;
public String[] column_names = { };
public Set<String> compatible_models = new HashSet<String>();
public boolean is_raw_frame = true; // guilty until proven innocent
}
// TODO: refactor, since this is duplicated
private static Map whitelistJsonObject(JsonObject unfiltered, Set<String> whitelist) {
// If we create a new JsonObject here and serialize it the key/value pairs are inside
    // a superfluous "members" object, so create a Map instead.
JsonObject filtered = new JsonObject();
Set<Map.Entry<String,JsonElement>> entries = unfiltered.entrySet();
for (Map.Entry<String,JsonElement> entry : entries) {
String key = entry.getKey();
if (whitelist.contains(key))
filtered.add(key, entry.getValue());
}
return gson.fromJson(gson.toJson(filtered), Map.class);
}
/**
* Fetch all the Models so we can see if they are compatible with our Frame(s).
*/
private Pair<Map<String, Model>, Map<String, Set<String>>> fetchModels() {
Map<String, Model> all_models = null;
Map<String, Set<String>> all_models_cols = null;
if (this.find_compatible_models) {
// caches for this request
all_models = (new Models()).fetchAll();
all_models_cols = new TreeMap<String, Set<String>>();
for (Map.Entry<String, Model> entry : all_models.entrySet()) {
all_models_cols.put(entry.getKey(), new TreeSet<String>(Arrays.asList(entry.getValue()._names)));
}
}
return new Pair<Map<String, Model>, Map<String, Set<String>>>(all_models, all_models_cols);
}
private static Map<String, Model> findCompatibleModels(Frame frame, Map<String, Model> all_models, Map<String, Set<String>> all_models_cols) {
Map<String, Model> compatible_models = new TreeMap<String, Model>();
Set<String> frame_column_names = new HashSet(Arrays.asList(frame._names));
for (Map.Entry<String, Set<String>> entry : all_models_cols.entrySet()) {
Set<String> model_cols = entry.getValue();
if (frame_column_names.containsAll(model_cols)) {
/// See if adapt throws an exception or not.
try {
Model model = all_models.get(entry.getKey());
Frame[] outputs = model.adapt(frame, false); // TODO: this does too much work; write canAdapt()
Frame adapted = outputs[0];
Frame trash = outputs[1];
// adapted.delete(); // TODO: shouldn't we clean up adapted vecs? But we can't delete() the frame as a whole. . .
trash.delete();
// A-Ok
compatible_models.put(entry.getKey(), model);
}
catch (Exception e) {
// skip
}
}
}
return compatible_models;
}
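// Illustrative example: a model trained on columns {sepal_len, species} is reported as
// compatible with a frame whose columns are a superset, e.g. {sepal_len, petal_len, species},
// provided model.adapt() does not throw for that frame.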
public static Map<String, FrameSummary> generateFrameSummaries(Set<String>keys, Map<String, Frame> frames, boolean find_compatible_models, Map<String, Model> all_models, Map<String, Set<String>> all_models_cols) {
Map<String, FrameSummary> frameSummaries = new TreeMap<String, FrameSummary>();
if (null == keys) {
keys = frames.keySet();
}
for (String key : keys) {
FrameSummary summary = new FrameSummary();
Frames.summarizeAndEnhanceFrame(summary, frames.get(key), find_compatible_models, all_models, all_models_cols);
frameSummaries.put(key, summary);
}
return frameSummaries;
}
/**
* Summarize fields in water.fvec.Frame.
*/
private static void summarizeAndEnhanceFrame(FrameSummary summary, Frame frame, boolean find_compatible_models, Map<String, Model> all_models, Map<String, Set<String>> all_models_cols) {
UniqueId unique_id = frame.getUniqueId();
summary.id = unique_id.getId();
summary.key = unique_id.getKey();
summary.creation_epoch_time_millis = unique_id.getCreationEpochTimeMillis();
summary.column_names = frame._names;
summary.is_raw_frame = frame.isRawData();
if (find_compatible_models) {
Map<String, Model> compatible_models = findCompatibleModels(frame, all_models, all_models_cols);
summary.compatible_models = compatible_models.keySet();
}
}
/**
* Fetch all Frames from the KV store.
*/
protected static Map<String, Frame>fetchAll() {
// Get all the fvec frame keys.
return H2O.KeySnapshot.globalSnapshot().fetchAll(Frame.class); // Sort for pretty display and reliable ordering.
}
/**
* For one or more Frames from the KV store, summarize and enhance them and return a Response containing a map of them.
*/
private Response serveOneOrAll(Map<String, Frame> framesMap) {
// returns empty sets if !this.find_compatible_models
Pair<Map<String, Model>, Map<String, Set<String>>> models_info = fetchModels();
Map<String, Model> all_models = models_info.getFirst();
Map<String, Set<String>> all_models_cols = models_info.getSecond();
Map<String, FrameSummary> frameSummaries = Frames.generateFrameSummaries(null, framesMap, find_compatible_models, all_models, all_models_cols);
Map resultsMap = new LinkedHashMap();
resultsMap.put("frames", frameSummaries);
// If find_compatible_models is set, also include a map of the Model summaries. Should we put this on a separate switch?
if (this.find_compatible_models) {
Set<String> all_referenced_models = new TreeSet<String>();
for (Map.Entry<String, FrameSummary> entry: frameSummaries.entrySet()) {
FrameSummary summary = entry.getValue();
all_referenced_models.addAll(summary.compatible_models);
}
Map<String, ModelSummary> modelSummaries = Models.generateModelSummaries(all_referenced_models, all_models, false, null, null);
resultsMap.put("models", modelSummaries);
}
// TODO: temporary hack to get things going
String json = gson.toJson(resultsMap);
JsonObject result = gson.fromJson(json, JsonElement.class).getAsJsonObject();
return Response.done(result);
}
/**
* Score a frame with the given model.
*/
protected static Response scoreOne(Frame frame, Model score_model) {
water.ModelMetrics metrics = water.ModelMetrics.getFromDKV(score_model, frame);
if (null == metrics) {
// have to compute
water.util.Log.debug("Cache miss: computing ModelMetrics. . .");
long before = System.currentTimeMillis();
Frame predictions = score_model.score(frame, true); // TODO: for now we're always calling adapt inside score
long after = System.currentTimeMillis();
ConfusionMatrix cm = new ConfusionMatrix(); // for regression this computes the MSE
AUC auc = null;
HitRatio hr = null;
if (score_model.isClassifier()) {
auc = new AUC();
// hr = new HitRatio();
score_model.calcError(frame, frame.vec(score_model.responseName()), predictions, predictions, "Prediction error:",
true, 20, cm, auc, hr);
} else {
score_model.calcError(frame, frame.vec(score_model.responseName()), predictions, predictions, "Prediction error:",
true, 20, cm, null, null);
}
// Now call AUC and ConfusionMatrix and maybe HitRatio
metrics = new water.ModelMetrics(score_model.getUniqueId(),
score_model.getModelCategory(),
frame.getUniqueId(),
after - before,
after,
(auc == null ? null : auc.data()),
cm);
// Put the metrics into the KV store
metrics.putInDKV();
} else {
// it's already cached in the DKV
water.util.Log.debug("using ModelMetrics from the cache. . .");
}
JsonObject metricsJson = metrics.toJSON();
JsonArray metricsArray = new JsonArray();
metricsArray.add(metricsJson);
JsonObject result = new JsonObject();
result.add("metrics", metricsArray);
return Response.done(result);
}
@Override
protected Response serve() {
if (null == this.key) {
return serveOneOrAll(fetchAll());
} else {
if (null == this.score_model) {
// just serve it
Frame frame = this.key;
Map<String, Frame> framesMap = new TreeMap(); // Sort for pretty display and reliable ordering.
framesMap.put(frame._key.toString(), frame);
return serveOneOrAll(framesMap);
} else {
// score it
return scoreOne(this.key, this.score_model);
}
}
} // serve()
} // class Frames
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/GBMModelView.java
|
package water.api;
import hex.gbm.GBM.GBMModel;
import water.*;
public class GBMModelView extends Request2 {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help="GBM Model Key", required=true, filter=GBMModelKeyFilter.class)
Key _modelKey;
class GBMModelKeyFilter extends H2OKey { public GBMModelKeyFilter() { super("model_key",true); } }
@API(help="GBM Model")
public GBMModel gbm_model;
public static String link(String txt, Key model) {
return "<a href='GBMModelView.html?_modelKey=" + model + "'>" + txt + "</a>";
}
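// Illustrative example: link("view", Key.make("gbm_model_1")) returns
// <a href='GBMModelView.html?_modelKey=gbm_model_1'>view</a>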
public static Response redirect(Request req, Key modelKey) {
return Response.redirect(req, "/2/GBMModelView", "_modelKey", modelKey);
}
@Override public boolean toHTML(StringBuilder sb){
gbm_model.get_params().makeJsonBox(sb);
gbm_model.generateHTML("GBM Model", sb);
return true;
}
@Override protected Response serve() {
gbm_model = UKV.get(_modelKey);
if (gbm_model == null) return Response.error("Model '" + _modelKey + "' not found!");
else return Response.done(this);
}
@Override public void toJava(StringBuilder sb) { gbm_model.toJavaHtml(sb); }
@Override protected String serveJava() {
GBMModel m = UKV.get(_modelKey);
if (m!=null)
return m.toJava();
else
return "";
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/GBMProgressPage.java
|
package water.api;
import hex.gbm.GBM.GBMModel;
import water.Job;
import water.Key;
import water.UKV;
public class GBMProgressPage extends Progress2 {
/** Return {@link Response} for finished job. */
@Override protected Response jobDone(final Key dst) {
return GBMModelView.redirect(this, dst);
}
public static Response redirect(Request req, Key jobkey, Key dest) {
return Response.redirect(req, "/2/GBMProgressPage", JOB_KEY, jobkey, DEST_KEY, dest);
}
@Override public boolean toHTML( StringBuilder sb ) {
GBMModel m = UKV.get(destination_key);
if (m!=null) m.generateHTML("GBM Model", sb);
else DocGen.HTML.paragraph(sb, "Pending...");
return true;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/GLMPredict.java
|
package water.api;
import hex.glm.GLMModel;
import water.*;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.RString;
public class GLMPredict extends Request2 {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
@API(help = "Model", required = true, filter = Default.class)
public Key model; // Type to Model when retired OldModel
@API(help="lambda",required=false,filter=Default.class)
double lambda = Double.NaN;
@API(help = "Data frame", required = true, filter = Default.class)
public Frame data;
@API(help = "Prediction", filter = Default.class)
public Key prediction;
public static String link(Key k, double lambda, String content) {
RString rs = new RString("<a href='GLMPredict.query?model=%$key&lambda=%lambda'>%content</a>");
rs.replace("key", k.toString());
rs.replace("lambda",lambda);
rs.replace("content", content);
return rs.toString();
}
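// Illustrative example: link(Key.make("glm_model_1"), 0.5, "predict") expands the RString
// template above to roughly
// <a href='GLMPredict.query?model=glm_model_1&lambda=0.5'>predict</a>
// (exact output depends on RString's %$ escaping of the key).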
@Override protected Response serve() {
try {
if( model == null )
throw new IllegalArgumentException("Model is required to perform validation!");
GLMModel m = new GLMModel.GetScoringModelTask(null, model,lambda).invokeTask()._res;
// Create a new random key
if ( prediction == null )
prediction = Key.make("__Prediction_" + Key.make());
Frame fr = new Frame(prediction,new String[0],new Vec[0]).delete_and_lock(null);
fr = m.score(data);
fr = new Frame(prediction,fr._names,fr.vecs()); // Jam in the frame key
fr.unlock(null);
return Inspect2.redirect(this, prediction.toString());
} catch( Throwable t ) {
return Response.error(t);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/GainsLiftTable.java
|
package water.api;
import water.Func;
import water.Key;
import water.MRTask2;
import water.UKV;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.Log;
import water.util.Utils;
/* Compute the Gains and Lift Table for a binary classifier */
public class GainsLiftTable extends Func {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
public static final String DOC_GET = "Gains/Lift Table";
@API(help = "", required = true, filter = Default.class, json=true)
public Frame actual;
@API(help="Column of the actual results", required=true, filter=actualVecSelect.class, json=true)
public Vec vactual;
class actualVecSelect extends VecClassSelect { actualVecSelect() { super("actual"); } }
@API(help = "", required = true, filter = Default.class, json=true)
public Frame predict;
@API(help="Column of the predicted results", required=true, filter=predictVecSelect.class, json=true)
public Vec vpredict;
class predictVecSelect extends VecClassSelect { predictVecSelect() { super("predict"); } }
@API(help = "The number of rows in the gains table", required = false, filter = Default.class, json = true)
public int groups = 10;
// helper - contains the probability thresholds for each of the groups
double[] thresholds;
// Results (Output)
@API(help="Response rates", json=true)
public float[] response_rates;
@API(help="Average response rate", json=true)
public float avg_response_rate;
@Override protected void init() throws IllegalArgumentException {
// Input handling
if( vactual==null || vpredict==null )
throw new IllegalArgumentException("Missing vactual or vpredict!");
if (vactual.length() != vpredict.length())
throw new IllegalArgumentException("Both arguments must have the same length ("+vactual.length()+"!="+vpredict.length()+")!");
if (!vactual.isInt())
throw new IllegalArgumentException("Actual column must be integer class labels!");
if (vactual.cardinality() != -1 && vactual.cardinality() != 2)
throw new IllegalArgumentException("Actual column must contain binary class labels, but found cardinality " + vactual.cardinality() + "!");
if (vpredict.isEnum())
throw new IllegalArgumentException("vpredict cannot be class labels, expect probabilities.");
}
public GainsLiftTable() {}
public GainsLiftTable(float[] response_rates, float avg_response_rate) {
this.response_rates = response_rates;
this.avg_response_rate = avg_response_rate;
}
@Override protected void execImpl() {
Vec va = null, vp;
try {
va = vactual.toEnum(); // always returns TransfVec
vp = vpredict;
// The vectors are from different groups => align them, and delete the aligned copy after computation
if (!va.group().equals(vp.group())) {
vp = va.align(vp);
}
// compute thresholds for each quantile
{
thresholds = new double[groups];
for (int i=0; i<groups; ++i) {
QuantilesPage q = new QuantilesPage();
q.source_key = predict;
q.column = vpredict;
q.quantile = (groups-i-1.) / groups;
q.invoke();
thresholds[i] = q.result;
}
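// Illustrative example: with groups=10 the loop above requests the quantile levels
// 0.9, 0.8, ..., 0.0, so thresholds[0] is the 90th percentile of the predicted
// probabilities and thresholds[groups-1] is the minimum.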
if (Utils.minValue(thresholds) < 0) throw new IllegalArgumentException("Minimum probability cannot be negative.");
if (Utils.maxValue(thresholds) > 1) throw new IllegalArgumentException("Maximum probability cannot be greater than 1.");
// Now compute the GainsTask
GainsTask gt = new GainsTask(thresholds, va.length());
gt.doAll(va, vp);
response_rates = gt.response_rates();
avg_response_rate = gt.avg_response_rate();
}
} catch (Throwable t) {
// do nothing
} finally { // Delete adaptation vectors
if (va!=null) UKV.remove(va._key);
}
StringBuilder sb = new StringBuilder();
toASCII(sb);
Log.info(sb);
}
@Override public boolean toHTML( StringBuilder sb ) {
if (response_rates == null) return false;
DocGen.HTML.arrayHead(sb);
sb.append("<a href=\"http://books.google.com/books?id=-JwptfFItaoC&pg=PA318&lpg=PA319&source=bl&ots=_S6fJI5Wds&sig=Uvff-MosTE7CR4e8LdE8TdJvo44&hl=en&sa=X&ei=b3EcVMnHB6T2iwK3koC4Cw&ved=0CF0Q6AEwBw#v=onepage&q&f=false\">"
+ "Gains/Lift Table Reference</a></h4>");
// Sum up predicted & actuals
sb.append("<tr class='warning' style='min-width:60px'>");
sb.append("<th>Quantile</th><th>Response rate</th><th>Lift</th><th>Cumulative lift</th>");
sb.append("</tr>");
float cumulativelift = 0;
for( int i=0; i<groups; i++ ) {
sb.append("<tr>");
sb.append("<td>").append(Utils.formatPct((i + 1.) / groups)).append("</td>");
sb.append("<td>").append(Utils.formatPct(response_rates[i])).append("</td>");
final float lift = response_rates[i]/ avg_response_rate;
cumulativelift += lift/groups;
sb.append("<td>").append(lift).append("</td>");
sb.append("<td>").append(Utils.formatPct(cumulativelift)).append("</td>");
}
sb.append("<tr style='min-width:60px'><th>Total</th>");
sb.append("<td>").append(Utils.formatPct(avg_response_rate)).append("</td>");
sb.append("<td>").append(1.0).append("</td>");
sb.append("<td></td>");
DocGen.HTML.arrayTail(sb);
return true;
}
public void toASCII( StringBuilder sb ) {
if (response_rates == null) return;
// Sum up predicted & actuals
sb.append("Quantile Response rate Lift Cumulative lift\n");
float cumulativelift = 0;
for( int i=0; i<groups; i++ ) {
sb.append(Utils.formatPct((i + 1.) / groups));
sb.append(" ").append(Utils.formatPct(response_rates[i])).append(" ");
final float lift = response_rates[i]/ avg_response_rate;
cumulativelift += lift/groups;
sb.append(" ").append(lift).append(" ");
sb.append(" ").append(Utils.formatPct(cumulativelift)).append("\n");
}
sb.append("Total ");
sb.append(" ").append(Utils.formatPct(avg_response_rate)).append(" ");
sb.append(" ").append(1.0).append(" ");
sb.append(" \n");
}
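// Illustrative output of toASCII above (hypothetical numbers): one row per quantile bucket
// with its response rate, lift, and cumulative lift, followed by a Total row showing the
// average response rate and a lift of 1.0; exact number formatting comes from Utils.formatPct.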
// Compute Gains table via MRTask2
private static class GainsTask extends MRTask2<GainsTask> {
/* @OUT response_rates */
public final float[] response_rates() { return _response_rates; }
public final float avg_response_rate() { return _avg_response_rate; }
/* @IN total count of events */ final private double[] _thresh;
final private long _count;
private long[] _responses;
private long _avg_response;
private float _avg_response_rate;
private float[] _response_rates;
GainsTask(double[] thresh, long count) {
_thresh = thresh.clone();
_count = count;
}
@Override public void map( Chunk ca, Chunk cp ) {
_responses = new long[_thresh.length];
_avg_response = 0;
final int len = Math.min(ca._len, cp._len);
for( int i=0; i < len; i++ ) {
if (ca.isNA0(i)) continue;
final int a = (int)ca.at80(i);
if (a != 0 && a != 1) throw new IllegalArgumentException("Invalid values in vactual: must be binary (0 or 1).");
if (cp.isNA0(i)) continue;
final double pr = cp.at0(i);
for( int t=0; t < _thresh.length; t++ ) {
// count number of positive responses in bucket given by two thresholds
if (pr >= _thresh[t] && (t == 0 || pr < _thresh[t-1]) && a == 1) _responses[t]++;
}
if (a == 1) _avg_response++;
}
}
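// Illustrative example of the bucketing in map() above: with _thresh = {0.8, 0.5, 0.2}, a row
// with predicted probability 0.6 and actual label 1 satisfies 0.5 <= 0.6 < 0.8 and therefore
// increments _responses[1].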
@Override public void reduce( GainsTask other ) {
for( int i=0; i<_responses.length; ++i) {
_responses[i] += other._responses[i];
}
_avg_response += other._avg_response;
}
@Override public void postGlobal(){
_response_rates = new float[_thresh.length];
for (int i=0; i<_response_rates.length; ++i) {
_response_rates[i] = (float) _responses[i];
}
Utils.div(_response_rates, (float)_count/_thresh.length);
// spill over to next bucket - needed due to tie breaking in quantiles (the last bucket has no next bucket to spill into)
for (int i=0; i<_response_rates.length-1; ++i) {
if(_response_rates[i] > 1) {
_response_rates[i+1] += (_response_rates[i]-1);
_response_rates[i] -= (_response_rates[i]-1);
}
}
_avg_response_rate = (float)_avg_response / _count;
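// Illustrative example of the spill-over loop above: with raw rates {1.2, 0.5, 0.3}, the
// excess 0.2 from bucket 0 moves into bucket 1, giving {1.0, 0.7, 0.3}.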
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/Get.java
|
package water.api;
import dontweave.gson.JsonObject;
import java.util.Properties;
import water.*;
import water.util.Log;
public class Get extends Request {
protected H2OExistingKey _key = new H2OExistingKey(KEY);
@Override public NanoHTTPD.Response serve(NanoHTTPD server, Properties args, RequestType type) {
if( type == RequestType.json ) {
JsonObject resp = new JsonObject();
resp.addProperty(ERROR,"This request is only provided for browser connections");
return wrap(server, resp);
} else if( type != RequestType.www ) {
return super.serve(server, args, type);
}
String query = checkArguments(args, type);
if (query != null) return wrap(server,query,type);
try {
Value val = _key.value();
Key key = val._key;
if (!key.user_allowed())
return wrap(server,build(Response.error("Not a user key: " + key)));
// HTML file save of Value
NanoHTTPD.Response res = server.new Response(NanoHTTPD.HTTP_OK,NanoHTTPD.MIME_DEFAULT_BINARY,val.openStream());
res.addHeader("Content-Length", Long.toString(val.length()));
res.addHeader("Content-Disposition", "attachment; filename="+key.toString());
return res;
} catch( Throwable e ) {
return wrap(server,build(Response.error(e)));
}
}
@Override protected Response serve() {
throw new RuntimeException("Get should not be called from this context");
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/HTMLOnlyRequest.java
|
package water.api;
import java.util.Properties;
import water.NanoHTTPD;
import dontweave.gson.JsonObject;
/** Request that only supports browser (html, query, help, wiki) request types.
*
* When accessed from JSON throws.
*
* @author peta
*/
public abstract class HTMLOnlyRequest extends Request {
@Override final public NanoHTTPD.Response serve(NanoHTTPD server, Properties args, RequestType type) {
if (type == RequestType.json) {
JsonObject resp = new JsonObject();
resp.addProperty(ERROR,"This request is only provided for browser connections");
return wrap(server, resp);
}
return super.serve(server,args,type);
}
@Override protected Response serve() {
return Response.doneEmpty();
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/HTTP404.java
|
package water.api;
import java.util.Properties;
import water.NanoHTTPD;
import water.util.RString;
/**
*
* @author peta
*/
public class HTTP404 extends Request {
private transient final Str _error = new Str(ERROR,"Unknown error");
public HTTP404() {
_requestHelp = "Displays the HTTP 404 page with error specified in JSON"
+ " argument error.";
_error._requestHelp = "Error description for the 404. Generally the URL not found.";
}
@Override public Response serve() {
return Response.error(_error.value());
}
@Override protected String serveJava() {
return _error.value();
}
@Override public water.NanoHTTPD.Response serve(NanoHTTPD server, Properties parms, RequestType type) {
water.NanoHTTPD.Response r = super.serve(server, parms, type);
r.status = NanoHTTPD.HTTP_NOTFOUND;
return r;
}
private static final String _html =
"<h3>HTTP 404 - Not Found</h3>"
+ "<div class='alert alert-error'>%ERROR</div>"
;
@Override protected String build(Response response) {
StringBuilder sb = new StringBuilder();
sb.append("<div class='container'>");
sb.append("<div class='row-fluid'>");
sb.append("<div class='span12'>");
sb.append(buildResponseHeader(response));
RString str = new RString(_html);
str.replace("ERROR", response.error());
sb.append(str.toString());
sb.append("</div></div></div>");
return sb.toString();
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/HTTP500.java
|
package water.api;
import java.util.Properties;
import water.NanoHTTPD;
import water.util.RString;
/**
*
* @author peta
*/
public class HTTP500 extends Request {
private transient final Str _error = new Str(ERROR,"Unknown error");
public HTTP500() {
_requestHelp = "Displays the HTTP 500 page with error specified in JSON"
+ " argument error. This page is displayed when any unexpected"
+ " exception is returned from the request processing at any level.";
_error._requestHelp = "Error description for the 500. Generally the exception message.";
}
@Override public Response serve() {
return Response.error(_error.value());
}
@Override protected String serveJava() {
return _error.value();
}
@Override public water.NanoHTTPD.Response serve(NanoHTTPD server, Properties parms, RequestType type) {
// We can be in different thread => so check if _error is specified
if (!_error.specified()) { // We are handling exception - so try to find error message in parms
for (Argument arg : _arguments) {
arg.reset();
arg.check(this, parms.getProperty(arg._name,""));
}
}
water.NanoHTTPD.Response r = super.serve(server, parms, type);
r.status = NanoHTTPD.HTTP_INTERNALERROR;
return r;
}
private static final String _html =
"<h3>HTTP 500 - Internal Server Error</h3>"
+ "<div class='alert alert-error'>%ERROR</div>"
;
@Override protected String build(Response response) {
StringBuilder sb = new StringBuilder();
sb.append("<div class='container'>");
sb.append("<div class='row-fluid'>");
sb.append("<div class='span12'>");
sb.append(buildResponseHeader(response));
RString str = new RString(_html);
str.replace("ERROR", response.error());
sb.append(str.toString());
sb.append("</div></div></div>");
return sb.toString();
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/Handler.java
|
package water.api;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Properties;
import water.H2O.H2OCountedCompleter;
import water.H2O;
import water.schemas.HTTP500V1;
import water.schemas.Schema;
import water.util.Log;
public abstract class Handler<H extends Handler<H,S>,S extends Schema<H,S>> extends H2OCountedCompleter {
private long _t_start, _t_stop; // Start/Stop time in ms for the serve() call
/** Default supported versions: Version 2 onwards, not Version 1. Override
* in child handlers to e.g. support V1. */
protected int min_ver() { return 2; }
protected int max_ver() { return Integer.MAX_VALUE; }
/** Dumb Version->Schema mapping */
abstract protected S schema(int version);
// Invoke the handler with parameters. Can throw any exception the called handler can throw.
protected final Schema handle(int version, Method meth, Properties parms) throws Exception {
if( !(min_ver() <= version && version <= max_ver()) ) // Version check!
return new HTTP500V1(new IllegalArgumentException("Version "+version+" is not in range V"+min_ver()+"-V"+max_ver()));
// Make a version-specific Schema; primitive-parse the URL into the Schema,
// fill the Handler from the versioned Schema.
S s = schema(version).fillFrom(parms).fillInto((H)this); // Version-specific Schema
// Run the Handler in the Nano Thread (nano does not grok CPS!)
_t_start = System.currentTimeMillis();
try { meth.invoke(this); }
// Exception throws out of the invoked method turn into InvocationTargetException
// rather uselessly. Peel out the original exception & throw it.
catch( InvocationTargetException ite ) {
Throwable t = ite.getCause();
throw (t instanceof Exception) ? (Exception)t : new RuntimeException(t);
}
_t_stop = System.currentTimeMillis();
// Version-specific unwind from the Handler back into the Schema
return s.fillFrom((H)this);
}
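// Hypothetical sketch (not from the original source) of how a concrete handler plugs into
// handle() above: a subclass supplies its Schema mapping and a public no-arg action method,
// e.g.
//   public class CloudStatusHandler extends Handler<CloudStatusHandler, CloudStatusV2> {
//     @Override protected CloudStatusV2 schema(int version) { return new CloudStatusV2(); }
//     public void status() { /* fill handler fields; fillFrom(this) copies them back out */ }
//   }
// handle() fills the handler from the versioned schema, invokes the requested method
// reflectively, and unwinds the handler's fields back into the schema for the response.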
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/HitRatio.java
|
package water.api;
import water.util.Log;
import static water.util.ModelUtils.getPredictions;
import water.Func;
import water.MRTask2;
import water.UKV;
import water.fvec.Chunk;
import water.fvec.Frame;
import water.fvec.Vec;
import water.util.Utils;
import java.util.Arrays;
import java.util.Random;
public class HitRatio extends Func {
static final int API_WEAVER = 1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
public static final String DOC_GET = "Hit Ratio";
@API(help = "", required = true, filter = Default.class, json=true)
public Frame actual;
@API(help="Column of the actual results (will display vertically)", required=true, filter=actualVecSelect.class, json=true)
public Vec vactual;
class actualVecSelect extends VecClassSelect { actualVecSelect() { super("actual"); } }
@API(help = "", required = true, filter = Default.class, json=true)
public Frame predict;
@API(help = "Max. number of labels (K) to use for hit ratio computation", required = false, filter = Default.class, json = true)
private int max_k = 10;
public void set_max_k(int k) { max_k = k; }
@API(help = "Random number seed for breaking ties between equal probabilities", required = false, filter = Default.class, json = true)
private long seed = new Random().nextLong();
@API(help="domain of the actual response")
private String [] actual_domain;
@API(help="Hit ratios for k=1...K")
private float[] hit_ratios;
// public float[] hit_ratios() { return hit_ratios; }
public HitRatio() {}
@Override protected void init() throws IllegalArgumentException {
// Input handling
if( actual==null || predict==null )
throw new IllegalArgumentException("Missing actual or predict!");
if( vactual==null )
throw new IllegalArgumentException("Missing vactual!");
if (vactual.length() != predict.anyVec().length())
throw new IllegalArgumentException("Both arguments must have the same length!");
if (!vactual.isInt())
throw new IllegalArgumentException("Actual column must be integer class labels!");
}
@Override protected void execImpl() {
Vec va = null;
try {
va = vactual.toEnum(); // always returns TransfVec
actual_domain = va._domain;
if (max_k > predict.numCols()-1) {
Log.warn("Reducing Hitratio Top-K value to maximum value allowed: " + String.format("%,d", predict.numCols() - 1));
max_k = predict.numCols() - 1;
}
final Frame actual_predict = new Frame(predict.names().clone(), predict.vecs().clone());
actual_predict.replace(0, va); // place actual labels in first column
hit_ratios = new HitRatioTask(max_k, seed).doAll(actual_predict).hit_ratios();
} finally { // Delete adaptation vectors
if (va!=null) UKV.remove(va._key);
}
}
@Override public boolean toHTML( StringBuilder sb ) {
if (hit_ratios==null) return false;
sb.append("<div>");
DocGen.HTML.section(sb, "Hit Ratio for Multi-Class Classification");
DocGen.HTML.paragraph(sb, "(Frequency of actual class label to be among the top-K predicted class labels)");
DocGen.HTML.arrayHead(sb);
sb.append("<th>K</th>");
sb.append("<th>Hit Ratio</th>");
for (int k = 1; k<=max_k; ++k) sb.append("<tr><td>" + k + "</td><td>" + String.format("%.3f", hit_ratios[k-1]*100.) + "%</td></tr>");
DocGen.HTML.arrayTail(sb);
return true;
}
public void toASCII( StringBuilder sb ) {
if (hit_ratios==null) return;
sb.append("K Hit-ratio\n");
for (int k = 1; k<=max_k; ++k) sb.append(k + " " + String.format("%.3f", hit_ratios[k-1]*100.) + "%\n");
}
/**
* Update hit counts for given set of actual label and predicted labels
* This is to be called for each predicted row
* @param hits Array of length K, counting the number of hits (entries will be incremented)
* @param actual_label the actual class label
* @param pred_labels the K predicted class labels
*/
static void updateHits(long[] hits, int actual_label, int[] pred_labels) {
assert(hits != null);
for (long h : hits) assert(h >= 0);
assert(pred_labels != null);
assert(actual_label >= 0);
assert(hits.length == pred_labels.length);
//find the first occurrence of the actual label and increment all counters from there on
//do nothing if no hit
for (int k=0;k<pred_labels.length;++k) {
if (pred_labels[k] == actual_label) {
while (k<pred_labels.length) hits[k++]++;
}
}
}
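// Worked example (illustrative): with hits = {0,0,0}, actual_label = 2 and
// pred_labels = {5, 2, 9}, the match at k=1 increments hits[1] and hits[2],
// leaving hits = {0, 1, 1}.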
// Compute CMs for different thresholds via MRTask2
private static class HitRatioTask extends MRTask2<HitRatioTask> {
/* @OUT CMs */ public final float[] hit_ratios() {
float[] hit_ratio = new float[_K];
if (_count == 0) return new float[_K];
for (int i=0;i<_K;++i) {
hit_ratio[i] = ((float)_hits[i])/_count;
}
return hit_ratio;
}
/* IN K */
final private int _K;
/* IN Seed */
private long _seed;
/* Helper */
private long[] _hits; //the number of hits, length: K
private long _count; //the number of scored rows
HitRatioTask(int K, long seed) {
_K = K;
_seed = seed;
}
@Override public void map( Chunk[] cs ) {
_hits = new long[_K];
Arrays.fill(_hits, 0);
// pseudo-random tie breaking needs some bits to work with
final double[] tieBreaker = new double [] {
new Random(_seed).nextDouble(), new Random(_seed+1).nextDouble(),
new Random(_seed+2).nextDouble(), new Random(_seed+3).nextDouble() };
float [] preds = new float[cs.length];
// rows
for( int r=0; r < cs[0]._len; r++ ) {
if (cs[0].isNA0(r)) {
_count--;
continue;
}
final int actual_label = (int)cs[0].at80(r);
//predict K labels
for(int p=1; p < cs.length; p++) preds[p] = (float)cs[p].at0(r);
final int[] pred_labels = getPredictions(_K, preds, tieBreaker);
if (actual_label < cs.length-1) updateHits(_hits, actual_label, pred_labels);
}
_count += cs[0]._len;
}
@Override public void reduce( HitRatioTask other ) {
assert(other._K == _K);
_hits = Utils.add(_hits, other._hits);
_count += other._count;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/IOStatus.java
|
package water.api;
import dontweave.gson.*;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.concurrent.TimeUnit;
import water.*;
import water.util.Log;
import water.util.TimelineSnapshot;
public class IOStatus extends Request {
private static final String HISTOGRAM = "histogram";
public IOStatus() { _requestHelp = "Displays recent I/O activity."; }
// Delta-time for histogram summaries, in seconds
private static final int[] dts = new int[]{1,5,60,300};
@Override public Response serve() {
JsonObject response = new JsonObject();
final long[][] snapshot = TimeLine.system_snapshot();
final H2O cloud = TimeLine.CLOUD;
final int csz = cloud.size();
SimpleDateFormat sdf = new SimpleDateFormat("HH:mm:ss:SSS");
// Histograms! Per-8-i/o flavors (see Value.persist bits) per read/write per time-window
long durs[][][][] = new long[csz][8][2][dts.length]; // Duration from open-to-close
long blks[][][][] = new long[csz][8][2][dts.length]; // Blocked-for-i/o nanos
long sizs[][][][] = new long[csz][8][2][dts.length]; // Bytes moved
int cnts[][][][] = new int [csz][8][2][dts.length]; // Events in this bucket
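// Indexing convention for the four histograms above: [cloud node][persist flavor 0..7][0=write, 1=read][index into dts time windows].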
// Process all the timeline events
JsonArray iops = new JsonArray();
TimelineSnapshot events = new TimelineSnapshot(cloud, snapshot);
long now = System.currentTimeMillis(); // Time 'now' just AFTER the snapshot
for( TimelineSnapshot.Event event : events ) {
int flavor = event.is_io();
// if( flavor == -1 ) continue; //0 leads to problems below as nameOfPersist(0) == null
if( flavor <= 0 ) continue;
int nidx = event._nodeId;
int rw = event.send_recv();// 1 for receive or read
long ctms = event.ms(); // Close-time msec
long dura = event.ms_io();// Duration in msec open-to-close
long blkd = event.ns(); // Nano's in blocking i/o calls;
long size = event.size_io(); // Bytes read/written
// Collect histograms
for( int i=0; i<dts.length; i++ ) {
int dt = dts[i]*1000; // Duration of this histogram bucket, in ms
if( ctms+dt >= now ) { // Ends within the bucket?
durs[nidx][flavor][rw][i] += dura;
blks[nidx][flavor][rw][i] += blkd;
sizs[nidx][flavor][rw][i] += size;
cnts[nidx][flavor][rw][i] ++;
}
}
// Also dump the raw io ops
JsonObject iop = new JsonObject();
iop.addProperty("closeTime", sdf.format(new Date(ctms)));
iop.addProperty(Constants.NODE,cloud._memary[nidx].toString());
iop.addProperty("i_o",Value.nameOfPersist(flavor));
iop.addProperty("r_w",rw==0?"write":"read");
iop.addProperty("duration"+Constants.Suffixes.MILLIS,dura); // ms from open-to-close
iop.addProperty("blocked"+Constants.Suffixes.MILLIS,TimeUnit.MILLISECONDS.convert(blkd,TimeUnit.NANOSECONDS)); // ns in blocking i/o calls
iop.addProperty("size"+Constants.Suffixes.BYTES,size); // bytes read/written
iops.add(iop);
}
// Dump out histograms
JsonArray histo = new JsonArray();
for( int n=0; n<csz; n++ ) {
for( int i=0; i<8; i++ ) {
for( int j=0; j<2; j++ ) {
for( int k=0; k<dts.length; k++ ) {
if( cnts[n][i][j][k] != 0 ) {
JsonObject sum = new JsonObject();
sum.addProperty("cloud_node_idx",n);
sum.addProperty("i_o",Value.nameOfPersist(i));
sum.addProperty("r_w",j==0?"write":"read");
sum.addProperty("window",dts[k]);
double dur = durs[n][i][j][k]/1e3; // Duration
double blk = blks[n][i][j][k]/1e9; // Blocked
double siz = sizs[n][i][j][k]*1.0;
if( dur == 0.0 ) dur = blk; // Round-off error sometimes; fix div-by-0
sum.addProperty("effective"+Constants.Suffixes.BYTES_PER_SECOND, siz/dur);
sum.addProperty("peak" +Constants.Suffixes.BYTES_PER_SECOND, siz/blk);
histo.add(sum);
}
}
}
}
}
response.add(HISTOGRAM,histo);
response.add("raw_iops",iops);
Response r = Response.done(response);
r.setBuilder(HISTOGRAM, new HistogramBuilder());
return r;
}
private static class HistogramBuilder extends Builder {
@Override public String build(Response response, JsonElement je, String contextName) {
final H2O cloud = TimeLine.CLOUD;
final int csz = cloud.size();
// Painfully reverse the Json to a java array again
long ebws[][][][] = new long[csz][8][2][dts.length]; // Duration from open-to-close
long pbws[][][][] = new long[csz][8][2][dts.length]; // Duration from open-to-close
boolean f[][][]= new boolean[csz][8][2];
for (JsonElement e : je.getAsJsonArray() ) {
JsonObject jo = e.getAsJsonObject();
int nidx = jo.get("cloud_node_idx").getAsInt();
// Convert flavor string to a flavor index
String fs = jo.get("i_o").getAsString();
int flavor;
for( flavor=0; flavor<8; flavor++ )
if( fs.equals(Value.nameOfPersist(flavor)) )
break;
assert flavor < 8;
// Convert r/w string to 1/0
int r_w = jo.get("r_w").getAsString().equals("write") ? 0 : 1;
// Convert time-window value into time-window index
int window, widx = jo.get("window").getAsInt();
for( window=0; window < dts.length; window++ )
if( dts[window] == widx )
break;
ebws[nidx][flavor][r_w][window] = jo.get("effective"+Constants.Suffixes.BYTES_PER_SECOND).getAsLong();
pbws[nidx][flavor][r_w][window] = jo.get("peak" +Constants.Suffixes.BYTES_PER_SECOND).getAsLong();
f [nidx][flavor][r_w] = true;
}
StringBuilder sb = new StringBuilder();
for( int n=0; n<csz; n++ ) {
sb.append("<h4>").append(cloud._memary[n]).append("</h4>");
sb.append("<span style='display: inline-block;'>");
sb.append("<table class='table table-striped table-bordered'>");
// Header
sb.append("<tr>");
sb.append("<th>i/o</th><th>r/w</th>");
for( int i=0; i<dts.length; i++ )
sb.append("<th>").append(dts[i]).append("s </th>");
sb.append("</tr>");
// For all I/O flavors
for( int flavor=0; flavor<8; flavor++ ) {
if( !f[n][flavor][0] && !f[n][flavor][1] ) continue;
int rows = 0; // Compute rows for either read or write or both
if( f[n][flavor][0] ) rows++;
if( f[n][flavor][1] ) rows++;
sb.append("<tr>");
sb.append("<td rowspan=\""+rows+"\"><h4>").append(Value.nameOfPersist(flavor)).append("</h4></td>");
if( f[n][flavor][1] ) doRow(sb,ebws,pbws,n,flavor,1);
if( f[n][flavor][0] ) doRow(sb,ebws,pbws,n,flavor,0);
sb.append("</tr>");
}
sb.append("</table></span>");
}
return sb.toString();
}
}
// Do a single row, all time-windows
private static void doRow( StringBuilder sb, long[][][][] ebws, long[][][][] pbws, int nidx, int flavor, int r_w ) {
sb.append("<td> eff/peak ").append(r_w==0?"write":"read").append("</td>");
for( int i=0; i<dts.length; i++ ) {
sb.append("<td>");
long eff = ebws[nidx][flavor][r_w][i];
long peak= pbws[nidx][flavor][r_w][i];
if( eff > 0 || peak > 0 ) {
int scale = Math.max(PrettyPrint.byteScale(eff),PrettyPrint.byteScale(peak));
String s1 = PrettyPrint.bytes(eff,scale);
String s2 = s1.substring(0,s1.length()-3); // Strip units off
sb.append(s2).append(" / ").append(PrettyPrint.bytes(peak,scale)).append("/S");
}
sb.append("</td>");
}
sb.append("</tr>");
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/ImportFiles2.java
|
package water.api;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import org.apache.hadoop.fs.Path;
import tachyon.client.TachyonFS;
import tachyon.org.apache.thrift.TException;
import tachyon.thrift.ClientFileInfo;
import water.*;
import water.api.RequestServer.API_VERSION;
import water.fvec.*;
import water.persist.*;
import water.util.*;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class ImportFiles2 extends Request2 {
static final int API_WEAVER=1; // This file has auto-gen'd doc & json fields
static public DocGen.FieldDoc[] DOC_FIELDS; // Initialized from Auto-Gen code.
// This Request supports the HTML 'GET' command, and this is the help text
// for GET.
static final String DOC_GET =
"Map a file from the source (either localhost filesystem, HDFS, or S3) into H2O memory. Data is "+
"loaded lazily, when the Key is read (usually in a Parse2 command, to build " +
"a Frame key). (Warning: Every host in the cluster must have this file visible locally!)";
protected String parseLink(String k, String txt) { return Parse2.link(k, txt); }
String parse() { return "Parse2.query"; }
@Override
public API_VERSION[] supportedVersions() { return SUPPORTS_ONLY_V2; }
@API(help="Path to file/folder on either local disk/hdfs/s3",required=true,filter=GeneralFile.class,gridable=false)
String path;
@API(help="Common prefix for all successfully imported file keys")
String prefix;
@API(help="successfully imported files")
String [] files;
@API(help="keys of imported files")
String [] keys;
@API(help="files that failed to load")
String [] fails;
@API(help="Prior Keys that matched a prefix of the imported path, and were removed prior to (re)importing")
String[] dels;
public static Key[] importPath(String path){
File f = new File(path);
assert f.exists():"file not found: " + f.getAbsolutePath();
ImportFiles2 imp = new ImportFiles2();
imp.path = path;
imp.serve();
Key [] res = new Key[imp.keys.length];
for(int i = 0; i < res.length; ++i)
res[i] = Key.make(imp.keys[i]);
return res;
}
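// Illustrative usage (hypothetical path): Key[] keys = ImportFiles2.importPath("/tmp/covtype");
// imports every file visible under that path and returns the Keys created for them.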
/**
* Iterates over fields and their annotations, and creates argument handlers.
*/
@Override protected void registered(API_VERSION version) {
super.registered(version);
}
@Override protected Response serve() {
try{
if(path != null){
String p2 = path.toLowerCase();
if( false ) ;
else if( p2.startsWith("hdfs://" ) ) serveHdfs();
else if( p2.startsWith("s3n://" ) ) serveHdfs();
else if( p2.startsWith("maprfs:/" ) ) serveHdfs(); // "maprfs:/datasets" is legal
else if( p2.startsWith("s3://" ) ) serveS3();
else if( p2.startsWith("http://" ) ) serveHttp();
else if( p2.startsWith("https://") ) serveHttp();
else if( p2.startsWith("tachyon://")) serveTachyon();
else serveLocalDisk();
}
return Response.done(this);
} catch( Throwable e ) {
return Response.error(e);
}
}
protected void serveHdfs() throws IOException{
if (isBareS3NBucketWithoutTrailingSlash(path)) { path += "/"; }
Log.info("ImportHDFS processing (" + path + ")");
ArrayList<String> succ = new ArrayList<String>();
ArrayList<String> fail = new ArrayList<String>();
PersistHdfs.addFolder2(new Path(path), succ, fail);
keys = succ.toArray(new String[succ.size()]);
files = keys;
fails = fail.toArray(new String[fail.size()]);
this.prefix = getCommonPrefix(keys);
DKV.write_barrier();
}
protected void serveS3(){
Futures fs = new Futures();
assert path.startsWith("s3://");
path = path.substring(5);
int bend = path.indexOf('/');
if(bend == -1)bend = path.length();
String bucket = path.substring(0,bend);
String prefix = bend < path.length()?path.substring(bend+1):"";
AmazonS3 s3 = PersistS3.getClient();
if( !s3.doesBucketExist(bucket) )
throw new IllegalArgumentException("S3 Bucket " + bucket + " not found!");;
ArrayList<String> succ = new ArrayList<String>();
ArrayList<String> fail = new ArrayList<String>();
ObjectListing currentList = s3.listObjects(bucket, prefix);
while(true){
for(S3ObjectSummary obj:currentList.getObjectSummaries())
try {
succ.add(S3FileVec.make(obj,fs).toString());
} catch( Throwable e ) {
fail.add(obj.getKey());
Log.err("Failed to loadfile from S3: path = " + obj.getKey() + ", error = " + e.getClass().getName() + ", msg = " + e.getMessage());
}
if(currentList.isTruncated())
currentList = s3.listNextBatchOfObjects(currentList);
else
break;
}
keys = succ.toArray(new String[succ.size()]);
files = keys;
fails = fail.toArray(new String[fail.size()]);
this.prefix = getCommonPrefix(keys);
}
private void serveLocalDisk() {
File f = new File(path);
if(!f.exists())throw new IllegalArgumentException("File " + path + " does not exist!");
ArrayList<String> afiles = new ArrayList();
ArrayList<String> akeys = new ArrayList();
ArrayList<String> afails = new ArrayList();
ArrayList<String> adels = new ArrayList();
FileIntegrityChecker.check(f).syncDirectory(afiles,akeys,afails,adels);
files = afiles.toArray(new String[0]);
keys = akeys .toArray(new String[0]);
fails = afails.toArray(new String[0]);
dels = adels .toArray(new String[0]);
prefix = getCommonPrefix(keys);
}
protected void serveHttp() {
try {
java.net.URL url = new URL(path);
Key k = Key.make(path);
InputStream is = url.openStream();
if( is == null ) {
Log.err("Unable to open stream to URL " + path);
}
UploadFileVec.readPut(k, is);
fails = new String[0];
String[] filesArr = { path };
files = filesArr;
String[] keysArr = { k.toString() };
keys = keysArr;
this.prefix = getCommonPrefix(keys);
}
catch( Throwable e) {
String[] arr = { path };
fails = arr;
files = new String[0];
keys = new String[0];
}
}
private void serveTachyon() {
assert path.startsWith(PersistTachyon.PREFIX) : "Path "+path+" is not prefixed by tachyon prefix " + PersistTachyon.PREFIX;
TachyonFS client = null;
ArrayList<String> succ = new ArrayList<String>();
ArrayList<String> fail = new ArrayList<String>();
try {
String[] pathComponents = PersistTachyon.decode(path);
String serverUri = pathComponents[0];
// Be explicit: we could fall back to the default client URI, but it is better to throw an error
if (serverUri==null || serverUri.isEmpty()) throw new IllegalArgumentException("The " + path + " is not a legal URI - it is missing the tachyon server URI (e.g., tachyon://localhost:19998/)." );
client = ((PersistTachyon) Persist.I[Value.TACHYON]).createClient(PersistTachyon.PREFIX+serverUri);
String rootFolder = pathComponents[1];
List<ClientFileInfo> filesOnTFS= client.listStatus(rootFolder); // do a recursive descend
Futures fs = new Futures();
for (ClientFileInfo f : filesOnTFS ) {
try {
succ.add(TachyonFileVec.make(serverUri, f, fs).toString());
} catch (Throwable t) {
fail.add(f.getName());
Log.err("Failed to loadfile from Tachyon: path = " + f.path + ", error = " + t.getClass().getName() + ", msg = " + t.getMessage());
}
}
keys = succ.toArray(new String[succ.size()]);
files = keys;
fails = fail.toArray(new String[fail.size()]);
this.prefix = getCommonPrefix(keys);
} catch (IOException e) {
fillEmpty("Cannot access specified file(s) on tachyon FS, because " + e.getMessage());
} finally {
if (client!=null) try { client.close(); } catch (TException _ ) {};
}
}
private void fillEmpty(String failure) {
fails = new String[] {failure};
files = new String[0];
keys = new String[0];
}
private String getCommonPrefix(String[] keys) {
String prefix = new String();
if(keys.length > 0) prefix = keys[0];
for(int i = 1; i < keys.length; i++) {
String tmp = keys[i];
int j = 0;
for(; j < Math.min(prefix.length(), tmp.length()); j++) {
if(prefix.charAt(j) != tmp.charAt(j)) break;
}
prefix = prefix.substring(0, j);
}
return prefix;
}
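// Illustrative example: getCommonPrefix({"hdfs://nn/data/part-0", "hdfs://nn/data/part-1"})
// returns "hdfs://nn/data/part-", i.e. the longest character prefix shared by all keys.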
// HTML builder
@Override public boolean toHTML( StringBuilder sb ) {
if(files == null)return false;
if( files != null && files.length > 1 )
sb.append("<div class='alert'>")
.append(parseLink("*"+prefix+"*", "Parse all into hex format"))
.append(" </div>");
DocGen.HTML.title(sb,"files");
DocGen.HTML.arrayHead(sb);
for( int i=0; i<files.length; i++ )
sb.append("<tr><td><a href='"+parse()+"?source_key=").append(keys[i]).
append("'>").append(files[i]).append("</a></td></tr>");
DocGen.HTML.arrayTail(sb);
if( fails.length > 0 )
DocGen.HTML.array(DocGen.HTML.title(sb,"fails"),fails);
if( dels != null && dels.length > 0 )
DocGen.HTML.array(DocGen.HTML.title(sb,"Keys deleted before importing"),dels);
return true;
}
private boolean isBareS3NBucketWithoutTrailingSlash(String s) {
Pattern p = Pattern.compile("s3n://[^/]*");
Matcher m = p.matcher(s);
boolean b = m.matches();
return b;
}
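// Illustrative example: "s3n://mybucket" matches the pattern above (bare bucket, no trailing
// slash), so the caller appends a "/"; "s3n://mybucket/path" does not match.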
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/ImportHdfs.java
|
package water.api;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.Path;
import water.DKV;
import water.persist.PersistHdfs;
import water.util.Log;
import dontweave.gson.*;
import dontweave.gson.internal.Streams;
public class ImportHdfs extends Request {
public class PathArg extends TypeaheadInputText<String> {
public PathArg(String name) {
super(TypeaheadHdfsPathRequest.class, name, true);
}
@Override protected String parse(String input) throws IllegalArgumentException {
return input;
}
@Override protected String queryDescription() { return "existing HDFS path"; }
@Override protected String defaultValue() { return null; }
}
protected final PathArg _path = new PathArg(PATH);
public ImportHdfs() {
_requestHelp = "Imports the given HDFS path. All nodes in the "
+ "cloud must have permission to access the HDFS path.";
_path._requestHelp = "HDFS path to import.";
}
boolean isBareS3NBucketWithoutTrailingSlash(String s) {
Pattern p = Pattern.compile("s3n://[^/]*");
Matcher m = p.matcher(s);
boolean b = m.matches();
return b;
}
@Override
protected Response serve() {
String pstr = _path.value();
if (isBareS3NBucketWithoutTrailingSlash(_path.value())) { pstr = pstr + "/"; }
Log.info("ImportHDFS processing (" + pstr + ")");
JsonArray succ = new JsonArray();
JsonArray fail = new JsonArray();
try {
PersistHdfs.addFolder(new Path(pstr), succ, fail);
} catch( IOException e ) {
return Response.error(e);
}
DKV.write_barrier();
JsonObject json = new JsonObject();
json.add(NUM_SUCCEEDED, new JsonPrimitive(succ.size()));
json.add(SUCCEEDED, succ);
json.add(NUM_FAILED, new JsonPrimitive(fail.size()));
json.add(FAILED, fail);
Response r = Response.done(json);
r.setBuilder(SUCCEEDED + "." + KEY, new KeyCellBuilder());
// Add quick link
if (succ.size() > 1)
r.addHeader("<div class='alert'>" //
+ Parse.link("*"+pstr+"*", "Parse all into hex format") + " </div>");
return r;
}
}
|
0
|
java-sources/ai/h2o/h2o-classic/2.8/water
|
java-sources/ai/h2o/h2o-classic/2.8/water/api/ImportS3.java
|
package water.api;
import java.io.IOException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import water.DKV;
import water.Key;
import water.persist.PersistS3;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import dontweave.gson.*;
import water.util.Log;
public class ImportS3 extends Request {
public class BucketArg extends TypeaheadInputText<String> {
public BucketArg(String name) {
super(TypeaheadS3BucketRequest.class, name, true);
}
@Override
protected String parse(String input) throws IllegalArgumentException {
AmazonS3 s3 = PersistS3.getClient();
if( !s3.doesBucketExist(input) )
throw new IllegalArgumentException("S3 Bucket " + input + " not found!");
return input;
}
@Override
protected String queryDescription() {
return "existing S3 Bucket";
}
@Override
protected String defaultValue() {
return null;
}
}
protected final BucketArg _bucket = new BucketArg(BUCKET);
public ImportS3() {
_requestHelp = "Imports the given Amazon S3 Bucket. All nodes in the "
+ "cloud must have permission to access the Amazon bucket.";
_bucket._requestHelp = "Amazon S3 Bucket to import.";
}
public void processListing(ObjectListing listing, JsonArray succ, JsonArray fail){
for( S3ObjectSummary obj : listing.getObjectSummaries() ) {
try {
Key k = PersistS3.loadKey(obj);
JsonObject o = new JsonObject();
o.addProperty(KEY, k.toString());
o.addProperty(FILE, obj.getKey());
o.addProperty(VALUE_SIZE, obj.getSize());
succ.add(o);
} catch( IOException e ) {
JsonObject o = new JsonObject();
o.addProperty(FILE, obj.getKey());
o.addProperty(ERROR, e.getMessage());
fail.add(o);
}
}
}
@Override
protected Response serve() {
String bucket = _bucket.value();
Log.info("ImportS3 processing (" + bucket + ")");
JsonObject json = new JsonObject();
JsonArray succ = new JsonArray();
JsonArray fail = new JsonArray();
AmazonS3 s3 = PersistS3.getClient();
ObjectListing currentList = s3.listObjects(bucket);
processListing(currentList, succ, fail);
while(currentList.isTruncated()){
currentList = s3.listNextBatchOfObjects(currentList);
processListing(currentList, succ, fail);
}
json.add(NUM_SUCCEEDED, new JsonPrimitive(succ.size()));
json.add(SUCCEEDED, succ);
json.add(NUM_FAILED, new JsonPrimitive(fail.size()));
json.add(FAILED, fail);
DKV.write_barrier();
Response r = Response.done(json);
r.setBuilder(SUCCEEDED + "." + KEY, new KeyCellBuilder());
return r;
}
}
|