| index (int64) | repo_id (string) | file_path (string) | content (string) |
|---|---|---|---|
| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/RapidsHelpV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
/**
* Help for the rapids language
*/
public class RapidsHelpV3 extends SchemaV3<Iced, RapidsHelpV3> {
@API(help="Description of the rapids language.",
direction=API.Direction.OUTPUT)
public RapidsExpressionV3[] expressions;
public static class RapidsExpressionV3 extends SchemaV3<Iced, RapidsExpressionV3> {
@API(help="(Class) name of the language construct")
public String name;
@API(help="Code fragment pattern.")
public String pattern;
@API(help="Description of the functionality provided by this language construct.")
public String description;
}
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/RapidsMapFrameV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
import water.fvec.Frame;
import java.util.Map;
public class RapidsMapFrameV3 extends RapidsSchemaV3<Iced,RapidsMapFrameV3> {
@API(help="Frames", direction=API.Direction.OUTPUT)
public RapidsFrameV3[] frames;
@API(help="Keys of the map", direction=API.Direction.OUTPUT)
public RapidsStringsV3 map_keys;
public RapidsMapFrameV3() {}
public RapidsMapFrameV3(Map<String, Frame> fr) {
map_keys = new RapidsStringsV3(fr.keySet().toArray(new String[]{}));
int i = 0;
Frame[] framesFromMap = fr.values().toArray(new Frame[]{});
frames = new RapidsFrameV3[framesFromMap.length];
for (Frame frame : framesFromMap) {
this.frames[i++] = new RapidsFrameV3(frame);
}
}
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/RapidsNumberV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
public class RapidsNumberV3 extends RapidsSchemaV3<Iced,RapidsNumberV3> {
@API(help="Number result", direction=API.Direction.OUTPUT)
public double scalar;
public RapidsNumberV3() {}
public RapidsNumberV3(double d) { scalar = d; }
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/RapidsNumbersV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
public class RapidsNumbersV3 extends RapidsSchemaV3<Iced,RapidsNumbersV3> {
@API(help="Number array result", direction=API.Direction.OUTPUT)
public double[] scalar;
public RapidsNumbersV3() {}
public RapidsNumbersV3(double[] ds) {
scalar = ds;
}
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/RapidsSchemaV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
public class RapidsSchemaV3<I extends Iced, S extends RapidsSchemaV3<I, S>> extends RequestSchemaV3<I, S> {
@API(help="A Rapids AstRoot expression", direction=API.Direction.INPUT, required=true)
public String ast;
@API(help="Session key", direction=API.Direction.INPUT)
public String session_id;
@API(help="[DEPRECATED] Key name to assign Frame results", direction=API.Direction.INPUT)
public String id;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/RapidsStringV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
public class RapidsStringV3 extends RapidsSchemaV3<Iced,RapidsStringV3> {
@API(help="String result", direction=API.Direction.OUTPUT)
public String string;
public RapidsStringV3() {}
public RapidsStringV3(String s) { string = s; }
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/RapidsStringsV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
public class RapidsStringsV3 extends RapidsSchemaV3<Iced,RapidsStringsV3> {
@API(help="String array result", direction=API.Direction.OUTPUT)
public String[] string;
public RapidsStringsV3() {}
public RapidsStringsV3(String[] ss) { string = ss; }
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/RemoveAllV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
public class RemoveAllV3 extends RequestSchemaV3<Iced, RemoveAllV3> {
@API(direction = API.Direction.INPUT, help = "Keys of the models to retain", level = API.Level.critical)
public KeyV3[] retained_keys;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/RemoveV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
public class RemoveV3 extends RequestSchemaV3<Iced, RemoveV3> {
@API(help="Object to be removed.")
public KeyV3 key;
@API(help="If true, removal operation will cascade down the object tree.", direction = API.Direction.INPUT)
public boolean cascade;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/RequestSchemaV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
import water.api.Schema;
/**
* Base Schema class for all REST API requests, gathering up behavior common to all requests, such as the _exclude_fields parameter.
*/
public class RequestSchemaV3<I extends Iced, S extends RequestSchemaV3<I, S>> extends SchemaV3<I, S> {
@API(help="Comma-separated list of JSON field paths to exclude from the result, used like: " +
"\"/3/Frames?_exclude_fields=frames/frame_id/URL,__meta\"", direction=API.Direction.INPUT)
public String _exclude_fields = "";
// @API(help="Not yet implemented", direction=API.Direction.INPUT)
// public String _include_fields = "";
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/RouteV3.java |

package water.api.schemas3;
import water.api.API;
import water.api.Handler;
import water.api.Route;
import water.util.PojoUtils;
public final class RouteV3 extends SchemaV3<Route, RouteV3> {
@API(help="", direction=API.Direction.OUTPUT)
public String http_method;
@API(help="", direction=API.Direction.OUTPUT)
public String url_pattern;
@API(help="", direction=API.Direction.OUTPUT)
public String summary;
@API(help="", direction=API.Direction.OUTPUT)
public String api_name;
@API(help="", direction=API.Direction.OUTPUT)
public String handler_class;
@API(help="", direction=API.Direction.OUTPUT)
public String handler_method;
@API(help="", direction=API.Direction.OUTPUT)
public String input_schema;
@API(help="", direction=API.Direction.OUTPUT)
public String output_schema;
// NOTE: Java 7 captures and lets you look up subpatterns by name but won't give you the list of names, so we need this redundant list:
@API(help="", direction=API.Direction.OUTPUT)
public String[] path_params; // list of params we capture from the url pattern, e.g. for /17/MyComplexObj/(.*)/(.*)
@API(help="", direction=API.Direction.OUTPUT)
public String markdown;
public RouteV3() {}
public RouteV3(Route impl) { super(impl); }
@Override
public RouteV3 fillFromImpl(Route impl) {
PojoUtils.copyProperties(this, impl, PojoUtils.FieldNaming.ORIGIN_HAS_UNDERSCORES, new String[] {"url_pattern", "handler_class", "handler_method"} );
this.url_pattern = impl._url;
this.handler_class = impl._handler_class.toString();
this.handler_method = impl._handler_method.getName();
this.input_schema = Handler.getHandlerMethodInputSchema(impl._handler_method).getSimpleName();
this.output_schema = Handler.getHandlerMethodOutputSchema(impl._handler_method).getSimpleName();
return this;
}
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/SaveToHiveTableV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
import water.api.SaveToHiveTableHandler;
public class SaveToHiveTableV3 extends RequestSchemaV3<Iced, SaveToHiveTableV3> {
//Input fields
@API(help = "H2O Frame ID", required = true)
public KeyV3.FrameKeyV3 frame_id;
@API(help = "HIVE JDBC URL", required = true)
public String jdbc_url;
@API(help = "Name of table to save data to.", required = true)
public String table_name;
@API(help = "HDFS Path to where the table should be stored.")
public String table_path;
@API(help = "Storage format of the created table.", values = {"CSV", "PARQUET"})
public SaveToHiveTableHandler.HiveFrameSaver.Format format;
@API(help = "HDFS Path where to store temporary data.")
public String tmp_path;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/SchemaMetadataV3.java |

package water.api.schemas3;
import water.api.API;
import water.api.SchemaMetadata;
import water.util.PojoUtils;
import java.util.ArrayList;
public class SchemaMetadataV3 extends SchemaV3<SchemaMetadata, SchemaMetadataV3> {
@API(help="Version number of the Schema.")
public int version;
/**
* The simple schema (class) name, e.g. DeepLearningParametersV2, used in the schema metadata. Must not be
* changed after creation (treat as final).
*/
@API(help="Simple name of the Schema. NOTE: the schema_names form a single namespace.")
public String name;
@API(help="[DEPRECATED] This field is always the same as name.", direction=API.Direction.OUTPUT)
public String label;
/**
* The simple schema superclass name, e.g. ModelSchemaV3, used in the schema metadata. Must not be changed after
* creation (treat as final).
*/
@API(help="Simple name of the superclass of the Schema. NOTE: the schema_names form a single namespace.")
public String superclass;
@API(help="Simple name of H2O type that this Schema represents. Must not be changed after creation (treat as final).")
public String type;
@API(help="All the public fields of the schema", direction=API.Direction.OUTPUT)
public FieldMetadataV3[] fields;
@API(help="Documentation for the schema in Markdown format with GitHub extensions", direction=API.Direction.OUTPUT)
String markdown;
public SchemaMetadataV3() {}
public SchemaMetadataV3(SchemaMetadata impl) { super(impl); }
@Override
public SchemaMetadata createImpl() {
return new SchemaMetadata();
}
@Override
public SchemaMetadata fillImpl(SchemaMetadata impl) {
impl.fields = new ArrayList<>(this.fields.length);
for (FieldMetadataV3 s : this.fields)
impl.fields.add(s.createImpl());
return impl;
}
@Override
public SchemaMetadataV3 fillFromImpl(SchemaMetadata impl) {
PojoUtils.copyProperties(this, impl, PojoUtils.FieldNaming.CONSISTENT, new String[] {"fields"});
this.fields = new FieldMetadataV3[impl.fields.size()];
this.label = impl.name;
int i = 0;
for (SchemaMetadata.FieldMetadata f : impl.fields)
this.fields[i++] = new FieldMetadataV3().fillFromImpl(f); // TODO: version!
return this;
}
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/SchemaV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
import water.api.Schema;
/**
* Base Schema class for all v3 REST API objects.
*
* Any schema which is *NOT* used for requests (i.e. it cannot be an input to a REST API handler) should inherit from
* this class.
* However if a schema is used for requests, then it should inherit from {@link RequestSchemaV3}, which contains some
* additional fields common for all REST API requests.
*/
public class SchemaV3<I extends Iced, S extends SchemaV3<I,S>> extends Schema<I, S> {
@API(help="Metadata on this schema instance, to make it self-describing.", direction=API.Direction.OUTPUT)
public Meta __meta;
/**
* Metadata for a Schema, including the version, name and type. This information is included in all v3 REST API
* responses as a field in the Schema so that the payloads are self-describing, and it is also available through
* the /Metadata/schemas REST API endpoint for the purposes of REST service discovery.
*/
public static final class Meta extends Iced {
@API(help="Version number of this Schema. Must not be changed after creation (treat as final).", direction=API.Direction.OUTPUT)
public int schema_version;
@API(help="Simple name of this Schema. NOTE: the schema_names form a single namespace.", direction=API.Direction.OUTPUT)
public String schema_name;
@API(help="Simple name of H2O type that this Schema represents. Must not be changed after creation (treat as final).", direction=API.Direction.OUTPUT)
public String schema_type;
/** Default constructor used only for newInstance() in generic reflection-based code. */
public Meta() {}
/** Standard constructor which supplies all the fields. The fields should be treated as immutable once set. */
public Meta(int version, String name, String type) {
schema_version = version;
schema_name = name;
schema_type = type;
}
/** Used during markdown generation. */
public String toString() { return schema_name; }
}
public SchemaV3() { this(null); }
public SchemaV3(I impl) {
__meta = new Meta(getSchemaVersion(), getSchemaName(), getSchemaType());
if (impl != null)
this.fillFromImpl(impl);
}
public water.AutoBuffer writeJSON(water.AutoBuffer ab) {
// Ugly hack, but sometimes I find that __meta was not initialized by now; which means that constructor was
// somehow skipped, which means the object was created in roundabout way and then unsafely cast... Hope we'll
// find a proper solution to this issue eventually...
if (__meta == null)
__meta = new Meta(getSchemaVersion(), getSchemaName(), getSchemaType());
return super.writeJSON(ab);
}
}
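For orientation, a minimal sketch of the self-describing header that the Meta class above contributes to every v3 response; the concrete version/name/type values are illustrative assumptions, not taken from this source:
// Illustrative only: version, name and type below are hypothetical example values.
SchemaV3.Meta meta = new SchemaV3.Meta(3, "FrameV3", "Frame");
// In the JSON payload this serializes as:
//   "__meta": {"schema_version": 3, "schema_name": "FrameV3", "schema_type": "Frame"}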

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/SegmentModelsParametersV3.java |

package water.api.schemas3;
import hex.segments.SegmentModelsBuilder;
import water.api.API;
public class SegmentModelsParametersV3 extends SchemaV3<SegmentModelsBuilder.SegmentModelsParameters, SegmentModelsParametersV3> {
@API(help = "Uniquely identifies the collection of the segment models")
public KeyV3.SegmentModelsKeyV3 segment_models_id;
@API(help = "Enumeration of all segments for which to build models for")
public KeyV3.FrameKeyV3 segments;
@API(help = "List of columns to segment-by, models will be built for all segments in the data")
public String[] segment_columns;
@API(help = "Level of parallelism of bulk model building, it is the maximum number of models each H2O node will be building in parallel")
public int parallelism;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/SegmentModelsV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
public class SegmentModelsV3 extends SchemaV3<Iced, SegmentModelsV3> {
@API(help = "Segment Models id")
public KeyV3.SegmentModelsKeyV3 segment_models_id;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/SessionPropertyV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
public class SessionPropertyV3 extends RequestSchemaV3<Iced, SessionPropertyV3> {
@API(help="Session ID", direction = API.Direction.INOUT)
public String session_key;
@API(help="Property Key", direction = API.Direction.INOUT)
public String key;
@API(help="Property Value", direction = API.Direction.INOUT)
public String value;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/ShutdownV3.java |

package water.api.schemas3;
import water.api.ShutdownHandler;
public class ShutdownV3 extends RequestSchemaV3<ShutdownHandler.Shutdown,ShutdownV3> {
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/SignificantRulesV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
public class SignificantRulesV3 extends RequestSchemaV3<Iced, SignificantRulesV3> {
@API(help="Model id of interest", json = false)
public KeyV3.ModelKeyV3 model_id;
@API(help="The estimated coefficients and language representations (in case it is a rule) for each of the significant baselearners.", direction=API.Direction.OUTPUT)
public TwoDimTableV3 significant_rules_table;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/SplitFrameV3.java |

package water.api.schemas3;
import hex.SplitFrame;
import water.api.API;
import water.api.schemas3.KeyV3.FrameKeyV3;
public class SplitFrameV3 extends SchemaV3<SplitFrame, SplitFrameV3> {
@API(help="Job Key")
public KeyV3.JobKeyV3 key;
@API(help="Dataset")
public FrameKeyV3 dataset;
@API(help="Split ratios - resulting number of split is ratios.length+1", json=true)
public double[] ratios;
@API(help="Destination keys for each output frame split.", direction = API.Direction.INOUT)
public FrameKeyV3[] destination_frames;
public SplitFrameV3() {}
public SplitFrameV3(SplitFrame impl) { super(impl); }
@Override
public SplitFrame createImpl() { return new SplitFrame(); }
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/SteamMetricsV3.java |

package water.api.schemas3;
import water.api.API;
public class SteamMetricsV3 extends RequestSchemaV3<SteamMetricsV3.SteamMetrics, SteamMetricsV3> {
public static class SteamMetrics extends water.Iced<SteamMetrics> {
public SteamMetrics() {}
}
@API(help="Steam metrics API version", direction = API.Direction.OUTPUT)
public long version;
@API(help="Number of milliseconds that the cluster has been idle", direction = API.Direction.OUTPUT)
public long idle_millis;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/StringPairV3.java |

package water.api.schemas3;
import hex.StringPair;
import water.api.API;
import water.api.Schema;
public class StringPairV3 extends SchemaV3<StringPair, StringPairV3> implements Schema.AutoParseable {
@API(help = "Value A")
String a;
@API(help = "Value B")
String b;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/TabulateV3.java |

package water.api.schemas3;
import water.api.API;
import water.api.schemas3.KeyV3.FrameKeyV3;
import water.util.Tabulate;
public class TabulateV3 extends SchemaV3<Tabulate, TabulateV3> {
// INPUT
@API(help="Dataset", required = true)
public FrameKeyV3 dataset;
@API(help="Predictor", required = true, level = API.Level.critical, is_member_of_frames = {"dataset"}, is_mutually_exclusive_with = {"col_y"}, direction = API.Direction.INOUT)
public FrameV3.ColSpecifierV3 predictor;
@API(help="Response", required = true, level = API.Level.critical, is_member_of_frames = {"dataset"}, is_mutually_exclusive_with = {"col_x"}, direction = API.Direction.INOUT)
public FrameV3.ColSpecifierV3 response;
@API(help="Observation weights (optional)", required = false, level = API.Level.critical, is_member_of_frames = {"dataset"}, is_mutually_exclusive_with = {"col_x"}, direction = API.Direction.INOUT)
public FrameV3.ColSpecifierV3 weight;
@API(help="Number of bins for predictor column")
public int nbins_predictor;
@API(help="Number of bins for response column")
public int nbins_response;
// OUTPUT
@API(help="Counts table", direction = API.Direction.OUTPUT)
public TwoDimTableV3 count_table;
@API(help="Response table", direction = API.Direction.OUTPUT)
public TwoDimTableV3 response_table;
public TabulateV3() {}
public TabulateV3(Tabulate impl) { super(impl); }
@Override
public TabulateV3 fillFromImpl(Tabulate impl) {
super.fillFromImpl(impl);
return this;
}
@Override
public Tabulate createImpl() {
return new Tabulate();
}
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/TimelineV3.java |

package water.api.schemas3;
import water.*;
import water.api.API;
import water.api.TimelineHandler.Timeline;
import water.init.TimelineSnapshot;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
/**
* Display of a Timeline
*/
public class TimelineV3 extends RequestSchemaV3<Timeline,TimelineV3> {
// This schema has no input params
@API(help="Current time in millis.", direction=API.Direction.OUTPUT)
public long now;
@API(help="This node", direction=API.Direction.OUTPUT)
public String self;
@API(help="recorded timeline events", direction=API.Direction.OUTPUT)
public EventV3[] events;
public static class EventV3<I, S extends EventV3<I, S>> extends SchemaV3<Iced, S> {
@API(help="Time when the event was recorded. Format is hh:mm:ss:ms")
public final String date;
@API(help="Time in nanos")
public final long nanos;
enum EventType {unknown, heartbeat, network_msg, io}
@API(help="type of recorded event", values = {"unknown", "heartbeat", "network_msg", "io"})
public final EventType type;
@SuppressWarnings("unused")
public EventV3() { date = null; nanos = -1; type = EventType.unknown; }
private EventV3(EventType type, long millis, long nanos){
this.type = type;
this.date = new SimpleDateFormat("HH:mm:ss:SSS").format(new Date(millis));
this.nanos = nanos;
}
protected String who() { throw H2O.unimpl(); }
protected String ioType() { throw H2O.unimpl(); }
protected String event() { throw H2O.unimpl(); }
public String bytes() { throw H2O.unimpl(); }
} // Event
public static class HeartBeatEvent extends EventV3<Iced, HeartBeatEvent> {
@API(help = "number of sent heartbeats")
final int sends;
@API(help = "number of received heartbeats")
final int recvs;
public HeartBeatEvent() { super(); sends = -1; recvs = -1; }
private HeartBeatEvent(int sends, int recvs, long lastMs, long lastNs){
super(EventType.heartbeat,lastMs,lastNs);
this.sends = sends;
this.recvs = recvs;
}
@Override protected String who() { return "many -> many";}
@Override protected String ioType() {return "UDP";}
@Override protected String event() {return "heartbeat";}
@Override public String bytes() {return sends + " sent, " + recvs + " received";}
@Override public String toString() { return "HeartBeat(" + sends + " sends, " + recvs + " receives)"; }
} // HeartBeatEvent
public static class NetworkEvent extends EventV3<Iced, NetworkEvent> {
@API(help="Boolean flag distinguishing between sends (true) and receives(false)")
public final boolean is_send;
@API(help="network protocol (UDP/TCP)")
public final String protocol;
@API(help="UDP type (exec,ack, ackack,...")
public final String msg_type; // udp
@API(help="Sending node")
public final String from;
@API(help="Receiving node")
public final String to;
@API(help="Pretty print of the first few bytes of the msg payload. Contains class name for tasks.")
public final String data;
public NetworkEvent() { super(); is_send = false; protocol = "unknown"; msg_type = "unknown"; from = "unknown"; to = "unknown"; data = "unknown"; }
private NetworkEvent(long ms, long ns, boolean is_send, String protocol, String msg_type, String from, String to, String data){
super(EventType.network_msg,ms,ns);
this.is_send = is_send;
this.protocol = protocol;
this.msg_type = msg_type;
this.from = from;
this.to = to;
this.data = data;
}
@Override protected String who() { return from + " -> " + to;}
@Override protected String ioType() {return protocol;}
@Override protected String event() {return msg_type;}
@Override public String bytes() {return data;}
@Override public String toString() {
return "NetworkMsg(" + from + " -> " + to + ", protocol = '" + protocol + "', data = '" + data + "')";
}
} // NetworkEvent
public static class IOEvent extends EventV3<Iced, IOEvent> {
@API(help="flavor of the recorded io (ice/hdfs/...)")
public final String io_flavor;
@API(help="node where this io event happened")
public final String node;
@API(help="data info")
public final String data;
public IOEvent() { this(-1, -1, "unknown", "unknown", "unknown"); }
private IOEvent(long ms, long ns, String node, String io_flavor, String data){
super(EventType.io,ms,ns);
this.io_flavor = io_flavor;
this.node = node;
this.data = data;
}
@Override protected String who(){return node;}
@Override protected String ioType() {return io_flavor;}
@Override protected String event() {return "i_o";}
@Override public String bytes() { return data;}
@Override public String toString() { return "I_O('" + io_flavor + "')"; }
} // IOEvent
@Override public TimelineV3 fillFromImpl(Timeline timeline) {
ArrayList<EventV3> outputEvents = new ArrayList<>();
ArrayList<TimelineSnapshot.Event> heartbeats = new ArrayList<>();
H2O cloud = TimeLine.getCLOUD();
if (null != timeline.snapshot) {
for (TimelineSnapshot.Event event : timeline.snapshot) {
H2ONode h2o = cloud.members()[event._nodeId];
// The event type. First get payload.
UDP.udp msgType = event.udpType();
// Accumulate repeated heartbeats
if (msgType == UDP.udp.heartbeat) {
heartbeats.add(event);
continue;
}
// Now dump out accumulated heartbeats
if (!heartbeats.isEmpty()) {
long firstMs = heartbeats.get(0).ms();
long lastMs = heartbeats.get(heartbeats.size() - 1).ms();
int totalSends = 0;
int totalRecvs = 0;
int totalDrops = 0;
int[] sends = new int[cloud.size()];
int[] recvs = new int[cloud.size()];
for (TimelineSnapshot.Event h : heartbeats) {
if (h.isSend()) {
++totalSends;
++sends[h._nodeId];
} else if (h.isDropped()) {
++totalDrops;
} else {
++totalRecvs;
++recvs[h._nodeId];
}
}
heartbeats.clear();
outputEvents.add(new HeartBeatEvent(totalSends, totalRecvs, firstMs, lastMs));
}
long ms = event.ms();
long ns = event.ns();
if (msgType == UDP.udp.i_o) { // handle io event
outputEvents.add(new IOEvent(ms, ns, event.recoH2O().toString(), event.ioflavor(), UDP.printx16(event.dataLo(), event.dataHi())));
} else { // network msg
String from, to;
if (event.isSend()) {
from = h2o.toString();
to = event.packH2O() == null ? "multicast" : event.packH2O().toString();
} else {
from = event.packH2O().toString();
to = h2o.toString();
}
outputEvents.add(new NetworkEvent(ms, ns, event.isSend(), event.isTCP() ? "TCP" : "UDP", msgType.toString(), from, to, UDP.printx16(event.dataLo(), event.dataHi())));
}
}
} // if timeline.snapshot
events = outputEvents.toArray(new EventV3[outputEvents.size()]);
return this;
}
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/TwoDimTableV3.java |

package water.api.schemas3;
import water.AutoBuffer;
import water.H2O;
import water.Iced;
import water.IcedWrapper;
import water.api.API;
import water.util.TwoDimTable;
/**
* Client-facing Schema of a TwoDimTable
* Notes:
* 1) We embed the rowHeaders into the table, extending it by 1 column
* 2) We store all the data in column-major order
* 3) We store all the data in String format
*
*/
public class TwoDimTableV3 extends SchemaV3<TwoDimTable, TwoDimTableV3> {
public static class ColumnSpecsBase extends SchemaV3<Iced, ColumnSpecsBase> {
@API(help="Column Name", direction=API.Direction.OUTPUT)
public String name; // allow reset of col header names so that I can undo the pythonize for GLM multinomial coeff names
@API(help="Column Type", direction=API.Direction.OUTPUT)
String type;
@API(help="Column Format (printf)", direction=API.Direction.OUTPUT)
String format;
@API(help="Column Description", direction=API.Direction.OUTPUT)
String description;
}
@API(help="Table Name", direction=API.Direction.OUTPUT)
public String name;
@API(help="Table Description", direction=API.Direction.OUTPUT)
public String description;
@API(help="Column Specification", direction=API.Direction.OUTPUT)
public ColumnSpecsBase[] columns;
@API(help="Number of Rows", direction=API.Direction.OUTPUT)
public int rowcount;
@API(help="Table Data (col-major)", direction=API.Direction.OUTPUT)
public IcedWrapper[][] data;
public TwoDimTableV3() {}
public TwoDimTableV3(TwoDimTable impl) { super(impl); }
/**
* Fill a TwoDimTable Schema from a TwoDimTable
* @param t TwoDimTable
* @return TwoDimTableSchema
*/
@Override
public TwoDimTableV3 fillFromImpl(TwoDimTable t) {
name = t.getTableHeader();
description = t.getTableDescription();
final int rows = t.getRowDim();
rowcount = rows;
boolean have_row_header_cols = t.getColHeaderForRowHeaders() != null;
for (int r=0; r<rows; ++r) {
if (!have_row_header_cols) break;
have_row_header_cols &= t.getRowHeaders()[r] != null;
}
if (have_row_header_cols) {
final int cols = t.getColDim()+1;
columns = new ColumnSpecsBase[cols];
columns[0] = new ColumnSpecsBase();
columns[0].name = pythonify(t.getColHeaderForRowHeaders());
columns[0].type = "string";
columns[0].format = "%s";
columns[0].description = t.getColHeaderForRowHeaders();
for (int c = 1; c < cols; ++c) {
columns[c] = new ColumnSpecsBase();
columns[c].name = pythonify(t.getColHeaders()[c - 1]);
columns[c].type = t.getColTypes()[c - 1];
columns[c].format = t.getColFormats()[c - 1];
columns[c].description = t.getColHeaders()[c - 1];
}
data = new IcedWrapper[cols][rows];
data[0] = new IcedWrapper[t.getRowDim()];
for (int r = 0; r < t.getRowDim(); ++r) {
data[0][r] = new IcedWrapper(t.getRowHeaders()[r]);
}
IcedWrapper[][] cellValues = t.getCellValues();
for (int c = 1; c < cols; ++c) {
data[c] = new IcedWrapper[rows];
for (int r = 0; r < rows; ++r) {
data[c][r] = cellValues[r][c - 1];
}
}
} else {
final int cols = t.getColDim();
columns = new ColumnSpecsBase[cols];
for (int c = 0; c < cols; ++c) {
columns[c] = new ColumnSpecsBase();
columns[c].name = pythonify(t.getColHeaders()[c]);
columns[c].type = t.getColTypes()[c];
columns[c].format = t.getColFormats()[c];
columns[c].description = t.getColHeaders()[c];
}
data = new IcedWrapper[cols][rows];
IcedWrapper[][] cellValues = t.getCellValues();
for (int c = 0; c < cols; ++c) {
data[c] = new IcedWrapper[rows];
for (int r = 0; r < rows; ++r) {
data[c][r] = cellValues[r][c];
}
}
}
return this;
}
/**
* Turn a description such as "Avg. Training MSE" into a JSON-usable field name "avg_training_mse"
* @param n column description to convert
* @return JSON-usable field name
*/
private String pythonify(String n) {
if (n == null || name.toLowerCase().contains("confusion")) return n;
StringBuilder sb = new StringBuilder();
String [] modified = n.split("[\\s_]+");
for (int i=0; i<modified.length; ++i) {
if (i!=0) sb.append("_");
String s = modified[i];
// if (!s.matches("^[A-Z]{2,3}$")) {
sb.append(s.toLowerCase()); //everything goes lowercase
// } else {
// sb.append(s);
// }
}
String newString = sb.toString().replaceAll("[^\\w]", "");
// if (!newString.equals(name)) {
// Log.warn("Turning column description into field name: " + name + " --> " + newString);
// }
return newString;
}
/**
* Fill a TwoDimTable from this Schema
* @param impl TwoDimTable to fill
* @return the filled TwoDimTable
*/
public TwoDimTable fillImpl(TwoDimTable impl) {
final int rows = data[0].length;
assert(rows == rowcount);
final int cols = data.length+1;
String tableHeader = name;
String tableDescription = description;
String colHeaderForRowHeaders = columns[0].name;
String[] rowHeaders = new String[rows];
for (int r=0; r<rows; ++r) {
rowHeaders[r] = (String)data[0][r].get();
}
String[] colHeaders = new String[cols];
colHeaders[0] = "";
for (int c=1; c<cols; ++c) {
colHeaders[c] = columns[c].description;
}
String[] colTypes = new String[cols];
colTypes[0] = "";
for (int c=1; c<cols; ++c) {
colTypes[c] = columns[c].type;
}
String[] colFormats = new String[cols];
colFormats[0] = "%s";
for (int c=1; c<cols; ++c) {
colFormats[c] = columns[c].format;
}
String[][] strCellValues = new String[rows][cols];
double[][] dblCellValues = new double[rows][cols];
for (int r=0; r<data[0].length; ++r) {
for (int c=0; c<data.length; ++c) {
try {
if (columns[c].type.equals("string")) { // switch(String) is not java1.6 compliant! (compare the column type, not the printf format)
strCellValues[r][c] = (String)data[c][r].get();
}
else if (columns[c].type.equals("double")) {
dblCellValues[r][c] = (Double)data[c][r].get();
}
else if (columns[c].type.equals("float")) {
dblCellValues[r][c] = (Float)data[c][r].get();
}
else if (columns[c].type.equals("int")) {
dblCellValues[r][c] = (Integer)data[c][r].get();
}
else if (columns[c].type.equals("long")) {
dblCellValues[r][c] = (Long)data[c][r].get();
}
else throw H2O.fail();
} catch (ClassCastException e) {
throw new RuntimeException(e);
}
}
}
return new TwoDimTable(tableHeader, tableDescription, rowHeaders, colHeaders, colTypes, colFormats, colHeaderForRowHeaders, strCellValues, dblCellValues);
}
public final AutoBuffer writeJSON_impl(AutoBuffer ab) {
ab.putJSONStr("name",name);
ab.put1(',');
ab.putJSONStr("description",description);
ab.put1(',');
ab.putJSONStr("columns").put1(':');
ab.put1('[');
if( columns!=null ) {
for (int i = 0; i < columns.length; ++i) {
columns[i].writeJSON(ab);
if (i < columns.length - 1) ab.put1(',');
}
}
ab.put1(']');
ab.put1(',');
ab.putJSON4("rowcount", rowcount);
ab.put1(',');
ab.putJSONStr("data").put1(':');
ab.put1('[');
if( data!=null ) {
for (int i = 0; i < data.length; ++i) {
ab.put1('[');
for (int j = 0; j < data[i].length; ++j) {
if (data[i][j] == null || data[i][j].get() == null) {
ab.putJNULL();
} else {
data[i][j].writeUnwrappedJSON(ab);
}
if (j < data[i].length - 1) ab.put1(',');
}
ab.put1(']');
if (i < data.length - 1) ab.put1(',');
}
}
ab.put1(']');
return ab;
}
}
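To make the column-major layout described in the class comment concrete, a small sketch with hypothetical values: a 2x2 table with row headers r1/r2 and columns c1/c2 fills the schema as follows.
// Hypothetical 2x2 table with row headers; fillFromImpl produces:
//   columns = [rowHeaderCol, "c1", "c2"]            // row headers become column 0
//   data    = [["r1","r2"], [v11, v21], [v12, v22]] // data[c][r], col-major
// so cell (r, c) of the original table lives at data[c+1][r].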

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/TypeaheadV3.java |

package water.api.schemas3;
import water.Iced;
import water.api.API;
public class TypeaheadV3 extends RequestSchemaV3<Iced,TypeaheadV3> {
// Input fields
@API(help="training_frame", required=true)
public String src;
@API(help="limit")
public int limit;
// Output fields
@API(help="matches", direction=API.Direction.OUTPUT)
public String[] matches;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/UnlockKeysV3.java |

package water.api.schemas3;
import water.Iced;
public class UnlockKeysV3 extends RequestSchemaV3<Iced, UnlockKeysV3> {
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/ValidationMessageV3.java |

package water.api.schemas3;
import hex.ModelBuilder;
import hex.ModelBuilder.ValidationMessage;
import water.api.API;
import water.util.Log;
import water.util.PojoUtils;
import java.util.HashMap;
import java.util.Map;
/**
* Model builder parameter validation message schema.
*/
public final class ValidationMessageV3 extends SchemaV3<ValidationMessage, ValidationMessageV3> {
@API(help = "Type of validation message (ERROR, WARN, INFO, HIDE)", direction = API.Direction.OUTPUT)
public String message_type;
@API(help = "Field to which the message applies", direction = API.Direction.OUTPUT)
public String field_name;
@API(help = "Message text", direction = API.Direction.OUTPUT)
public String message;
/**
* Map impl field names in the validation messages to schema field names,
* called <i>after</i> behavior of stripping leading _ characters.
*/
public static void mapValidationMessageFieldNames(ValidationMessageV3[] validation_messages, String[] from, String[] to) {
if (from == null && to == null)
return;
if (from == null || to == null)
throw new IllegalArgumentException("Bad parameter name translation arrays; one is null and the other isn't.");
Map<String, String> translations = new HashMap<>();
for (int i = 0; i < from.length; i++) {
translations.put(from[i], to[i]);
}
for (ValidationMessageV3 vm : validation_messages) {
if (null == vm) {
Log.err("Null ValidationMessageV3 for ModelBuilderSchema.");
continue;
}
if (null == vm.field_name) {
Log.err("Null field_name: " + vm);
continue;
}
if (translations.containsKey(vm.field_name))
vm.field_name = translations.get(vm.field_name);
}
}
public ValidationMessage createImpl() {
return new ModelBuilder.ValidationMessage(Log.valueOf(message_type), field_name, message);
}
// Version&Schema-specific filling from the implementation object
public ValidationMessageV3 fillFromImpl(ValidationMessage vm) {
PojoUtils.copyProperties(this, vm, PojoUtils.FieldNaming.ORIGIN_HAS_UNDERSCORES);
this.message_type = Log.LVLS[vm.log_level()]; // field name changed
if (this.field_name != null) {
if (this.field_name.startsWith("_"))
this.field_name = this.field_name.substring(1);
else
Log.warn("Expected all ValidationMessage field_name values to have leading underscores; ignoring: " + field_name);
}
return this;
}
}
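A hypothetical use of mapValidationMessageFieldNames, assuming an impl-side field _foo whose schema-side name is foo_param (both names are made up for illustration):
// Hypothetical: the impl field _foo (underscore already stripped to "foo")
// is exposed in the schema as "foo_param", so rewrite the message to match.
ValidationMessageV3 vm = new ValidationMessageV3();
vm.field_name = "foo";
ValidationMessageV3.mapValidationMessageFieldNames(new ValidationMessageV3[]{vm}, new String[]{"foo"}, new String[]{"foo_param"});
// vm.field_name is now "foo_param"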

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/VarImpV3.java |

package water.api.schemas3;
import hex.VarImp;
import water.api.API;
public class VarImpV3 extends SchemaV3<VarImp,VarImpV3> {
@API(help="Variable importance of individual variables", direction=API.Direction.OUTPUT)
public float[] varimp;
@API(help="Names of variables", direction=API.Direction.OUTPUT)
protected String[] names;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/WaterMeterCpuTicksV3.java |

package water.api.schemas3;
import water.api.API;
import water.util.PojoUtils;
import water.util.WaterMeterCpuTicks;
public class WaterMeterCpuTicksV3 extends RequestSchemaV3<WaterMeterCpuTicks, WaterMeterCpuTicksV3> {
@API(help="Index of node to query ticks for (0-based)", required = true, direction = API.Direction.INPUT)
public int nodeidx;
@API(help="array of tick counts per core", direction = API.Direction.OUTPUT)
public long[][] cpu_ticks;
// Version&Schema-specific filling into the implementation object
public WaterMeterCpuTicks createImpl() {
WaterMeterCpuTicks obj = new WaterMeterCpuTicks();
PojoUtils.copyProperties(obj, this, PojoUtils.FieldNaming.CONSISTENT);
return obj;
}
// Version&Schema-specific filling from the implementation object
public WaterMeterCpuTicksV3 fillFromImpl(WaterMeterCpuTicks i) {
PojoUtils.copyProperties(this, i, PojoUtils.FieldNaming.CONSISTENT);
return this;
}
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas3/WaterMeterIoV3.java |

package water.api.schemas3;
import water.api.API;
import water.util.PojoUtils;
import water.util.WaterMeterIo;
public class WaterMeterIoV3 extends RequestSchemaV3<WaterMeterIo, WaterMeterIoV3> {
@API(help="Index of node to query ticks for (0-based)", direction = API.Direction.INPUT)
public int nodeidx;
@API(help="array of IO info", direction = API.Direction.OUTPUT)
public WaterMeterIo.IoStatsEntry[] persist_stats;
// Version&Schema-specific filling into the implementation object
public WaterMeterIo createImpl() {
WaterMeterIo obj = new WaterMeterIo();
PojoUtils.copyProperties(obj, this, PojoUtils.FieldNaming.CONSISTENT);
return obj;
}
// Version&Schema-specific filling from the implementation object
public WaterMeterIoV3 fillFromImpl(WaterMeterIo i) {
PojoUtils.copyProperties(this, i, PojoUtils.FieldNaming.CONSISTENT);
return this;
}
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4/EndpointV4.java |

package water.api.schemas4;
import water.api.API;
import water.api.Handler;
import water.api.Route;
public class EndpointV4 extends OutputSchemaV4<Route, EndpointV4> {
@API(help="Method+Url of the request; variable parts are enclosed in curly braces. For example: " +
"/4/schemas/{schema_name}")
public String url;
@API(help="Short description of the functionality provided by the endpoint.")
public String description;
@API(help="Unique name of the endpoint. These names can be used to look up the endpoint's info via " +
"GET /4/endpoints/{name}.")
public String name;
// TODO: more explanation -- how input object corresponds to the actual request
@API(help="Input schema.")
public String input_schema;
@API(help="Schema for the result returned by the endpoint.")
public String output_schema;
@Override
public EndpointV4 fillFromImpl(Route route) {
url = route._http_method + " " + route._url;
description = route._summary;
name = route._api_name;
input_schema = "/4/schemas/" + Handler.getHandlerMethodInputSchema(route._handler_method).getSimpleName();
output_schema = "/4/schemas/" + Handler.getHandlerMethodOutputSchema(route._handler_method).getSimpleName();
return this;
}
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4/EndpointsListV4.java |

package water.api.schemas4;
import water.Iced;
import water.api.API;
/**
* List of endpoints, returned by GET /4/endpoints
*/
public class EndpointsListV4 extends OutputSchemaV4<Iced, EndpointsListV4> {
@API(help="List of endpoints in H2O REST API (v4).")
public EndpointV4[] endpoints;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4/InputSchemaV4.java |

package water.api.schemas4;
import water.Iced;
import water.api.API;
import water.api.Schema;
/**
* Base Schema class for all v4 REST API requests. It provides the common _fields input field used to filter which fields are returned in the output.
*/
public class InputSchemaV4<I extends Iced, S extends InputSchemaV4<I,S>> extends Schema<I,S> {
@API(help="Filter on the set of output fields: if you set _fields=\"foo,bar,baz\", then only those fields will be " +
"included in the output; or you can specify _fields=\"-goo,gee\" to include all fields except goo and gee. If " +
"the result contains nested data structures, then you can refer to the fields within those structures as well. " +
"For example if you specify _fields=\"foo(oof),bar(-rab)\", then only fields foo and bar will be included, and " +
"within foo there will be only field oof, whereas within bar all fields except rab will be reported.")
public String _fields;
}
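A few hypothetical requests illustrating the _fields filter syntax described in the help string above (the endpoint and field names are examples only):
// GET /4/endpoints?_fields=endpoints(name,url)  -> only name and url of each endpoint
// GET /4/endpoints?_fields=-__schema            -> everything except the __schema header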

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4/ListRequestV4.java |

package water.api.schemas4;
import water.Iced;
/**
* Common input schema class for endpoints that request collections of objects. For example,
* GET /4/schemas
* GET /4/frames
* GET /4/models
* etc.
* This class is a placeholder right now, but eventually it can host functionality such as filtering/sorting the
* results, providing cursor capabilities, etc.
* TODO add cursor fields {limit} and {offset}
*/
public class ListRequestV4 extends OutputSchemaV4<Iced, ListRequestV4> {
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4/ModelInfoV4.java |

package water.api.schemas4;
import hex.ModelBuilder;
import water.api.API;
/**
* Lightweight information profile about each model.
*/
public class ModelInfoV4 extends OutputSchemaV4<ModelBuilder, ModelInfoV4> {
@API(help="Algorithm name, such as 'gbm', 'deeplearning', etc.")
public String algo;
@API(help="Development status of the algorithm: alpha, beta, or stable.")
public String maturity;
@API(help="Does the model support generation of POJOs?")
public boolean have_pojo;
@API(help="Does the model support generation of MOJOs?")
public boolean have_mojo;
@API(help="Mojo version number for this algorithm.")
public String mojo_version;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4/ModelsInfoV4.java |

package water.api.schemas4;
import water.Iced;
import water.api.API;
/**
* List of models, returned by GET /4/modelsinfo
*/
public class ModelsInfoV4 extends OutputSchemaV4<Iced, ModelsInfoV4> {
@API(help="Generic information about each model supported in H2O.")
public ModelInfoV4[] models;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4/OutputSchemaV4.java |

package water.api.schemas4;
import water.Iced;
import water.api.API;
import water.api.Schema;
/**
* Base output Schema class for all v4 REST API requests. It provides common __schema field that identifies the
* schema in the output.
*/
public class OutputSchemaV4<I extends Iced, S extends OutputSchemaV4<I,S>> extends Schema<I,S> {
@API(help="Url describing the schema of the current object.")
public String __schema;
public OutputSchemaV4() {
__schema = "/4/schemas/" + this.getSchemaName();
}
}
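Concretely, the constructor above means that, for example, a JobV4 response presumably carries:
// "__schema": "/4/schemas/JobV4"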

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4/SessionIdV4.java |

package water.api.schemas4;
import water.Iced;
import water.api.API;
public class SessionIdV4 extends OutputSchemaV4<Iced, SessionIdV4> {
@API(help="Session ID")
public String session_key;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4/input/CreateFrameOriginalIV4.java |

package water.api.schemas4.input;
import hex.createframe.recipes.OriginalCreateFrameRecipe;
import water.api.API;
import water.api.schemas3.KeyV3;
import water.api.schemas4.InputSchemaV4;
/**
* Input schema for `POST /4/frames/$original` endpoint.
*/
public class CreateFrameOriginalIV4 extends InputSchemaV4<OriginalCreateFrameRecipe, CreateFrameOriginalIV4> {
@API(help="destination key")
public KeyV3.FrameKeyV3 dest;
@API(help = "Number of rows")
public int rows;
@API(help = "Number of data columns (in addition to the first response column)")
public int cols;
@API(help = "Random number seed that determines the random values")
public long seed;
@API(help = "Whether frame should be randomized")
public boolean randomize;
@API(help = "Constant value (for randomize=false)")
public long value;
@API(help = "Range for real variables (-range ... range)")
public double real_range;
@API(help = "Fraction of categorical columns (for randomize=true)")
public double categorical_fraction;
@API(help = "Factor levels for categorical variables")
public int factors;
@API(help = "Fraction of integer columns (for randomize=true)")
public double integer_fraction;
@API(help = "Range for integer variables (-range ... range)")
public int integer_range;
@API(help = "Fraction of binary columns (for randomize=true)")
public double binary_fraction;
@API(help = "Fraction of 1's in binary columns")
public double binary_ones_fraction;
@API(help = "Fraction of date/time columns (for randomize=true)")
public double time_fraction;
@API(help = "Fraction of string columns (for randomize=true)")
public double string_fraction;
@API(help = "Fraction of missing values")
public double missing_fraction;
@API(help = "Whether an additional response column should be generated")
public boolean has_response;
@API(help = "Number of factor levels of the first column (1=real, 2=binomial, N=multinomial)")
public int response_factors;
@API(help = "For real-valued response variable: Whether the response should be positive only.")
public boolean positive_response;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4/input/CreateFrameSimpleIV4.java |

package water.api.schemas4.input;
import hex.createframe.recipes.SimpleCreateFrameRecipe;
import water.api.API;
import water.api.schemas3.KeyV3;
import water.api.schemas4.InputSchemaV4;
/**
* Input schema for `POST /4/frames/$simple` endpoint.
*/
public class CreateFrameSimpleIV4 extends InputSchemaV4<SimpleCreateFrameRecipe, CreateFrameSimpleIV4> {
@API(help = "Id for the frame to be created.")
public KeyV3.FrameKeyV3 dest;
@API(help = "Random number seed that determines the random values.")
public long seed;
@API(help = "Number of rows.")
public int nrows;
@API(help = "Number of real-valued columns. Values in these columns will be uniformly distributed between " +
"real_lb and real_ub.")
public int ncols_real;
@API(help = "Number of integer columns.")
public int ncols_int;
@API(help = "Number of enum (categorical) columns.")
public int ncols_enum;
@API(help = "Number of boolean (binary) columns.")
public int ncols_bool;
@API(help = "Number of string columns.")
public int ncols_str;
@API(help = "Number of time columns.")
public int ncols_time;
@API(help = "Lower bound for the range of the real-valued columns.")
public double real_lb;
@API(help = "Upper bound for the range of the real-valued columns.")
public double real_ub;
@API(help = "Lower bound for the range of integer columns.")
public int int_lb;
@API(help = "Upper bound for the range of integer columns.")
public int int_ub;
@API(help = "Number of levels (categories) for the enum columns.")
public int enum_nlevels;
@API(help = "Fraction of ones in each boolean (binary) column.")
public double bool_p;
@API(help = "Lower bound for the range of time columns (in ms since the epoch).")
public long time_lb;
@API(help = "Upper bound for the range of time columns (in ms since the epoch).")
public long time_ub;
@API(help = "Length of generated strings in string columns.")
public int str_length;
@API(help = "Fraction of missing values.")
public double missing_fraction;
@API(help = "Type of the response column to add.", values = {"none", "real", "int", "bool", "enum", "time"})
public SimpleCreateFrameRecipe.ResponseType response_type;
@API(help = "Lower bound for the response variable (real/int/time types).")
public double response_lb;
@API(help = "Upper bound for the response variable (real/int/time types).")
public double response_ub;
@API(help = "Frequency of 1s for the bool (binary) response column.")
public double response_p;
@API(help = "Number of categorical levels for the enum response column.")
public int response_nlevels;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4/input/JobIV4.java |

package water.api.schemas4.input;
import water.Iced;
import water.api.API;
import water.api.schemas4.InputSchemaV4;
/**
* Input schema for the {@code "GET /4/jobs/{job_id}"} endpoint.
*/
public class JobIV4 extends InputSchemaV4<Iced, JobIV4> {
@API(help="Id of the job to fetch.")
public String job_id;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas4/output/JobV4.java |

package water.api.schemas4.output;
import water.Job;
import water.TypeMap;
import water.api.API;
import water.api.schemas4.OutputSchemaV4;
import java.io.PrintWriter;
import java.io.StringWriter;
/** Schema for a single Job. */
public class JobV4 extends OutputSchemaV4<Job<?>, JobV4> {
@API(help="Job id")
public String job_id;
@API(help="Job status", values={"RUNNING", "DONE", "STOPPING", "CANCELLED", "FAILED"})
public Status status;
@API(help="Current progress, a number going from 0 to 1")
public float progress;
@API(help="Current progress status description")
public String progress_msg;
@API(help="Start time")
public long start_time;
@API(help="Runtime in milliseconds")
public long duration;
@API(help="Id of the target object (being created by this Job)")
public String target_id;
@API(help="Type of the target: Frame, Model, etc.")
public String target_type;
@API(help="Exception message, if an exception occurred")
public String exception;
@API(help="Stacktrace")
public String stacktrace;
public enum Status {
RUNNING, DONE, STOPPING, CANCELLED, FAILED
}
@Override public JobV4 fillFromImpl(Job<?> job) {
if (job == null) return this;
job_id = job._key.toString();
progress = job.progress();
progress_msg = job.progress_msg();
duration = job.msec();
if (job.isRunning()) {
status = job.stop_requested()? Status.STOPPING : Status.RUNNING;
} else {
status = job.stop_requested()? Status.CANCELLED : Status.DONE;
}
Throwable ex = job.ex();
if (ex != null) {
status = Status.FAILED;
exception = ex.toString();
StringWriter sw = new StringWriter();
ex.printStackTrace(new PrintWriter(sw));
stacktrace = sw.toString();
}
target_id = job._result == null || !job.readyForView()? null : job._result.toString();
target_type = TypeMap.theFreezable(job._typeid).getClass().getSimpleName();
return this;
}
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas99/AssemblyV99.java |

package water.api.schemas99;
import water.Iced;
import water.api.API;
import water.api.schemas3.KeyV3;
import water.api.schemas3.RequestSchemaV3;
/** FIXME: comments please! */
public class AssemblyV99 extends RequestSchemaV3<Iced, AssemblyV99> {
// input fields
@API(help="A list of steps describing the assembly line.")
public String[] steps;
@API(help="Input Frame for the assembly.")
public KeyV3.FrameKeyV3 frame;
@API(help="The name of the file (and generated class in case of pojo)")
public String file_name;
@API(help="The key of the Assembly object to retrieve from the DKV.")
public String assembly_id;
//output
@API(help="Output of the assembly line.", direction=API.Direction.OUTPUT)
public KeyV3.FrameKeyV3 result;
@API(help="A Key to the fit Assembly data structure", direction=API.Direction.OUTPUT)
public KeyV3.AssemblyKeyV3 assembly;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas99/GridsV99.java |

package water.api.schemas99;
import hex.schemas.GridSchemaV99;
import water.api.API;
import water.api.schemas3.SchemaV3;
import water.api.Grids;
public class GridsV99 extends SchemaV3<Grids, GridsV99> {
@API(help="Grids", direction=API.Direction.OUTPUT)
public GridSchemaV99[] grids;
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api | java-sources/ai/h2o/h2o-core/3.46.0.7/water/api/schemas99/RapidsV99.java |

package water.api.schemas99;
import water.Iced;
import water.api.API;
import water.api.schemas3.RequestSchemaV3;
import water.api.schemas3.KeyV3;
public class RapidsV99 extends RequestSchemaV3<Iced, RapidsV99> {
// Input fields
@API(help="An Abstract Syntax Tree.", direction=API.Direction.INPUT)
public String ast;
// Output. Only one of these 5 results is returned; the rest are null - and
// this is how the caller tells which result is valid.
@API(help="Parsing error, if any", direction=API.Direction.OUTPUT)
public String error;
@API(help="Scalar result", direction=API.Direction.OUTPUT)
public double scalar;
@API(help="Function result", direction=API.Direction.OUTPUT)
public String funstr;
@API(help="String result", direction=API.Direction.OUTPUT)
public String string;
@API(help="Result key", direction=API.Direction.OUTPUT)
public KeyV3.FrameKeyV3 key;
@API(help="Rows in Frame result", direction=API.Direction.OUTPUT)
public long num_rows;
@API(help="Columns in Frame result", direction=API.Direction.OUTPUT)
public int num_cols;
}
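A minimal sketch of filling a RapidsV99 request on the handler side; the Rapids expression string is a hypothetical example, not taken from this source:
RapidsV99 req = new RapidsV99();
req.ast = "(tmp= py_1 (cols my_frame [0 1]))"; // hypothetical Rapids AST in the Lisp-like syntax
// After execution, exactly one of error/scalar/funstr/string/key is set in the
// response; the caller dispatches on whichever output field is non-null.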

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water | java-sources/ai/h2o/h2o-core/3.46.0.7/water/codegen/CodeGenerator.java |

package water.codegen;
import water.exceptions.JCodeSB;
/**
* Interface for code generator.
*/
public interface CodeGenerator {
/** Generate code into the given output.
*
* @param out code generation output.
*/
void generate(JCodeSB out);
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water | java-sources/ai/h2o/h2o-core/3.46.0.7/water/codegen/CodeGeneratorPipeline.java |

package water.codegen;
import java.util.ArrayList;
import water.exceptions.JCodeSB;
/**
* A simple code generation pipeline.
*
* It composes code generators and allows for their execution
* later.
*/
public class CodeGeneratorPipeline extends ArrayList<CodeGenerator> implements
CodeGenerator {
@Override
public void generate(JCodeSB out) {
for (CodeGenerator codeGen : this) {
codeGen.generate(out);
}
}
}
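A minimal sketch of composing the pipeline, assuming JCodeSB exposes p(String) and nl() append methods (an assumption about its API, shown here only to illustrate the composition pattern):
CodeGeneratorPipeline pipeline = new CodeGeneratorPipeline();
pipeline.add(out -> { out.p("// begin generated section"); out.nl(); }); // stage 1
pipeline.add(out -> { out.p("// end generated section"); out.nl(); });   // stage 2
// pipeline.generate(sb) runs each registered generator, in order, against sb.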

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water | java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2OAbstractRuntimeException.java |

package water.exceptions;
import water.util.HttpResponseStatus;
import water.H2OError;
import water.util.IcedHashMapGeneric;
/**
* RuntimeException which results in a http 400 error by default, and serves as a base class for other error types.
* Note that the HTTP error status can be overridden by subclasses when the exception is converted to {@link H2OError}
* via {@link #toH2OError(String)}.
*
* NOTE: don't use this directly; use more specific types.
*/
abstract public class H2OAbstractRuntimeException extends RuntimeException {
protected int HTTP_RESPONSE_CODE() { return HttpResponseStatus.BAD_REQUEST.getCode(); }
public long timestamp;
public String dev_message;
public IcedHashMapGeneric.IcedHashMapStringObject values;
public H2OAbstractRuntimeException(String message, String dev_message, IcedHashMapGeneric.IcedHashMapStringObject values) {
super(message);
this.timestamp = System.currentTimeMillis();
this.dev_message = dev_message;
this.values = values;
}
public H2OAbstractRuntimeException(String msg, String dev_msg) {
this(msg, dev_msg, new IcedHashMapGeneric.IcedHashMapStringObject());
}
public H2OError toH2OError() {
return toH2OError(null);
}
public H2OError toH2OError(String error_url) {
return new H2OError(timestamp, error_url, getMessage(), dev_message, HTTP_RESPONSE_CODE(), values, this);
}
}

| 0 | java-sources/ai/h2o/h2o-core/3.46.0.7/water | java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2OCategoricalLevelNotFoundArgumentException.java |

package water.exceptions;
import water.util.IcedHashMapGeneric;
/**
* Exception signalling that a categorical level was not found.
*/
public class H2OCategoricalLevelNotFoundArgumentException extends H2ONotFoundArgumentException {
public H2OCategoricalLevelNotFoundArgumentException(String argument, String categorical_level, String frame_name, String column_name) {
super("Categorical level: " + categorical_level + " not found in column_name: " + column_name + " in frame: " + frame_name + " from argument: " + argument + ": " + argument.toString(),
"Categorical level: " + categorical_level + " not found in column_name: " + column_name + " in frame: " + frame_name + " from argument: " + argument + ": " + argument.toString());
this.values = new IcedHashMapGeneric.IcedHashMapStringObject();
this.values.put("argument", argument);
this.values.put("categorical_level", categorical_level);
this.values.put("frame_name", frame_name);
this.values.put("column_name", column_name);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2OColumnNotFoundArgumentException.java
|
package water.exceptions;
import water.fvec.Frame;
import water.util.IcedHashMap;
import water.util.IcedHashMapGeneric;
/**
* Exception signalling that a Vec was not found.
* <p>
* If the Vec name came from an argument, especially from an API parameter, use
* {@code H2OColumnNotFoundArgumentException(String argument, Frame frame, String column_name)} or
* {@code H2OColumnNotFoundArgumentException(String argument, String frame_name, String column_name)},
* which let you specify the argument name. If not, use
* {@code H2OColumnNotFoundArgumentException(Frame frame, String column_name)} or
* {@code H2OColumnNotFoundArgumentException(String frame_name, String column_name)}.
*/
public class H2OColumnNotFoundArgumentException extends H2ONotFoundArgumentException {
public H2OColumnNotFoundArgumentException(String argument, Frame frame, String column_name) {
this(argument, (null == frame._key ? "null" : frame._key.toString()), column_name);
}
public H2OColumnNotFoundArgumentException(String argument, String frame_name, String column_name) {
super("Column: " + column_name + " not found in frame: " + frame_name + " from argument: " + argument + ": " + argument.toString(),
"Column: " + column_name + " not found in frame: " + frame_name + " from argument: " + argument + ": " + argument.toString());
this.values = new IcedHashMapGeneric.IcedHashMapStringObject();
this.values.put("argument", argument);
this.values.put("frame_name", frame_name);
this.values.put("column_name", column_name);
}
public H2OColumnNotFoundArgumentException(Frame frame, String column_name) {
this((null == frame._key ? "null" : frame._key.toString()), column_name);
}
public H2OColumnNotFoundArgumentException(String frame_name, String column_name) {
super("Column: " + column_name + " not found in frame: " + frame_name + ".",
"Column: " + column_name + " not found in frame: " + frame_name + ".");
this.values = new IcedHashMapGeneric.IcedHashMapStringObject();
this.values.put("frame_name", frame_name);
this.values.put("column_name", column_name);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2OConcurrentModificationException.java
|
package water.exceptions;
/**
* H2OConcurrentModificationException signals that an object was modified while being used in another
* operation.
* Example use case is deleting a Vec while a checksum is being calculated on it.
*/
public class H2OConcurrentModificationException extends H2OAbstractRuntimeException {
public H2OConcurrentModificationException(String message) {
super(message, message);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2OFailException.java
|
package water.exceptions;
import water.H2OError;
import water.util.HttpResponseStatus;
/**
* RuntimeException which causes H2O to shut down. This should only be used for cases in
* which the code is bad, for example because a case isn't covered which must be for the
* product to function correctly, and which therefore should be caught quickly in the
* development process.
*/
public class H2OFailException extends H2OAbstractRuntimeException {
protected int HTTP_RESPONSE_CODE() { return HttpResponseStatus.INTERNAL_SERVER_ERROR.getCode(); }
public H2OFailException(String message) {
super(message, message);
this.timestamp = System.currentTimeMillis();
}
public H2OFailException(String msg, Throwable cause) {
this(msg);
this.initCause(cause);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2OFileAccessDeniedException.java
|
package water.exceptions;
/**
* Exception thrown when a file matches file_deny_glob
*/
public class H2OFileAccessDeniedException extends H2OAbstractRuntimeException {
public H2OFileAccessDeniedException(String message, String dev_message) {
super(message, dev_message);
}
public H2OFileAccessDeniedException(String message) {
super(message, message);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2OGridException.java
|
package water.exceptions;
import water.H2OError;
public class H2OGridException extends H2OAbstractRuntimeException {
private final Throwable _rootCause;
private final int _httpResponse;
public H2OGridException(String msg) {
this(msg, null);
}
public H2OGridException(String msg, Throwable rootException) {
this(msg, rootException, 0);
}
public H2OGridException(String msg, Throwable rootException, int httpResponse) {
super(msg, msg);
_rootCause = rootException;
_httpResponse = httpResponse > 0 ? httpResponse : super.HTTP_RESPONSE_CODE();
}
@Override
protected int HTTP_RESPONSE_CODE() {
return _httpResponse;
}
@Override
public H2OError toH2OError(String error_url) {
H2OError err;
String rootMessage = _rootCause == null || _rootCause.getMessage() == null ? null : _rootCause.getMessage().trim();
if (_rootCause instanceof H2OAbstractRuntimeException) {
err = ((H2OAbstractRuntimeException) _rootCause).toH2OError(error_url);
} else {
err = new H2OError(timestamp, error_url, getMessage(), dev_message, HTTP_RESPONSE_CODE(), values, _rootCause);
}
StringBuilder msg = new StringBuilder(getMessage().trim());
if (msg.charAt(msg.length()-1) != '.') msg.append('.');
if (rootMessage != null) {
msg.append(' ');
msg.append("Root cause: ");
msg.append(rootMessage);
err._exception_msg = msg.toString();
}
return err;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2OIllegalArgumentException.java
|
package water.exceptions;
import water.util.HttpResponseStatus;
import water.util.IcedHashMap;
import water.util.IcedHashMapGeneric;
public class H2OIllegalArgumentException extends H2OAbstractRuntimeException {
protected int HTTP_RESPONSE_CODE() { return HttpResponseStatus.PRECONDITION_FAILED.getCode(); }
public H2OIllegalArgumentException(String argument, String function, Object value) {
super("Illegal argument: " + argument + " of function: " + function + ": " + (value == null ? "null":value.toString()),
"Illegal argument: " + argument + " of function: " + function + ": " + (value == null ? "null":value.toString()) + " of class: " + (value == null ? "null":value.getClass()));
this.values = new IcedHashMapGeneric.IcedHashMapStringObject();
this.values.put("function", function);
this.values.put("argument", argument);
if (value!=null) {
try {
this.values.put("value", value);
} catch (Exception ignored) {}
}
}
/** Raw-message constructor for use by subclasses. */
public H2OIllegalArgumentException(String message, String dev_message, IcedHashMapGeneric.IcedHashMapStringObject values) {
super(message, dev_message, values);
}
/** Raw-message constructor for use by subclasses. */
public H2OIllegalArgumentException(String message, String dev_message) {
super(message, dev_message);
}
/** Raw-message constructor for use by subclasses. */
public H2OIllegalArgumentException(String message) {
super(message, message);
}
public static H2OIllegalArgumentException wrongKeyType(String fieldName, String keyName, String expectedType, Class actualType) {
H2OIllegalArgumentException e =
new H2OIllegalArgumentException(
expectedType + " argument: " + fieldName + " with value: " + keyName + " points to a non-" + expectedType + " object: " + actualType.getSimpleName(),
expectedType + " argument: " + fieldName + " with value: " + keyName + " points to a non-" + expectedType + " object: " + actualType.getName());
e.values = new IcedHashMapGeneric.IcedHashMapStringObject();
e.values.put("argument", fieldName);
e.values.put("value", keyName);
e.values.put("expected_type", expectedType);
e.values.put("actual_type", actualType);
return e;
}
}
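// Usage sketch: validating an API parameter (names chosen for illustration):
//   if (maxDepth < 0)
//     throw new H2OIllegalArgumentException("max_depth", "train", maxDepth);
// The resulting H2OError carries HTTP 412 (PRECONDITION_FAILED) plus the
// function/argument/value entries recorded in the values map above.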
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2OIllegalValueException.java
|
package water.exceptions;
import water.util.IcedHashMap;
import water.util.IcedHashMapGeneric;
/**
* Exception indicating that we found an illegal value which was not passed in as an argument.
*/
public class H2OIllegalValueException extends H2OAbstractRuntimeException {
public H2OIllegalValueException(String field, String object, Object value) {
super("Illegal value for field: " + field + " of object: " + object + ": " + value.toString(),
"Illegal value for field: " + field + " of object: " + object + ": " + value.toString() + " of class: " + value.getClass());
this.values = new IcedHashMapGeneric.IcedHashMapStringObject();
this.values.put("field", field);
this.values.put("object", object);
this.values.put("value", value);
}
public H2OIllegalValueException(String field, Object value) {
super("Illegal value for field: " + field + ": " + value.toString(),
"Illegal value for field: " + field + ": " + value.toString() + " of class: " + value.getClass());
this.values = new IcedHashMapGeneric.IcedHashMapStringObject();
this.values.put("field", field);
this.values.put("value", value);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2OKeyNotFoundArgumentException.java
|
package water.exceptions;
import water.Key;
import water.util.IcedHashMap;
import water.util.IcedHashMapGeneric;
/**
* Exception signalling that a Key was not found.
* <p>
* If the Key name came from an argument, especially from an API parameter, use
* {@code H2OKeyNotFoundArgumentException(String argument, String name)} or {@code H2OKeyNotFoundArgumentException(String argument, Key key)},
* which let you specify the argument name. If not, use {@code H2OKeyNotFoundArgumentException(String name)} or
* {@code H2OKeyNotFoundArgumentException(Key key)}.
*/
public class H2OKeyNotFoundArgumentException extends H2ONotFoundArgumentException {
public H2OKeyNotFoundArgumentException(String argument, String function, String name) {
super("Object '" + name.toString() + "' not found in function: " + function + " for argument: " + argument,
"Object '" + name.toString() + "' not found in function: " + function + " for argument: " + argument);
this.values = new IcedHashMapGeneric.IcedHashMapStringObject();
this.values.put("function", function);
this.values.put("argument", argument);
this.values.put("name", name);
}
public H2OKeyNotFoundArgumentException(String argument, String name) {
super("Object '" + name.toString() + "' not found for argument: " + argument,
"Object '" + name.toString() + "' not found for argument: " + argument);
this.values = new IcedHashMapGeneric.IcedHashMapStringObject();
this.values.put("argument", argument);
this.values.put("name", name);
}
public H2OKeyNotFoundArgumentException(String argument, Key key) {
this(argument, null == key ? "null" : key.toString());
}
public H2OKeyNotFoundArgumentException(String name) {
super("Object not found: " + name.toString(),
"Object not found: " + name.toString());
this.values = new IcedHashMapGeneric.IcedHashMapStringObject();
this.values.put("name", name);
}
public H2OKeyNotFoundArgumentException(Key key) {
this(null == key ? "null" : key.toString());
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2OKeyWrongTypeArgumentException.java
|
package water.exceptions;
import water.util.HttpResponseStatus;
import water.Keyed;
import water.util.IcedHashMap;
import water.util.IcedHashMapGeneric;
public class H2OKeyWrongTypeArgumentException extends H2OIllegalArgumentException {
protected int HTTP_RESPONSE_CODE() { return HttpResponseStatus.NOT_FOUND.getCode(); }
public H2OKeyWrongTypeArgumentException(String argument, Object value, Class<? extends Keyed> expected, Class actual) {
super("Expected a " + expected.getSimpleName() + " for key argument: " + argument + " with value: " + value + ". Found a: " + actual.getSimpleName(),
"Expected a " + expected.getCanonicalName() + " for key argument: " + argument + " with value: " + value + ". Found a: " + actual.getCanonicalName());
this.values = new IcedHashMapGeneric.IcedHashMapStringObject();
this.values.put("argument", argument);
this.values.put("value", value);
this.values.put("expected_type", expected.getCanonicalName());
this.values.put("actual_type", actual.getCanonicalName());
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2OKeysNotFoundArgumentException.java
|
package water.exceptions;
import water.util.IcedHashMap;
import water.util.IcedHashMapGeneric;
public class H2OKeysNotFoundArgumentException extends H2ONotFoundArgumentException {
public H2OKeysNotFoundArgumentException(String argument, String[] names) {
super("Objects not found: " + argument + ": " + names.toString(),
"Objects not found: " + argument + ": " + names.toString());
this.values = new IcedHashMapGeneric.IcedHashMapStringObject();
this.values.put("argument", argument);
this.values.put("names", names);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2OModelBuilderIllegalArgumentException.java
|
package water.exceptions;
import hex.Model;
import hex.ModelBuilder;
import water.H2OModelBuilderError;
import water.util.IcedHashMapGeneric;
public class H2OModelBuilderIllegalArgumentException extends H2OIllegalArgumentException {
/** Raw-message constructor for use by the factory method. */
private H2OModelBuilderIllegalArgumentException(String message, String dev_message) {
super(message, dev_message);
}
public static H2OModelBuilderIllegalArgumentException makeFromBuilder(ModelBuilder builder) {
Model.Parameters parameters = builder._parms;
String algo = builder._parms.algoName();
String msg = "Illegal argument(s) for " + algo + " model: " + builder.dest() + ". Details: " + builder.validationErrors();
H2OModelBuilderIllegalArgumentException exception = new H2OModelBuilderIllegalArgumentException(msg, msg);
exception.values = new IcedHashMapGeneric.IcedHashMapStringObject();
exception.values.put("algo", algo);
exception.values.put("parameters", parameters);
exception.values.put("error_count", builder.error_count());
exception.values.put("messages", builder._messages);
return exception;
}
public H2OModelBuilderError toH2OError(String error_url) {
return new H2OModelBuilderError(timestamp, error_url, getMessage(), dev_message, HTTP_RESPONSE_CODE(), values, this);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2ONotFoundArgumentException.java
|
package water.exceptions;
import water.util.HttpResponseStatus;
public class H2ONotFoundArgumentException extends H2OIllegalArgumentException {
final protected int HTTP_RESPONSE_CODE() { return HttpResponseStatus.NOT_FOUND.getCode(); }
public H2ONotFoundArgumentException(String msg, String dev_msg) {
super(msg, dev_msg);
}
public H2ONotFoundArgumentException(String msg) {
super(msg, msg);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/H2OUnsupportedDataFileException.java
|
package water.exceptions;
import water.util.IcedHashMapGeneric;
/**
* Exception thrown by a parser when a file format is recognized but a certain feature used
* in the particular data file is not supported (e.g. nested structures).
*/
public class H2OUnsupportedDataFileException extends H2OAbstractRuntimeException {
public H2OUnsupportedDataFileException(String message, String dev_message, IcedHashMapGeneric.IcedHashMapStringObject values) {
super(message, dev_message, values);
}
public H2OUnsupportedDataFileException(String message, String dev_message) {
super(message, dev_message, new IcedHashMapGeneric.IcedHashMapStringObject());
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/exceptions/JCodeSB.java
|
package water.exceptions;
import water.util.IcedBitSet;
/**
* Simple interface to share definition of SB and SBPrintStream.
*
* Designed for Java code generation.
*/
public interface JCodeSB<T extends JCodeSB<T>> {
// Append primitives
T ps(String s);
T p(String s);
//T p(float s); - intentionally not included to avoid issues with confusing p(float) and pj(float) - we don't want to output floats that look like doubles in Java code
T p(double s);
T p(char s);
T p(int s);
T p(long s);
T p(boolean s);
T p(JCodeSB s);
T pobj(Object s);
/** Increase indentation counter */
T i(int d);
/** Increase indentation counter */
T i();
/** Indent and append. */
T ip(String s);
/** Append empty string. */
T s();
// Java specific append of double
T pj(double s);
// Java specific append of float
T pj(float s);
/** Append Java string - escape all " and \ */
T pj(String s);
/** Append a reference to an object's field.
*
* @param objectName name of the object
* @param fieldName field name to reference
* @return self, for call chaining
*/
T pj(String objectName, String fieldName);
T p(IcedBitSet ibs);
/** Increase indentation counter */
T ii(int i);
/** Decrease indentation counter */
T di(int i);
// Copy indent from given string buffer
T ci(JCodeSB sb);
T nl();
// Convert a String[] into a valid Java String initializer
T toJavaStringInit(String[] ss);
T toJavaStringInit(float[] ss);
T toJavaStringInit(double[] ss);
T toJavaStringInit(double[][] ss);
T toJavaStringInit(double[][][] ss);
T toJSArray(float[] nums);
T toJSArray(String[] ss);
int getIndent();
String getContent();
}
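// Fluent usage sketch (sb is any JCodeSB implementation, e.g. the SB class
// mentioned in the javadoc above; every call shown is declared in this
// interface and returns the builder for chaining):
//   sb.p("public double score(double[] row) {").nl().ii(1)
//     .ip("return ").pj(0.5).p(";").nl().di(1)
//     .ip("}").nl();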
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/AppendableVec.java
|
package water.fvec;
import water.*;
import java.util.Arrays;
/**
* A NEW single distributed vector column.
*
* The NEW vector has no data, and takes no space. It supports distributed
* parallel writes to it, via calls to append2. Such writes happen in parallel
* and all writes are ordered. Writes *will* be local to the node doing them,
* specifically to allow control over locality. By default, writes go to
* local-homed chunks with no compression; there is a final 'close' to the NEW
* vector which may do compression; the final 'close' will return some other
* Vec type. NEW Vectors do NOT support reads!
*/
public class AppendableVec extends Vec {
// Temporary ESPC, for uses which do not know the number of Chunks up front.
public long _tmp_espc[];
// Allow Chunks to have their final Chunk index (set at closing) offset by
// this much. Used by the Parser to fold together multi-file AppendableVecs.
public final int _chunkOff;
public AppendableVec( Key<Vec> key, byte type ) { this(key, new long[4], type, 0); }
public AppendableVec( Key<Vec> key, long[] tmp_espc, byte type, int chunkOff) {
super( key, -1/*no rowLayout yet*/, null, type );
_tmp_espc = tmp_espc;
_chunkOff = chunkOff;
}
// A NewVector chunk was "closed" - completed. Add its info to the roll-up.
// This call is made in parallel across all node-local created chunks, but is
// not called distributed.
synchronized void closeChunk( int cidx, int len ) {
// The Parser will pre-allocate the _tmp_espc large enough (the Parser
// knows how many final Chunks there will be up front). Other users are
// encouraged to set a "large enough" espc - and a shared one at that - to
// avoid these copies.
// Set the length into the temp ESPC at the Chunk index (accounting for _chunkOff)
cidx -= _chunkOff;
while( cidx >= _tmp_espc.length ) // should not happen if espcs are preallocated and shared!
_tmp_espc = Arrays.copyOf(_tmp_espc, _tmp_espc.length<<1);
_tmp_espc[cidx] = len;
}
public static Vec[] closeAll(AppendableVec [] avs) {
Futures fs = new Futures();
Vec [] res = closeAll(avs,fs);
fs.blockForPending();
return res;
}
public static Vec[] closeAll(AppendableVec [] avs, Futures fs) {
Vec [] res = new Vec[avs.length];
final int rowLayout = avs[0].compute_rowLayout();
for(int i = 0; i < avs.length; ++i)
res[i] = avs[i].close(rowLayout,fs);
return res;
}
// Class 'reduce' call on new vectors; to combine the roll-up info.
// Called single-threaded from the M/R framework.
public void reduce( AppendableVec nv ) {
if( this == nv ) return; // Trivially done
if( _tmp_espc == nv._tmp_espc ) return;
// Combine arrays of elements-per-chunk
long e1[] = nv._tmp_espc; // Shorter array of longs?
if (e1.length > _tmp_espc.length) { // Keep longer array
e1 = _tmp_espc; // Keep the shorter one in e1
_tmp_espc = nv._tmp_espc; // Keep longer in the object
}
for( int i=0; i<e1.length; i++ ) // Copy non-zero elements over
if( _tmp_espc[i]==0 && e1[i] != 0 ) // Read-filter (old code unconditionally did a R-M-W cycle)
_tmp_espc[i] = e1[i]; // Only write if needed
}
public Vec layout_and_close(Futures fs) { return close(compute_rowLayout(),fs); }
public int compute_rowLayout() {
int nchunk = _tmp_espc.length;
while( nchunk > 1 && _tmp_espc[nchunk-1] == 0 )
nchunk--;
// Compute elems-per-chunk.
// Roll-up elem counts, so espc[i] is the starting element# of chunk i.
long espc[] = new long[nchunk+1]; // Shorter array
long x=0; // Total row count so far
for( int i=0; i<nchunk; i++ ) {
espc[i] = x; // Start elem# for chunk i
x += _tmp_espc[i]; // Raise total elem count
}
espc[nchunk]=x; // Total element count in last
return ESPC.rowLayout(_key,espc);
}
// "Close" out a NEW vector - rewrite it to a plain Vec that supports random
// reads, plus computes rows-per-chunk, min/max/mean, etc.
public Vec close(int rowLayout, Futures fs) {
// Compute #chunks
int nchunk = _tmp_espc.length;
DKV.remove(chunkKey(nchunk),fs); // remove potential trailing key
while( nchunk > 1 && _tmp_espc[nchunk-1] == 0 ) {
nchunk--;
DKV.remove(chunkKey(nchunk),fs); // remove potential trailing key
}
// Replacement plain Vec for AppendableVec.
Vec vec = new Vec(_key, rowLayout, domain(), _type);
DKV.put(_key,vec,fs); // Inject the header into the K/V store
return vec;
}
// Default read/write behavior for AppendableVecs
@Override protected boolean readable() { return false; }
@Override protected boolean writable() { return true ; }
@Override public NewChunk chunkForChunkIdx(int cidx) { return new NewChunk(this,cidx); }
// None of these are supposed to be called while building the new vector
@Override public Value chunkIdx( int cidx ) { throw H2O.fail(); }
@Override public long length() { throw H2O.fail(); }
@Override public int nChunks() { throw H2O.fail(); }
@Override public int nonEmptyChunks() { throw H2O.fail(); }
@Override public int elem2ChunkIdx( long i ) { throw H2O.fail(); }
@Override protected long chunk2StartElem( int cidx ) { throw H2O.fail(); }
@Override public long byteSize() { return 0; }
@Override public String toString() { return "[AppendableVec, unknown size]"; }
}
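// Lifecycle sketch (schematic: the append phase normally happens inside an
// MRTask via NewChunk, which is not shown in this file; Vec.newKey() and
// Vec.T_NUM are assumed from the wider fvec API):
//   AppendableVec av = new AppendableVec(Vec.newKey(), Vec.T_NUM);
//   ... write rows into av's NewChunks on each node, then close the chunks ...
//   Vec v = av.layout_and_close(new Futures()); // rolls up ESPC, returns a readable Vec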
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/ByteVec.java
|
package water.fvec;
import water.Job;
import water.Key;
import water.Value;
import java.io.IOException;
import java.io.InputStream;
/**
* A vector of plain Bytes.
*/
public class ByteVec extends Vec {
public ByteVec( Key key, int rowLayout ) { super(key, rowLayout); }
@Override public C1NChunk chunkForChunkIdx(int cidx) { return (C1NChunk)super.chunkForChunkIdx(cidx); }
/** Return column missing-element-count - ByteVecs do not allow any "missing elements" */
@Override public long naCnt() { return 0; }
/** Is all integers? Yes, it's all bytes */
@Override public boolean isInt(){return true; }
/** Get an unspecified number of initial bytes; typically a whole C1NChunk of
* length Vec.DFLT_CHUNK_SIZE but no guarantees. Useful for previewing the start
* of large files.
* @return array of initial bytes */
public byte[] getFirstBytes() {
return getFirstChunkBytes();
}
final byte[] getFirstChunkBytes() {
return chunkForChunkIdx(0)._mem;
}
public InputStream openStream() {
return openStream(null);
}
/**
* Open a stream view over the underlying data
*/
public InputStream openStream(final Key job_key) {
InputStream is = new InputStream() {
final long[] sz = new long[1];
private int _cidx, _pidx, _sz;
private C1NChunk _c0;
@Override
public int available() {
if (_c0 == null || _sz >= _c0._len) {
sz[0] += _c0 != null ? _c0._len : 0;
if (_cidx >= nChunks()) return 0;
_c0 = chunkForChunkIdx(_cidx++);
_sz = C1NChunk._OFF;
if (job_key != null)
Job.update(_c0._len, job_key);
}
return _c0._len - _sz;
}
@Override
public void close() {
_cidx = nChunks();
_c0 = null;
_sz = 0;
}
@Override
public int read() throws IOException {
return available() == 0 ? -1 : 0xFF & _c0._mem[_sz++];
}
@Override
public int read(byte[] b, int off, int len) {
if (b == null) { // Back-channel read of cidx
if (_cidx > _pidx) { // Remove prev chunk from memory
Value v = Value.STORE_get(chunkKey(_pidx++));
if (v != null && v.isPersisted()) {
v.freePOJO(); // Eagerly toss from memory
v.freeMem();
} // Else not found, or not on disk somewhere
}
return _cidx;
}
int sz = available();
if (sz == 0)
return -1;
len = Math.min(len, sz);
System.arraycopy(_c0._mem, _sz, b, off, len);
_sz += len;
return len;
}
};
try {
is.available();
} catch (IOException e) {
throw new RuntimeException(e);
}
return is;
}
}
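// Usage sketch: stream the vector's raw bytes (e.g. for previewing or
// re-parsing a file-backed vec); passing null skips Job progress updates:
//   try (InputStream is = byteVec.openStream(null)) {
//     byte[] buf = new byte[4096];
//     int n;
//     while ((n = is.read(buf, 0, buf.length)) != -1) { /* consume buf[0..n) */ }
//   }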
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C0DChunk.java
|
package water.fvec;
import water.parser.BufferedString;
import water.util.UnsafeUtils;
import java.util.UUID;
/**
* The constant 'double' column (also used to represent constant NA chunks).
*/
public class C0DChunk extends Chunk {
private static final int _OFF=8+4;
private double _con;
public C0DChunk(double con, int len) {
_start = -1;
set_len(len);
_mem=new byte[_OFF];
_con = con;
UnsafeUtils.set8d(_mem, 0, con);
UnsafeUtils.set4(_mem,8,len);
}
@Override public boolean hasFloat() { return ! Double.isNaN(_con); /* Constant NA chunk is not Float! */ }
@Override protected final long at8_impl( int i ) {
if( Double.isNaN(_con) ) throw new IllegalArgumentException("at8_abs but value is missing");
return (long)_con; // Possible silent truncation
}
long at16h_impl(int idx) { throw wrongType(UUID.class, Object.class); }
@Override protected final double atd_impl( int i ) {return _con;}
@Override protected final boolean isNA_impl( int i ) { return Double.isNaN(_con); }
@Override boolean set_impl(int idx, long l) { return l==_con; }
@Override boolean set_impl(int i, double d) { return d==_con; }
@Override boolean set_impl(int i, float f ) { return f==_con; }
@Override boolean setNA_impl(int i) { return Double.isNaN(_con); }
@Override double min() { return _con; }
@Override double max() { return _con; }
BufferedString atStr_impl(BufferedString bStr, int idx) {
if(Double.isNaN(_con)) return null; // special all-missing case
return super.atStr_impl(bStr,idx);
}
// 3.3333333e33
// public int pformat_len0() { return 22; }
// public String pformat0() { return "% 21.15e"; }
// Custom serializers: the _mem field contains ALL the fields already.
// Init _start to -1, so we know we have not filled in other fields.
// Leave _vec & _chk2 null, leave _len unknown.
@Override final public void initFromBytes() {
_start = -1; _cidx = -1;
_con = UnsafeUtils.get8d(_mem,0);
set_len(UnsafeUtils.get4(_mem,8));
}
@Override public boolean isSparseZero(){return _con == 0;}
@Override public int sparseLenZero() {return _con ==0?0:_len;}
@Override public int nextNZ(int rid) {return _con==0?_len:rid+1;}
@Override public int nonzeros(int [] arr) {
if (_con == 0) return 0;
for (int i = 0; i < _len; ++i) arr[i] = i;
return _len;
}
@Override public boolean isSparseNA(){return Double.isNaN(_con);}
@Override public int sparseLenNA() {return Double.isNaN(_con)?0:_len;}
@Override public int getSparseDoubles(double [] vals, int [] ids, double NA){
if(_con == 0) return 0;
double con = Double.isNaN(_con)?NA:_con;
for(int i = 0; i < _len; ++i) {
vals[i] = con;
ids[i] = i;
}
return _len;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int from, int to){
if(_con == 0)
v.addZeros(to-from);
else if(Double.isNaN(_con))
v.addNAs(to-from);
else for(int i = from; i < to; i++)
v.addValue(_con);
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int [] ids){
if(_con == 0)
v.addZeros(ids.length);
else if(Double.isNaN(_con))
v.addNAs(ids.length);
else for(int i = 0; i < ids.length; i++)
v.addValue(_con);
return v;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C0LChunk.java
|
package water.fvec;
import water.util.UnsafeUtils;
/**
* The constant 'long' column.
*/
public class C0LChunk extends Chunk {
protected static final int _OFF=8+4;
private long _con;
public C0LChunk(long con, int len) { _mem=new byte[_OFF]; _start = -1; set_len(len);
_con = con;
UnsafeUtils.set8(_mem, 0, con);
UnsafeUtils.set4(_mem,8,len);
}
@Override public boolean hasFloat() { return false; }
@Override public boolean hasNA() { return false; }
@Override protected final long at8_impl( int i ) { return _con; }
@Override protected final double atd_impl( int i ) {return _con; }
@Override protected final boolean isNA_impl( int i ) { return false; }
@Override boolean set_impl(int idx, long l) { return l==_con; }
@Override boolean set_impl(int i, double d) { return d==_con; }
@Override boolean set_impl(int i, float f ) { return f==_con; }
@Override boolean setNA_impl(int i) { return false; }
@Override boolean set_impl (int idx, String str) { return false; }
@Override double min() { return _con; }
@Override double max() { return _con; }
@Override public final void initFromBytes () {
_start = -1; _cidx = -1;
_con = UnsafeUtils.get8(_mem,0);
set_len(UnsafeUtils.get4(_mem,8));
}
@Override public boolean isSparseZero(){return _con == 0;}
@Override public int sparseLenZero(){return _con == 0?0: _len;}
@Override public int nextNZ(int rid){return _con == 0?_len:rid+1;}
@Override public int nonzeros(int [] arr) {
if (_con == 0) return 0;
for (int i = 0; i < _len; ++i) arr[i] = i;
return _len;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int from, int to){
if(_con == 0)
v.addZeros(to-from);
else for(int i = from; i < to; i++)
v.addValue(_con);
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int [] ids){
if(_con == 0)
v.addZeros(ids.length);
else for(int i = 0; i < ids.length; i++)
v.addValue(_con);
return v;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C16Chunk.java
|
package water.fvec;
import water.util.UnsafeUtils;
public class C16Chunk extends Chunk {
static final long _LO_NA = Long.MIN_VALUE;
static final long _HI_NA = 0;
C16Chunk( byte[] bs ) { _mem=bs; _start = -1; set_len(_mem.length>>4); }
@Override protected final long at8_impl( int i ) { throw new IllegalArgumentException("at8_abs but 16-byte UUID"); }
@Override protected final double atd_impl( int i ) { throw new IllegalArgumentException("atd but 16-byte UUID"); }
@Override protected final boolean isNA_impl( int i ) { return isNA(loAt(i), hiAt(i)); }
public static boolean isNA(long lo, long hi) { return lo ==_LO_NA && hi ==_HI_NA; }
private long loAt(int idx) { return UnsafeUtils.get8(_mem, idx*16); }
private long hiAt(int idx) { return UnsafeUtils.get8(_mem, idx*16+8); }
@Override protected long at16l_impl(int idx) {
long lo = loAt(idx);
if (lo == _LO_NA && hiAt(idx) == _HI_NA) {
throw new IllegalArgumentException("at16l but value is missing at " + idx);
}
return lo;
}
@Override protected long at16h_impl(int idx) {
long hi = hiAt(idx);
if (hi == _HI_NA && loAt(idx) == _LO_NA) {
throw new IllegalArgumentException("at16h but value is missing at " + idx);
}
return hi;
}
@Override boolean set_impl(int i, long lo, long hi) {
if (isNA(lo, hi)) throw new IllegalArgumentException("Illegal uid value");
UnsafeUtils.set8(_mem, i*16, lo);
UnsafeUtils.set8(_mem, i*16 + 8, hi);
return true;
}
@Override boolean set_impl(int idx, long l) { return false; }
@Override boolean set_impl(int i, double d) { return false; }
@Override boolean set_impl(int i, float f ) { return false; }
@Override boolean setNA_impl(int idx) { return set_impl(idx, _LO_NA, _HI_NA); }
@Override
public <T extends ChunkVisitor> T processRows(T v, int from, int to) {
for(int i = from; i < to; i++) {
if(isNA(i)) v.addNAs(1);
else v.addValue(UnsafeUtils.get8(_mem, 16 * i), UnsafeUtils.get8(_mem, 16 * i + 8));
}
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int[] ids) {
for(int i:ids) v.addValue(UnsafeUtils.get8(_mem,16*i),UnsafeUtils.get8(_mem,16*i+8));
return v;
}
@Override protected final void initFromBytes () {
_start = -1; _cidx = -1;
set_len(_mem.length>>4);
assert _mem.length == _len * 16;
}
// @Override protected int pformat_len0() { return 36; }
}
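// Encoding sketch: each row stores a UUID as two longs (lo at i*16, hi at
// i*16+8); the reserved pair (lo == Long.MIN_VALUE, hi == 0) marks NA, which
// is why set_impl rejects exactly that pair. Schematically:
//   java.util.UUID u = java.util.UUID.randomUUID();
//   chunk.set_impl(row, u.getLeastSignificantBits(), u.getMostSignificantBits());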
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C1Chunk.java
|
package water.fvec;
/**
* The empty-compression function, if all elements fit directly on UNSIGNED bytes.
* Cannot store 0xFF, the value is a marker for N/A.
*/
public class C1Chunk extends Chunk {
static protected final int _OFF = 0;
static protected final int _NA = 0xFF;
C1Chunk(byte[] bs) { _mem=bs; _start = -1; set_len(_mem.length); }
@Override protected final long at8_impl( int i ) {
long res = 0xFF&_mem[i+_OFF];
if( res == _NA ) throw new IllegalArgumentException("at8_abs but value is missing");
return res;
}
@Override protected final double atd_impl( int i ) {
long res = 0xFF&_mem[i+_OFF];
return (res == _NA)?Double.NaN:res;
}
@Override protected final boolean isNA_impl( int i ) { return (0xFF&_mem[i+_OFF]) == _NA; }
@Override boolean set_impl(int i, long l) {
if( !(0 <= l && l < 255) ) return false;
_mem[i+_OFF] = (byte)l;
return true;
}
@Override boolean set_impl(int i, double d) { return false; }
@Override boolean set_impl(int i, float f ) { return false; }
@Override boolean setNA_impl(int idx) { _mem[idx+_OFF] = (byte)_NA; return true; }
@Override public void initFromBytes(){
_start = -1; _cidx = -1;
set_len(_mem.length);
}
private final void processRow(int r, ChunkVisitor v){
int i = 0xFF&_mem[r+_OFF];
if(i == _NA) v.addNAs(1);
else v.addValue(i);
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int from, int to) {
for(int i = from; i < to; i++) processRow(i,v);
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int[] ids) {
for(int i:ids) processRow(i,v);
return v;
}
public int [] getIntegers(int [] vals, int from, int to, int NA){
for(int i = from; i < to; i++) {
int x = 0xFF&_mem[i];
vals[i-from] = (x == _NA)?NA:x;
}
return vals;
}
@Override public double [] getDoubles(double [] vals, int from, int to, double NA){
for(int i = from; i < to; i++) {
int x = 0xFF&_mem[i];
vals[i-from] = (x == _NA)?NA:x;
}
return vals;
}
@Override public double [] getDoubles(double [] vals, int [] ids){
int k = 0;
for(int i:ids) {
int x = 0xFF&_mem[i];
vals[k++] = (x == _NA)?Double.NaN:x;
}
return vals;
}
@Override
public boolean hasFloat() {return false;}
}
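// Encoding sketch: one unsigned byte per row, values 0..254 stored inline and
// 0xFF reserved for NA; e.g. set_impl(i, 254) succeeds while set_impl(i, 255)
// returns false, signalling the caller to fall back to a wider chunk type.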
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C1NChunk.java
|
package water.fvec;
/**
* The empty-compression function, if all elements fit directly on UNSIGNED bytes.
* [In particular, this is the compression style for data read in from files.]
*/
public class C1NChunk extends Chunk {
protected static final int _OFF=0;
public C1NChunk(byte[] bs) { _mem=bs; _start = -1; set_len(_mem.length); }
@Override protected final long at8_impl( int i ) { return 0xFF&_mem[i]; }
@Override protected final double atd_impl( int i ) { return 0xFF&_mem[i]; }
@Override protected final boolean isNA_impl( int i ) { return false; }
@Override boolean set_impl(int i, long l ) { return false; }
@Override boolean set_impl(int i, double d) { return false; }
@Override boolean set_impl(int i, float f ) { return false; }
@Override boolean setNA_impl(int idx) { return false; }
// Custom serializers: the _mem field contains ALL the fields already.
// Init _start to -1, so we know we have not filled in other fields.
// Leave _vec & _chk2 null, leave _len unknown.
@Override protected final void initFromBytes () {
_start = -1;
_cidx = -1;
set_len(_mem.length);
}
@Override public boolean hasFloat() {return false;}
@Override public boolean hasNA() { return false; }
@Override public double [] getDoubles(double [] vals, int [] ids) {
int k = 0;
for (int i : ids) vals[k++] = _mem[i] & 0xFF;
return vals;
}
@Override public double [] getDoubles(double [] vals, int from, int to, double NA){
for(int i = from; i < to; ++i)
vals[i-from] = _mem[i]&0xFF;
return vals;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int from, int to) {
for(int i = from; i < to; i++) v.addValue(0xFF&_mem[i]);
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int[] ids) {
for(int i:ids) v.addValue(0xFF&_mem[i]);
return v;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C1SChunk.java
|
package water.fvec;
import water.util.PrettyPrint;
/**
* The scale/bias function, where data is stored in UNSIGNED bytes before scaling.
*/
public final class C1SChunk extends CSChunk {
C1SChunk( byte[] bs, long bias, int scale) {
super(bs,bias,scale,0);
if(scale < 0) { // check precision
double div = PrettyPrint.pow10(1, -scale);
for (int i = 0; i < _len; ++i) {
int x = 0xFF & _mem[_OFF + i];
if (x == C1Chunk._NA) continue;
if ((getD(x, C1Chunk._NA, Double.NaN)) != (x+bias)/div){
setDecimal();
break;
}
}
}
}
@Override protected final long at8_impl( int i ) {
int x = 0xFF&_mem[_OFF+i];
if( x==C1Chunk._NA )
throw new IllegalArgumentException("at8_abs but value is missing");
return get8(x);
}
@Override protected final double atd_impl( int i ) {
return getD(0xFF&_mem[_OFF+i],C1Chunk._NA);
}
@Override protected final boolean isNA_impl( int i ) { return (0xFF&_mem[i+_OFF]) == C1Chunk._NA; }
@Override boolean setNA_impl(int idx) {
_mem[idx+_OFF] = (byte)C1Chunk._NA; return true;
}
@Override
protected boolean set_impl(int i, double x) {
if(Double.isNaN(x)) return setNA_impl(i);
int y = getScaledValue(x, C1Chunk._NA);
byte b = (byte)y;
if(getD(0xFF&b,C1Chunk._NA,Double.NaN) != x)
return false;
_mem[_OFF+i] = b;
assert !isNA_impl(i);
return true;
}
/**
* Dense bulk interface, fetch values from the given range.
* @param vals output buffer to fill with decoded values
* @param from start row (inclusive)
* @param to end row (exclusive)
* @param NA value to substitute for missing elements
*/
@Override
public double [] getDoubles(double [] vals, int from, int to, double NA){
for(int i = from; i < to; ++i)
vals[i-from] = getD(0xFF&_mem[_OFF+i],C1Chunk._NA,NA);
return vals;
}
/**
* Dense bulk interface, fetch values at the given row ids.
* @param vals output buffer to fill with decoded values
* @param ids row indices to fetch
*/
@Override
public double [] getDoubles(double [] vals, int [] ids){
int j = 0;
for(int i:ids)
vals[j++] = getD(0xFF&_mem[_OFF+i],C1Chunk._NA);
return vals;
}
private <T extends ChunkVisitor> void processRow(T v, int i, long bias, int exp){
long x = 0xFF & _mem[_OFF + i];
if(x == C1Chunk._NA) v.addNAs(1);
else v.addValue(x + bias, exp);
}
@Override
protected <T extends ChunkVisitor> T processRows2(T v, int from, int to, long bias, int exp) {
for(int i = from; i < to; ++i)
processRow(v,i,bias,exp);
return v;
}
@Override
protected <T extends ChunkVisitor> T processRows2(T v, int from, int to) {
for(int i = from; i < to; ++i)
v.addValue(getD(0xFF&_mem[_OFF+i],C1Chunk._NA));
return v;
}
@Override
protected <T extends ChunkVisitor> T processRows2(T v, int [] ids, long bias, int exp) {
for(int i:ids)
processRow(v,i,bias,exp);
return v;
}
@Override
protected <T extends ChunkVisitor> T processRows2(T v, int [] ids) {
for(int i:ids)
v.addValue(getD(0xFF&_mem[_OFF+i],C1Chunk._NA));
return v;
}
}
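// Decode sketch, grounded in the constructor's precision check above: a stored
// unsigned byte x != 0xFF (the NA marker) decodes to (x + bias) * 10^scale;
// e.g. bias = -100, scale = -1 maps the stored x = 123 to (123 - 100)/10 = 2.3.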
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C2Chunk.java
|
package water.fvec;
import water.util.UnsafeUtils;
/**
* The empty-compression function, where data is in shorts.
*/
public class C2Chunk extends Chunk {
static protected final int _NA = Short.MIN_VALUE;
static protected final int _OFF=0;
C2Chunk( byte[] bs ) { _mem=bs; _start = -1; set_len(_mem.length>>1); }
@Override protected final long at8_impl( int i ) {
int res = UnsafeUtils.get2(_mem,(i<<1)+_OFF);
if( res == _NA ) throw new IllegalArgumentException("at8_abs but value is missing");
return res;
}
@Override protected final double atd_impl( int i ) {
int res = UnsafeUtils.get2(_mem,(i<<1)+_OFF);
return res == _NA?Double.NaN:res;
}
@Override protected final boolean isNA_impl( int i ) { return UnsafeUtils.get2(_mem,(i<<1)+_OFF) == _NA; }
@Override boolean set_impl(int idx, long l) {
if( !(Short.MIN_VALUE < l && l <= Short.MAX_VALUE) ) return false;
UnsafeUtils.set2(_mem,(idx<<1)+_OFF,(short)l);
return true;
}
@Override boolean set_impl(int idx, double d) {
if( Double.isNaN(d) ) return setNA_impl(idx);
long l = (long)d;
return l == d && set_impl(idx, l);
}
@Override boolean set_impl(int i, float f ) { return set_impl(i,(double)f); }
@Override boolean setNA_impl(int idx) { UnsafeUtils.set2(_mem,(idx<<1)+_OFF,(short)_NA); return true; }
private final void processRow(int r, ChunkVisitor v){
int i = UnsafeUtils.get2(_mem,(r<<1)+_OFF);
if(i == _NA) v.addNAs(1);
else v.addValue(i);
}
@Override public double [] getDoubles(double [] vals, int from, int to, double NA){
for(int i = from; i < to; i++) {
int x = UnsafeUtils.get2(_mem, 2*i);
vals[i-from] = (x == _NA)?NA:x;
}
return vals;
}
@Override public double [] getDoubles(double [] vals, int [] ids){
int k = 0;
for(int i:ids) {
int x = UnsafeUtils.get2(_mem, 2*i);
vals[k++] = (x == _NA)?Double.NaN:x;
}
return vals;
}
@Override
public int [] getIntegers(int [] vals, int from, int to, int NA){
for(int i = from; i < to; i++) {
int x = UnsafeUtils.get2(_mem, 2*i);
vals[i-from] = (x == _NA)?NA:x;
}
return vals;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int from, int to) {
for(int i = from; i < to; i++) processRow(i,v);
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int[] ids) {
for(int i:ids) processRow(i,v);
return v;
}
@Override public final void initFromBytes () {
_start = -1; _cidx = -1;
set_len(_mem.length>>1);
assert _mem.length == _len <<1;
}
@Override
public boolean hasFloat() {return false;}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C2SChunk.java
|
package water.fvec;
import water.util.PrettyPrint;
import water.util.UnsafeUtils;
/**
* The scale/bias function, where data is stored in SIGNED shorts before scaling.
*/
public class C2SChunk extends CSChunk {
C2SChunk( byte[] bs, long bias, int scale ) {
super(bs,bias,scale,1);
if(scale < 0) { // check precision
double div = PrettyPrint.pow10(1, -scale);
for (int i = 0; i < _len; ++i) {
int x = getMantissa(i);
if (x == C2Chunk._NA) continue;
if ((getD(x, C2Chunk._NA, Double.NaN)) != (x+bias)/div){
setDecimal();
break;
}
}
}
}
@Override protected final long at8_impl( int i ) {
int x = getMantissa(i);
if( x==C2Chunk._NA )
throw new IllegalArgumentException("at8_abs but value is missing");
return get8(x);
}
private int getMantissa(int i){return UnsafeUtils.get2(_mem,_OFF+2*i);}
private void setMantissa(int i, short s){
UnsafeUtils.set2(_mem,(i*2)+_OFF,s);
}
@Override protected final double atd_impl( int i ) {return getD(getMantissa(i),C2Chunk._NA);}
@Override protected final boolean isNA_impl( int i ) { return getMantissa(i) == C2Chunk._NA; }
@Override boolean set_impl(int i, double x) {
if(Double.isNaN(x)) return setNA_impl(i);
int y = getScaledValue(x, C2Chunk._NA);
short s = (short)y;
if(getD(s,C2Chunk._NA, Double.NaN) != x)
return false;
setMantissa(i,s);
assert !isNA_impl(i);
return true;
}
@Override boolean setNA_impl(int idx) {setMantissa(idx,(short)C2Chunk._NA); return true; }
@Override public double [] getDoubles(double [] vals, int from, int to, double NA){
for(int i = from; i < to; i++)
vals[i-from] = getD(getMantissa(i),C2Chunk._NA,NA);
return vals;
}
@Override public double [] getDoubles(double [] vals, int [] ids){
int k = 0;
for(int i:ids)
vals[k++] = getD(getMantissa(i),C2Chunk._NA);
return vals;
}
private <T extends ChunkVisitor> void processRow(T v, int i, long bias, int exp){
long x = getMantissa(i);
if(x == C2Chunk._NA) v.addNAs(1);
else v.addValue(x + bias, exp);
}
@Override
protected <T extends ChunkVisitor> T processRows2(T v, int from, int to, long bias, int exp) {
for(int i = from; i < to; ++i)
processRow(v,i,bias,exp);
return v;
}
@Override
protected <T extends ChunkVisitor> T processRows2(T v, int from, int to) {
for(int i = from; i < to; ++i)
v.addValue(getD(getMantissa(i),C2Chunk._NA));
return v;
}
@Override
protected <T extends ChunkVisitor> T processRows2(T v, int [] ids, long bias, int exp) {
for(int i:ids) processRow(v,i,bias,exp);
return v;
}
@Override
protected <T extends ChunkVisitor> T processRows2(T v, int [] ids) {
for(int i:ids)
v.addValue(getD(getMantissa(i),C2Chunk._NA));
return v;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C4Chunk.java
|
package water.fvec;
import water.util.UnsafeUtils;
/**
* The empty-compression function, where data is in 'int's.
*/
public class C4Chunk extends Chunk {
static protected final int _NA = Integer.MIN_VALUE;
C4Chunk( byte[] bs ) { _mem=bs; _start = -1; set_len(_mem.length>>2); }
@Override protected final long at8_impl( int i ) {
long res = UnsafeUtils.get4(_mem,i<<2);
if( res == _NA ) throw new IllegalArgumentException("at8_abs but value is missing");
return res;
}
@Override protected final double atd_impl( int i ) {
long res = UnsafeUtils.get4(_mem, i << 2);
return res == _NA?Double.NaN:res;
}
@Override protected final boolean isNA_impl( int i ) { return UnsafeUtils.get4(_mem,i<<2) == _NA; }
@Override boolean set_impl(int idx, long l) {
if( !(Integer.MIN_VALUE < l && l <= Integer.MAX_VALUE) ) return false;
UnsafeUtils.set4(_mem,idx<<2,(int)l);
return true;
}
@Override boolean set_impl(int i, double d) { return false; }
@Override boolean set_impl(int i, float f ) { return false; }
@Override boolean setNA_impl(int idx) { UnsafeUtils.set4(_mem,(idx<<2),(int)_NA); return true; }
private final void processRow(int r, ChunkVisitor v){
int i = UnsafeUtils.get4(_mem,(r<<2));
if(i == _NA) v.addNAs(1);
else v.addValue(i);
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int from, int to) {
for(int i = from; i < to; i++) processRow(i,v);
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int[] ids) {
for(int i:ids) processRow(i,v);
return v;
}
@Override public final void initFromBytes () {
_start = -1; _cidx = -1;
set_len(_mem.length>>2);
assert _mem.length == _len <<2;
}
@Override public boolean hasFloat() {return false;}
@Override public double [] getDoubles(double [] vals, int from, int to, double NA){
for(int i = from; i < to; i++) {
int x = UnsafeUtils.get4(_mem, 4*i);
vals[i-from] = (x == _NA)?NA:x;
}
return vals;
}
@Override public double [] getDoubles(double [] vals, int [] ids){
int k = 0;
for(int i:ids) {
int x = UnsafeUtils.get4(_mem, 4*i);
vals[k++] = (x == _NA)?Double.NaN:x;
}
return vals;
}
@Override public int [] getIntegers(int [] vals, int from, int to, int NA){
for(int i = from; i < to; i++) {
int x = UnsafeUtils.get4(_mem, 4*i);
vals[i-from] = (x == _NA)?NA:x;
}
return vals;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C4FChunk.java
|
package water.fvec;
import water.util.UnsafeUtils;
/**
* The empty-compression function, where data is in 'float's.
*/
public class C4FChunk extends Chunk {
public C4FChunk( byte[] bs ) { _mem=bs; _start = -1; set_len(_mem.length>>2); }
@Override protected final long at8_impl( int i ) {
float res = UnsafeUtils.get4f(_mem, i << 2);
if( Float.isNaN(res) ) throw new IllegalArgumentException("at8_abs but value is missing");
return (long)res;
}
@Override protected final double atd_impl( int i ) {
float res = UnsafeUtils.get4f(_mem,i<<2);
return Float.isNaN(res)?Double.NaN:res;
}
@Override protected final boolean isNA_impl( int i ) { return Float.isNaN(UnsafeUtils.get4f(_mem,i<<2)); }
@Override boolean set_impl(int idx, long l) { return false; }
@Override boolean set_impl(int i, double d) { return false; }
@Override boolean set_impl(int i, float f ) {
UnsafeUtils.set4f(_mem,i<<2,f);
return true;
}
@Override boolean setNA_impl(int idx) { UnsafeUtils.set4f(_mem,(idx<<2),Float.NaN); return true; }
@Override public NewChunk extractRows(NewChunk nc, int from, int to){
for(int i = from; i < to; i++)
nc.addNum(UnsafeUtils.get4f(_mem,4*i));
return nc;
}
@Override public NewChunk extractRows(NewChunk nc, int... rows){
for(int i:rows)
nc.addNum(UnsafeUtils.get4f(_mem,4*i));
return nc;
}
private final void processRow(int r, ChunkVisitor v){
float f = UnsafeUtils.get4f(_mem,(r<<2));
if(Float.isNaN(f)) v.addNAs(1);
else v.addValue((double)f);
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int from, int to) {
for(int i = from; i < to; i++) processRow(i,v);
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int[] ids) {
for(int i:ids) processRow(i,v);
return v;
}
// 3.3333333e33
// public int pformat_len0() { return 14; }
// public String pformat0() { return "% 13.7e"; }
@Override public final void initFromBytes () {
_start = -1; _cidx = -1;
set_len(_mem.length>>2);
assert _mem.length == _len <<2;
}
@Override public boolean hasFloat() {return true;}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C4SChunk.java
|
package water.fvec;
import water.util.PrettyPrint;
import water.util.UnsafeUtils;
/**
* The scale/bias function, where data is stored in SIGNED ints before scaling.
*/
public class C4SChunk extends CSChunk {
C4SChunk( byte[] bs, long bias, int scale ) {
super(bs,bias,scale,2);
if(scale < 0) { // check precision
double div = PrettyPrint.pow10(1, -scale);
for (int i = 0; i < _len; ++i) {
int x = getMantissa(i);
if (x == C4Chunk._NA) continue;
if ((getD(x, C4Chunk._NA, Double.NaN)) != (x+bias)/div){ // NA sentinel is C4Chunk._NA for 4-byte mantissas
setDecimal();
break;
}
}
}
}
private int getMantissa(int i){return UnsafeUtils.get4(_mem,_OFF+4*i);}
private void setMantissa(int i, int j){
UnsafeUtils.set4(_mem,(i*4)+_OFF,j);
}
@Override protected final double atd_impl( int i ) {return getD(getMantissa(i),C4Chunk._NA);}
@Override protected final boolean isNA_impl( int i ) { return getMantissa(i) == C4Chunk._NA; }
@Override boolean set_impl(int i, double x) {
if(Double.isNaN(x)) return setNA_impl(i);
int y = getScaledValue(x, C4Chunk._NA);
if(getD(y,C4Chunk._NA, Double.NaN) != x)
return false;
setMantissa(i,y);
assert !isNA_impl(i);
return true;
}
@Override boolean setNA_impl(int idx) {setMantissa(idx,C4Chunk._NA); return true; }
@Override public double [] getDoubles(double [] vals, int from, int to, double NA){
for(int i = from; i < to; i++)
vals[i-from] = getD(getMantissa(i),C4Chunk._NA,NA);
return vals;
}
@Override public double [] getDoubles(double [] vals, int [] ids){
int k = 0;
for(int i:ids)
vals[k++] = getD(getMantissa(i),C4Chunk._NA);
return vals;
}
private <T extends ChunkVisitor> void processRow(T v, int i, long bias, int exp){
long x = getMantissa(i);
if(x == C4Chunk._NA) v.addNAs(1);
else v.addValue(x + bias, exp);
}
@Override
protected <T extends ChunkVisitor> T processRows2(T v, int from, int to, long bias, int exp) {
for(int i = from; i < to; ++i)
processRow(v,i,bias,exp);
return v;
}
@Override
protected <T extends ChunkVisitor> T processRows2(T v, int from, int to) {
for(int i = from; i < to; ++i)
v.addValue(getD(getMantissa(i),C4Chunk._NA));
return v;
}
@Override
protected <T extends ChunkVisitor> T processRows2(T v, int [] ids, long bias, int exp) {
for(int i:ids)
processRow(v,i,bias,exp);
return v;
}
@Override
protected <T extends ChunkVisitor> T processRows2(T v, int [] ids) {
for(int i:ids)
v.addValue(getD(getMantissa(i),C4Chunk._NA));
return v;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C4VolatileChunk.java
|
package water.fvec;
import water.*;
import water.util.UnsafeUtils;
/**
* The empty-compression function, where data is in 'int's.
* Can only be used locally (intentionally does not serialize).
* Intended for temporary data which gets modified frequently.
* Exposes data directly as int[]
*/
public class C4VolatileChunk extends Chunk {
static protected final long _NA = Integer.MIN_VALUE;
transient private int [] _is;
C4VolatileChunk(int[] is ) { _is = is; _mem = new byte[0]; _start = -1; _len = is.length; }
public boolean isVolatile() {return true;}
public int[] getValues(){return _is;}
@Override protected final long at8_impl( int i ) {
long res = _is[i];
if( res == _NA ) throw new IllegalArgumentException("at8_abs but value is missing");
return res;
}
@Override protected final double atd_impl( int i ) {
long res = _is[i];
return res == _NA?Double.NaN:res;
}
@Override protected final boolean isNA_impl( int i ) { return _is[i] == _NA; }
@Override boolean set_impl(int idx, long l) {
if( !(Integer.MIN_VALUE < l && l <= Integer.MAX_VALUE) ) return false;
_is[idx] = (int)l;
return true;
}
@Override boolean set_impl(int i, double d) {return false; }
@Override boolean set_impl(int i, float f ) {return false; }
@Override boolean setNA_impl(int idx) { _is[idx] = (int)_NA; return true; }
private final void processRow(int r, ChunkVisitor v){
int i = _is[r]; // values live in the transient int[], not in _mem (which is empty for volatile chunks)
if(i == _NA) v.addNAs(1);
else v.addValue(i);
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int from, int to) {
for(int i = from; i < to; i++) processRow(i,v);
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int[] ids) {
for(int i:ids) processRow(i,v);
return v;
}
@Override public final void initFromBytes () {
_len = _mem.length >> 2;
_is = MemoryManager.malloc4(_len);
for(int i = 0; i < _is.length; ++i)
_is[i] = UnsafeUtils.get4(_mem,4*i);
_mem = null;
}
@Override
public Futures close( int cidx, Futures fs ) {
if(chk2() != null) return chk2().close(cidx,fs);
Value v = new Value(_vec.chunkKey(cidx),this,_len*4,Value.ICE);
DKV.put(v._key,v,fs);
return fs;
}
@Override public byte [] asBytes() {
byte [] res = MemoryManager.malloc1(_len*4);
for(int i = 0; i < _len; ++i)
UnsafeUtils.set4(res,4*i,_is[i]);
return res;
}
@Override public boolean hasFloat() {return false;}
// public Futures close(int cidx, Futures fs ) { // always assume got modified
// Value v = new Value(_vec.chunkKey(_cidx), this,this._len*4,Value.ICE);
// DKV.put(v._key,v,fs); // Write updated chunk back into K/V
// return fs;
// }
}
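// Usage sketch: mutate the exposed backing array in place, then publish the
// chunk back to the local store (cidx and fs are assumed to come from the
// surrounding task):
//   int[] vals = chunk.getValues();
//   for (int r = 0; r < vals.length; r++) vals[r]++;
//   chunk.close(cidx, fs);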
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C8Chunk.java
|
package water.fvec;
import water.util.UnsafeUtils;
/**
* The empty-compression function, where data is in 'long's.
*/
public class C8Chunk extends Chunk {
protected static final long _NA = Long.MIN_VALUE;
C8Chunk( byte[] bs ) { _mem=bs; _start = -1; set_len(_mem.length>>3); }
@Override protected final long at8_impl( int i ) {
long res = UnsafeUtils.get8(_mem,i<<3);
if( res == _NA ) throw new IllegalArgumentException("at8_abs but value is missing");
return res;
}
@Override protected final double atd_impl( int i ) {
long res = UnsafeUtils.get8(_mem,i<<3);
return res == _NA?Double.NaN:res;
}
@Override protected final boolean isNA_impl( int i ) { return UnsafeUtils.get8(_mem, i << 3)==_NA; }
@Override boolean set_impl(int idx, long l) { return false; }
@Override boolean set_impl(int i, double d) { return false; }
@Override boolean set_impl(int i, float f ) { return false; }
@Override boolean setNA_impl(int idx) { UnsafeUtils.set8(_mem,(idx<<3),_NA); return true; }
@Override public final void initFromBytes () {
_start = -1; _cidx = -1;
set_len(_mem.length>>3);
assert _mem.length == _len <<3;
}
@Override
public boolean hasFloat() {return false;}
private final void processRow(int r, ChunkVisitor v){
long l = UnsafeUtils.get8(_mem,(r<<3));
if(l == _NA) v.addNAs(1);
else v.addValue(l);
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int from, int to) {
for(int i = from; i < to; i++) processRow(i,v);
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int[] ids) {
for(int i:ids) processRow(i,v);
return v;
}
@Override public double [] getDoubles(double [] vals, int from, int to, double NA){
for(int i = from; i < to; i++) {
long x = UnsafeUtils.get8(_mem, 8*i);
vals[i-from] = (x == _NA)?NA:x;
}
return vals;
}
@Override public double [] getDoubles(double [] vals, int [] ids){
int k = 0;
for(int i:ids) {
long x = UnsafeUtils.get8(_mem, 8*i);
vals[k++] = (x == _NA)?Double.NaN:x;
}
return vals;
}
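  // A minimal construction sketch (illustrative only, not part of the original
  // source): the layout is just 8 bytes per row, with Long.MIN_VALUE (_NA)
  // marking a missing value. Hand-building a 3-row chunk holding {7, NA, -1}:
  static C8Chunk exampleChunk() {
    byte[] bs = new byte[3 << 3];
    UnsafeUtils.set8(bs, 0, 7L);
    UnsafeUtils.set8(bs, 8, _NA);  // row 1 is missing: atd(1) is NaN, at8(1) throws
    UnsafeUtils.set8(bs, 16, -1L);
    return new C8Chunk(bs);
  }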
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C8DChunk.java
|
package water.fvec;
import water.util.UnsafeUtils;
/**
* The empty-compression function, where data is in 'double's.
*/
public class C8DChunk extends Chunk {
C8DChunk( byte[] bs ) { _mem=bs; _start = -1; set_len(_mem.length>>3); }
@Override protected final long at8_impl( int i ) {
double res = UnsafeUtils.get8d(_mem, i << 3);
if( Double.isNaN(res) ) throw new IllegalArgumentException("at8_abs but value is missing");
return (long)res;
}
@Override protected final double atd_impl( int i ) { return UnsafeUtils.get8d(_mem,i<<3) ; }
@Override protected final boolean isNA_impl( int i ) { return Double.isNaN(UnsafeUtils.get8d(_mem,i<<3)); }
@Override boolean set_impl(int idx, long l) { return false; }
  /**
   * Fast explicit set for double, writing straight into the backing bytes.
   * @param i chunk-relative row index
   * @param d value to store (NaN marks the row as missing)
   */
public void set8D(int i, double d) {UnsafeUtils.set8d(_mem,i<<3,d);}
public double get8D(int i) {return UnsafeUtils.get8d(_mem,i<<3);}
@Override boolean set_impl(int i, double d) {
UnsafeUtils.set8d(_mem,i<<3,d);
return true;
}
@Override boolean set_impl(int i, float f ) {
UnsafeUtils.set8d(_mem,i<<3,f);
return true;
}
@Override boolean setNA_impl(int idx) { UnsafeUtils.set8d(_mem,(idx<<3),Double.NaN); return true; }
// 3.3333333e33
// public int pformat_len0() { return 22; }
// public String pformat0() { return "% 21.15e"; }
@Override public final void initFromBytes () {
_start = -1; _cidx = -1;
set_len(_mem.length>>3);
assert _mem.length == _len <<3;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int from, int to) {
for(int i = from; i < to; i++) v.addValue(UnsafeUtils.get8d(_mem,8*i));
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int[] ids) {
for(int i:ids) v.addValue(UnsafeUtils.get8d(_mem,8*i));
return v;
}
@Override public double [] getDoubles(double [] vals, int from, int to, double NA){
for(int i = from; i < to; i++) {
double d = UnsafeUtils.get8d(_mem, 8 * i);
vals[i - from] = Double.isNaN(d)?NA:d;
}
return vals;
}
@Override public double [] getDoubles(double [] vals, int [] ids){
int k = 0;
for(int i:ids)
vals[k++] = UnsafeUtils.get8d(_mem,8*i);
return vals;
}
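  // A minimal construction sketch (illustrative only, not part of the original
  // source): NA is plain NaN here, so a chunk can be assembled directly from
  // set8d() calls, 8 bytes per row:
  static C8DChunk of(double... ds) {
    byte[] bs = new byte[ds.length << 3];
    for (int i = 0; i < ds.length; i++)
      UnsafeUtils.set8d(bs, i << 3, ds[i]); // Double.NaN encodes a missing row
    return new C8DChunk(bs);
  }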
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/C8DVolatileChunk.java
|
package water.fvec;
import water.*;
import water.util.UnsafeUtils;
/**
* The empty-compression function, where data is in 'double's.
* Can only be used locally (intentionally does not serialize).
* Intended for temporary data which gets modified frequently.
* Exposes data directly as double[]
*/
public final class C8DVolatileChunk extends Chunk {
private transient double [] _ds;
C8DVolatileChunk(double[] ds ) {_start = -1; _len = ds.length; _ds = ds; }
public double [] getValues(){return _ds;}
@Override protected final long at8_impl( int i ) {
double res = _ds[i];
if( Double.isNaN(res) ) throw new IllegalArgumentException("at8_abs but value is missing");
return (long)res;
}
@Override protected final double atd_impl( int i ) {
return _ds[i] ;
}
@Override protected final boolean isNA_impl( int i ) { return Double.isNaN(_ds[i]); }
@Override boolean set_impl(int idx, long l) {
double d = l;
if(d != l) return false;
_ds[idx] = d;
return true;
}
@Override boolean set_impl(int i, double d) {
_ds[i] = d;
return true;
}
@Override boolean set_impl(int i, float f ) {
_ds[i] = f;
return true;
}
public boolean isVolatile() {return true;}
  @Override boolean setNA_impl(int idx) { _ds[idx] = Double.NaN; return true; } // data lives in _ds; _mem is null outside (de)serialization
@Override public final void initFromBytes () {
_len = _mem.length >> 3;
_ds = MemoryManager.malloc8d(_len);
for(int i = 0; i < _ds.length; ++i)
_ds[i] = UnsafeUtils.get8d(_mem,8*i);
_mem = null;
}
@Override public byte [] asBytes() {
byte [] res = MemoryManager.malloc1(_len*8);
for(int i = 0; i < _len; ++i)
UnsafeUtils.set8d(res,8*i,_ds[i]);
return res;
}
@Override
public Futures close( int cidx, Futures fs ) {
if(chk2() != null) return chk2().close(cidx,fs);
Value v = new Value(_vec.chunkKey(cidx),this,_len*8,Value.ICE);
DKV.put(v._key,v,fs);
return fs;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int from, int to) {
for(int i = from; i < to; i++) v.addValue(_ds[i]);
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int[] ids) {
for(int i:ids) v.addValue(_ds[i]);
return v;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/CBSChunk.java
|
package water.fvec;
import water.MemoryManager;
/** A simple chunk for boolean values; in fact a simple bit vector.
 * Each value is represented by 1 or 2 bits (2 bits per value when NAs must be representable).
*/
public class CBSChunk extends Chunk {
static protected final byte _NA = 0x02; // Internal representation of NA
static protected final int _OFF = 2;
private transient byte _bpv;
public byte bpv() { return _bpv; } //bits per value
  private transient byte _gap; // number of trailing unused bits in the last byte (== (8 - _len*_bpv % 8) & 7; we allocate whole bytes, but the bit length is generally not a multiple of 8)
public byte gap() { return _gap; } //number of trailing unused bits in the end
public CBSChunk(boolean [] vals) {
_len = vals.length;
_bpv = 1;
    _gap = (byte)((8 - (_len & 7)) & 7); // trailing unused bits in the last payload byte
    int memlen = _OFF + (_len >> 3) + (_gap == 0 ? 0 : 1); // 2-byte header plus payload
    _mem = MemoryManager.malloc1(memlen);
    _mem[0] = _gap;
_mem[1] = _bpv;
for(int i = 0; i < vals.length; ++i)
if(vals[i])write(i,(byte)1);
}
public CBSChunk(byte[] bs) { _mem = bs; initFromBytes(); }
public CBSChunk(int len, int bpv) {
_gap = (byte) ((8 - (len*bpv & 7)) & 7);
int clen = CBSChunk._OFF + (len >> (3 - bpv + 1)) + (_gap == 0?0:1);
byte [] bs = MemoryManager.malloc1(clen);
bs[0] = _gap;
bs[1] = _bpv = (byte)bpv;
assert ((clen - _OFF) - (_gap == 0?0:1) == (len >> (3-bpv+1)));
_mem = bs; _start = -1;
_len = len;
}
@Override protected long at8_impl(int idx) {
byte b = read(idx);
if( b == _NA ) throw new IllegalArgumentException("at8_abs but value is missing");
return b;
}
@Override protected double atd_impl(int idx) {
byte b = read(idx);
return b == _NA ? Double.NaN : b;
}
@Override protected final boolean isNA_impl( int i ) { return read(i)==_NA; }
private void set_byte(int idx, byte val){
int bix = _OFF + ((idx*_bpv)>>3); // byte index
int off = _bpv*idx & 7; // index within the byte
    int mask = ~((1 | _bpv) << off); // (1 | _bpv) is 1 or 3, for 1 or 2 bits per value
    _mem[bix] = (byte)((_mem[bix] & mask) | (val << off)); // clear the slot, then write val
}
void write(int idx, byte val){
int bix = _OFF + ((idx*_bpv)>>3); // byte index
int off = _bpv*idx & 7; // index within the byte
write(bix, off,val);
}
protected byte read(int idx) {
int bix = _OFF + ((idx*_bpv)>>3); // byte index
int off = _bpv*idx & 7; // index within the byte
int mask = (1 | _bpv); // 1 or 3 for 1bit per value or 2 bits per value
return read(_mem[bix], off,mask);
}
@Override boolean set_impl(int idx, long l) {
if (l == 1 || l == 0) {
set_byte(idx, (byte)l);
return true;
}
return false;
}
@Override boolean set_impl(int idx, double d) {
if(Double.isNaN(d)) return setNA_impl(idx);
if(d == 0 || d == 1) {
set_byte(idx,(byte)d);
return true;
}
return false;
}
@Override boolean set_impl(int idx, float f ) {
if(Float.isNaN(f))
return setNA_impl(idx);
if(f == 0 || f == 1) {
set_byte(idx,(byte)f);
return true;
}
return false;
}
@Override boolean setNA_impl(int idx) {
if(_bpv == 2) {
set_byte(idx, _NA);
return true;
}
return false;
}
private void processRow(int r, ChunkVisitor v){
int i = read(r);
if(i == _NA) v.addNAs(1);
else v.addValue(i);
}
@Override public ChunkVisitor processRows(ChunkVisitor v, int from, int to){
for(int i = from; i < to; ++i)
processRow(i,v);
return v;
}
@Override public ChunkVisitor processRows(ChunkVisitor v, int... rows){
for(int i:rows)
processRow(i,v);
return v;
}
// /** Writes 1bit from value into b at given offset and return b */
// public static byte write1b(byte b, byte val, int off) {
// val = (byte) ((val & 0x1) << (7-off));
// return (byte) (b | val);
// }
// /** Writes 2bits from value into b at given offset and return b */
// public static byte write2b(byte b, byte val, int off) {
// val = (byte) ((val & 0x3) << (6-off)); // 0000 00xx << (6-off)
// return (byte) (b | val);
// }
private byte read(int b, int off, int mask){
return (byte)((b >> off) & mask);
}
private byte write(int bix, int off, int val){
return _mem[bix] |= (val << off);
}
// /** Reads 1bit from given b in given offset. */
// public static byte read1b(byte b, int off) { return (byte) ((b >> (7-off)) & 0x1); }
// /** Reads 2bit from given b in given offset. */
// public static byte read2b(byte b, int off) { return (byte) ((b >> (6-off)) & 0x3); }
  /** Returns the compressed length (in bytes) of {@code values} elements when each value is represented by {@code bpv} bits. */
public static int clen(int values, int bpv) {
int len = (values*bpv) >> 3;
return values*bpv % 8 == 0 ? len : len + 1;
}
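  // A minimal usage sketch (illustrative only, not part of the original
  // source): with bpv==2 the payload of {1, NA, 0} packs into clen(3,2)==1
  // byte after the 2-byte header:
  static CBSChunk exampleChunk() {
    CBSChunk c = new CBSChunk(3, 2); // 2 bits per value, so NA is representable
    c.write(0, (byte)1);
    c.write(1, _NA);                 // row 2 stays 0; c.atd(1) is NaN, c.at8(0) == 1
    return c;
  }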
@Override double min() { return 0; }
@Override double max() { return 1; }
@Override protected final void initFromBytes () {
_start = -1; _cidx = -1;
_gap = _mem[0];
_bpv = _mem[1];
set_len(((_mem.length - _OFF)*8 - _gap) / _bpv);
}
@Override
public boolean hasFloat() {return false;}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/CSChunk.java
|
package water.fvec;
import water.util.PrettyPrint;
import water.util.UnsafeUtils;
/**
* Created by tomas on 8/14/17.
*
* Chunk storing 1/2/4 byte values with an offset and/or scale.
 * Used for fixed-point decimal numbers or scaled/offset integers.
*
 * value(i) = (stored_bytes(i) + bias) * scale   if scale > 1, or if scale == 1/x and
 *            (stored_bytes(i) + bias)*(1/x) == (stored_bytes(i) + bias)/x for all i;
 * value(i) = (stored_bytes(i) + bias) / scale   otherwise (the decimal case).
*
*/
public abstract class CSChunk extends Chunk {
static protected final int _OFF=8+4+4;
private transient double _scale;
private transient long _bias;
private transient boolean _isDecimal;
CSChunk( byte[] bs, long bias, int scale, int szLog) {
_mem = bs;
_start = -1;
set_len((_mem.length - _OFF) >> szLog);
_bias = bias;
UnsafeUtils.set8(_mem, 0, bias);
UnsafeUtils.set4(_mem, 8, scale);
_scale = PrettyPrint.pow10(1,scale);
UnsafeUtils.set4(_mem,12,szLog);
}
protected void setDecimal(){
_isDecimal = true;
_scale = PrettyPrint.pow10(1,-UnsafeUtils.get4(_mem,8));
UnsafeUtils.set4(_mem,12,-UnsafeUtils.get4(_mem,12)-1);
}
private int getSzLog(){
int x = UnsafeUtils.get4(_mem,12);
return x < 0?-x-1:x;
}
public final double scale() { return _isDecimal?1.0/_scale:_scale; }
@Override public final byte precision() {
return (byte)Math.max(UnsafeUtils.get4(_mem,8),0);
}
protected final double getD(int x, int NA){return getD(x,NA,Double.NaN);}
protected final double getD(int x, int NA, double naImpute){
return x == NA?naImpute:_isDecimal?(_bias + x)/_scale:(_bias + x)*_scale;
}
protected final long get8(int x) { return (_bias + x)*(long)(_scale); }
@Override public final boolean hasFloat(){ return _isDecimal || _scale < 1; }
@Override public final void initFromBytes () {
_start = -1; _cidx = -1;
set_len((_mem.length-_OFF) >> getSzLog());
_bias = UnsafeUtils.get8 (_mem,0);
int x = UnsafeUtils.get4(_mem,8);
int szLog = UnsafeUtils.get4(_mem,12);
_isDecimal = szLog < 0;
_scale = PrettyPrint.pow10(1,_isDecimal?-x:x);
}
@Override protected long at8_impl( int i ) {
double res = atd_impl(i); // note: |mantissa| <= 4B => double is ok
if(Double.isNaN(res)) throw new IllegalArgumentException("at8_abs but value is missing");
return (long)res;
}
@Override public final boolean set_impl(int idx, long l) {
double d = (double)l;
if(d != l) return false;
return set_impl(idx,d);
}
@Override public final boolean set_impl(int idx, float f) {
return set_impl(idx,(double)f);
}
protected final int getScaledValue(double d, int NA){
assert !Double.isNaN(d):"NaN should be handled separately";
return (int)((_isDecimal?d*_scale:(d/_scale))-_bias);
}
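  // A minimal decode sketch (illustrative only, not part of the original
  // source): for a decimal chunk the stored exponent is negative and the value
  // is (stored + bias)/10^-exp, e.g. decodeDecimal(123, 0, -2) == 1.23:
  static double decodeDecimal(long stored, long bias, int exp10) {
    double scale = PrettyPrint.pow10(1, -exp10); // exp10 == -2  ->  scale == 100.0
    return (bias + stored) / scale;
  }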
@Override
public final <T extends ChunkVisitor> T processRows(T v, int from, int to) {
if(v.expandedVals()){
processRows2(v,from,to,_bias,UnsafeUtils.get4(_mem,8));
} else
processRows2(v,from,to);
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int[] ids) {
if(v.expandedVals()){
processRows2(v,ids,_bias,UnsafeUtils.get4(_mem,8));
} else
processRows2(v,ids);
return v;
}
protected abstract <T extends ChunkVisitor> T processRows2(T v, int from, int to, long bias, int exp) ;
protected abstract <T extends ChunkVisitor> T processRows2(T v, int from, int to);
protected abstract <T extends ChunkVisitor> T processRows2(T v, int [] ids, long bias, int exp) ;
protected abstract <T extends ChunkVisitor> T processRows2(T v, int [] ids);
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/CStrChunk.java
|
package water.fvec;
import water.*;
import water.util.SetOfBytes;
import water.util.StringUtils;
import water.util.UnsafeUtils;
import water.parser.BufferedString;
import java.util.Arrays;
/**
* The empty-compression function, where data is in 'string's.
*/
public class CStrChunk extends Chunk {
static final int NA = -1;
static protected final int _OFF=4+1;
private int _valstart;
public boolean _isAllASCII = false;
public CStrChunk() {}
/**
* Empty-compression function, where data is a constant string
* @param s Constant string
* @param len Chunk length
*/
public CStrChunk(String s, int len){
byte[] sBytes = StringUtils.bytesOf(s);
sBytes = Arrays.copyOf(sBytes,sBytes.length+1);
sBytes[sBytes.length-1] = 0;
    init(sBytes.length, sBytes, len, len, null, null); // pass the NUL-terminated copy, not the raw bytes
}
/**
* Empty-compression function, where data is in 'string's.
* @param sslen Next offset into ss for placing next String
* @param ss Bytes of appended strings, including trailing 0
* @param sparseLen Length of sparse chunk (number of extracted (non-zero) elements)
* @param idxLen Length of chunk
* @param id Indices (row numbers) of stored values, used for sparse
* @param is Index of strings - holds offsets into ss[]. is[i] == -1 means NA/sparse
*/
public CStrChunk(int sslen, byte[] ss, int sparseLen, int idxLen, int[] id, int[] is) {
init(sslen,ss,sparseLen,idxLen,id,is);
}
private void init (int sslen, byte[] ss, int sparseLen, int idxLen, int[] id, int[] is) {
_start = -1;
_valstart = idx(idxLen);
_len = idxLen;
_mem = MemoryManager.malloc1(_valstart + sslen, false);
UnsafeUtils.set4(_mem, 0, _valstart); // location of start of strings
Arrays.fill(_mem,_OFF,_valstart,(byte)-1); // Indicate All Is NA's
for( int i = 0; i < sparseLen; ++i ) // Copy the sparse indices
UnsafeUtils.set4(_mem, idx(id==null ? i : id[i]), is==null ? 0 : is[i]); //Need to check if id and is are null since both are not always needed for mem allocation
UnsafeUtils.copyMemory(ss,0,_mem,_valstart,sslen);
_isAllASCII = true;
for(int i = _valstart; i < _mem.length; ++i) {
byte c = _mem[i];
if ((c & 0x80) == 128) { //value beyond std ASCII
_isAllASCII = false;
break;
}
}
UnsafeUtils.set1(_mem, 4, (byte) (_isAllASCII ? 1 : 0)); // isAllASCII flag
}
private int idx(int i) { return _OFF+(i<<2); }
@Override public boolean setNA_impl(int idx) { return false; }
@Override public boolean set_impl(int idx, float f) { if (Float.isNaN(f)) return false; else throw new IllegalArgumentException("Operation not allowed on string vector.");}
@Override public boolean set_impl(int idx, double d) { if (Double.isNaN(d)) return false; else throw new IllegalArgumentException("Operation not allowed on string vector.");}
@Override public boolean set_impl(int idx, long l) { throw new IllegalArgumentException("Operation not allowed on string vector.");}
@Override public boolean set_impl(int idx, String str) { return false; }
@Override public boolean isNA_impl(int idx) {
int off = intAt(idx);
return off == NA;
}
public int intAt(int i) { return UnsafeUtils.get4(_mem, idx(i)); }
public byte byteAt(int i) { return _mem[_valstart+i]; }
public int lengthAtOffset(int off) {
int len = 0;
while (byteAt(off + len) != 0) len++;
return len;
}
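  // A minimal read sketch (illustrative only, not part of the original
  // source): the layout built by init() is [4B _valstart][1B ASCII flag]
  // [4B offset per row][NUL-terminated string bytes], so reading row i means
  // fetching its offset and scanning to the NUL:
  String exampleRead(int i) {
    int off = intAt(i);            // NA (-1) or offset into the string bytes
    if (off == NA) return null;
    return new String(_mem, _valstart + off, lengthAtOffset(off));
  }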
@Override public long at8_impl(int idx) { throw new IllegalArgumentException("Operation not allowed on string vector.");}
@Override public double atd_impl(int idx) { throw new IllegalArgumentException("Operation not allowed on string vector.");}
@Override public BufferedString atStr_impl(BufferedString bStr, int idx) {
int off = intAt(idx);
if( off == NA ) return null;
int len = lengthAtOffset(off);
assert len >= 0 : getClass().getSimpleName() + ".atStr_impl: len=" + len + ", idx=" + idx + ", off=" + off;
return bStr.set(_mem,_valstart+off,len);
}
@Override protected final void initFromBytes () {
_start = -1; _cidx = -1;
_valstart = UnsafeUtils.get4(_mem, 0);
byte b = UnsafeUtils.get1(_mem,4);
_isAllASCII = b != 0;
set_len((_valstart-_OFF)>>2);
}
@Override public ChunkVisitor processRows(ChunkVisitor nc, int from, int to){
BufferedString bs = new BufferedString();
for(int i = from; i < to; i++)
nc.addValue(atStr(bs,i));
return nc;
}
@Override public ChunkVisitor processRows(ChunkVisitor nc, int... rows){
BufferedString bs = new BufferedString();
for(int i:rows)
nc.addValue(atStr(bs,i));
return nc;
}
/**
* Optimized toLower() method to operate across the entire CStrChunk buffer in one pass.
* This method only changes the values of ASCII uppercase letters in the text.
*
* NewChunk is the same size as the original.
*
* @param nc NewChunk to be filled with the toLower version of ASCII strings in this chunk
* @return Filled NewChunk
*/
public NewChunk asciiToLower(NewChunk nc) {
// copy existing data
nc = this.extractRows(nc, 0,_len);
//update offsets and byte array
for(int i= 0; i < nc._sslen; i++) {
if (nc._ss[i] > 0x40 && nc._ss[i] < 0x5B) // check for capital letter
nc._ss[i] += 0x20; // lower it
}
return nc;
}
/**
* Optimized toUpper() method to operate across the entire CStrChunk buffer in one pass.
* This method only changes the values of ASCII lowercase letters in the text.
*
* NewChunk is the same size as the original.
*
* @param nc NewChunk to be filled with the toUpper version of ASCII strings in this chunk
* @return Filled NewChunk
*/
public NewChunk asciiToUpper(NewChunk nc) {
// copy existing data
nc = this.extractRows(nc, 0,_len);
//update offsets and byte array
for(int i= 0; i < nc._sslen; i++) {
      if (nc._ss[i] > 0x60 && nc._ss[i] < 0x7B) // check for lowercase letter
nc._ss[i] -= 0x20; // upper it
}
return nc;
}
/**
* Optimized trim() method to operate across the entire CStrChunk buffer in one pass.
* This mimics Java String.trim() by only considering characters of value
* <code>'\u0020'</code> or less as whitespace to be trimmed. This means that like
* Java's String.trim() it ignores 16 of the 17 characters regarded as a space in UTF.
*
* NewChunk is the same size as the original, despite trimming.
*
* @param nc NewChunk to be filled with trimmed version of strings in this chunk
* @return Filled NewChunk
*/
public NewChunk asciiTrim(NewChunk nc) {
// copy existing data
nc = this.extractRows(nc, 0,_len);
//update offsets and byte array
for(int i=0; i < _len; i++) {
int j = 0;
int off = UnsafeUtils.get4(_mem,idx(i));
if (off != NA) {
//UTF chars will appear as negative values. In Java spec, space is any char 0x20 and lower
while( _mem[_valstart+off+j] > 0 && _mem[_valstart+off+j] < 0x21) j++;
if (j > 0) nc.set_is(i,off + j);
while( _mem[_valstart+off+j] != 0 ) j++; //Find end
j--;
while( _mem[_valstart+off+j] > 0 && _mem[_valstart+off+j] < 0x21) { //March back to find first non-space
nc._ss[off+j] = 0; //Set new end
j--;
}
}
}
return nc;
}
/**
* Optimized substring() method for a buffer of only ASCII characters.
* The presence of UTF-8 multi-byte characters would give incorrect results
* for the string length, which is required here.
*
* @param nc NewChunk to be filled with substrings in this chunk
* @param startIndex The beginning index of the substring, inclusive
* @param endIndex The ending index of the substring, exclusive
* @return Filled NewChunk
*/
public NewChunk asciiSubstring(NewChunk nc, int startIndex, int endIndex) {
// copy existing data
nc = this.extractRows(nc, 0,_len);
//update offsets and byte array
for (int i = 0; i < _len; i++) {
int off = UnsafeUtils.get4(_mem, idx(i));
if (off != NA) {
int len = 0;
while (_mem[_valstart + off + len] != 0) len++; //Find length
nc.set_is(i,startIndex < len ? off + startIndex : off + len);
for (; len > endIndex - 1; len--) {
nc._ss[off + len] = 0; //Set new end
}
}
}
return nc;
}
/**
* Optimized length() method for a buffer of only ASCII characters.
* This is a straight byte count for each word in the chunk. The presence
* of UTF-8 multi-byte characters would give incorrect results.
*
* @param nc NewChunk to be filled with lengths of strings in this chunk
* @return Filled NewChunk
*/
public NewChunk asciiLength(NewChunk nc) {
//pre-allocate since size is known
nc.alloc_mantissa(_len);
nc.alloc_exponent(_len); // sadly, a waste
// fill in lengths
for(int i=0; i < _len; i++) {
int off = UnsafeUtils.get4(_mem,idx(i));
int len = 0;
if (off != NA) {
while (_mem[_valstart + off + len] != 0) len++;
nc.addNum(len, 0);
} else nc.addNA();
}
return nc;
}
public NewChunk asciiEntropy(NewChunk nc) {
nc.alloc_doubles(_len);
for (int i = 0; i < _len; i++) {
double entropy = entropyAt(i);
if (Double.isNaN(entropy)) nc.addNA();
else nc.addNum(entropy);
}
return nc;
}
double entropyAt(int i) {
int off = intAt(i);
if (off == NA) return Double.NaN;
int[] frq = new int[256];
int len = lengthAtOffset(off);
for (int j = 0; j < len; j++) {
frq[0xff & byteAt(off + j)]++;
}
double sum = 0;
for (int b = 0; b < 256; b++) {
int f = frq[b];
if (f > 0) {
double x = (double)f / len;
sum += x * Math.log(x);
}
}
return - sum / Math.log(2);
}
/**
* Optimized lstrip() & rstrip() methods to operate across the entire CStrChunk buffer in one pass.
*
* NewChunk is the same size as the original, despite trimming.
*
* @param nc NewChunk to be filled with strip version of strings in this chunk
* @param chars chars to strip, treated as ASCII
* @return Filled NewChunk
*/
public NewChunk asciiLStrip(NewChunk nc, String chars) {
SetOfBytes set = new SetOfBytes(chars);
//update offsets and byte array
for(int i=0; i < _len; i++) {
int off = intAt(i);
if (off != NA) {
while (set.contains(byteAt(off))) off++;
int len = lengthAtOffset(off);
nc.addStr(new BufferedString(_mem, _valstart+off, len));
} else nc.addNA();
}
return nc;
}
public NewChunk asciiRStrip(NewChunk nc, String chars) {
SetOfBytes set = new SetOfBytes(chars);
//update offsets and byte array
for(int i=0; i < _len; i++) {
int off = intAt(i);
if (off != NA) {
int pos = off + lengthAtOffset(off);
while (pos --> off && set.contains(byteAt(pos)));
nc.addStr(new BufferedString(_mem, _valstart+off, pos - off + 1));
} else nc.addNA();
}
return nc;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/CUDChunk.java
|
package water.fvec;
import water.MemoryManager;
import water.util.UnsafeUtils;
import java.util.HashMap;
import java.util.Map;
/**
* The "few unique doubles"-compression function
*/
public class CUDChunk extends Chunk {
public static int MAX_UNIQUES=256;
public static int computeByteSize(int uniques, int len) {
return 4 + 4 // _len + numUniques
+ (uniques << 3) //unique double values
+ (len << 1); //mapping of row -> unique value index (0...255)
}
int numUniques;
CUDChunk() {}
CUDChunk(byte[] bs, HashMap<Long,Byte> hs, int len) {
_start = -1;
numUniques = hs.size();
set_len(len);
_mem = MemoryManager.malloc1(computeByteSize(numUniques, _len), false);
UnsafeUtils.set4(_mem, 0, _len);
UnsafeUtils.set4(_mem, 4, numUniques);
int j=0;
//create the mapping and also store the unique values (as longs)
for (Map.Entry<Long,Byte> e : hs.entrySet()) {
      e.setValue((byte) (j - 128)); //j is in 0...255 -> byte value needs to be in -128...127 for storage
UnsafeUtils.set8(_mem, 8 + (j << 3), e.getKey());
j++;
}
// store the mapping
for (int i=0; i<len; ++i)
UnsafeUtils.set1(_mem, 8 + (numUniques << 3) + i, hs.get(Double.doubleToLongBits(UnsafeUtils.get8d(bs, i << 3))));
}
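  // A minimal input sketch (illustrative only, not part of the original
  // source): building the HashMap<Long,Byte> the constructor above expects.
  // Keys are the doubles' bit patterns; the byte values are (re)assigned
  // inside the constructor:
  static HashMap<Long,Byte> uniquesOf(double[] ds) {
    HashMap<Long,Byte> hs = new HashMap<>();
    for (double d : ds) hs.put(Double.doubleToLongBits(d), (byte)0);
    return hs.size() <= MAX_UNIQUES ? hs : null; // too many uniques: scheme not applicable
  }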
@Override protected final long at8_impl( int i ) {
double res = atd_impl(i);
if( Double.isNaN(res) ) throw new IllegalArgumentException("at8_impl but value is missing");
return (long)res;
}
@Override protected final double atd_impl( int i ) {
int whichUnique = (UnsafeUtils.get1(_mem, 8 + (numUniques << 3) + i)+128);
return Double.longBitsToDouble(UnsafeUtils.get8(_mem, 8 + (whichUnique << 3)));
}
@Override public double [] getDoubles(double [] vals, int from, int to) {
return getDoubles(vals,from,to,Double.NaN);
}
@Override public double [] getDoubles(double [] vals, int from, int to, double NA) {
double [] uniques = new double[numUniques];
for(int i = 0; i < numUniques; ++i) {
uniques[i] = Double.longBitsToDouble(UnsafeUtils.get8(_mem, 8 + (i << 3)));
if(Double.isNaN(uniques[i]))
uniques[i] = NA;
}
for(int i = 0; i < _len; ++i)
vals[i] = uniques[(UnsafeUtils.get1(_mem, 8 + (numUniques << 3) + i)+128)];
return vals;
}
@Override protected final boolean isNA_impl( int i ) { return Double.isNaN(atd_impl(i)); }
@Override boolean set_impl(int idx, long l) { return false; }
@Override boolean set_impl(int i, double d) {
for (int j = 0; j < numUniques; ++j) {
      if (Double.doubleToLongBits(d) == UnsafeUtils.get8(_mem, 8 + (j << 3))) { // compare raw bit patterns exactly; widening the longs to double could collide
UnsafeUtils.set1(_mem, 8 + (numUniques << 3) + i, (byte) (j-128));
return true;
}
}
return false;
}
@Override boolean set_impl(int i, float f ) {
return set_impl(i, (double)f);
}
@Override boolean setNA_impl(int idx) {
return set_impl(idx, Double.NaN);
}
@Override public ChunkVisitor processRows(ChunkVisitor nc, int from, int to){
for(int i = from; i < to; i++)
nc.addValue(atd(i));
return nc;
}
@Override public ChunkVisitor processRows(ChunkVisitor nc, int... rows){
for(int i:rows)
nc.addValue(atd(i));
return nc;
}
@Override protected final void initFromBytes () {
_start = -1; _cidx = -1;
_len = UnsafeUtils.get4(_mem, 0);
numUniques = UnsafeUtils.get4(_mem, 4);
set_len(_len);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/CXFChunk.java
|
package water.fvec;
import water.H2O;
import water.util.UnsafeUtils;
// Sparse chunk.
public class CXFChunk extends CXIChunk {
protected CXFChunk(byte [] mem){
super(mem);
}
private double getVal(int x){
switch(_elem_sz) {
case 8: return UnsafeUtils.get4f(_mem, x + 4);
case 12: return UnsafeUtils.get8d(_mem, x + 4);
default: throw H2O.unimpl();
}
}
@Override public long at8_impl(int idx){
int x = findOffset(idx);
if(x < 0) {
if(_isNA) throw new RuntimeException("at4 but the value is missing!");
return 0;
}
double val = getVal(x);
    if(Double.isNaN(val)) throw new RuntimeException("at8 but the value is missing!");
return (long)val;
}
@Override public double atd_impl(int idx) {
int x = findOffset(idx);
if(x < 0)
return _isNA?Double.NaN:0;
return getVal(x);
}
@Override
public Chunk deepCopy() {return new CXFChunk(_mem.clone());}
@Override
public <T extends ChunkVisitor> T processRows(T v, int from, int to){
int prevId = from-1;
int x = from == 0?_OFF: findOffset(from);
if(x < 0) x = -x-1;
while(x < _mem.length){
int id = getId(x);
if(id >= to)break;
if(_isNA) v.addNAs(id-prevId-1);
else v.addZeros(id-prevId-1);
v.addValue(getVal(x));
prevId = id;
x+=_elem_sz;
}
if(_isNA) v.addNAs(to-prevId-1);
else v.addZeros(to-prevId-1);
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int [] ids){
int x = _OFF;
int k = 0;
int zeros = 0;
while(k < ids.length) {
int idk = ids[k];
assert ids[k] >= 0 && (k == 0 || ids[k] > ids[k-1]);
int idx = ids[ids.length-1]+1;
while(x < _mem.length && (idx = getId(x)) < idk) x += _elem_sz;
if(x == _mem.length){
zeros += ids.length - k;
break;
}
if(idx == idk){
if(_isNA) v.addNAs(zeros);
else v.addZeros(zeros);
v.addValue(getVal(x));
zeros = 0;
x+=_elem_sz;
} else
zeros++;
k++;
}
if(zeros > 0){
if(_isNA) v.addNAs(zeros);
else v.addZeros(zeros);
}
return v;
}
@Override
public boolean hasFloat(){return true;}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/CXIChunk.java
|
package water.fvec;
import water.H2O;
import water.util.UnsafeUtils;
// Sparse chunk.
public class CXIChunk extends Chunk {
private static long [] _NAS = new long[]{-1/* not used, binary chunks can't have NAs */,/* not used*/-1,C2Chunk._NA,-1,C4Chunk._NA,-1,-1,-1,C8Chunk._NA};
public static long NA(int val_sz){return _NAS[val_sz];}
transient boolean _isNA; // na sparse or zero sparse
transient int _val_sz;
transient int _elem_sz;
// transient int _elemsz_log; // 2 or 3 for 4 or 8 byte per element
  static final int _OFF = 8; // 4B len, 1B id size, 1B value size, 1B isNA, 1B padding
protected CXIChunk(byte [] mem){
_mem = mem;
initFromBytes();
}
@Override public int sparseLenZero(){
return isSparseZero()?sparseLen():_len;
}
@Override public int sparseLenNA(){
return isSparseNA()?sparseLen():_len;
}
protected final int getId(int x){
if(x == _mem.length) return _len;
int id_sz = _elem_sz - _val_sz;
return id_sz == 4?UnsafeUtils.get4(_mem,x):0xFFFF&UnsafeUtils.get2(_mem,x);
}
private long getVal(int x){
switch(_val_sz){
case 0: return 1;
case 2: return UnsafeUtils.get2(_mem,x+2);
case 4: return UnsafeUtils.get4(_mem,x+4);
case 8: return UnsafeUtils.get8(_mem,x+4);
default: throw H2O.unimpl();
}
}
private double getFVal(int x){
long ival = getVal(x);
return ival == _NAS[_val_sz]?Double.NaN:ival;
}
@Override
public final boolean isSparseNA(){return _isNA;}
@Override
public final boolean isSparseZero(){return !_isNA;}
@Override
protected final void initFromBytes() {
_start = -1; _cidx = -1;
_len = UnsafeUtils.get4(_mem,0);
int id_sz = _mem[4]&0xFF;
_val_sz = _mem[5]&0xFF;
_elem_sz = _val_sz + id_sz;
_isNA = (0xFF&_mem[6]) == 1;
_previousOffset = _OFF;
}
protected final int sparseLen(){
return (_mem.length - _OFF) / _elem_sz;
}
protected final int getOff(int id){return _OFF + _elem_sz*id;}
protected final int getIdx(int off){return (off-_OFF)/_elem_sz;}
protected final int findOffset(int i) { // do binary search
int off = _previousOffset;
int id = getId(off);
if(id == i) return off;
if(id < i && (id = getId(off+=_elem_sz)) == i) {
_previousOffset = off;
return off;
}
int lb = id < i?getIdx(off):0;
int ub = id > i?getIdx(off):sparseLen();
while (lb < ub) {
int mid = lb + ((ub - lb) >> 1);
off = getOff(mid);
int x = getId(off);
if (x == i) {
_previousOffset = off;
return off;
}
if (x < i) lb = mid + 1;
else ub = mid;
}
return -getOff(ub)-1;
}
@Override public long at8_impl(int idx){
int x = findOffset(idx);
if(x < 0) {
if(_isNA) throw new RuntimeException("at8 but the value is missing!");
return 0;
}
long val = getVal(x);
if(val == _NAS[_val_sz])
throw new RuntimeException("at4 but the value is missing!");
return val;
}
@Override public double atd_impl(int idx) {
int x = findOffset(idx);
if(x < 0)
return _isNA?Double.NaN:0;
return getFVal(x);
}
@Override public final boolean isNA_impl( int i ) {return Double.isNaN(atd(i));}
@Override
boolean set_impl(int idx, long l) {
return false;
}
@Override
boolean set_impl(int idx, double d) {
return false;
}
@Override
boolean set_impl(int idx, float f) {
return false;
}
@Override
boolean setNA_impl(int idx) {
return false;
}
@Override public boolean hasNA() { return true; }
@Override
public Chunk deepCopy() {return new CXIChunk(_mem.clone());}
public final int len(){return _len;}
private transient int _previousOffset = _OFF;
@Override public final int nextNZ(int i){
int x = findOffset(i);
if(x < 0) x = -x-1-_elem_sz;
_previousOffset = x += _elem_sz;
return getId(x);
}
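  // A minimal iteration sketch (illustrative only, not part of the original
  // source): nextNZ() lets callers visit only the stored elements, skipping
  // the zero (or NA) holes of the sparse layout:
  final double sumStoredValues() {
    double sum = 0;
    for (int r = nextNZ(-1); r < _len; r = nextNZ(r))
      sum += atd(r); // for an NA-sparse chunk the stored values may still be NaN
    return sum;
  }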
@Override public final int nextNZ(int rid, boolean onlyTrueZero) {
return onlyTrueZero && _isNA ? rid + 1 : nextNZ(rid);
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int from, int to){
int prevId = from-1;
int x = from == 0?_OFF: findOffset(from);
if(x < 0) x = -x-1;
while(x < _mem.length){
int id = getId(x);
if(id >= to)break;
if(_isNA) v.addNAs(id-prevId-1);
else v.addZeros(id-prevId-1);
long val = getVal(x);
if(val ==_NAS[_val_sz])
v.addNAs(1);
else
v.addValue(val);
prevId = id;
x+=_elem_sz;
}
if(_isNA) v.addNAs(to-prevId-1);
else v.addZeros(to-prevId-1);
return v;
}
@Override
public <T extends ChunkVisitor> T processRows(T v, int [] ids){
int x = _OFF;
int k = 0;
int zeros = 0;
while(k < ids.length) {
int idk = ids[k];
assert ids[k] >= 0 && (k == 0 || ids[k] > ids[k-1]);
int idx = ids[ids.length-1]+1;
while(x < _mem.length && (idx = getId(x)) < idk) x += _elem_sz;
if(x == _mem.length){
zeros += ids.length - k;
break;
}
if(idx == idk){
if(_isNA) v.addNAs(zeros);
else v.addZeros(zeros);
long val = getVal(x);
if(val == _NAS[_val_sz])
v.addNAs(1);
else
v.addValue(val);
zeros = 0;
x+=_elem_sz;
} else
zeros++;
k++;
}
if(zeros > 0){
if(_isNA) v.addNAs(zeros);
else v.addZeros(zeros);
}
return v;
}
@Override
public boolean hasFloat(){return false;}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/CategoricalWrappedVec.java
|
package water.fvec;
import water.AutoBuffer;
import water.Key;
import water.DKV;
import water.MRTask;
import water.util.ArrayUtils;
import java.util.Arrays;
import java.util.HashMap;
/** A vector transforming values of given vector according to given domain
* mapping - currently only used to transform categorical columns but in theory would
* work for any dense-packed Int column. Expected usage is to map from a new
* dataset to the domain-mapping expected by a model (which will match the
* dataset it was trained on).
*
* <p>The Vector's domain is the union of the Test and Train domains.
*
* <p>The mapping is defined by int[] array, size is input Test.domain.length.
* Contents refer to values in the Train.domain. Extra values in the Test
* domain are sorted after the Train.domain - so mapped values have to be
* range-checked (note that returning some flag for NA, say -1, would also
* need to be checked for).
*/
public class CategoricalWrappedVec extends WrappedVec {
/** List of values from underlying vector which this vector map to a new
* value in the union domain. */
int[] _map;
int _p=0;
/** Main constructor: convert from one categorical to another */
public CategoricalWrappedVec(Key key, int rowLayout, String[] toDomain, Key masterVecKey) {
super(key, rowLayout, masterVecKey);
computeMap(masterVec().domain(),toDomain,masterVec().isBad());
DKV.put(this);
}
/** Constructor just to generate the map and domain; used in tests or when
* mixing categorical columns */
private CategoricalWrappedVec(Key key) { super(key, ESPC.rowLayout(key, new long[]{0}), null, null); }
private static CategoricalWrappedVec makeTransientVec(String[] from, String[] to) {
Key key = Vec.newKey();
CategoricalWrappedVec tmp = new CategoricalWrappedVec(key);
tmp.computeMap(from, to, false);
return tmp;
}
public static int[] computeMap(String[] from, String[] to) {
return makeTransientVec(from, to)._map;
}
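  // A minimal usage sketch (illustrative only, not part of the original
  // source): the javadoc example further below, driven through this helper.
  // Note it needs a running H2O, since a transient Vec is created under the hood:
  static int[] javadocExample() {
    return computeMap(new String[]{"Blue","Red","Green"},
                      new String[]{"Green","Yellow","Blue"}); // == {2, 3, 0}
  }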
/**
* Updates Vec's domain in-place (instead of creating a wrapper Vec)
* @param source source Vec
* @param toDomain target domain
* @return updated instance of Vec
*/
public static Vec updateDomain(Vec source, String[] toDomain) {
CategoricalWrappedVec tv = makeTransientVec(source.domain(), toDomain);
new RemapTask(tv).doAll(source);
source.setDomain(tv.domain());
DKV.put(source);
return source;
}
private static class RemapTask extends MRTask<RemapTask> {
private final int[] _map;
private final int _p;
private RemapTask(CategoricalWrappedVec vec) {
_map = vec._map; _p = vec._p;
}
@Override
public void map(Chunk c) {
Chunk wc = new CategoricalWrappedChunk(c, c._vec, _map, _p);
assert wc._len == c._len;
for (int i = 0; i < wc._len; i++) {
if (c.isNA(i))
          continue;
c.set(i, wc.at8(i));
}
}
}
@Override public Chunk chunkForChunkIdx(int cidx) {
return new CategoricalWrappedChunk(masterVec().chunkForChunkIdx(cidx), this);
}
/** Compute a mapping from the 'from' domain to the 'to' domain. Strings in
* the 'from' domain not in the 'to' domain are mapped past the end of the
* 'to' values. Strings in the 'to' domain not in the 'from' domain
* simply do not appear in the mapping. The returned map is always the same
* length as the 'from' domain. Its contents have values from both
* domains; the resulting domain is as big as the largest value in the map,
* and only has strings from the 'from' domain (which probably overlap
* somewhat with the 'to' domain).
*
* <p> Example: from={"Blue","Red","Green"}, to={"Green","Yellow","Blue"}.<br>
* "Yellow" does not appear in the 'from' domain; "Red" does not appear in the 'to' domain.<br>
* Returned map is {2,3,0}.<br>
* Map length matches the 'from' domain length.<br>
* Largest value is 3, so the domain is size 4.<br>
* Domain is: {"Green","Yellow","Blue","Red"}<br>
* Extra values in the 'from' domain appear, in-order in the 'from' domain, at the end.
* @return mapping
*/
private void computeMap( String[] from, String[] to, boolean fromIsBad ) {
// Identity? Build the cheapo non-map
if( from==to || Arrays.equals(from,to) ) {
_map = ArrayUtils.seq(0,to.length);
setDomain(to);
return;
}
// The source Vec does not have a domain, hence is an integer column. The
// to[] mapping has the set of unique numbers, we need to map from those
// numbers to the index to the numbers.
if( from==null) {
setDomain(to);
if( fromIsBad ) { _map = new int[0]; return; }
int min = Integer.valueOf(to[0]);
int max = Integer.valueOf(to[to.length-1]);
Vec mvec = masterVec();
if( !(mvec.isInt() && mvec.min() >= min && mvec.max() <= max) )
throw new NumberFormatException(); // Unable to figure out a valid mapping
// FIXME this is a bit of a hack to allow adapTo calls to play nice with negative ints in the domain...
if( Integer.valueOf(to[0]) < 0 ) {
_p=Math.max(0,max);
_map = new int[(_p /*positive array of values*/) + (-1*min /*negative array of values*/) + 1 /*one more to store "max" value*/];
for(int i=0;i<to.length;++i) {
int v = Integer.valueOf(to[i]);
if( v < 0 ) v = -1*v+_p;
_map[v] = i;
}
return;
}
_map = new int[max+1];
for( int i=0; i<to.length; i++ )
_map[Integer.valueOf(to[i])] = i;
return;
}
// The desired result Vec does not have a domain, hence is a numeric
// column. For classification of numbers, we did an original toCategoricalVec
// wrapping the numeric values up as Strings for the classes. Unwind that,
// converting numeric strings back to their original numbers.
_map = new int[from.length];
if( to == null ) {
for( int i=0; i<from.length; i++ )
_map[i] = Integer.valueOf(from[i]);
return;
}
// Full string-to-string mapping
HashMap<String,Integer> h = new HashMap<>();
for( int i=0; i<to.length; i++ ) h.put(to[i],i);
String[] ss = to;
int extra = to.length;
int actualLen = extra;
for( int j=0; j<from.length; j++ ) {
Integer x = h.get(from[j]);
if( x!=null ) _map[j] = x;
else {
_map[j] = extra++;
if (extra > ss.length) {
ss = Arrays.copyOf(ss, 2*ss.length);
}
ss[extra-1] = from[j];
actualLen = extra;
}
}
setDomain(Arrays.copyOf(ss, actualLen));
}
@Override
public Vec doCopy() {
return new CategoricalWrappedVec(group().addVec(),_rowLayout, domain(), _masterVecKey);
}
public static class CategoricalWrappedChunk extends Chunk {
public final transient Chunk _c; // Test-set map
final transient int[] _map;
final transient int _p;
CategoricalWrappedChunk(Chunk c, CategoricalWrappedVec vec) {
this(c, vec, vec._map, vec._p);
}
private CategoricalWrappedChunk(Chunk c, Vec vec, int[] map, int p) {
_c = c; set_len(_c._len);
_start = _c._start; _vec = vec; _cidx = _c._cidx;
_map = map; _p = p;
}
// Returns the mapped value. {@code _map} covers all the values in the
// master Chunk, so no AIOOBE. Missing values in the master Chunk return
// the usual NaN.
@Override protected double atd_impl(int idx) { return _c.isNA_impl(idx) ? Double.NaN : at8_impl(idx); }
// Returns the mapped value. {@code _map} covers all the values in the
// master Chunk, so no AIOOBE. Missing values in the master Chunk throw
// the normal missing-value exception when loading from the master.
@Override protected long at8_impl(int idx) {
int at8 = (int)_c.at8_impl(idx);
if( at8 >= 0 ) return _map[at8];
else return _map[-1*at8+_p];
}
// Returns true if the masterVec is missing, false otherwise
@Override protected boolean isNA_impl(int idx) { return _c.isNA_impl(idx); }
@Override boolean set_impl(int idx, long l) { return false; }
@Override boolean set_impl(int idx, double d) { return false; }
@Override boolean set_impl(int idx, float f) { return false; }
@Override boolean setNA_impl(int idx) { return false; }
@Override public ChunkVisitor processRows(ChunkVisitor nc, int from, int to){
for( int i=from; i< to; i++ )
if(isNA(i))nc.addNAs(1);
else nc.addValue(at8(i));
return nc;
}
@Override public ChunkVisitor processRows(ChunkVisitor nc, int... rows){
for( int i:rows)
if(isNA(i))nc.addNAs(1);
else nc.addValue(at8(i));
return nc;
}
public static AutoBuffer write_impl(CategoricalWrappedVec v,AutoBuffer bb) { throw water.H2O.fail(); }
@Override protected final void initFromBytes () { throw water.H2O.fail(); }
@Override public boolean hasNA() { return _c.hasNA(); }
@Override
public Chunk deepCopy() {
return extractRows(new NewChunk(this),0,_c._len).compress();
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/Chunk.java
|
package water.fvec;
import water.*;
import water.parser.BufferedString;
import java.util.UUID;
/** A compression scheme, over a chunk of data - a single array of bytes.
* Chunks are mapped many-to-1 to a {@link Vec}. The <em>actual</em> vector
* header info is in the Vec - which contains info to find all the bytes of
* the distributed vector. Subclasses of this abstract class implement
* (possibly empty) compression schemes.
*
* <p>Chunks are collections of elements, and support an array-like API.
* Chunks are subsets of a Vec; while the elements in a Vec are numbered
* starting at 0, any given Chunk has some (probably non-zero) starting row,
* and a length which is smaller than the whole Vec. Chunks are limited to a
* single Java byte array in a single JVM heap, and only an int's worth of
* elements. Chunks support both the notions of a global row-number and a
* chunk-local numbering. The global row-number calls are variants of {@code
* at_abs} and {@code set_abs}. If the row is outside the current Chunk's
* range, the data will be loaded by fetching from the correct Chunk. This
* probably involves some network traffic, and if all rows are loaded then the
* entire dataset will be pulled locally (possibly triggering an OutOfMemory).
*
* <p>The chunk-local numbering supports the common {@code for} loop iterator
* pattern, using {@code at*} and {@code set} calls, and is faster than the
* global row-numbering for tight loops (because it skips some range checks):
* <pre>{@code
* for (int row = 0; row < chunk._len; row++)
* ...chunk.atd(row)...
* }</pre>
*
* <p>The array-like API allows loading and storing elements in and out of
* Chunks. When loading, values are decompressed. When storing, an attempt
* to compress back into the actual underlying Chunk subclass is made; if this
* fails the Chunk is "inflated" into a {@link NewChunk}, and the store
* completed there. Later the NewChunk will be compressed (probably into a
* different underlying Chunk subclass) and put back in the K/V store under
* the same Key - effectively replacing the original Chunk; this is done when
* {@link #close} is called, and is taken care of by the standard {@link
* MRTask} calls.
*
* <p>Chunk updates are not multi-thread safe; the caller must do correct
 * synchronization.  This is already handled by the Map/Reduce {@link MRTask}
* framework. Chunk updates are not visible cross-cluster until the {@link
* #close} is made; again this is handled by MRTask directly.
*
* <p>In addition to normal load and store operations, Chunks support the
* notion a missing element via the {@link #isNA} call, and a "next non-zero"
* notion for rapidly iterating over sparse data.
*
* <p><b>Data Types</b>
*
* <p>Chunks hold Java primitive values, timestamps, UUIDs, or Strings. All
* the Chunks in a Vec hold the same type. Most of the types are compressed.
* Integer types (boolean, byte, short, int, long) are always lossless. Float
* and Double types might lose 1 or 2 ulps in the compression. Time data is
* held as milliseconds since the Unix Epoch. UUIDs are held as 128-bit
* integers (a pair of Java longs). Strings are compressed in various obvious
* ways. Sparse data is held... sparsely; e.g. loading data in SVMLight
* format will not "blow up" the in-memory representation. Categoricals/factors
* are held as small integers, with a shared String lookup table on the side.
*
* <p>Chunks support the notion of <em>missing</em> data. Missing float and
* double data is always treated as a NaN, both if read or written. There is
* no equivalent of NaN for integer data; reading a missing integer value is a
* coding error and will be flagged. If you are working with integer data
* with missing elements, you must first check for a missing value before
* loading it:
* <pre>{@code
* if( !chk.isNA(row) ) ...chk.at8(row)....
* }</pre>
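 *
 *  <p>A minimal sketch of that guarded-read pattern inside the standard
 *  {@code for} loop (assuming a numeric, possibly-missing column):
 *  <pre>{@code
 * long sum = 0;
 * for( int row = 0; row < chk._len; row++ )
 *   if( !chk.isNA(row) ) sum += chk.at8(row);
 * }</pre>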
*
* <p>The same holds true for the other non-real types (timestamps, UUIDs,
* Strings, or categoricals): they must be checked for missing before being
* used.
*
* <p><b>Performance Concerns</b>
*
* <p>The standard {@code for} loop mentioned above is the fastest way to
* access data; definitely faster (and less error prone) than iterating over
* global row numbers. Iterating over a single Chunk is nearly always
* memory-bandwidth bound. Often code will iterate over a number of Chunks
* aligned together (the common use-case of looking a whole rows of a
* dataset). Again, typically such a code pattern is memory-bandwidth bound
* although the X86 will stop being able to prefetch well beyond 100 or 200
* Chunks.
*
* <p>Note that Chunk alignment is guaranteed within all the Vecs of a Frame:
* Same numbered Chunks of <em>different</em> Vecs will have the same global
* row numbering and the same length, enabling a particularly simple and
* efficient way to iterate over all rows.
*
* <p>This example computes the Euclidean distance between all the columns and
* a given point, and stores the squared distance back in the last column.
 *  Note that, due to "NaN poisoning", if any row element is missing, the entire
* distance calculated will be NaN.
* <pre>{@code
* final double[] _point; // The given point
* public void map( Chunk[] chks ) { // Map over a set of same-numbered Chunks
* for( int row=0; row < chks[0]._len; row++ ) { // For all rows
* double dist=0; // Squared distance
* for( int col=0; col < chks.length-1; col++ ) { // For all cols, except the last output col
* double d = chks[col].atd(row) - _point[col]; // Distance along this dimension
* dist += d*d; // Sum-squared-distance
* }
* chks[chks.length-1].set( row, dist ); // Store back the distance in the last col
* }
* }}</pre>
*/
public abstract class Chunk extends Iced<Chunk> implements Vec.Holder {
public Chunk() {}
private Chunk(byte [] bytes) {_mem = bytes;initFromBytes();}
/** Global starting row for this local Chunk; a read-only field. */
transient long _start = -1;
/** Global starting row for this local Chunk */
public final long start() { return _start; }
/** Global index of this chunk filled during chunk load */
transient int _cidx = -1;
/** Number of rows in this Chunk; publicly a read-only field. Odd API
* design choice: public, not-final, read-only, NO-ACCESSOR.
*
* <p>NO-ACCESSOR: This is a high-performance field, and must have a known
* zero-cost cost-model; accessors hide that cost model, and make it
* not-obvious that a loop will be properly optimized or not.
*
* <p>not-final: set in various deserializers.
* <p>Proper usage: read the field, probably in a hot loop.
* <pre>
* for( int row=0; row < chunk._len; row++ )
* ...chunk.atd(row)...
* </pre>
**/
public transient int _len;
  /** Internal set of _len.  Used by lots of subclasses.  Not a publicly visible API. */
int set_len(int len) { return _len = len; }
/** Read-only length of chunk (number of rows). */
public int len() { return _len; }
  /** Normally==null, changed if chunk is written to.  Not a publicly readable or writable field. */
private transient Chunk _chk2;
  /** Exposed for internal testing only.  Not a publicly visible API. */
public Chunk chk2() { return _chk2; }
/** Owning Vec; a read-only field */
transient Vec _vec;
/** Owning Vec */
public Vec vec() { return _vec; }
/** Set the owning Vec */
public void setVec(Vec vec) { _vec = vec; }
/** Set the start */
public void setStart(long start) { _start = start; }
  /** The Big Data.  Frequently set in the subclasses, but not otherwise a publicly writable field. */
byte[] _mem;
/** Short-cut to the embedded big-data memory. Generally not useful for
* public consumption, since the data remains compressed and holding on to a
* pointer to this array defeats the user-mode spill-to-disk. */
public byte[] getBytes() { return _mem; }
public void setBytes(byte[] mem) { _mem = mem; }
final long at8_abs(long i) {
long x = i - (_start>0 ? _start : 0);
if( 0 <= x && x < _len) return at8((int) x);
throw new ArrayIndexOutOfBoundsException(""+_start+" <= "+i+" < "+(_start+ _len));
}
/** Load a {@code double} value using absolute row numbers. Returns
* Double.NaN if value is missing.
*
* <p>This version uses absolute element numbers, but must convert them to
* chunk-relative indices - requiring a load from an aliasing local var,
* leading to lower quality JIT'd code (similar issue to using iterator
* objects).
*
* <p>Slightly slower than {@link #atd} since it range-checks within a chunk.
* @return double value at the given row, or NaN if the value is missing */
final double at_abs(long i) {
long x = i - (_start>0 ? _start : 0);
if( 0 <= x && x < _len) return atd((int) x);
throw new ArrayIndexOutOfBoundsException(""+_start+" <= "+i+" < "+(_start+ _len));
}
/** Missing value status.
*
* <p>This version uses absolute element numbers, but must convert them to
* chunk-relative indices - requiring a load from an aliasing local var,
* leading to lower quality JIT'd code (similar issue to using iterator
* objects).
*
* <p>Slightly slower than {@link #isNA} since it range-checks within a chunk.
* @return true if the value is missing */
final boolean isNA_abs(long i) {
long x = i - (_start>0 ? _start : 0);
if( 0 <= x && x < _len) return isNA((int) x);
throw new ArrayIndexOutOfBoundsException(""+_start+" <= "+i+" < "+(_start+ _len));
}
/** Low half of a 128-bit UUID, or throws if the value is missing.
*
* <p>This version uses absolute element numbers, but must convert them to
* chunk-relative indices - requiring a load from an aliasing local var,
* leading to lower quality JIT'd code (similar issue to using iterator
* objects).
*
* <p>Slightly slower than {@link #at16l} since it range-checks within a chunk.
* @return Low half of a 128-bit UUID, or throws if the value is missing. */
final long at16l_abs(long i) {
long x = i - (_start>0 ? _start : 0);
if( 0 <= x && x < _len) return at16l((int) x);
throw new ArrayIndexOutOfBoundsException(""+_start+" <= "+i+" < "+(_start+ _len));
}
/** High half of a 128-bit UUID, or throws if the value is missing.
*
* <p>This version uses absolute element numbers, but must convert them to
* chunk-relative indices - requiring a load from an aliasing local var,
* leading to lower quality JIT'd code (similar issue to using iterator
* objects).
*
* <p>Slightly slower than {@link #at16h} since it range-checks within a chunk.
* @return High half of a 128-bit UUID, or throws if the value is missing. */
final long at16h_abs(long i) {
long x = i - (_start>0 ? _start : 0);
if( 0 <= x && x < _len) return at16h((int) x);
throw new ArrayIndexOutOfBoundsException(""+_start+" <= "+i+" < "+(_start+ _len));
}
/** String value using absolute row numbers, or null if missing.
*
* <p>This version uses absolute element numbers, but must convert them to
* chunk-relative indices - requiring a load from an aliasing local var,
* leading to lower quality JIT'd code (similar issue to using iterator
* objects).
*
* <p>Slightly slower than {@link #atStr} since it range-checks within a chunk.
* @return String value using absolute row numbers, or null if missing. */
final BufferedString atStr_abs(BufferedString bStr, long i) {
long x = i - (_start>0 ? _start : 0);
if( 0 <= x && x < _len) return atStr(bStr, (int) x);
throw new ArrayIndexOutOfBoundsException(""+_start+" <= "+i+" < "+(_start+ _len));
}
/** Load a {@code double} value using chunk-relative row numbers. Returns Double.NaN
* if value is missing.
* @return double value at the given row, or NaN if the value is missing */
public final double atd(int i) { return _chk2 == null ? atd_impl(i) : _chk2. atd_impl(i); }
/** Load a {@code long} value using chunk-relative row numbers. Floating
* point values are silently rounded to a long. Throws if the value is
* missing.
* @return long value at the given row, or throw if the value is missing */
public final long at8(int i) { return _chk2 == null ? at8_impl(i) : _chk2. at8_impl(i); }
/** Missing value status using chunk-relative row numbers.
*
* @return true if the value is missing */
public final boolean isNA(int i) { return _chk2 == null ?isNA_impl(i) : _chk2.isNA_impl(i); }
/** Low half of a 128-bit UUID, or throws if the value is missing.
*
* @return Low half of a 128-bit UUID, or throws if the value is missing. */
public final long at16l(int i) { return _chk2 == null ? at16l_impl(i) : _chk2.at16l_impl(i); }
/** High half of a 128-bit UUID, or throws if the value is missing.
*
* @return High half of a 128-bit UUID, or throws if the value is missing. */
public final long at16h(int i) { return _chk2 == null ? at16h_impl(i) : _chk2.at16h_impl(i); }
/** String value using chunk-relative row numbers, or null if missing.
*
* @return String value or null if missing. */
public final BufferedString atStr(BufferedString bStr, int i) { return _chk2 == null ? atStr_impl(bStr, i) : _chk2.atStr_impl(bStr, i); }
public String stringAt(int i) {
return atStr(new BufferedString(), i).toString();
}
/** Write a {@code long} using absolute row numbers. There is no way to
* write a missing value with this call. Under rare circumstances this can
* throw: if the long does not fit in a double (value is larger magnitude
* than 2^52), AND float values are stored in Vector. In this case, there
* is no common compatible data representation.
*
* <p>As with all the {@code set} calls, if the value written does not fit
* in the current compression scheme, the Chunk will be inflated into a
* NewChunk and the value written there. Later, the NewChunk will be
* compressed (after a {@link #close} call) and written back to the DKV.
* i.e., there is some interesting cost if Chunk compression-types need to
* change.
*
* <p>This version uses absolute element numbers, but must convert them to
* chunk-relative indices - requiring a load from an aliasing local var,
* leading to lower quality JIT'd code (similar issue to using iterator
* objects). */
final void set_abs(long i, long l) { long x = i-_start; if (0 <= x && x < _len) set((int) x, l); else _vec.set(i,l); }
/** Write a {@code double} using absolute row numbers; NaN will be treated as
* a missing value.
*
* <p>As with all the {@code set} calls, if the value written does not fit
* in the current compression scheme, the Chunk will be inflated into a
* NewChunk and the value written there. Later, the NewChunk will be
* compressed (after a {@link #close} call) and written back to the DKV.
* i.e., there is some interesting cost if Chunk compression-types need to
* change.
*
* <p>This version uses absolute element numbers, but must convert them to
* chunk-relative indices - requiring a load from an aliasing local var,
* leading to lower quality JIT'd code (similar issue to using iterator
* objects). */
final void set_abs(long i, double d) { long x = i-_start; if (0 <= x && x < _len) set((int) x, d); else _vec.set(i,d); }
/** Write a {@code float} using absolute row numbers; NaN will be treated as
* a missing value.
*
* <p>As with all the {@code set} calls, if the value written does not fit
* in the current compression scheme, the Chunk will be inflated into a
* NewChunk and the value written there. Later, the NewChunk will be
* compressed (after a {@link #close} call) and written back to the DKV.
* i.e., there is some interesting cost if Chunk compression-types need to
* change.
*
* <p>This version uses absolute element numbers, but must convert them to
* chunk-relative indices - requiring a load from an aliasing local var,
* leading to lower quality JIT'd code (similar issue to using iterator
* objects). */
final void set_abs( long i, float f) { long x = i-_start; if (0 <= x && x < _len) set((int) x, f); else _vec.set(i,f); }
/** Set the element as missing, using absolute row numbers.
*
* <p>As with all the {@code set} calls, if the value written does not fit
* in the current compression scheme, the Chunk will be inflated into a
* NewChunk and the value written there. Later, the NewChunk will be
* compressed (after a {@link #close} call) and written back to the DKV.
* i.e., there is some interesting cost if Chunk compression-types need to
* change.
*
* <p>This version uses absolute element numbers, but must convert them to
* chunk-relative indices - requiring a load from an aliasing local var,
* leading to lower quality JIT'd code (similar issue to using iterator
* objects). */
final void setNA_abs(long i) { long x = i-_start; if (0 <= x && x < _len) setNA((int) x); else _vec.setNA(i); }
/** Set a {@code String}, using absolute row numbers.
*
* <p>As with all the {@code set} calls, if the value written does not fit
* in the current compression scheme, the Chunk will be inflated into a
* NewChunk and the value written there. Later, the NewChunk will be
* compressed (after a {@link #close} call) and written back to the DKV.
* i.e., there is some interesting cost if Chunk compression-types need to
* change.
*
* <p>This version uses absolute element numbers, but must convert them to
* chunk-relative indices - requiring a load from an aliasing local var,
* leading to lower quality JIT'd code (similar issue to using iterator
* objects). */
public final void set_abs(long i, String str) { long x = i-_start; if (0 <= x && x < _len) set((int) x, str); else _vec.set(i,str); }
public final void set_abs(long i, UUID uuid) { long x = i-_start; if (0 <= x && x < _len) set((int) x, uuid); else _vec.set(i,uuid); }
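  // Editor's illustrative sketch (not original code): the set_abs family takes
  // frame-absolute row numbers and routes out-of-chunk rows through the Vec.
  // The numeric overloads are package-private, so this assumes a caller inside
  // water.fvec:
  //   Chunk c = vec.chunkForChunkIdx(0);
  //   c.set_abs(5L, 3.14);            // lands here if row 5 is within [_start, _start+_len)
  //   c.set_abs(vec.length()-1, 42L); // otherwise forwarded to _vec.set(...)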
public boolean hasFloat(){return true;}
public boolean hasNA(){return true;}
  /** Replace all rows of this chunk with the given replacement chunk. */
public void replaceAll( Chunk replacement ) {
assert _len == replacement._len;
_vec.preWriting(); // One-shot writing-init
_chk2 = replacement;
assert _chk2._chk2 == null; // Replacement has NOT been written into
}
public Chunk deepCopy() {
Chunk c2 = clone();
c2._vec=null;
c2._start=-1;
c2._cidx=-1;
c2._mem = _mem.clone();
c2.initFromBytes();
assert len() == c2._len;
return c2;
}
private void setWrite() {
if( _chk2 != null ) return; // Already setWrite
assert !(this instanceof NewChunk) : "Cannot direct-write into a NewChunk, only append";
setWrite(clone());
}
private void setWrite(Chunk ck) {
assert(_chk2==null);
_vec.preWriting(); // One-shot writing-init
_chk2 = ck;
assert _chk2._chk2 == null; // Clone has NOT been written into
}
  /** Write a {@code long} with chunk-relative indexing. There is no way to
* write a missing value with this call. Under rare circumstances this can
* throw: if the long does not fit in a double (value is larger magnitude
* than 2^52), AND float values are stored in Vector. In this case, there
* is no common compatible data representation.
*
* <p>As with all the {@code set} calls, if the value written does not fit
* in the current compression scheme, the Chunk will be inflated into a
* NewChunk and the value written there. Later, the NewChunk will be
* compressed (after a {@link #close} call) and written back to the DKV.
* i.e., there is some interesting cost if Chunk compression-types need to
* change.
* @return the set value
*/
public final long set(int idx, long l) {
setWrite();
if( _chk2.set_impl(idx,l) ) return l;
(_chk2 = inflate()).set_impl(idx,l);
return l;
}
public final double [] set(double [] d){
assert d.length == _len && _chk2 == null;
setWrite(new NewChunk(this,d));
return d;
}
  /** Write a {@code double} with chunk-relative indexing. NaN will be treated
* as a missing value.
*
* <p>As with all the {@code set} calls, if the value written does not fit
* in the current compression scheme, the Chunk will be inflated into a
* NewChunk and the value written there. Later, the NewChunk will be
* compressed (after a {@link #close} call) and written back to the DKV.
* i.e., there is some interesting cost if Chunk compression-types need to
* change.
* @return the set value
*/
public final double set(int idx, double d) {
setWrite();
if( _chk2.set_impl(idx,d) ) return d;
(_chk2 = inflate()).set_impl(idx,d);
return d;
}
  /** Write a {@code float} with chunk-relative indexing. NaN will be treated
* as a missing value.
*
* <p>As with all the {@code set} calls, if the value written does not fit
* in the current compression scheme, the Chunk will be inflated into a
* NewChunk and the value written there. Later, the NewChunk will be
* compressed (after a {@link #close} call) and written back to the DKV.
* i.e., there is some interesting cost if Chunk compression-types need to
* change.
* @return the set value
*/
public final float set(int idx, float f) {
setWrite();
if( _chk2.set_impl(idx,f) ) return f;
(_chk2 = inflate()).set_impl(idx,f);
return f;
}
/** Set a value as missing.
*
* <p>As with all the {@code set} calls, if the value written does not fit
* in the current compression scheme, the Chunk will be inflated into a
* NewChunk and the value written there. Later, the NewChunk will be
* compressed (after a {@link #close} call) and written back to the DKV.
* i.e., there is some interesting cost if Chunk compression-types need to
* change.
* @return the set value
*/
public final boolean setNA(int idx) {
setWrite();
if( _chk2.setNA_impl(idx) ) return true;
(_chk2 = inflate()).setNA_impl(idx);
return true;
}
  /** Write a {@code String} with chunk-relative indexing. {@code null} will
* be treated as a missing value.
*
* <p>As with all the {@code set} calls, if the value written does not fit
* in the current compression scheme, the Chunk will be inflated into a
* NewChunk and the value written there. Later, the NewChunk will be
* compressed (after a {@link #close} call) and written back to the DKV.
* i.e., there is some interesting cost if Chunk compression-types need to
* change.
* @return the set value
*/
public final String set(int idx, String str) {
setWrite();
if( _chk2.set_impl(idx,str) ) return str;
(_chk2 = inflate()).set_impl(idx,str);
return str;
}
public final UUID set(int idx, UUID uuid) {
setWrite();
long lo = uuid.getLeastSignificantBits();
long hi = uuid.getMostSignificantBits();
if( _chk2.set_impl(idx, lo, hi) ) return uuid;
_chk2 = inflate();
_chk2.set_impl(idx,lo, hi);
return uuid;
}
private Object setUnknown(int idx) {
setNA(idx);
return null;
}
/**
* @param idx index of the value in Chunk
* @param x new value to set
* @return x on success, or null if something went wrong
*/
public final Object setAny(int idx, Object x) {
return x instanceof String ? set(idx, (String) x) :
x instanceof Double ? set(idx, (Double)x) :
x instanceof Float ? set(idx, (Float)x) :
x instanceof Long ? set(idx, (Long)x) :
x instanceof Integer ? set(idx, ((Integer)x).longValue()) :
x instanceof UUID ? set(idx, (UUID) x) :
x instanceof java.util.Date ? set(idx, ((java.util.Date) x).getTime()) :
/* otherwise */ setUnknown(idx);
}
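  // Editor's illustrative note: setAny dispatches on the runtime type of x,
  // e.g. setAny(0, "cat") writes a String, setAny(1, 1.5) a double, and
  // setAny(2, new java.util.Date()) the epoch millis; any unsupported type
  // (including null) falls through to setUnknown and stores an NA.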
/** After writing we must call close() to register the bulk changes. If a
* NewChunk was needed, it will be compressed into some other kind of Chunk.
* The resulting Chunk (either a modified self, or a compressed NewChunk)
* will be written to the DKV. Only after that {@code DKV.put} completes
* will all readers of this Chunk witness the changes.
* @return the passed-in {@link Futures}, for flow-coding.
*/
public Futures close( int cidx, Futures fs ) {
if( this instanceof NewChunk ) _chk2 = this;
if( _chk2 == null ) return fs; // No change?
if( _chk2 instanceof NewChunk ) _chk2 = ((NewChunk)_chk2).new_close();
DKV.put(_vec.chunkKey(cidx),_chk2,fs,true); // Write updated chunk back into K/V
return fs;
}
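  // Editor's illustrative sketch of the write cycle described above
  // (hypothetical helper, not part of the original file):
  //   static void zeroOutNAs(Chunk c, Futures fs) {
  //     for (int row = 0; row < c._len; row++)
  //       if (c.isNA(row)) c.set(row, 0.0); // may inflate into a NewChunk
  //     c.close(c.cidx(), fs);              // publish the modified chunk to the DKV
  //   }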
/** @return Chunk index */
public int cidx() {
assert _cidx != -1 : "Chunk idx was not properly loaded!";
return _cidx;
}
public final Chunk setVolatile(double [] ds) {
Chunk res;
Value v = new Value(_vec.chunkKey(_cidx), res = new C8DVolatileChunk(ds),ds.length*8,Value.ICE);
DKV.put(v._key,v);
return res;
}
public final Chunk setVolatile(int[] vals) {
Chunk res;
Value v = new Value(_vec.chunkKey(_cidx), res = new C4VolatileChunk(vals),vals.length*4,Value.ICE);
DKV.put(v._key,v);
return res;
}
public boolean isVolatile() {return false;}
static class WrongType extends IllegalArgumentException {
private final Class<?> expected;
private final Class<?> actual;
public WrongType(Class<?> expected, Class<?> actual) {
super("Expected: " + expected + ", actual: " + actual);
this.expected = expected;
this.actual = actual;
}
}
static WrongType wrongType(Class<?> expected, Class<?> actual) { return new WrongType(expected, actual); }
/** Chunk-specific readers. Not a public API */
abstract double atd_impl(int idx);
abstract long at8_impl(int idx);
abstract boolean isNA_impl(int idx);
long at16l_impl(int idx) { throw wrongType(UUID.class, Object.class); }
long at16h_impl(int idx) { throw wrongType(UUID.class, Object.class); }
BufferedString atStr_impl(BufferedString bStr, int idx) { throw new IllegalArgumentException("Not a String"); }
/** Chunk-specific writer. Returns false if the value does not fit in the
* current compression scheme. */
abstract boolean set_impl (int idx, long l );
abstract boolean set_impl (int idx, double d );
abstract boolean set_impl (int idx, float f );
abstract boolean setNA_impl(int idx);
boolean set_impl (int idx, String str) { return false; }
boolean set_impl(int i, long lo, long hi) { return false; }
//Zero sparse methods:
/** Sparse Chunks have a significant number of zeros, and support for
* skipping over large runs of zeros in a row.
* @return true if this Chunk is sparse. */
public boolean isSparseZero() {return false;}
/** Sparse Chunks have a significant number of zeros, and support for
* skipping over large runs of zeros in a row.
* @return At least as large as the count of non-zeros, but may be significantly smaller than the {@link #_len} */
public int sparseLenZero() {return _len;}
/**
* Skips a section of either NAs or Zeros in a sparse chunk.
* Note: This method can only be used when NAs and Zeros have the same meaning to the caller!
   * @param rid Start the search from this index (exclusive).
   * @return Index of the next non-sparse value (this can include NA or Zero,
   *         depending on whether the chunk is zero-sparse or NA-sparse).
*/
public int nextNZ(int rid){ return rid + 1;}
/**
* Version of nextNZ() that allows caller to prevent skipping NAs.
   * @param rid Start the search from this index (exclusive).
   * @param onlyTrueZero if true, only actual Zeros can be skipped; NA-sparse chunks will be treated as dense.
   * @return Index of the next non-sparse value.
*/
public int nextNZ(int rid, boolean onlyTrueZero) { return rid + 1; }
/**
   * Get the chunk-relative indices of non-zero values stored in this chunk.
   * @param res array to receive the indices; must be large enough to hold all non-zero entries
   * @return the number of non-zero values found (the count of valid entries in {@code res}). */
public int nonzeros(int [] res) {
int k = 0;
for( int i = 0; i < _len; ++i)
if(atd(i) != 0)
res[k++] = i;
return k;
}
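  // Editor's illustrative sketch: sparse-aware scan over stored values only,
  // assuming nextNZ(-1) yields the first stored row (the dense default above
  // returns rid + 1, i.e. 0):
  //   for (int r = c.nextNZ(-1); r < c._len; r = c.nextNZ(r)) {
  //     double v = c.atd(r); // skips long zero runs on zero-sparse chunks
  //   }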
//NA sparse methods:
/** Sparse Chunks have a significant number of NAs, and support for
* skipping over large runs of NAs in a row.
* @return true if this Chunk is sparseNA. */
public boolean isSparseNA() {return false;}
/** Sparse Chunks have a significant number of NAs, and support for
* skipping over large runs of NAs in a row.
* @return At least as large as the count of non-NAs, but may be significantly smaller than the {@link #_len} */
public int sparseLenNA() {return _len;}
/** Report the Chunk min-value (excluding NAs), or NaN if unknown. Actual
* min can be higher than reported. Used to short-cut RollupStats for
* constant and boolean chunks. */
double min() { return Double.NaN; }
/** Report the Chunk max-value (excluding NAs), or NaN if unknown. Actual
* max can be lower than reported. Used to short-cut RollupStats for
* constant and boolean chunks. */
double max() { return Double.NaN; }
public final NewChunk inflate(){ return extractRows(new NewChunk(this), 0,_len);}
/** Return the next Chunk, or null if at end. Mostly useful for parsers or
* optimized stencil calculations that want to "roll off the end" of a
* Chunk, but in a highly optimized way. */
public Chunk nextChunk( ) { return _vec.nextChunk(this); }
/** @return String version of a Chunk, class name and range*/
@Override public String toString() { return getClass().getSimpleName() + "[" + _start + ".." + (_start + _len - 1) + "]"; }
/** In memory size in bytes of the compressed Chunk plus embedded array. */
public long byteSize() {
long s= _mem == null ? 0 : _mem.length;
s += (2+5)*8 + 12; // 2 hdr words, 5 other words, @8bytes each, plus mem array hdr
if( _chk2 != null ) s += _chk2.byteSize();
return s;
}
/** Custom serializers implemented by Chunk subclasses: the _mem field
* contains ALL the fields already. */
public final AutoBuffer write_impl(AutoBuffer bb) {return bb.putA1(_mem);}
@Override
public byte [] asBytes(){return _mem;}
@Override
public final Chunk reloadFromBytes(byte [] ary){
_mem = ary;
initFromBytes();
return this;
}
protected abstract void initFromBytes();
public final Chunk read_impl(AutoBuffer ab){
_mem = ab.getA1();
initFromBytes();
return this;
}
// /** Custom deserializers, implemented by Chunk subclasses: the _mem field
// * contains ALL the fields already. Init _start to -1, so we know we have
// * not filled in other fields. Leave _vec and _chk2 null, leave _len
// * unknown. */
// abstract public Chunk read_impl( AutoBuffer ab );
// -----------------
// Support for fixed-width format printing
// private String pformat () { return pformat0(); }
// private int pformat__len { return pformat_len0(); }
/** Fixed-width format printing support. Filled in by the subclasses. */
public byte precision() { return -1; } // Digits after the decimal, or -1 for "all"
// protected String pformat0() {
// long min = (long)_vec.min();
// if( min < 0 ) return "% "+pformat_len0()+"d";
// return "%"+pformat_len0()+"d";
// }
// protected int pformat_len0() {
// int len=0;
// long min = (long)_vec.min();
// if( min < 0 ) len++;
// long max = Math.max(Math.abs(min),Math.abs((long)_vec.max()));
// throw H2O.unimpl();
// //for( int i=1; i<DParseTask.powers10i.length; i++ )
// // if( max < DParseTask.powers10i[i] )
// // return i+len;
// //return 20;
// }
// protected int pformat_len0( double scale, int lg ) {
// double dx = Math.log10(scale);
// int x = (int)dx;
// throw H2O.unimpl();
// //if( DParseTask.pow10i(x) != scale ) throw H2O.unimpl();
// //int w=1/*blank/sign*/+lg/*compression limits digits*/+1/*dot*/+1/*e*/+1/*neg exp*/+2/*digits of exp*/;
// //return w;
// }
/** Used by the parser to help report various internal bugs. Not intended for public use. */
public final void reportBrokenCategorical(int i, int j, long l, int[] cmap, int levels) {
StringBuilder sb = new StringBuilder("Categorical renumber task, column # " + i + ": Found OOB index " + l + " (expected 0 - " + cmap.length + ", global domain has " + levels + " levels) pulled from " + getClass().getSimpleName() + "\n");
int k = 0;
for(; k < Math.min(5,_len); ++k)
sb.append("at8_abs[" + (k+_start) + "] = " + atd(k) + ", _chk2 = " + (_chk2 != null?_chk2.atd(k):"") + "\n");
k = Math.max(k,j-2);
sb.append("...\n");
for(; k < Math.min(_len,j+2); ++k)
sb.append("at8_abs[" + (k+_start) + "] = " + atd(k) + ", _chk2 = " + (_chk2 != null?_chk2.atd(k):"") + "\n");
sb.append("...\n");
k = Math.max(k,_len-5);
for(; k < _len; ++k)
sb.append("at8_abs[" + (k+_start) + "] = " + atd(k) + ", _chk2 = " + (_chk2 != null?_chk2.atd(k):"") + "\n");
throw new RuntimeException(sb.toString());
}
public abstract <T extends ChunkVisitor> T processRows(T v, int from, int to);
public abstract <T extends ChunkVisitor> T processRows(T v, int [] ids);
// convenience methods wrapping around visitor interface
public NewChunk extractRows(NewChunk nc, int from, int to){
return processRows(new ChunkVisitor.NewChunkVisitor(nc),from,to)._nc;
}
public NewChunk extractRows(NewChunk nc, int[] rows){
return processRows(new ChunkVisitor.NewChunkVisitor(nc),rows)._nc;
}
public NewChunk extractRows(NewChunk nc, int row){
return processRows(new ChunkVisitor.NewChunkVisitor(nc),row,row+1)._nc;
}
/**
   * Dense bulk interface; fetch values from the given row range.
   * @param vals array to be filled with the extracted values
   * @param from first chunk-relative row (inclusive)
   * @param to   last chunk-relative row (exclusive)
*/
public double [] getDoubles(double[] vals, int from, int to){ return getDoubles(vals,from,to, Double.NaN);}
public double [] getDoubles(double [] vals, int from, int to, double NA){
return processRows(new ChunkVisitor.DoubleAryVisitor(vals,NA),from,to).vals;
}
public double[] getDoubles() {
return getDoubles(MemoryManager.malloc8d(_len), 0, _len);
}
public int [] getIntegers(int [] vals, int from, int to, int NA){
return processRows(new ChunkVisitor.IntAryVisitor(vals,NA),from,to).vals;
}
/**
   * Dense bulk interface; fetch values at the given chunk-relative row ids.
   * @param vals array to be filled with the extracted values
   * @param ids  chunk-relative row ids to fetch
*/
public double[] getDoubles(double [] vals, int [] ids){
return processRows(new ChunkVisitor.DoubleAryVisitor(vals),ids).vals;
}
/**
   * Sparse bulk interface; stream through the compressed values and extract them into a dense double array.
* @param vals holds extracted values, length must be >= this.sparseLen()
* @param ids holds extracted chunk-relative row ids, length must be >= this.sparseLen()
* @return number of extracted (non-zero) elements, equal to sparseLen()
*/
public int getSparseDoubles(double[] vals, int[] ids){return getSparseDoubles(vals,ids,Double.NaN);}
public int getSparseDoubles(double [] vals, int [] ids, double NA) {
return processRows(new ChunkVisitor.SparseDoubleAryVisitor(vals,ids,isSparseNA(),NA),0,_len).sparseLen();
}
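  // Editor's illustrative sketch: dense vs. sparse bulk extraction, assuming
  // a zero-sparse Chunk c:
  //   double[] dense = c.getDoubles();             // all _len rows, NAs as NaN
  //   double[] vals  = new double[c.sparseLenZero()];
  //   int[]    ids   = new int[c.sparseLenZero()];
  //   int nz = c.getSparseDoubles(vals, ids);      // stored (non-zero) entries only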
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/ChunkUtils.java
|
package water.fvec;
import water.DKV;
import water.Key;
/**
* Simple helper class which publishes some frame and chunk package private methods as public
*/
public class ChunkUtils {
public static NewChunk[] createNewChunks(String name, byte[] vecTypes, int chunkId){
return Frame.createNewChunks(name, vecTypes, chunkId);
}
public static NewChunk[] createNewChunks(String name, byte[] vecTypes, int chunkId, boolean[] sparse) {
return Frame.createNewChunks(name, vecTypes, chunkId, sparse);
}
public static void closeNewChunks(NewChunk[] nchks){
Frame.closeNewChunks(nchks);
}
public static Chunk[] getChunks(Frame fr, int cidx) {
Chunk[] chunks = new Chunk[fr.vecs().length];
for(int i=0; i<fr.vecs().length; i++){
chunks[i] = fr.vec(i).chunkForChunkIdx(cidx);
}
return chunks;
}
public static void initFrame(String keyName, String[] names) {
Frame fr = new water.fvec.Frame(Key.<Frame>make(keyName));
fr.preparePartialFrame(names);
// Save it directly to DKV
fr.update();
}
public static Frame finalizeFrame(String keyName, long[] rowsPerChunk, byte[] colTypes, String[][] colDomains) {
return finalizeFrame(keyName, rowsPerChunk, colTypes, colDomains, true);
}
public static Frame finalizeFrame(String keyName, long[] rowsPerChunk, byte[] colTypes, String[][] colDomains, boolean unlock) {
Frame fr = DKV.getGet(keyName);
fr.finalizePartialFrame(rowsPerChunk, colDomains, colTypes, unlock);
return fr;
}
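  // Editor's illustrative sketch of the partial-frame workflow these helpers
  // expose (key name and shapes hypothetical):
  //   ChunkUtils.initFrame("myFrame", new String[]{"c0"});
  //   NewChunk[] ncs = ChunkUtils.createNewChunks("myFrame", new byte[]{Vec.T_NUM}, 0);
  //   ncs[0].addNum(1.5);
  //   ChunkUtils.closeNewChunks(ncs);
  //   Frame fr = ChunkUtils.finalizeFrame("myFrame", new long[]{1}, new byte[]{Vec.T_NUM}, null);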
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/ChunkVisitor.java
|
package water.fvec;
import water.parser.BufferedString;
import water.util.PrettyPrint;
/**
* Created by tomas on 3/8/17.
 * Base class for using the visitor pattern with chunks.
*/
public abstract class ChunkVisitor {
public boolean expandedVals() {
return false;
}
void addValue(BufferedString bs) {
throw new UnsupportedOperationException();
}
void addValue(long uuid_lo, long uuid_hi) {
throw new UnsupportedOperationException();
}
void addValue(int val) {
throw new UnsupportedOperationException();
}
void addValue(double val) {
throw new UnsupportedOperationException();
}
void addValue(long val) {
throw new UnsupportedOperationException();
}
void addValue(long m, int e) {
addValue(PrettyPrint.pow10(m,e));
}
void addZeros(int zeros) {
throw new UnsupportedOperationException();
}
void addNAs(int nas) {
throw new UnsupportedOperationException();
}
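  // Editor's illustrative sketch: a minimal custom visitor that sums values
  // (hypothetical, not part of the original file):
  //   static final class SumVisitor extends ChunkVisitor {
  //     double _sum;
  //     @Override void addValue(int val)    { _sum += val; }
  //     @Override void addValue(long val)   { _sum += val; }
  //     @Override void addValue(double val) { if (!Double.isNaN(val)) _sum += val; }
  //     @Override void addZeros(int zeros)  { /* zeros contribute nothing */ }
  //     @Override void addNAs(int nas)      { /* skip missing values */ }
  //   }
  //   double sum = chunk.processRows(new SumVisitor(), 0, chunk._len)._sum;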
/**
   * Visitor wrapper around NewChunk. Useful for extracting rows from chunks.
*/
public static final class NewChunkVisitor extends ChunkVisitor {
final NewChunk _nc;
public NewChunkVisitor(NewChunk nc){_nc = nc;}
@Override
public boolean expandedVals(){return true;}
@Override
void addValue(BufferedString bs){_nc.addStr(bs);}
@Override
void addValue(long uuid_lo, long uuid_hi){_nc.addUUID(uuid_lo,uuid_hi);}
@Override
void addValue(int val) {_nc.addNum(val,0);}
@Override
void addValue(long val) {_nc.addNum(val,0);}
@Override
void addValue(long val, int exp) {_nc.addNum(val,exp);}
@Override
void addValue(double val) {_nc.addNum(val);}
@Override
void addZeros(int zeros) {_nc.addZeros(zeros);}
@Override
void addNAs(int nas) {_nc.addNAs(nas);}
}
/**
* Simple chunk visitor for extracting rows from chunks into a double array.
*/
public static final class DoubleAryVisitor extends ChunkVisitor {
public final double [] vals;
private int _k = 0;
private final double _na;
DoubleAryVisitor(double [] vals){this(vals,Double.NaN);}
DoubleAryVisitor(double [] vals, double NA){
this.vals = vals; _na = NA;}
@Override
void addValue(int val) {
vals[_k++] = val;}
@Override
void addValue(long val) {
vals[_k++] = val;}
@Override
void addValue(double val) {
vals[_k++] = Double.isNaN(val)?_na:val;}
@Override
void addZeros(int zeros) {
int k = _k;
int kmax = k +zeros;
for(;k < kmax; k++) vals[k] = 0;
_k = kmax;
}
@Override
void addNAs(int nas) {
int k = _k;
int kmax = k + nas;
for(;k < kmax; k++) vals[k] = _na;
_k = kmax;
}
}
/**
* Simple chunk visitor for extracting rows from chunks into a sparse double array.
*/
public static final class SparseDoubleAryVisitor extends ChunkVisitor {
public final boolean naSparse;
public final double [] vals;
public final int [] ids;
private int _sparseLen;
private int _len;
private final double _na;
public int sparseLen(){return _sparseLen;}
SparseDoubleAryVisitor(double [] vals, int [] ids){this(vals,ids,false,Double.NaN);}
SparseDoubleAryVisitor(double [] vals, int [] ids, boolean naSparse){this(vals, ids, naSparse, Double.NaN);}
SparseDoubleAryVisitor(double [] vals, int [] ids, boolean naSparse, double NA){this.vals = vals; this.ids = ids; _na = NA; this.naSparse = naSparse;}
@Override
void addValue(int val) {ids[_sparseLen] = _len++; vals[_sparseLen++] = val;}
@Override
void addValue(long val) {ids[_sparseLen] = _len++; vals[_sparseLen++] = val;}
@Override
void addValue(double val) {ids[_sparseLen] = _len++; vals[_sparseLen++] = Double.isNaN(val)?_na:val;}
@Override
void addZeros(int zeros) {
if(naSparse) {
int kmax = _sparseLen + zeros;
for (int k = _sparseLen; k < kmax; k++) {
ids[k] = _len++;
vals[k] = 0;
}
_sparseLen = kmax;
} else
_len += zeros;
}
@Override
void addNAs(int nas) {
if(!naSparse) {
int kmax = _sparseLen + nas;
for (int k = _sparseLen; k < kmax; k++) {
ids[k] = _len++;
vals[k] = _na;
}
_sparseLen = kmax;
} else
_len += nas;
}
}
/**
   * Chunk visitor for combining values from a chunk with values in a given double array.
*/
public static final class CombiningDoubleAryVisitor extends ChunkVisitor {
public final double [] vals;
private int _k = 0;
private final double _na;
public CombiningDoubleAryVisitor(double [] vals){this(vals,Double.NaN);}
CombiningDoubleAryVisitor(double [] vals, double NA){
this.vals = vals; _na = NA;}
@Override
void addValue(int val) {
vals[_k++] += val;}
@Override
void addValue(long val) {
vals[_k++] += val;}
@Override
void addValue(double val) {
if (Double.isNaN(val))
vals[_k++] = _na;
else
vals[_k++] += val;}
@Override
void addZeros(int zeros) {
_k += zeros;
}
@Override
void addNAs(int nas) {
int k = _k;
int kmax = k + nas;
for(;k < kmax; k++) vals[k] = _na;
_k = kmax;
}
public void reset() {
_k = 0;
}
}
/**
   * Simple chunk visitor for extracting rows from chunks into an integer array.
*/
public static final class IntAryVisitor extends ChunkVisitor {
public final int [] vals;
private int _k = 0;
private final int _na;
IntAryVisitor(int [] vals, int NA){this.vals = vals; _na = NA;}
@Override
public void addValue(int val) {vals[_k++] = val;}
@Override
public void addValue(long val) {
if(Integer.MAX_VALUE < val || val < Integer.MIN_VALUE)
throw new RuntimeException(val + " does not fit into int");
vals[_k++] = (int)val;
}
@Override
public void addValue(double val) {
if (Double.isNaN(val)) {
vals[_k++] = _na;
} else {
int i = (int) val;
if (i != val)
throw new RuntimeException(val + " does not fit into int");
vals[_k++] = i;
}
}
@Override
public void addZeros(int zeros) {
int k = _k;
int kmax = k +zeros;
for(;k < kmax; k++)vals[k] = 0;
_k = kmax;
}
@Override
public void addNAs(int nas) {
int k = _k;
int kmax = k + nas;
for(;k < kmax; k++)vals[k] = _na;
_k = kmax;
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/CreateInteractions.java
|
package water.fvec;
import hex.Interaction;
import jsr166y.CountedCompleter;
import water.*;
import water.util.IcedHashMap;
import water.util.IcedLong;
import java.util.*;
/**
* Helper to create interaction features between categorical columns
*/
public class CreateInteractions extends H2O.H2OCountedCompleter {
public CreateInteractions(Interaction ci) { _job = ci._job._key; _ci = ci; }
// used for testing
public CreateInteractions(int maxFactors, int minOccurrence) {
_job=null;
_ci=new Interaction();
_ci._max_factors=maxFactors;
_ci._min_occurrence=minOccurrence;
}
final private Interaction _ci;
static final private int _missing = Integer.MIN_VALUE; //marker for missing factor level
static final String _other = "other"; // name for lost factor levels
private Frame _target;
final private Key<Job> _job;
private Map<Long, Long> _sortedMap = null;
private static Map<Long, Long> mySort(Map<IcedLong, IcedLong> unsortMap) {
List<Map.Entry<IcedLong, IcedLong>> list = new LinkedList<>(unsortMap.entrySet());
// Sorting the list based on values
Collections.sort(list, new Comparator<Map.Entry<IcedLong, IcedLong>>() {
public int compare(Map.Entry<IcedLong, IcedLong> o1, Map.Entry<IcedLong, IcedLong> o2) {
return ((Long)o2.getValue()._val).compareTo(o1.getValue()._val);
}
});
    // Maintain insertion order with the help of a LinkedHashMap
    Map<Long, Long> sortedMap = new LinkedHashMap<>();
for (Map.Entry<IcedLong, IcedLong> entry : list) {
sortedMap.put(entry.getKey()._val, entry.getValue()._val);
}
return sortedMap;
}
// Create a combined domain from the categorical values that map to domain A and domain B
// Both categorical integers are combined into a long = (int,int), and the unsortedMap keeps the occurrence count for each pair-wise interaction
public String[] makeDomain(Map<IcedLong, IcedLong> unsortedMap, String[] dA, String[] dB) {
String[] _domain;
// Log.info("Collected hash table");
// Log.info(java.util.Arrays.deepToString(unsortedMap.entrySet().toArray()));
// Log.info("Interaction between " + dA.length + " and " + dB.length + " factor levels => " +
// ((long)dA.length * dB.length) + " possible factors.");
_sortedMap = mySort(unsortedMap);
// create domain of the most frequent unique factors
long factorCount = 0;
// Log.info("Found " + _sortedMap.size() + " unique interaction factors (out of " + ((long)dA.length * (long)dB.length) + ").");
_domain = new String[_sortedMap.size()]; //TODO: use ArrayList here, then convert to array
Iterator it2 = _sortedMap.entrySet().iterator();
int d = 0;
while (it2.hasNext()) {
Map.Entry kv = (Map.Entry)it2.next();
final long ab = (Long)kv.getKey();
final long count = (Long)kv.getValue();
if (factorCount < _ci._max_factors && count >= _ci._min_occurrence) {
factorCount++;
// extract the two original factor categoricals
String feature = "";
if (dA != dB) {
int a = (int)(ab >> 32);
final String fA = a != _missing ? dA[a] : "NA";
feature = fA + "_";
}
int b = (int) ab;
String fB = b != _missing ? dB[b] : "NA";
feature += fB;
// Log.info("Adding interaction feature " + feature + ", occurrence count: " + count);
// Log.info("Total number of interaction factors so far: " + factorCount);
_domain[d++] = feature;
} else break;
}
if (d < _sortedMap.size()) {
// Log.info("Truncated map to " + _sortedMap.size() + " elements.");
String[] copy = new String[d+1];
System.arraycopy(_domain, 0, copy, 0, d);
copy[d] = _other;
_domain = copy;
Map tm = new LinkedHashMap<>();
it2 = _sortedMap.entrySet().iterator();
while (--d >= 0) {
Map.Entry kv = (Map.Entry) it2.next();
tm.put(kv.getKey(), kv.getValue());
}
_sortedMap = tm;
}
// Log.info("Created domain: " + Arrays.deepToString(_domain));
return _domain;
}
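  // Editor's illustrative note: with _max_factors=2, _min_occurrence=1 and
  // sorted counts {A_x:5, B_y:3, A_y:1}, makeDomain keeps {"A_x","B_y"} and
  // appends "other"; rows mapping to A_y are later binned into "other" by
  // fillInteractionCategoricals.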
private ArrayList<int[]> interactions() {
ArrayList<int[]> al = new ArrayList<>();
if (!_ci._pairwise || _ci._factors.length < 3) {
al.add(_ci._factors);
} else {
// pair-wise
for (int i = 0; i < _ci._factors.length; ++i) {
for (int j = i + 1; j < _ci._factors.length; ++j) {
al.add(new int[]{_ci._factors[i], _ci._factors[j]});
}
}
}
return al;
}
public int work() {
ArrayList<int[]> al = interactions();
int work=0;
for (int l=0; l<al.size(); ++l) {
int[] factors = al.get(l);
int start = factors.length == 1 ? 0 : 1;
for (int i = start; i < factors.length; ++i) {
work++;
}
}
return work;
}
@Override
public void compute2() {
DKV.remove(_ci._job._result);
Frame source_frame = DKV.getGet(_ci._source_frame);
ArrayList<int[]> al = interactions();
for (int l=0; l<al.size(); ++l) {
int[] factors = al.get(l);
int idx1 = factors[0];
Vec tmp = null;
int start = factors.length == 1 ? 0 : 1;
Frame _out = null;
for (int i = start; i < factors.length; ++i) {
String name;
int idx2 = factors[i];
if (i > 1) {
idx1 = _out.find(tmp);
assert idx1 >= 0;
name = _out._names[idx1] + "_" + source_frame._names[idx2];
} else {
name = source_frame._names[idx1] + "_" + source_frame._names[idx2];
}
// Log.info("Combining columns " + idx1 + " and " + idx2);
final Vec A = i > 1 ? _out.vecs()[idx1] : source_frame.vecs()[idx1];
final Vec B = source_frame.vecs()[idx2];
// Pass 1: compute unique domains of all interaction features
createInteractionDomain pass1 = new createInteractionDomain(A._key.equals(B._key), _ci._interactOnNA).doAll(A, B);
// Create a new Vec based on the domain
final Vec vec = source_frame.anyVec().makeZero(makeDomain(pass1._unsortedMap, A.domain(), B.domain()));
if (i > 1) {
_out.add(name, vec);
} else {
assert(_out == null);
_out = new Frame(new String[]{name}, new Vec[]{vec});
}
final Vec C = _out.lastVec();
// Create array of categorical pairs, in the same (sorted) order as in the _domain map -> for linear lookup
// Note: "other" is not mapped in keys, so keys.length can be 1 less than domain.length
long[] keys = new long[_sortedMap.size()];
int pos = 0;
for (long k : _sortedMap.keySet()) {
keys[pos++] = k;
}
assert (C.domain().length == keys.length || C.domain().length == keys.length + 1); // domain might contain _other
// Pass 2: fill Vec values
new fillInteractionCategoricals(A._key.equals(B._key), keys).doAll(A, B, C);
tmp = C;
// remove temporary vec
if (i > 1) {
final int idx = _out.vecs().length - 2; //second-last vec
// Log.info("Removing column " + _out._names[idx]);
_out.remove(idx).remove();
}
_ci._job.update(1);
}
if (_target == null) {
_target = new Frame(_ci._job._result, _out.names(), _out.vecs());
_target.delete_and_lock(_job);
} else {
_target.add(_out);
}
}
tryComplete();
}
@Override
public void onCompletion(CountedCompleter caller) {
_target.update(_job);
_target.unlock(_job);
}
// Create interaction domain
public static class createInteractionDomain extends MRTask<createInteractionDomain> {
// INPUT
final private boolean _same; // self interaction
final private boolean _interactOnNA; // allow NAs to count as lvls
final private int[] _restrictedEnumA;
final private int[] _restrictedEnumB;
// OUTPUT
private IcedHashMap<IcedLong, IcedLong> _unsortedMap = null;
public IcedHashMap<IcedLong, IcedLong> getMap() { return _unsortedMap; }
public createInteractionDomain(boolean same, boolean interactOnNA) { _same = same; _interactOnNA=interactOnNA; _restrictedEnumA = _restrictedEnumB =null; }
// TODO: continue to extend functionality here and bridge to InteractionWrappedVec so that code can be shared
public createInteractionDomain(boolean same, boolean interactOnNA, int[] restrictedEnumLeft, int[] restrictedEnumRite) {
_same = same; _interactOnNA=interactOnNA;
_restrictedEnumA =restrictedEnumLeft;
_restrictedEnumB =restrictedEnumRite;
}
@Override
public void map(Chunk A, Chunk B) {
_unsortedMap = new IcedHashMap<>();
// find unique interaction domain
HashSet<Integer> restrictedA = _restrictedEnumA==null?null: new HashSet<Integer>(),
restrictedB = _restrictedEnumB==null?null: new HashSet<Integer>();
if( restrictedA!=null ) for (int i: _restrictedEnumA) restrictedA.add(i);
if( restrictedB!=null ) for (int i: _restrictedEnumB) restrictedB.add(i);
for (int r = 0; r < A._len; r++) {
int a = A.isNA(r) ? _missing : (int)A.at8(r);
if( !_interactOnNA && a==_missing ) continue; // most readable way to express
if( restrictedA!=null && !restrictedA.contains(a) ) continue; // not part of the limited set
long ab;
if (!_same) {
int b = B.isNA(r) ? _missing : (int)B.at8(r);
if( !_interactOnNA && b==_missing ) continue;
if( restrictedB!=null && !restrictedB.contains(b) ) continue; // not part of the limited set
// key: combine both ints into a long
ab = ((long) a << 32) | (b & 0xFFFFFFFFL);
assert a == (int) (ab >> 32);
assert b == (int) ab;
} else {
if (a == _missing) continue;
ab = (long)a;
}
// add key to hash map, and count occurrences (for pruning)
IcedLong AB = new IcedLong(ab);
if (_unsortedMap.containsKey(AB)) {
_unsortedMap.get(AB)._val++;
} else {
_unsortedMap.put(AB, new IcedLong(1));
}
}
}
@Override
public void reduce(createInteractionDomain mrt) {
assert(mrt._unsortedMap != null);
assert(_unsortedMap != null);
for (Map.Entry<IcedLong,IcedLong> e : mrt._unsortedMap.entrySet()) {
IcedLong x = _unsortedMap.get(e.getKey());
if (x != null) {
x._val+=e.getValue()._val;
} else {
_unsortedMap.put(e.getKey(), e.getValue());
}
}
mrt._unsortedMap = null;
// Log.info("Merged hash tables");
// Log.info(java.util.Arrays.deepToString(_unsortedMap.entrySet().toArray()));
}
}
// Fill interaction categoricals in last Vec in Frame
private static class fillInteractionCategoricals extends MRTask<fillInteractionCategoricals> {
// INPUT
boolean _same;
final long[] _keys; //minimum information to be sent over the wire
transient private java.util.List<java.util.Map.Entry<Long,Integer>> _valToIndex; //node-local shared helper for binary search
public fillInteractionCategoricals(boolean same, long[] keys) {
_same = same; _keys = keys;
}
@Override
protected void setupLocal() {
// turn _keys into a sorted array of pairs
      _valToIndex = new java.util.ArrayList<>(); // map packed factor pair (long key) to domain index (int)
for (int i=0;i<_keys.length;++i) {
_valToIndex.add(new AbstractMap.SimpleEntry<>(_keys[i], i));
}
// sort by key (the factor level)
Collections.sort(_valToIndex, new Comparator<Map.Entry<Long, Integer>>() {
@Override public int compare(Map.Entry<Long, Integer> o1, Map.Entry<Long, Integer> o2) { return o1.getKey().compareTo(o2.getKey()); }
});
}
@Override
public void map(Chunk A, Chunk B, Chunk C) {
// find unique interaction domain
for (int r = 0; r < A._len; r++) {
final int a = A.isNA(r) ? _missing : (int)A.at8(r);
long ab;
if (!_same) {
final int b = B.isNA(r) ? _missing : (int) B.at8(r);
ab = ((long) a << 32) | (b & 0xFFFFFFFFL); // key: combine both ints into a long
} else {
ab = (long)a;
}
if (_same && A.isNA(r)) {
C.setNA(r);
} else {
// find _domain index for given factor level ab
int level = -1;
int pos = Collections.binarySearch(_valToIndex, new AbstractMap.SimpleEntry<Long,Integer>(ab,0), new Comparator<Map.Entry<Long, Integer>>() {
@Override public int compare(Map.Entry<Long, Integer> o1, Map.Entry<Long, Integer> o2) { return o1.getKey().compareTo(o2.getKey()); }
});
if (pos >= 0) {
level = _valToIndex.get(pos).getValue();
assert _keys[level] == ab; //confirm that binary search in _valToIndex worked
}
if (level < 0) {
for (int i=0; i<_keys.length; ++i) {
assert (_keys[i] != ab);
}
level = _fr.lastVec().domain().length-1;
assert _fr.lastVec().domain()[level].equals(_other);
}
C.set(r, level);
}
}
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/FileVec.java
|
package water.fvec;
import water.*;
import water.util.Log;
import water.util.MathUtils;
import water.util.UnsafeUtils;
public abstract class FileVec extends ByteVec {
long _len; // File length
private final byte _be;
public byte getBackend() {
return _be;
}
public String getPath() {
return getPathForKey(_key);
}
// Returns String with path for given key.
public static String getPathForKey(Key k) {
final int off = k._kb[0]==Key.CHK || k._kb[0]==Key.VEC ? Vec.KEY_PREFIX_LEN : 0;
String p = new String(k._kb,off,k._kb.length-off);
if(p.startsWith("nfs:/"))
p = p.substring("nfs:/".length());
else if (p.startsWith("nfs:\\"))
p = p.substring("nfs:\\".length());
return p;
}
/** Log-2 of Chunk size. */
public static final int DFLT_LOG2_CHUNK_SIZE = 20/*1Meg*/+2/*4Meg*/;
/** Default Chunk size in bytes, useful when breaking up large arrays into
* "bite-sized" chunks. Bigger increases batch sizes, lowers overhead
* costs, lower increases fine-grained parallelism. */
public static final int DFLT_CHUNK_SIZE = 1 << DFLT_LOG2_CHUNK_SIZE;
public int _chunkSize = DFLT_CHUNK_SIZE;
public int _nChunks = -1;
protected FileVec(Key key, long len, byte be) {
super(key,-1/*no rowLayout*/);
_len = len;
_be = be;
}
  public void setNChunks(int n){
    _nChunks = n;
    setChunkSize((int)(length()/n)); // divide before narrowing to avoid int overflow on files > 2GB
  }
/**
* Chunk size must be positive, 1G or less, and a power of two.
* Any values that aren't a power of two will be reduced to the
* first power of two lower than the provided chunkSize.
* <p>
   * Since the optimal chunk size is not known during FileVec instantiation,
   * this setter is required both to set it and to keep it in sync with
   * _log2ChkSize.
* </p>
* @param chunkSize requested chunk size to be used when parsing
* @return actual _chunkSize setting
*/
public int setChunkSize(int chunkSize) { return setChunkSize(null, chunkSize); }
public int setChunkSize(Frame fr, int chunkSize) {
// Clear cached chunks first
// Peeking into a file before the chunkSize has been set
// will load chunks of the file in DFLT_CHUNK_SIZE amounts.
// If this side-effect is not reversed when _chunkSize differs
    // from the default value, parsing will either double-read
    // sections (_chunkSize < DFLT_CHUNK_SIZE) or skip data
    // (_chunkSize > DFLT_CHUNK_SIZE). Removing the cached key here reverses that side-effect.
Futures fs = new Futures();
Keyed.remove(_key, fs, true);
fs.blockForPending();
if (chunkSize <= 0) throw new IllegalArgumentException("Chunk sizes must be > 0.");
if (chunkSize > (1<<30) ) throw new IllegalArgumentException("Chunk sizes must be < 1G.");
_chunkSize = chunkSize;
//Now reset the chunk size on each node
fs = new Futures();
DKV.put(_key, this, fs);
// also update Frame to invalidate local caches
if (fr != null ) {
fr.reloadVecs();
DKV.put(fr._key, fr, fs);
}
fs.blockForPending();
return _chunkSize;
}
@Override public long length() { return _len; }
@Override public int nChunks() {
if(_nChunks != -1) // number of chunks can be set explicitly
return _nChunks;
return (int)Math.max(1,_len / _chunkSize + ((_len % _chunkSize != 0)?1:0));
}
@Override public int nonEmptyChunks() {
return nChunks();
}
@Override public boolean writable() { return false; }
/** Size of vector data. */
@Override public long byteSize(){return length(); }
// Convert a row# to a chunk#. For constant-sized chunks this is a little
// shift-and-add math. For variable-sized chunks this is a binary search,
// with a sane API (JDK has an insane API).
@Override
public int elem2ChunkIdx(long i) {
assert 0 <= i && i <= _len : " "+i+" < "+_len;
int cidx = (int)(i/_chunkSize);
int nc = nChunks();
if( i >= _len ) return nc;
if( cidx >= nc ) cidx=nc-1; // Last chunk is larger
assert 0 <= cidx && cidx < nc;
return cidx;
}
// Convert a chunk-index into a starting row #. Constant sized chunks
// (except for the last, which might be a little larger), and size-1 rows so
// this is a little shift-n-add math.
@Override long chunk2StartElem( int cidx ) { return (long)cidx*_chunkSize; }
/** Convert a chunk-key to a file offset. Size 1-byte "rows", so this is a
* direct conversion.
* @return The file offset corresponding to this Chunk index */
public static long chunkOffset ( Key ckey ) { return (long)chunkIdx(ckey)*((FileVec)Vec.getVecKey(ckey).get())._chunkSize; }
// Reverse: convert a chunk-key into a cidx
static int chunkIdx(Key ckey) { assert ckey._kb[0]==Key.CHK; return UnsafeUtils.get4(ckey._kb, 1 + 1 + 4); }
// Convert a chunk# into a chunk - does lazy-chunk creation. As chunks are
// asked-for the first time, we make the Key and an empty backing DVec.
// Touching the DVec will force the file load.
@Override public Value chunkIdx( int cidx ) {
final long nchk = nChunks();
assert 0 <= cidx && cidx < nchk;
Key dkey = chunkKey(cidx);
Value val1 = DKV.get(dkey);// Check for an existing one... will fetch data as needed
if( val1 != null ) return val1; // Found an existing one?
// Lazily create a DVec for this chunk
int len = (int)(cidx < nchk-1 ? _chunkSize : (_len-chunk2StartElem(cidx)));
// DVec is just the raw file data with a null-compression scheme
Value val2 = new Value(dkey,len,null,TypeMap.C1NCHUNK,_be);
val2.setDsk(); // It is already on disk.
// If not-home, then block till the Key is everywhere. Most calls here are
// from the parser loading a text file, and the parser splits the work such
// that most puts here are on home - so this is a simple speed optimization:
// do not make a Futures nor block on it on home.
Futures fs = dkey.home() ? null : new Futures();
// Atomically insert: fails on a race, but then return the old version
Value val3 = DKV.DputIfMatch(dkey,val2,null,fs);
if( !dkey.home() && fs != null ) fs.blockForPending();
return val3 == null ? val2 : val3;
}
/**
* Calculates safe and hopefully optimal chunk sizes. Four cases
* exist.
* <p>
* very small data < 64K per core - uses default chunk size and
* all data will be in one chunk
* <p>
   * small data - data is partitioned such that there are at least
   * 4 chunks per core, to help keep all cores loaded
* <p>
* default - chunks are {@value #DFLT_CHUNK_SIZE}
* <p>
* large data - if the data would create more than 2M keys per
* node, then chunk sizes larger than DFLT_CHUNK_SIZE are issued.
* <p>
* Too many keys can create enough overhead to blow out memory in
* large data parsing. # keys = (parseSize / chunkSize) * numCols.
* Key limit of 2M is a guessed "reasonable" number.
*
* @param totalSize - parse size in bytes (across all files to be parsed)
* @param numCols - number of columns expected in dataset
* @param cores - number of processing cores per node
* @param cloudsize - number of compute nodes
* @param verbose - print the parse heuristics
* @return - optimal chunk size in bytes (always a power of 2).
*/
public static int calcOptimalChunkSize(long totalSize, int numCols, long maxLineLength, int cores, int cloudsize,
boolean oldHeuristic, boolean verbose) {
long localParseSize = (long) (double) totalSize / cloudsize;
if (oldHeuristic) {
long chunkSize = (localParseSize / (cores * 4));
// Super small data check - less than 64K/thread
if (chunkSize <= (1 << 16)) {
return DFLT_CHUNK_SIZE;
}
// Small data check
chunkSize = 1L << MathUtils.log2(chunkSize); //closest power of 2
if (chunkSize < DFLT_CHUNK_SIZE
&& (localParseSize/chunkSize)*numCols < (1 << 21)) { // ignore if col cnt is high
return (int)chunkSize;
}
// Big data check
long tmp = (localParseSize * numCols / (1 << 21)); // ~ 2M keys per node
if (tmp > (1 << 30)) return (1 << 30); // Max limit is 1G
if (tmp > DFLT_CHUNK_SIZE) {
chunkSize = 1 << MathUtils.log2((int) tmp); //closest power of 2
return (int)chunkSize;
} else return DFLT_CHUNK_SIZE;
}
else {
// New Heuristic
int minNumberRows = 10; // need at least 10 rows (lines) per chunk (core)
int perNodeChunkCountLimit = 1<<21; // don't create more than 2M Chunk POJOs per node
int minParseChunkSize = 1<<12; // don't read less than this many bytes
int maxParseChunkSize = (1<<28)-1; // don't read more than this many bytes per map() thread (needs to fit into a Value object)
long chunkSize = Math.max((localParseSize / (4*cores))+1, minParseChunkSize); //lower hard limit
if(chunkSize > 1024*1024)
chunkSize = (chunkSize & 0xFFFFFE00) + 512; // align chunk size to 512B
// Super small data check - file size is smaller than 64kB
if (totalSize <= 1<<16) {
chunkSize = Math.max(DFLT_CHUNK_SIZE, (int) (minNumberRows * maxLineLength));
} else {
//round down to closest power of 2
// chunkSize = 1L << MathUtils.log2(chunkSize);
// Small data check
if (chunkSize < DFLT_CHUNK_SIZE && (localParseSize / chunkSize) * numCols < perNodeChunkCountLimit) {
chunkSize = Math.max((int)chunkSize, (int) (minNumberRows * maxLineLength));
} else {
// Adjust chunkSize such that we don't create too many chunks
int chunkCount = cores * 4 * numCols;
if (chunkCount > perNodeChunkCountLimit) {
double ratio = 1 << Math.max(2, MathUtils.log2((int) (double) chunkCount / perNodeChunkCountLimit)); //this times too many chunks globally on the cluster
chunkSize *= ratio; //need to bite off larger chunks
}
chunkSize = Math.min(maxParseChunkSize, chunkSize); // hard upper limit
// if we can read at least minNumberRows and we don't create too large Chunk POJOs, we're done
// else, fix it with a catch-all heuristic
if (chunkSize <= minNumberRows * maxLineLength) {
// might be more than default, if the max line length needs it, but no more than the size limit(s)
// also, don't ever create too large chunks
chunkSize = (int) Math.max(
DFLT_CHUNK_SIZE, //default chunk size is a good lower limit for big data
Math.min(maxParseChunkSize, minNumberRows * maxLineLength) //don't read more than 1GB, but enough to read the minimum number of rows
);
}
}
}
assert(chunkSize >= minParseChunkSize);
assert(chunkSize <= maxParseChunkSize);
if (verbose)
Log.info("ParseSetup heuristic: "
+ "cloudSize: " + cloudsize
+ ", cores: " + cores
+ ", numCols: " + numCols
+ ", maxLineLength: " + maxLineLength
+ ", totalSize: " + totalSize
+ ", localParseSize: " + localParseSize
+ ", chunkSize: " + chunkSize
+ ", numChunks: " + Math.max(1,totalSize/chunkSize)
+ ", numChunks * cols: " + (Math.max(1,totalSize/chunkSize) * numCols)
);
return (int)chunkSize;
}
}
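  // Editor's illustrative sketch: a 1 GB single-node parse of a 10-column file
  // with 4 cores under the new heuristic,
  //   int cs = FileVec.calcOptimalChunkSize(1L<<30, 10, 100, 4, 1, false, false);
  // yields roughly localParseSize/(4*cores) bytes per chunk (about 64 MB here),
  // aligned to 512 bytes.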
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/FileVecUtils.java
|
package water.fvec;
import water.H2O;
import water.Key;
import water.Value;
import water.persist.PersistManager;
import java.io.IOException;
import java.util.Map;
public class FileVecUtils {
public static byte[] getFirstBytes(FileVec vec) {
return getFirstBytes(H2O.STORE, H2O.getPM(), vec);
}
static byte[] getFirstBytes(Map<Key, Value> store, PersistManager pm,
FileVec vec) {
if (store.get(vec.chunkKey(0)) != null) {
// if it looks like we have the chunk cached attempt to use it instead of fetching it again
return vec.getFirstChunkBytes();
}
try {
int max = (long) vec._chunkSize > vec._len ? (int) vec._len : vec._chunkSize;
return pm.load(Value.HDFS, vec._key, 0L, max);
} catch (IOException e) {
throw new RuntimeException("HDFS read failed", e);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/Frame.java
|
package water.fvec;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import water.*;
import water.api.FramesHandler;
import water.api.schemas3.KeyV3;
import water.exceptions.H2OFileAccessDeniedException;
import water.exceptions.H2OIllegalArgumentException;
import water.parser.BinaryFormatExporter;
import water.parser.BufferedString;
import water.rapids.Merge;
import water.util.*;
import java.io.IOException;
import java.io.InputStream;
import java.util.*;
import java.util.regex.Pattern;
/** A collection of named {@link Vec}s, essentially an R-like Distributed Data Frame.
*
* <p>Frames represent a large distributed 2-D table with named columns
* ({@link Vec}s) and numbered rows. A reasonable <em>column</em> limit is
* 100K columns, but there's no hard-coded limit. There's no real <em>row</em>
* limit except memory; Frames (and Vecs) with many billions of rows are used
* routinely.
*
* <p>A Frame is a collection of named Vecs; a Vec is a collection of numbered
* {@link Chunk}s. A Frame is small, cheaply and easily manipulated, it is
* commonly passed-by-Value. It exists on one node, and <em>may</em> be
* stored in the {@link DKV}. Vecs, on the other hand, <em>must</em> be stored in the
* {@link DKV}, as they represent the shared common management state for a collection
* of distributed Chunks.
*
* <p>Multiple Frames can reference the same Vecs, although this sharing can
* make Vec lifetime management complex. Commonly temporary Frames are used
* to work with a subset of some other Frame (often during algorithm
* execution, when some columns are dropped from the modeling process). The
* temporary Frame can simply be ignored, allowing the normal GC process to
* reclaim it. Such temp Frames usually have a {@code null} key.
*
* <p>All the Vecs in a Frame belong to the same {@link Vec.VectorGroup} which
* then enforces {@link Chunk} row alignment across Vecs (or at least enforces
* a low-cost access model). Parallel and distributed execution touching all
* the data in a Frame relies on this alignment to get good performance.
*
* <p>Example: Make a Frame from a CSV file:<pre>
* File file = ...
* NFSFileVec nfs = NFSFileVec.make(file); // NFS-backed Vec, lazily read on demand
* Frame fr = water.parser.ParseDataset.parse(Key.make("myKey"),nfs._key);
* </pre>
*
* <p>Example: Find and remove the Vec called "unique_id" from the Frame,
* since modeling with a unique_id can lead to overfitting:
* <pre>
* Vec uid = fr.remove("unique_id");
* </pre>
*
* <p>Example: Move the response column to the last position:
* <pre>
* fr.add("response",fr.remove("response"));
* </pre>
*
*/
public class Frame extends Lockable<Frame> {
/** Vec names */
public String[] _names;
private boolean _lastNameBig; // Last name is "Cxxx" and has largest number
private Key<Vec>[] _keys; // Keys for the vectors
private transient Vec[] _vecs; // The Vectors (transient to avoid network traffic)
private transient Vec _col0; // First readable vec; fast access to the VectorGroup's Chunk layout
/**
* Given a temp Frame and a base Frame from which it was created, delete the
* Vecs that aren't found in the base Frame and then delete the temp Frame.
*
* For most use cases with short-lived temp frames, use {@link Scope#protect(Frame...)} or a {@link Scope#safe(Frame...)} instead.
*/
public static void deleteTempFrameAndItsNonSharedVecs(Frame tempFrame, Frame baseFrame) {
Key[] keys = tempFrame.keys();
for( int i=0; i<keys.length; i++ )
if( baseFrame.find(keys[i]) == -1 ) //only delete vecs that aren't shared
Keyed.remove(keys[i]);
if (tempFrame._key != null)
DKV.remove(tempFrame._key); //delete the frame header
}
/**
* Fetch all Frames from the KV store.
*/
public static Frame[] fetchAll() {
// Get all the frames.
final Key[] frameKeys = KeySnapshot.globalKeysOfClass(Frame.class);
List<Frame> frames = new ArrayList<>(frameKeys.length);
for( Key key : frameKeys ) {
Frame frame = FramesHandler.getFromDKV("(none)", key);
// Weed out frames with vecs that are no longer in DKV
boolean skip = false;
for( Vec vec : frame.vecs() ) {
if (vec == null || DKV.get(vec._key) == null) {
Log.warn("Leaked frame: Frame "+frame._key+" points to one or more deleted vecs.");
skip = true;
break;
}
}
if (!skip) frames.add(frame);
}
return frames.toArray(new Frame[frames.size()]);
}
public boolean hasNAs(){
for(Vec v:bulkRollups())
if(v.naCnt() > 0) return true;
return false;
}
public boolean hasInfs() {
    // return true if the frame contains positive or negative infinity
for (Vec v : bulkRollups())
if (v.pinfs() > 0 || v.ninfs() > 0) return true;
return false;
}
private long _naCnt = -1;
synchronized public long naCount() {
    if (_naCnt != -1) return _naCnt;
_naCnt = 0;
for(Vec v: vecs()) _naCnt += v.naCnt();
return _naCnt;
}
  public double naFraction() {
    // cast to double: otherwise long division truncates the fraction to 0
    return (double) naCount() / (numCols() * numRows());
  }
/** Creates an internal frame composed of the given Vecs and default names. The frame has no key. */
public Frame(Vec... vecs){
this((String[]) null, vecs);
}
/** Creates an internal frame composed of the given Vecs and names. The frame has no key. */
public Frame(String names[], Vec vecs[]) {
this(null, names, vecs);
}
/** Creates an empty frame with given key. */
public Frame(Key<Frame> key) {
this(key, null, new Vec[0]);
}
/** Creates a frame with given key, default names and vectors. */
public Frame(Key<Frame> key, Vec vecs[]) {
this(key, null, vecs);
}
/** Creates a frame with given key, names and vectors. */
public Frame(Key<Frame> key, String names[], Vec vecs[] ) {
super(key);
// Require all Vecs already be installed in the K/V store
for( Vec vec : vecs ) DKV.prefetch(vec._key);
for( int i = 0; i < vecs.length; i++ ) {
assert DKV.get(vecs[i]._key) != null : " null vec: " + vecs[i]._key + "; " + (names != null ? "name: " + names[i] : "index: " + i);
}
// Always require names
if( names==null ) { // Make default names, all known to be unique
setNames(new String[vecs.length]);
_keys = makeVecKeys(vecs.length);
_vecs = vecs;
for( int i=0; i<vecs.length; i++ ) _names[i] = defaultColName(i);
for( int i=0; i<vecs.length; i++ ) _keys [i] = vecs[i]._key;
for( int i=0; i<vecs.length; i++ ) checkCompatibility(_names[i],vecs[i]);
_lastNameBig = true;
} else {
// Make empty to dodge asserts, then "add()" them all which will check
// for compatible Vecs & names.
_names = new String[0];
_keys = makeVecKeys(0);
_vecs = new Vec [0];
add(names,vecs);
}
assert _names.length == vecs.length;
}
void setNamesNoCheck(String[] columns){
_names = columns;
}
public final void setNames(String[] columns){
if (_vecs != null && columns.length != _vecs.length) {
throw new IllegalArgumentException("Number of column names=" + columns.length + " must be the number of vecs=" + _vecs.length);
}
_names = columns;
}
/** Deep copy of Vecs and Keys and Names (but not data!) to a new random Key.
* The resulting Frame does not share with the original, so the set of Vecs
* can be freely hacked without disturbing the original Frame. */
public Frame( Frame fr ) {
super( Key.<Frame>make() );
setNames(fr._names.clone());
_keys = fr._keys .clone();
_vecs = fr.vecs().clone();
_lastNameBig = fr._lastNameBig;
}
/** Default column name maker */
public static String defaultColName( int col ) { return "C"+(1+col); }
/**
* Helper method to initialize `_keys` array (which requires an unchecked cast).
* @param size number of elements in the array that will be created.
*/
@SuppressWarnings("unchecked")
private Key<Vec>[] makeVecKeys(int size) {
return new Key[size];
}
// Make unique names. Efficient for the special case of appending endless
// versions of "C123" style names where the next name is +1 over the prior
  // name. All other names take an O(n^2) lookup.
private int pint( String name ) {
try { return Integer.valueOf(name.substring(1)); }
catch(NumberFormatException ignored) { }
return 0;
}
public String uniquify( String name ) {
String n = name;
int lastName = 0;
if( name.length() > 0 && name.charAt(0)=='C' )
lastName = pint(name);
if( _lastNameBig && _names.length > 0 ) {
String last = _names[_names.length-1];
if( !last.equals("") && last.charAt(0)=='C' && lastName == pint(last)+1 )
return name;
}
int cnt=0, again, max=0;
do {
again = cnt;
for( String s : _names ) {
if( lastName > 0 && s.charAt(0)=='C' )
max = Math.max(max,pint(s));
if( n.equals(s) )
n = name+(cnt++);
}
} while( again != cnt );
if( lastName == max+1 ) _lastNameBig = true;
return n;
}
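  // Editor's illustrative note: with existing names {"C1","C2"}, uniquify("C2")
  // collides and returns "C2" + a running counter, i.e. "C20", after the O(n^2)
  // scan; uniquify("C3") can return "C3" directly via the fast append path when
  // _lastNameBig is set.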
/** Check that the vectors are all compatible. All Vecs have their content
* sharded using same number of rows per chunk, and all names are unique.
* Throw an IAE if something does not match. */
private void checkCompatibility(String name, Vec vec ) {
if( vec instanceof AppendableVec ) return; // New Vectors are endlessly compatible
Vec v0 = anyVec();
if( v0 == null ) return; // No fixed-size Vecs in the Frame
// Vector group has to be the same, or else the layout has to be the same,
// or else the total length has to be small.
if( !v0.isCompatibleWith(vec) ) {
if(!Vec.VectorGroup.sameGroup(v0,vec))
Log.err("Unexpected incompatible vector group, " + v0.group() + " != " + vec.group());
if(!Arrays.equals(v0.espc(), vec.espc()))
Log.err("Unexpected incompatible espc, " + Arrays.toString(v0.espc()) + " != " + Arrays.toString(vec.espc()));
throw new IllegalArgumentException("Vec " + name + " is not compatible with the rest of the frame");
}
}
  /** Frames are compatible if they have the same layout (number of rows and chunking) and the same vector group (chunk placement). */
public boolean isCompatible( Frame fr ) {
if( numRows() != fr.numRows() ) return false;
for( int i=0; i<vecs().length; i++ )
if( !vecs()[i].isCompatibleWith(fr.vecs()[i]) )
return false;
return true;
}
/** Number of columns
* @return Number of columns */
public int numCols() { return _keys == null? 0 : _keys.length; }
/** Number of rows
* @return Number of rows */
public long numRows() { Vec v = anyVec(); return v==null ? 0 : v.length(); }
/** Returns the first readable vector.
* @return the first readable Vec */
public final Vec anyVec() {
Vec c0 = _col0; // single read
if( c0 != null ) return c0;
for( Vec v : vecs() )
if( v.readable() )
return (_col0 = v);
return null;
}
/** The array of column names.
* @return the array of column names */
public String[] names() { return _names; }
/** A single column name.
* @return the column name */
public String name(int i) {
return _names[i];
}
/** The array of keys.
* @return the array of keys for each vec in the frame.
*/
public Key<Vec>[] keys() { return _keys; }
public Iterable<Key<Vec>> keysList() { return Arrays.asList(_keys); }
/** The internal array of Vecs. For efficiency Frames contain an array of
* Vec Keys - and the Vecs themselves are lazily loaded from the {@link DKV}.
* @return the internal array of Vecs */
public final Vec[] vecs() {
Vec[] tvecs = _vecs; // read the content
return tvecs == null ? (_vecs=vecs_impl()) : tvecs;
}
public final Vec[] vecs(int [] idxs) {
Vec [] all = vecs();
Vec [] res = new Vec[idxs.length];
for(int i = 0; i < idxs.length; ++i)
res[i] = all[idxs[i]];
return res;
}
public Vec[] vecs(String[] names) {
Vec [] res = new Vec[names.length];
for(int i = 0; i < names.length; ++i)
res[i] = vec(names[i]);
return res;
}
// Compute vectors for caching
private Vec[] vecs_impl() {
// Load all Vec headers; load them all in parallel by starting prefetches
for( Key<Vec> key : _keys ) DKV.prefetch(key);
Vec [] vecs = new Vec[_keys.length];
for( int i=0; i<_keys.length; i++ ) vecs[i] = _keys[i].get();
return vecs;
}
/** Convenience accessor for the last Vec
* @return last Vec */
public Vec lastVec() { vecs(); return _vecs [_vecs.length -1]; }
/** Convenience accessor for the last Vec name
* @return last Vec name */
public String lastVecName() { return _names[_names.length-1]; }
/** Force a cache-flush and reload, assuming vec mappings were altered
* remotely, or that the _vecs array was shared and now needs to be a
* defensive copy.
* @return the new instance of the Frame's Vec[] */
public final Vec[] reloadVecs() { _vecs=null; return vecs(); }
/** Returns the Vec by given index, implemented by code: {@code vecs()[idx]}.
* @param idx idx of column
* @return this frame idx-th vector, never returns <code>null</code> */
public final Vec vec(int idx) { return vecs()[idx]; }
/** Return a Vec by name, or null if missing
* @return a Vec by name, or null if missing */
public Vec vec(String name) { int idx = find(name); return idx==-1 ? null : vecs()[idx]; }
/** Finds the column index with a matching name, or -1 if missing
* @return the column index with a matching name, or -1 if missing */
public int find( String name ) {
if( name == null ) return -1;
assert _names != null;
// TODO: add a hashtable: O(n) is just stupid.
for( int i=0; i<_names.length; i++ )
if( name.equals(_names[i]) )
return i;
return -1;
}
/** Finds the matching column index, or -1 if missing
* @return the matching column index, or -1 if missing
* @deprecated as many columns in a Frame could be backed by the same Vec, we can't return single column index that corresponds to a given {@code vec}.
* Please use {@link #find(String)} instead.
*/
@Deprecated()
public int find( Vec vec ) {
Vec[] vecs = vecs(); //warning: side-effect
if (vec == null) return -1;
for( int i=0; i<vecs.length; i++ )
if( vec.equals(vecs[i]) )
return i;
return -1;
}
/** Finds the matching column index, or -1 if missing
* @return the matching column index, or -1 if missing
* @deprecated as many columns in a Frame could be backed by the same Vec (and its key), we can't return single column index that corresponds to a given {@code key}.
* Please use {@link #find(String)} instead.
*/
@Deprecated
public int find( Key key ) {
for( int i=0; i<_keys.length; i++ )
if( key.equals(_keys[i]) )
return i;
return -1;
}
/** Bulk {@link #find(String)} api
* @return An array of column indices matching the {@code names} array */
public int[] find(String[] names) {
if( names == null ) return null;
int[] res = new int[names.length];
for(int i = 0; i < names.length; ++i)
res[i] = find(names[i]);
return res;
}
public void insertVec(int i, String name, Vec vec) {
String [] names = new String[_names.length+1];
Vec [] vecs = new Vec[_vecs.length+1];
Key<Vec>[] keys = makeVecKeys(_keys.length + 1);
System.arraycopy(_names,0,names,0,i);
System.arraycopy(_vecs,0,vecs,0,i);
System.arraycopy(_keys,0,keys,0,i);
names[i] = name;
vecs[i] = vec;
keys[i] = vec._key;
System.arraycopy(_names,i,names,i+1,_names.length-i);
System.arraycopy(_vecs,i,vecs,i+1,_vecs.length-i);
System.arraycopy(_keys,i,keys,i+1,_keys.length-i);
_vecs = vecs;
setNames(names);
_keys = keys;
}
/** Pair of (column name, Frame key). */
public static class VecSpecifier extends Iced implements Vec.Holder {
public Key<Frame> _frame;
public String _column_name;
public VecSpecifier() {
}
public VecSpecifier(Key<Frame> frame, String column_name) {
_frame = frame;
_column_name = column_name;
}
public Vec vec() {
Value v = DKV.get(_frame);
if (null == v) return null;
Frame f = v.get();
if (null == f) return null;
return f.vec(_column_name);
}
/**
* @param vecSpecifiers An Array of vectors specifiers to extract column names from. May not be null.
* @return An array of String with names of the columns represented by each given VecSpecifier. Possibly empty.
* @throws NullPointerException When vecSpecifiers argument is null.
*/
public static String[] vecNames(final VecSpecifier[] vecSpecifiers) throws NullPointerException {
Objects.requireNonNull(vecSpecifiers);
final String[] vecNames = new String[vecSpecifiers.length];
for (int i = 0; i < vecSpecifiers.length; i++) {
vecNames[i] = vecSpecifiers[i]._column_name;
}
return vecNames;
}
}
/** Type for every Vec */
public byte[] types() {
Vec[] vecs = vecs();
byte bs[] = new byte[vecs.length];
for( int i=0; i<vecs.length; i++ )
bs[i] = vecs[i]._type;
return bs;
}
/** String name for each Vec type */
public String[] typesStr() { // typesStr not strTypes since shows up in intelliJ next to types
Vec[] vecs = vecs();
String s[] = new String[vecs.length];
for(int i=0;i<vecs.length;++i)
s[i] = vecs[i].get_type_str();
return s;
}
/** All the domains for categorical columns; null for non-categorical columns.
* @return the domains for categorical columns */
public String[][] domains() {
Vec[] vecs = vecs();
String ds[][] = new String[vecs.length][];
for( int i=0; i<vecs.length; i++ ) {
ds[i] = vecs[i].domain();
}
return ds;
}
/** Number of categorical levels for categorical columns; -1 for non-categorical columns.
* @return the number of levels for categorical columns */
public int[] cardinality() {
Vec[] vecs = vecs();
int[] card = new int[vecs.length];
for( int i=0; i<vecs.length; i++ )
card[i] = vecs[i].cardinality();
return card;
}
public Vec[] bulkRollups() {
Futures fs = new Futures();
Vec[] vecs = vecs();
for(Vec v : vecs) v.startRollupStats(fs);
fs.blockForPending();
return vecs;
}
/** Majority class for categorical columns; -1 for non-categorical columns.
* @return the majority class for categorical columns */
public int[] modes() {
Vec[] vecs = bulkRollups();
int[] modes = new int[vecs.length];
for( int i = 0; i < vecs.length; i++ ) {
modes[i] = vecs[i].isCategorical() ? vecs[i].mode() : -1;
}
return modes;
}
/** All the column means.
* @return the mean of each column */
public double[] means() {
Vec[] vecs = bulkRollups();
double[] means = new double[vecs.length];
for( int i = 0; i < vecs.length; i++ )
means[i] = vecs[i].mean();
return means;
}
/** One over the standard deviation of each column.
 * @return Reciprocal of the standard deviation of each column */
public double[] mults() {
Vec[] vecs = bulkRollups();
double[] mults = new double[vecs.length];
for( int i = 0; i < vecs.length; i++ ) {
double sigma = vecs[i].sigma();
mults[i] = standardize(sigma) ? 1.0 / sigma : 1.0;
}
return mults;
}
private static boolean standardize(double sigma) {
// TODO unify handling of constant columns
return sigma > 1e-6;
}
/** The {@code Vec.byteSize} of all Vecs
* @return the {@code Vec.byteSize} of all Vecs */
public long byteSize() {
try {
Vec[] vecs = bulkRollups();
long sum = 0;
for (Vec vec : vecs) sum += vec.byteSize();
return sum;
} catch(RuntimeException ex) {
Log.debug("Failure to obtain byteSize() - missing chunks?");
return -1;
}
}
/** 64-bit checksum of the checksums of the vecs. SHA-256 checksums of the
* chunks are XORed together. Since parse always parses the same pieces of
* files into the same offsets in some chunk this checksum will be
* consistent across reparses.
* @return 64-bit Frame checksum */
@Override protected long checksum_impl(boolean noCache) {
Vec[] vecs = vecs();
long _checksum = 0;
for( int i = 0; i < _names.length; ++i ) {
long vec_checksum = vecs[i].checksum(noCache);
_checksum ^= vec_checksum;
long tmp = (2147483647L * i);
_checksum ^= tmp;
}
_checksum *= (0xBABE + Arrays.hashCode(_names));
// TODO: include column types? Vec.checksum() should include type?
return _checksum;
}
// Add a bunch of vecs
public void add( String[] names, Vec[] vecs) {
bulkAdd(names, vecs);
}
public void add( String[] names, Vec[] vecs, int cols ) {
if (null == vecs || null == names) return;
if (cols == names.length && cols == vecs.length) {
bulkAdd(names, vecs);
} else {
for (int i = 0; i < cols; i++)
add(names[i], vecs[i]);
}
}
/** Append multiple named Vecs to the Frame. Names are forced unique, by appending a
* unique number if needed.
*/
private void bulkAdd(String[] names, Vec[] vecs) {
String[] tmpnames = names.clone();
int N = names.length;
assert(names.length == vecs.length):"names = " + Arrays.toString(names) + ", vecs len = " + vecs.length;
for (int i=0; i<N; ++i) {
vecs[i] = vecs[i] != null ? makeCompatible(new Frame(vecs[i]))[0] : null;
checkCompatibility(tmpnames[i]=uniquify(tmpnames[i]),vecs[i]); // Throw IAE if mismatch
}
int ncols = _keys.length;
// make temp arrays and don't assign them back until they are fully filled - otherwise vecs() can cache null's and NPE.
String[] tmpnam = Arrays.copyOf(_names, ncols+N);
Key<Vec>[] tmpkeys = Arrays.copyOf(_keys, ncols+N);
Vec[] tmpvecs = Arrays.copyOf(_vecs, ncols+N);
for (int i=0; i<N; ++i) {
tmpnam[ncols+i] = tmpnames[i];
tmpkeys[ncols+i] = vecs[i]._key;
tmpvecs[ncols+i] = vecs[i];
}
_keys = tmpkeys;
_vecs = tmpvecs;
setNames(tmpnam);
}
/** Append a named Vec to the Frame. Names are forced unique, by appending a
* unique number if needed.
* @return the added Vec, for flow-coding */
public Vec add( String name, Vec vec ) {
vec = makeCompatible(new Frame(vec))[0];
checkCompatibility(name=uniquify(name),vec); // Throw IAE if mismatch
int ncols = _keys.length;
String[] names = Arrays.copyOf(_names,ncols+1); names[ncols] = name;
Key<Vec>[] keys = Arrays.copyOf(_keys ,ncols+1); keys [ncols] = vec._key;
Vec[] vecs = Arrays.copyOf(_vecs ,ncols+1); vecs [ncols] = vec;
_keys = keys;
_vecs = vecs;
setNames(names);
return vec;
}
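// Usage sketch (hypothetical caller, assuming `fr` and `ageVec` exist): the return
// value enables flow-coding, e.g. immediately asking for rollups on the added Vec.
//   Vec added = fr.add("age", ageVec);
//   double mu = added.mean();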
/** Append a Frame onto this Frame. Names are forced unique, by appending
* unique numbers if needed.
* @return the expanded Frame, for flow-coding */
public Frame add( Frame fr ) { add(fr._names,fr.vecs().clone(),fr.numCols()); return this; }
/** Insert a named column as the first column */
public Frame prepend( String name, Vec vec ) {
if( find(name) != -1 ) throw new IllegalArgumentException("Duplicate name '"+name+"' in Frame");
if( _vecs.length != 0 ) {
if( !anyVec().group().equals(vec.group()) && !Arrays.equals(anyVec().espc(),vec.espc()) )
throw new IllegalArgumentException("Vector groups differs - adding vec '"+name+"' into the frame " + Arrays.toString(_names));
if( numRows() != vec.length() )
throw new IllegalArgumentException("Vector lengths differ - adding vec '"+name+"' into the frame " + Arrays.toString(_names));
}
final int len = _names != null ? _names.length : 0;
String[] _names2 = new String[len + 1];
Vec[] _vecs2 = new Vec[len + 1];
Key<Vec>[] _keys2 = makeVecKeys(len + 1);
_names2[0] = name;
_vecs2 [0] = vec;
_keys2 [0] = vec._key;
if (_names != null) {
System.arraycopy(_names, 0, _names2, 1, len);
System.arraycopy(_vecs, 0, _vecs2, 1, len);
System.arraycopy(_keys, 0, _keys2, 1, len);
}
_vecs = _vecs2;
_keys = _keys2;
setNames(_names2);
return this;
}
/** Swap two Vecs in-place; useful for sorting columns by some criteria */
public void swap( int lo, int hi ) {
assert 0 <= lo && lo < _keys.length;
assert 0 <= hi && hi < _keys.length;
if( lo==hi ) return;
Vec vecs[] = vecs();
Vec v = vecs [lo]; vecs [lo] = vecs [hi]; vecs [hi] = v;
Key<Vec> k = _keys[lo]; _keys[lo] = _keys[hi]; _keys[hi] = k;
String n=_names[lo]; _names[lo] = _names[hi]; _names[hi] = n;
}
/**
* Re-order the columns according to the new order specified in newOrder.
*
* @param newOrder
*/
public void reOrder(int[] newOrder) {
assert newOrder.length==_keys.length; // make sure column length match
int numCols = _keys.length;
Vec tmpvecs[] = vecs().clone();
Key<Vec> tmpkeys[] = _keys.clone();
String tmpnames[] = _names.clone();
for (int colIndex = 0; colIndex < numCols; colIndex++) {
tmpvecs[colIndex] = _vecs[newOrder[colIndex]];
tmpkeys[colIndex] = _keys[newOrder[colIndex]];
tmpnames[colIndex] = _names[newOrder[colIndex]];
}
// copy it back
for (int colIndex = 0; colIndex < numCols; colIndex++) {
_vecs[colIndex] = tmpvecs[colIndex];
_keys[colIndex] = tmpkeys[colIndex];
_names[colIndex] = tmpnames[colIndex];
}
}
/** Move the provided columns to the front, in-place. Currently used by Merge, since method='hash' was coded to expect the key columns first. */
public void moveFirst( int cols[] ) {
boolean colsMoved[] = new boolean[_keys.length];
Vec tmpvecs[] = vecs().clone();
Key<Vec> tmpkeys[] = _keys.clone();
String tmpnames[] = _names.clone();
// Move the desired ones first
for (int i=0; i<cols.length; i++) {
int w = cols[i];
if (colsMoved[w]) throw new IllegalArgumentException("Duplicates in column numbers passed in");
if (w<0 || w>=_keys.length) throw new IllegalArgumentException("column number out of 0-based range");
colsMoved[w] = true;
tmpvecs[i] = _vecs[w];
tmpkeys[i] = _keys[w];
tmpnames[i] = _names[w];
}
// Put the other ones afterwards
int w = cols.length;
for (int i=0; i<_keys.length; i++) {
if (!colsMoved[i]) {
tmpvecs[w] = _vecs[i];
tmpkeys[w] = _keys[i];
tmpnames[w] = _names[i];
w++;
}
}
// Copy back over the original in-place
for (int i=0; i<_keys.length; i++) {
_vecs[i] = tmpvecs[i];
_keys[i] = tmpkeys[i];
_names[i] = tmpnames[i];
}
}
/** Returns a subframe of this frame containing only vectors with desired names.
*
* @param names list of vector names
* @return a new frame which collects vectors from this frame with desired names.
* @throws IllegalArgumentException if there is no vector with desired name in this frame.
*/
public Frame subframe(String[] names) {
Vec[] vecs = new Vec[names.length];
vecs(); // Preload the vecs
HashMap<String, Integer> map = new HashMap<>((int) ((names.length/0.75f)+1)); // avoid rehashing by set up initial capacity
for (int i = 0; i < _names.length; i++)
map.put(_names[i], i);
int missingCnt = 0;
for (int i = 0; i < names.length; i++)
if (map.containsKey(names[i])) {
vecs[i] = _vecs[map.get(names[i])];
} else
missingCnt++;
if (missingCnt > 0) {
StringBuilder sb = new StringBuilder();
final int maxReported = missingCnt <= 5 ? missingCnt : 5;
int reported = 0;
for (int i = 0; i < names.length && reported < maxReported; i++) {
if (vecs[i] == null) {
sb.append('\'').append(names[i]).append('\'');
reported++;
if (reported < maxReported) {
sb.append(", ");
}
}
}
if (reported < missingCnt) {
sb.append(" (and other ").append(missingCnt - reported).append(")");
}
throw new IllegalArgumentException("Frame `" + _key + "` doesn't contain columns: " + sb.toString() + ".");
}
return new Frame(Key.<Frame>make("subframe" + Key.make().toString()), names, vecs);
}
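// Usage sketch (hypothetical column names): collect a view of two columns; throws
// IllegalArgumentException when any requested name is missing from the frame.
//   Frame sub = fr.subframe(new String[]{"age", "income"});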
/** Allow rollups for all written-into vecs; used by {@link MRTask} once
* writing is complete.
* @return the original Futures, for flow-coding */
public Futures postWrite(Futures fs) {
for( Vec v : vecs() ) v.postWrite(fs);
return fs;
}
/** Actually remove/delete all Vecs from memory, not just from the Frame.
* @return the original Futures, for flow-coding */
@Override protected Futures remove_impl(Futures fs, boolean cascade) {
final Key<Vec>[] keys = _keys;
if( keys.length==0 ) return fs;
// Get the nChunks without calling anyVec - which loads all Vecs eagerly,
// only to delete them. Supports Frames with some Vecs already deleted, as
// a Scope cleanup action might delete Vecs out of order.
Vec v = _col0;
if (v == null) {
Vec[] vecs = _vecs; // Read once, in case racily being cleared
if (vecs != null)
for (Vec vec : vecs)
if ((v = vec) != null) // Stop on finding the 1st Vec
break;
}
if (v == null) // Ok, now do DKV gets
for (Key<Vec> _key1 : _keys)
if ((v = _key1.get()) != null)
break; // Stop on finding the 1st Vec
if (v == null)
return fs;
_vecs = new Vec[0];
setNames(new String[0]);
_keys = makeVecKeys(0);
if (cascade) { // removing the vecs from mem only if cascading (default behaviour)
// Bulk dumb local remove - no JMM, no ordering, no safety.
Vec.bulk_remove(keys, v.nChunks());
}
return fs;
}
@Override
public Frame delete_and_lock(Key<Job> job_key) {
return super.delete_and_lock(job_key, true); // for Frames, remove dependencies (Vecs) by default when forcing internal delete
}
/**
* Removes this {@link Frame} object and all directly linked {@link Keyed} objects and POJOs, while retaining
* the keys defined by the retainedKeys parameter. Aimed to be used for removal of {@link Frame} objects pointing
* to shared resources (Vectors, Chunks etc.) internally.
* <p>
* WARNING: UNSTABLE API, might be removed/replaced at any time.
*
* @param futures An instance of {@link Futures} for synchronization
* @param retainedKeys A {@link Set} of keys to retain. The set may be immutable, as it shall not be modified.
* @return An instance of {@link Futures} for synchronization
*/
public final Futures retain(final Futures futures, final Set<Key> retainedKeys) {
if (_key != null) DKV.remove(_key);
final Key[] delCandidateKeys = _keys;
if (delCandidateKeys.length == 0) return futures;
// Get the nChunks without calling anyVec - which loads all Vecs eagerly,
// only to delete them. Supports Frames with some Vecs already deleted, as
// a Scope cleanup action might delete Vecs out of order.
Vec v = _col0;
if (v == null) {
Vec[] vecs = _vecs; // Read once, in case racily being cleared
if (vecs != null)
for (Vec vec : vecs)
if ((v = vec) != null) // Stop on finding the 1st Vec
break;
}
if (v == null) // Ok, now do DKV gets
for (Key<Vec> _key1 : _keys)
if ((v = _key1.get()) != null)
break; // Stop on finding the 1st Vec
if (v == null)
return futures;
_vecs = new Vec[0];
setNames(new String[0]);
_keys = makeVecKeys(0);
final List<Key> deletedKeys = new ArrayList<>();
for (int i = 0; i < delCandidateKeys.length; i++) {
if(!retainedKeys.contains(delCandidateKeys[i])){
deletedKeys.add(delCandidateKeys[i]);
}
}
// Bulk dumb local remove - no JMM, no ordering, no safety.
Vec.bulk_remove(deletedKeys.toArray(new Key[]{}), v.nChunks());
return futures;
}
/** Write out K/V pairs, in this case Vecs. */
@Override protected AutoBuffer writeAll_impl(AutoBuffer ab) {
ab.putA8(anyVec().espc());
for (Key k : _keys)
ab.putKey(k);
return super.writeAll_impl(ab);
}
@Override protected Keyed readAll_impl(AutoBuffer ab, Futures fs) {
long[] espc = ab.getA8();
_keys = new Vec.VectorGroup().addVecs(_keys.length);
// I am modifying self => I need to make an update
// This is more of a workaround, readAll_impl methods are not expected to modify self
DKV.put(this, fs);
int rowLayout = Vec.ESPC.rowLayout(_keys[0], espc);
for (Key<Vec> key : _keys) {
Vec v = ab.get();
v._key = key;
v._rowLayout = rowLayout;
v.readAll_impl(ab, fs);
DKV.put(v, fs);
}
return super.readAll_impl(ab,fs);
}
/** Replace one column with another. Caller must perform global update (DKV.put) on
* this updated frame.
* @return The old column, for flow-coding */
public Vec replace(int col, Vec nv) {
Vec rv = vecs()[col];
nv = ((new Frame(rv)).makeCompatible(new Frame(nv)))[0];
DKV.put(nv);
assert DKV.get(nv._key)!=null; // Already in DKV
assert rv.isCompatibleWith(nv);
_vecs[col] = nv;
_keys[col] = nv._key;
return rv;
}
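// Usage sketch (hypothetical caller): per the Javadoc above, the caller owns the
// global update of the Frame after swapping the column.
//   Vec old = fr.replace(0, transformed);
//   DKV.put(fr);    // caller must perform the global update
//   old.remove();   // optionally free the replaced column's storage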
/** Create a subframe from given interval of columns.
* @param startIdx index of first column (inclusive)
* @param endIdx index of the last column (exclusive)
* @return a new Frame containing specified interval of columns */
public Frame subframe(int startIdx, int endIdx) {
return new Frame(Arrays.copyOfRange(_names,startIdx,endIdx),Arrays.copyOfRange(vecs(),startIdx,endIdx));
}
/** Split this Frame; return a subframe created from the given column interval, and
* remove those columns from this Frame.
* @param startIdx index of first column (inclusive)
* @param endIdx index of the last column (exclusive)
* @return a new Frame containing specified interval of columns */
public Frame extractFrame(int startIdx, int endIdx) {
Frame f = subframe(startIdx, endIdx);
remove(startIdx, endIdx);
return f;
}
/** Removes the column with a matching name.
* @return The removed column */
public Vec remove( String name ) { return remove(find(name)); }
public Frame remove( String[] names ) {
for( String name : names )
remove(find(name));
return this;
}
/** Removes a list of columns by index; the index list must be sorted
* @return an array of the removed columns */
public Vec[] remove( int[] idxs ) {
for( int i : idxs )
if(i < 0 || i >= vecs().length)
throw new ArrayIndexOutOfBoundsException();
Arrays.sort(idxs);
Vec[] res = new Vec[idxs.length];
Vec[] rem = new Vec[_vecs.length-idxs.length];
String[] names = new String[rem.length];
Key<Vec>[] keys = makeVecKeys(rem.length);
int j = 0;
int k = 0;
int l = 0;
for(int i = 0; i < _vecs.length; ++i) {
if(j < idxs.length && i == idxs[j]) {
++j;
res[k++] = _vecs[i];
} else {
rem [l] = _vecs [i];
names[l] = _names[i];
keys [l] = _keys [i];
++l;
}
}
_vecs = rem;
setNames(names);
_keys = keys;
assert l == rem.length && k == idxs.length;
return res;
}
/** Removes a numbered column.
* @return the removed column */
public final Vec remove( int idx ) {
int len = _names.length;
if( idx < 0 || idx >= len ) return null;
Vec v = vecs()[idx];
if( v == _col0 ) _col0 = null;
_vecs = ArrayUtils.remove(_vecs, idx);
setNames(ArrayUtils.remove(_names, idx));
_keys = ArrayUtils.remove(_keys, idx);
return v;
}
/**
* Remove all the vecs from frame.
*/
public Vec[] removeAll() {
return remove(0, _names.length);
}
/** Remove given interval of columns from frame. Motivated by R intervals.
* @param startIdx - start index of column (inclusive)
* @param endIdx - end index of column (exclusive)
* @return array of removed columns */
Vec[] remove(int startIdx, int endIdx) {
int len = _names.length;
int nlen = len - (endIdx-startIdx);
String[] names = new String[nlen];
Key<Vec>[] keys = makeVecKeys(nlen);
Vec[] vecs = new Vec[nlen];
vecs();
if (startIdx > 0) {
System.arraycopy(_names, 0, names, 0, startIdx);
System.arraycopy(_vecs, 0, vecs, 0, startIdx);
System.arraycopy(_keys, 0, keys, 0, startIdx);
}
nlen -= startIdx;
if (endIdx < _names.length+1) {
System.arraycopy(_names, endIdx, names, startIdx, nlen);
System.arraycopy(_vecs, endIdx, vecs, startIdx, nlen);
System.arraycopy(_keys, endIdx, keys, startIdx, nlen);
}
Vec[] vecX = Arrays.copyOfRange(_vecs,startIdx,endIdx);
_vecs = vecs;
_keys = keys;
setNames(names);
_col0 = null;
return vecX;
}
/** Restructure a Frame completely */
public void restructure( String[] names, Vec[] vecs) {
restructure(names, vecs, vecs.length);
}
/** Restructure a Frame completely, but only for a specified number of columns (counting up) */
public void restructure( String[] names, Vec[] vecs, int cols) {
// Make empty to dodge asserts, then "add()" them all which will check for
// compatible Vecs & names.
_keys = makeVecKeys(0);
_vecs = new Vec [0];
setNames(new String[0]);
add(names,vecs,cols);
}
// --------------------------------------------
// Utilities to help external Frame constructors, e.g. Spark.
// Make an initial Frame & lock it for writing. Build Vec Keys.
void preparePartialFrame( String[] names ) {
// Nuke any prior frame (including freeing storage) & lock this one
if( _keys != null ) delete_and_lock();
else write_lock();
_keys = new Vec.VectorGroup().addVecs(names.length);
setNamesNoCheck(names);
// No Vectors though! These will be added *after* the import
}
// Only serialize strings, not H2O internal structures
// Make NewChunks for holding data from e.g. Spark. Once per set of
// Chunks in a Frame, before filling them. This can be called in parallel
// for different Chunk#'s (cidx); each Chunk can be filled in parallel.
static NewChunk[] createNewChunks(String name, byte[] type, int cidx) {
boolean[] sparse = new boolean[type.length];
Arrays.fill(sparse, false);
return createNewChunks(name, type, cidx, sparse);
}
static NewChunk[] createNewChunks(String name, byte[] type, int cidx, boolean[] sparse) {
Frame fr = (Frame) Key.make(name).get();
NewChunk[] nchks = new NewChunk[fr.numCols()];
for (int i = 0; i < nchks.length; i++) {
nchks[i] = new NewChunk(new AppendableVec(fr._keys[i], type[i]), cidx, sparse[i]);
}
return nchks;
}
// Compress & DKV.put NewChunks. Once per set of Chunks in a Frame, after
// filling them. Can be called in parallel for different sets of Chunks.
static void closeNewChunks(NewChunk[] nchks) {
Futures fs = new Futures();
for (NewChunk nchk : nchks) {
nchk.close(fs);
}
fs.blockForPending();
}
// Build real Vecs from loose Chunks, and finalize this Frame. Called once
// after any number of [create,close]NewChunks. This method also unlocks the frame.
void finalizePartialFrame(long[] espc, String[][] domains, byte[] types) {
finalizePartialFrame(espc, domains, types, true);
}
// Build real Vecs from loose Chunks, and finalize this Frame. Called once
// after any number of [create,close]NewChunks.
void finalizePartialFrame(long[] espc, String[][] domains, byte[] types, boolean unlock) {
// Compute elems-per-chunk.
// Roll-up elem counts, so espc[i] is the starting element# of chunk i.
int nchunk = espc.length;
while( nchunk > 1 && espc[nchunk-1] == 0 )
nchunk--;
long espc2[] = new long[nchunk+1]; // Shorter array
long x=0; // Total row count so far
for( int i=0; i<nchunk; i++ ) {
espc2[i] = x; // Start elem# for chunk i
x += espc[i]; // Raise total elem count
}
espc2[nchunk]=x; // Total element count in last
// For all Key/Vecs - insert Vec header
Futures fs = new Futures();
_vecs = new Vec[_keys.length];
for( int i=0; i<_keys.length; i++ ) {
// Nuke the extra chunks
for (int j = nchunk; j < espc.length; j++)
DKV.remove(Vec.chunkKey(_keys[i], j), fs);
// Insert Vec header
Vec vec = _vecs[i] = new Vec( _keys[i],
Vec.ESPC.rowLayout(_keys[i],espc2),
domains!=null ? domains[i] : null,
types[i]);
// Here we have to save vectors since
// saving during unlock will invoke Frame vector
// refresh
DKV.put(_keys[i],vec,fs);
}
fs.blockForPending();
if (unlock) {
unlock();
}
}
// --------------------------------------------------------------------------
static final int MAX_EQ2_COLS = 100000; // Limit of columns user is allowed to request
/** In support of R, a generic Deep Copy and Slice.
*
* <p>Semantics are a little odd, to match R's. Each dimension spec can be:<ul>
* <li><em>null</em> - all of them
* <li><em>a sorted list of negative numbers (no dups)</em> - all BUT these
* <li><em>an unordered list of positive numbers</em> - just these, allowing dups
* </ul>
*
* <p>The numbering is 1-based; zero's are not allowed in the lists, nor are out-of-range values.
* @return the sliced Frame
*/
public Frame deepSlice( Object orows, Object ocols ) {
// ocols is either a long[] or a Frame-of-1-Vec
long[] cols;
if( ocols == null ) cols = null;
else if (ocols instanceof long[]) cols = (long[])ocols;
else if (ocols instanceof Frame) {
Frame fr = (Frame) ocols;
if (fr.numCols() != 1)
throw new IllegalArgumentException("Columns Frame must have only one column (actually has " + fr.numCols() + " columns)");
long n = fr.anyVec().length();
if (n > MAX_EQ2_COLS)
throw new IllegalArgumentException("Too many requested columns (requested " + n +", max " + MAX_EQ2_COLS + ")");
cols = new long[(int)n];
Vec.Reader v = fr.anyVec().new Reader();
for (long i = 0; i < v.length(); i++)
cols[(int)i] = v.at8(i);
} else
throw new IllegalArgumentException("Columns is specified by an unsupported data type (" + ocols.getClass().getName() + ")");
// Since cols is probably short convert to a positive list.
int c2[];
if( cols==null ) {
c2 = new int[numCols()];
for( int i=0; i<c2.length; i++ ) c2[i]=i;
} else if( cols.length==0 ) {
c2 = new int[0];
} else if( cols[0] >= 0 ) {
c2 = new int[cols.length];
for( int i=0; i<cols.length; i++ )
c2[i] = (int)cols[i]; // Conversion of 1-based cols to 0-based is handled by a 1-based front-end!
} else {
c2 = new int[numCols()-cols.length];
int j=0;
for( int i=0; i<numCols(); i++ ) {
if( j >= cols.length || i < (-(1+cols[j])) ) c2[i-j] = i;
else j++;
}
}
for (int aC2 : c2)
if (aC2 >= numCols())
throw new IllegalArgumentException("Trying to select column " + (aC2 + 1) + " but only " + numCols() + " present.");
if( c2.length==0 )
throw new IllegalArgumentException("No columns selected (did you try to select column 0 instead of column 1?)");
// Do Da Slice
// orows is either a long[] or a Vec
if (numRows() == 0) {
return new MRTask() {
@Override public void map(Chunk[] chks, NewChunk[] nchks) { for (NewChunk nc : nchks) nc.addNA(); }
}.doAll(types(c2), this).outputFrame(names(c2), domains(c2));
}
if (orows == null)
return new DeepSlice(null,c2,vecs()).doAll(types(c2),this).outputFrame(names(c2),domains(c2));
else if (orows instanceof long[]) {
final long CHK_ROWS=1000000;
final long[] rows = (long[])orows;
if (this.numRows() == 0) {
return this;
}
if( rows.length==0 || rows[0] < 0 ) {
if (rows.length != 0 && rows[0] < 0) {
Vec v0 = this.anyVec().makeZero();
Vec v = new MRTask() {
@Override public void map(Chunk cs) {
for (long er : rows) {
if (er >= 0) continue;
er = Math.abs(er);
if (er < cs._start || er > (cs._len + cs._start - 1)) continue;
cs.set((int) (er - cs._start), 1);
}
}
}.doAll(v0).getResult()._fr.anyVec();
Keyed.remove(v0._key);
Frame slicedFrame = new DeepSlice(rows, c2, vecs()).doAll(types(c2), this.add("select_vec", v)).outputFrame(names(c2), domains(c2));
Keyed.remove(v._key);
Keyed.remove(this.remove(this.numCols() - 1)._key);
return slicedFrame;
} else {
return new DeepSlice(rows.length == 0 ? null : rows, c2, vecs()).doAll(types(c2), this).outputFrame(names(c2), domains(c2));
}
}
// Vec'ize the index array
Futures fs = new Futures();
AppendableVec av = new AppendableVec(Vec.newKey(),Vec.T_NUM);
int r = 0;
int c = 0;
while (r < rows.length) {
NewChunk nc = new NewChunk(av, c);
long end = Math.min(r+CHK_ROWS, rows.length);
for (; r < end; r++) {
nc.addNum(rows[r]);
}
nc.close(c++, fs);
}
Vec c0 = av.layout_and_close(fs); // c0 is the row index vec
fs.blockForPending();
Frame ff = new Frame(new String[]{"rownames"}, new Vec[]{c0});
Frame fr2 = new Slice(c2, this).doAll(types(c2),ff).outputFrame(names(c2), domains(c2));
Keyed.remove(c0._key);
Keyed.remove(av._key);
ff.delete();
return fr2;
}
Frame frows = (Frame)orows;
// It's a compatible Vec; use it as boolean selector.
// Build column names for the result.
Vec [] vecs = new Vec[c2.length];
String [] names = new String[c2.length];
for(int i = 0; i < c2.length; ++i){
vecs[i] = _vecs[c2[i]];
names[i] = _names[c2[i]];
}
Frame ff = new Frame(names, vecs);
ff.add("predicate", frows.anyVec());
return new DeepSelect().doAll(types(c2),ff).outputFrame(names(c2),domains(c2));
}
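// Usage sketch (hypothetical frame; at this layer column indices are 0-based -- the
// 1-based R front-end converts before calling, per the in-body comment above):
//   Frame someCols = fr.deepSlice(null, new long[]{0, 2});      // all rows, cols 0 and 2
//   Frame dropCol0 = fr.deepSlice(null, new long[]{-1});        // all rows, all cols BUT col 0
//   Frame someRows = fr.deepSlice(new long[]{0, 5, 9}, null);   // selected rows, all cols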
// Slice and return in the form of new chunks.
private static class Slice extends MRTask<Slice> {
final Frame _base; // the base frame to slice from
final int[] _cols;
Slice(int[] cols, Frame base) { _cols = cols; _base = base; }
@Override public void map(Chunk[] ix, NewChunk[] ncs) {
final Vec[] vecs = new Vec[_cols.length];
final Vec anyv = _base.anyVec();
final long nrow = anyv.length();
long r = ix[0].at8(0);
int last_ci = anyv.elem2ChunkIdx(r<nrow?r:0); // memoize the last chunk index
long last_c0 = anyv.espc()[last_ci]; // ... last chunk start
long last_c1 = anyv.espc()[last_ci + 1]; // ... last chunk end
Chunk[] last_cs = new Chunk[vecs.length]; // ... last chunks
for (int c = 0; c < _cols.length; c++) {
vecs[c] = _base.vecs()[_cols[c]];
last_cs[c] = vecs[c].chunkForChunkIdx(last_ci);
}
for (int i = 0; i < ix[0]._len; i++) {
// select one row
r = ix[0].at8(i); // next row to select
if (r < 0) continue;
if (r >= nrow) {
for (int c = 0; c < vecs.length; c++) ncs[c].addNA();
} else {
if (r < last_c0 || r >= last_c1) {
last_ci = anyv.elem2ChunkIdx(r);
last_c0 = anyv.espc()[last_ci];
last_c1 = anyv.espc()[last_ci + 1];
for (int c = 0; c < vecs.length; c++)
last_cs[c] = vecs[c].chunkForChunkIdx(last_ci);
}
int ir = (int)(r - last_cs[0].start());
for (int c = 0; c < vecs.length; c++)
last_cs[c].extractRows(ncs[c],ir);
}
}
}
}
// Convert len rows starting at off to a 2-d ascii table
@Override public String toString( ) {
return ("Frame key: " + _key + "\n") +
" cols: " + numCols() + "\n" +
" rows: " + numRows() + "\n" +
" chunks: " + (anyVec() == null ? "N/A" : anyVec().nChunks()) + "\n" +
" size: " + byteSize() + "\n";
}
public String toString(long off, int len) { return toTwoDimTable(off, len).toString(); }
public String toString(long off, int len, boolean rollups) { return toTwoDimTable(off, len, rollups).toString(); }
public TwoDimTable toTwoDimTable() { return toTwoDimTable(0,10); }
public TwoDimTable toTwoDimTable(long off, int len ) { return toTwoDimTable(off,len,true); }
public TwoDimTable toTwoDimTable(long off, int len, boolean rollups ) {
if( off > numRows() ) off = numRows();
if( off+len > numRows() ) len = (int)(numRows()-off);
String[] rowHeaders = new String[len];
int H=0;
if( rollups ) {
H = 5;
rowHeaders = new String[len+H];
rowHeaders[0] = "min";
rowHeaders[1] = "mean";
rowHeaders[2] = "stddev";
rowHeaders[3] = "max";
rowHeaders[4] = "missing";
for( int i=0; i<len; i++ ) rowHeaders[i+H]=""+(off+i);
}
final int ncols = numCols();
final Vec[] vecs = vecs();
String[] coltypes = new String[ncols];
String[][] strCells = new String[len+H][ncols];
double[][] dblCells = new double[len+H][ncols];
final BufferedString tmpStr = new BufferedString();
for( int i=0; i<ncols; i++ ) {
if( DKV.get(_keys[i]) == null ) { // deleted Vec in Frame
coltypes[i] = "string";
for( int j=0; j<len+H; j++ ) dblCells[j][i] = TwoDimTable.emptyDouble;
for( int j=0; j<len; j++ ) strCells[j+H][i] = "NO_VEC";
continue;
}
Vec vec = vecs[i];
if( rollups ) {
dblCells[0][i] = vec.min();
dblCells[1][i] = vec.mean();
dblCells[2][i] = vec.sigma();
dblCells[3][i] = vec.max();
dblCells[4][i] = vec.naCnt();
}
switch( vec.get_type() ) {
case Vec.T_BAD:
coltypes[i] = "string";
for( int j=0; j<len; j++ ) { strCells[j+H][i] = null; dblCells[j+H][i] = TwoDimTable.emptyDouble; }
break;
case Vec.T_STR :
coltypes[i] = "string";
for( int j=0; j<len; j++ ) { strCells[j+H][i] = vec.isNA(off+j) ? "" : vec.atStr(tmpStr,off+j).toString(); dblCells[j+H][i] = TwoDimTable.emptyDouble; }
break;
case Vec.T_CAT:
coltypes[i] = "string";
for( int j=0; j<len; j++ ) { strCells[j+H][i] = vec.isNA(off+j) ? "" : vec.factor(vec.at8(off+j)); dblCells[j+H][i] = TwoDimTable.emptyDouble; }
break;
case Vec.T_TIME:
coltypes[i] = "string";
DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss");
for( int j=0; j<len; j++ ) { strCells[j+H][i] = vec.isNA(off+j) ? "" : fmt.print(vec.at8(off+j)); dblCells[j+H][i] = TwoDimTable.emptyDouble; }
break;
case Vec.T_NUM:
coltypes[i] = vec.isInt() ? "long" : "double";
for( int j=0; j<len; j++ ) { dblCells[j+H][i] = vec.isNA(off+j) ? TwoDimTable.emptyDouble : vec.at(off + j); strCells[j+H][i] = null; }
break;
case Vec.T_UUID:
throw H2O.unimpl();
default:
System.err.println("bad vector type during debug print: "+vec.get_type());
throw H2O.fail();
}
}
return new TwoDimTable("Frame "+_key,numRows()+" rows and "+numCols()+" cols",rowHeaders,/* clone the names, the TwoDimTable will replace nulls with ""*/_names.clone(),coltypes,null, "", strCells, dblCells);
}
// Bulk (expensive) copy of the selected source columns into new output columns,
// sliced by the given cols & rows.
private static class DeepSlice extends MRTask<DeepSlice> {
final int _cols[];
final long _rows[];
final byte _isInt[];
DeepSlice( long rows[], int cols[], Vec vecs[] ) {
_cols=cols;
_rows=rows;
_isInt = new byte[cols.length];
for( int i=0; i<cols.length; i++ )
_isInt[i] = (byte)(vecs[cols[i]].isInt() ? 1 : 0);
}
@Override public boolean logVerbose() { return false; }
@Override public void map( Chunk chks[], NewChunk nchks[] ) {
long rstart = chks[0]._start;
int rlen = chks[0]._len; // Total row count
int rx = 0; // Which row to in/ex-clude
int rlo = 0; // Lo/Hi for this block of rows
int rhi = rlen;
while (true) { // Still got rows to include?
if (_rows != null) { // Got a row selector?
if (rx >= _rows.length) break; // All done with row selections
long r = _rows[rx++];// Next row selector
if (r < rstart) continue;
rlo = (int) (r - rstart);
rhi = rlo + 1; // Stop at the next row
while (rx < _rows.length && (_rows[rx] - rstart) == rhi && rhi < rlen) {
rx++;
rhi++; // Grab sequential rows
}
}
// Process this next set of rows
// For all cols in the new set;
BufferedString tmpStr = new BufferedString();
for (int i = 0; i < _cols.length; i++)
chks[_cols[i]].extractRows(nchks[i], rlo,rhi);
rlo = rhi;
if (_rows == null) break;
}
}
}
/**
 * Create a deep copy of this Frame and return it. All Vecs are copied in parallel.
 * Caller must do the DKV.put of the resulting Frame.
 * @param keyName Key for the resulting frame. If null, no key will be given.
 * @return A fresh copy of this Frame.
 */
public Frame deepCopy(String keyName) {
final Vec [] vecs = vecs().clone();
Key [] ks = anyVec().group().addVecs(vecs.length);
Futures fs = new Futures();
for(int i = 0; i < vecs.length; ++i)
DKV.put(vecs[i] = new Vec(ks[i], anyVec()._rowLayout, vecs[i].domain(),vecs()[i]._type),fs);
new MRTask() {
@Override public void map(Chunk[] cs) {
int cidx = cs[0].cidx();
for(int i = 0; i < cs.length; ++i)
DKV.put(vecs[i].chunkKey(cidx),cs[i].deepCopy(),_fs);
}
}.doAll(this);//.outputFrame(keyName==null?null:Key.make(keyName),this.names(),this.domains());
fs.blockForPending();
return new Frame((keyName==null?null:Key.<Frame>make(keyName)),this.names(),vecs);
}
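// Usage sketch (hypothetical key name): per the Javadoc, the caller does the DKV.put.
//   Frame copy = fr.deepCopy("my_copy");
//   DKV.put(copy);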
/**
* Last column is a bit vec indicating whether or not to take the row.
*/
public static class DeepSelect extends MRTask<DeepSelect> {
@Override public void map( Chunk[] chks, NewChunk [] nchks ) {
Chunk pred = chks[chks.length - 1];
int[] ids = pred.getIntegers(new int[pred._len],0,pred._len,0);
int zeros = 0;
for(int i = 0; i < ids.length; ++i)
if(ids[i] == 1){
ids[i-zeros] = i;
} else zeros++;
ids = Arrays.copyOf(ids,ids.length-zeros);
for (int c = 0; c < chks.length-1; ++c)
chks[c].extractRows(nchks[c], ids);
}
}
private String[][] domains(int [] cols){
Vec[] vecs = vecs();
String[][] res = new String[cols.length][];
for(int i = 0; i < cols.length; ++i)
res[i] = vecs[cols[i]].domain();
return res;
}
private String [] names(int [] cols){
if(_names == null)return null;
String [] res = new String[cols.length];
for(int i = 0; i < cols.length; ++i)
res[i] = _names[cols[i]];
return res;
}
private byte[] types(int [] cols){
Vec[] vecs = vecs();
byte[] res = new byte[cols.length];
for(int i = 0; i < cols.length; ++i)
res[i] = vecs[cols[i]]._type;
return res;
}
public Vec[] makeCompatible( Frame f) {return makeCompatible(f,false);}
/** Return {@code f}'s array of Vectors if {@code f} is compatible with {@code this};
 * otherwise return a new array of Vectors that is compatible with {@code this} and
 * holds a copy of {@code f}'s data. Note that this can, in the worst case, copy all
 * of {@code f}'s data.
 * @return {@code f}'s data in an array of Vectors compatible with {@code this}. */
public Vec[] makeCompatible( Frame f, boolean force) {
// Small data frames are always "compatible"
if (anyVec() == null) // Or it is empty (no columns)
return f.vecs(); // Then must be compatible
Vec v1 = anyVec();
Vec v2 = f.anyVec();
if (v1 != null && v2 != null && v1.length() != v2.length())
throw new IllegalArgumentException("Can not make vectors of different length compatible!");
if (v1 == null || v2 == null || (!force && v1.isCompatibleWith(v2)))
return f.vecs();
// Ok, here make some new Vecs with compatible layout
Key<?> k = Key.make();
H2O.submitTask(new RebalanceDataSet(this, f, k)).join();
Frame f2 = (Frame)k.get();
DKV.remove(k);
for (Vec v : f2.vecs()) Scope.track(v);
return f2.vecs();
}
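// Usage sketch (hypothetical frames): align a test frame's layout with a training
// frame before a row-wise task; may rebalance (copy) the test data in the worst case.
//   Vec[] aligned = train.makeCompatible(test);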
/**
* Make rows of a given frame distributed similarly to this frame.
* This method is useful e.g. when we have a training frame, and we
* want to adapt the testing frame so that there is proportionally
* similar amount of testing data in each chunk to the amount of
* training data.
* The resulting frame will have the same number of chunks as this
* frame.
*
* @param f frame that we want to re-distributed
* @param newKey key for a newly created frame
* @return new frame or the given frame if nothing needs to be done
*/
public Frame makeSimilarlyDistributed(Frame f, Key<Frame> newKey) {
final Vec model = anyVec();
final Vec source = f.anyVec();
if (model == null || source == null) { // model/source frame has no columns
return f;
}
if (model.length() == 0) {
throw new IllegalStateException("Cannot make frame similarly distributed to a model frame that has no rows.");
}
final long[] newESPC = new long[model.espc().length];
final double ratio = (double) source.length() / model.length();
int firstNonEmpty = -1;
long totalAllocated = 0;
for (int i = 0; i < newESPC.length - 1; i++) {
int newLen = (int) (ratio * model.chunkLen(i));
totalAllocated += newLen;
newESPC[i + 1] = newLen + newESPC[i];
if (firstNonEmpty == -1 && newLen > 0) {
firstNonEmpty = i;
}
}
assert firstNonEmpty >= 0;
// put rows that we didn't allocate due to rounding errors into a first non-empty chunk
final int diff = (int) (source.length() - totalAllocated);
assert diff >= 0;
for (int i = firstNonEmpty; i < newESPC.length - 1; i++) {
newESPC[i + 1] += diff;
}
H2O.submitTask(new RebalanceDataSet(newESPC, new Vec.VectorGroup(), f, newKey)).join();
return newKey.get();
}
public static Job export(Frame fr, String path, String frameName, boolean overwrite, int nParts) {
return export(fr, path, frameName, overwrite, nParts, null, new CSVStreamParams());
}
public static Job export(Frame fr, String path, String frameName, boolean overwrite, int nParts,
String compression, CSVStreamParams csvParms) {
return export(fr, path, frameName, overwrite, nParts, false, compression, csvParms);
}
public static Job export(Frame fr, String path, String frameName, boolean overwrite, int nParts, boolean parallel,
String compression, CSVStreamParams csvParms) {
boolean forceSingle = nParts == 1;
// Validate input
if (H2O.getPM().isFileAccessDenied(path)) {
throw new H2OFileAccessDeniedException("File " + path + " access denied");
}
if (forceSingle) {
boolean fileExists = H2O.getPM().exists(path);
if (overwrite && fileExists) {
Log.warn("File " + path + " exists, but will be overwritten!");
} else if (!overwrite && fileExists) {
throw new H2OIllegalArgumentException(path, "exportFrame", "File " + path + " already exists!");
}
} else {
if (! H2O.getPM().isEmptyDirectoryAllNodes(path)) {
throw new H2OIllegalArgumentException(path, "exportFrame", "Cannot use path " + path +
" to store part files! The target needs to be either an existing empty directory or not exist yet.");
}
}
CompressionFactory compressionFactory = compression != null ? CompressionFactory.make(compression) : null;
Job job = new Job<>(fr._key, "water.fvec.Frame", "Export dataset");
FrameUtils.ExportTaskDriver t = new FrameUtils.ExportTaskDriver(
fr, path, frameName, overwrite, job, nParts, parallel, compressionFactory, csvParms);
return job.start(t, fr.anyVec().nChunks());
}
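// Usage sketch (hypothetical path): export a frame to a single CSV part, overwriting
// any existing file, with default CSV parameters; Job#get is assumed to block until done.
//   Job job = Frame.export(fr, "/tmp/out.csv", fr._key.toString(), true, 1);
//   job.get();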
public static Job exportParquet(Frame fr, String path, boolean overwrite, String compression, boolean writeChecksum, boolean tzAdjustFromLocal) {
// Validate input
if (H2O.getPM().isFileAccessDenied(path)) {
throw new H2OFileAccessDeniedException("File " + path + " access denied");
}
if (! H2O.getPM().isEmptyDirectoryAllNodes(path)) {
throw new H2OIllegalArgumentException(path, "exportFrame", "Cannot use path " + path +
" to store part files! The target needs to be either an existing empty directory or not exist yet.");
}
BinaryFormatExporter parquetExporter = null;
ServiceLoader<BinaryFormatExporter> parquetExporters = ServiceLoader.load(BinaryFormatExporter.class);
for (BinaryFormatExporter exporter : parquetExporters) {
if (exporter.supports(ExportFileFormat.parquet)) {
parquetExporter = exporter;
break;
}
}
if (parquetExporter == null) {
Log.warn("No parquet exporter on the classpath!");
throw new RuntimeException("No parquet exporter on the classpath!");
}
Job job = new Job<>(fr._key, "water.fvec.Frame", "Export dataset");
H2O.H2OCountedCompleter t = parquetExporter.export(fr, path, overwrite, compression, writeChecksum, tzAdjustFromLocal);
return job.start(t, fr.anyVec().nChunks());
}
/** Convert this Frame to a CSV (in an {@link InputStream}) that is optionally
 * compatible with R 3.1's change to read.csv()'s behavior.
*
* WARNING: Note that the end of a file is denoted by the read function
* returning 0 instead of -1.
*
* @return An InputStream containing this Frame as a CSV */
public InputStream toCSV(CSVStreamParams parms) {
return new CSVStream(this, parms);
}
public static class CSVStreamParams extends Iced<CSVStreamParams> {
public static final char DEFAULT_SEPARATOR = ',';
public static final char DEFAULT_ESCAPE = '"';
boolean _headers = true;
boolean _quoteColumnNames = true;
boolean _hexString = false;
boolean _escapeQuotes = true;
char _separator = DEFAULT_SEPARATOR;
char _escapeCharacter = DEFAULT_ESCAPE;
public CSVStreamParams setHeaders(boolean headers) {
_headers = headers;
return this;
}
public CSVStreamParams noHeader() {
setHeaders(false);
return this;
}
public CSVStreamParams setQuoteColumnNames(boolean quoteColumnNames) {
_quoteColumnNames = quoteColumnNames;
return this;
}
public CSVStreamParams setHexString(boolean hex_string) {
_hexString = hex_string;
return this;
}
public CSVStreamParams setSeparator(byte separator) {
_separator = (char) separator;
return this;
}
public CSVStreamParams setEscapeQuotes(boolean backslash_escape) {
_escapeQuotes = backslash_escape;
return this;
}
public CSVStreamParams setEscapeChar(char escapeChar) {
_escapeCharacter = escapeChar;
return this;
}
}
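// Usage sketch (hypothetical frame): the setters above return `this`, so a
// headerless, tab-separated stream can be configured inline, builder-style.
//   InputStream is = fr.toCSV(new CSVStreamParams()
//       .noHeader()
//       .setSeparator((byte) '\t'));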
public static class CSVStream extends InputStream {
private static final Pattern DOUBLE_QUOTE_PATTERN = Pattern.compile("\"");
private static final Set<Character> SPECIAL_CHARS = Collections.unmodifiableSet(new HashSet<>(
Arrays.asList('\\', '|', '(', ')')
));
private final CSVStreamParams _parms;
private final Pattern escapingPattern;
private final String escapeReplacement;
byte[] _line;
int _position;
int _chkRow;
Chunk[] _curChks;
int _lastChkIdx;
public volatile int _curChkIdx; // used only for progress reporting
private transient final String[][] _escapedCategoricalVecDomains;
private transient final VecEncoder[] _encoders;
public CSVStream(Frame fr, CSVStreamParams parms) {
this(firstChunks(fr), parms._headers ? fr.names() : null, fr.anyVec().nChunks(), parms);
}
public CSVStream(Chunk[] chks, String[] names, int nChunks, CSVStreamParams parms) {
if (chks == null) nChunks = 0;
_lastChkIdx = (chks != null) ? chks[0].cidx() + nChunks - 1 : -1;
_parms = Objects.requireNonNull(parms);
if (_parms._escapeCharacter != CSVStreamParams.DEFAULT_ESCAPE) {
String escapeCharacterEscaped =
(SPECIAL_CHARS.contains(_parms._escapeCharacter) ? "\\" : "") + _parms._escapeCharacter;
escapingPattern = Pattern.compile("(\"|" + escapeCharacterEscaped + ")");
escapeReplacement = escapeCharacterEscaped + "$1";
} else {
escapingPattern = DOUBLE_QUOTE_PATTERN;
escapeReplacement = "\"\"";
}
StringBuilder sb = new StringBuilder();
if (names != null) {
appendColumnName(sb, names[0]);
for (int i = 1; i < names.length; i++)
appendColumnName(sb.append(_parms._separator), names[i]);
sb.append('\n');
}
_line = StringUtils.bytesOf(sb);
_chkRow = -1; // first process the header line
_curChks = chks;
_escapedCategoricalVecDomains = escapeCategoricalVecDomains(_curChks);
if (_curChks != null) {
_encoders = new VecEncoder[_curChks.length];
for (int i = 0; i < _curChks.length; i++) {
Vec v = _curChks[i]._vec;
if (v.isCategorical()) {
_encoders[i] = VecEncoder.CAT;
} else if (v.isUUID()) {
_encoders[i] = VecEncoder.UUID;
} else if (v.isInt()) {
_encoders[i] = VecEncoder.INT;
} else if (v.isString()) {
_encoders[i] = VecEncoder.STRING;
} else {
_encoders[i] = VecEncoder.NUM;
}
}
} else
_encoders = null;
}
private void appendColumnName(StringBuilder sb, String name) {
if (_parms._quoteColumnNames)
sb.append('"');
sb.append(name);
if (_parms._quoteColumnNames)
sb.append('"');
}
private static Chunk[] firstChunks(Frame fr) {
Vec anyvec = fr.anyVec();
if (anyvec == null || anyvec.nChunks() == 0 || anyvec.length() == 0) {
return null;
}
Chunk[] chks = new Chunk[fr.vecs().length];
for (int i = 0; i < fr.vecs().length; i++) {
chks[i] = fr.vec(i).chunkForRow(0);
}
return chks;
}
/**
 * Escapes and quotes the categorical levels of every categorical vector and stores them
 * per column. Non-categorical columns get a null entry; for those, callers should reach
 * for the `vec.domain()` method itself rather than duplicate entries in memory here.
 *
 * @param chunks chunks whose vectors' categorical domains should be escaped
 * @return A 2D String array of escaped domains; an element is null if the corresponding column is not categorical.
 */
private String[][] escapeCategoricalVecDomains(final Chunk[] chunks) {
if(chunks == null) return null;
final String[][] localEscapedCategoricalVecDomains = new String[chunks.length][];
for (int i = 0; i < chunks.length; i++) {
final Vec vec = chunks[i].vec();
if (!vec.isCategorical()) continue;
final String[] originalDomain = vec.domain();
final String[] escapedDomain = new String[originalDomain.length];
for (int level = 0; level < originalDomain.length; level++) {
escapedDomain[level] = '"' + escapeQuotesForCsv(originalDomain[level]) + '"';
}
localEscapedCategoricalVecDomains[i] = escapedDomain;
}
return localEscapedCategoricalVecDomains;
}
public int getCurrentRowSize() {
int av = available();
assert av > 0;
return _line.length;
}
enum VecEncoder {CAT, UUID, INT, STRING, NUM};
byte[] getBytesForRow() {
StringBuilder sb = new StringBuilder();
final BufferedString unescapedTempStr = new BufferedString();
for (int i = 0; i < _curChks.length; i++) {
if (i > 0) sb.append(_parms._separator);
if (!_curChks[i].isNA(_chkRow)) {
switch (_encoders[i]) {
case NUM:
// R 3.1 unfortunately changed the behavior of read.csv().
// (Really type.convert()).
//
// Numeric values with too much precision now trigger a type conversion in R 3.1 into a factor.
//
// See these discussions:
// https://bugs.r-project.org/bugzilla/show_bug.cgi?id=15751
// https://stat.ethz.ch/pipermail/r-devel/2014-April/068778.html
// http://stackoverflow.com/questions/23072988/preserve-old-pre-3-1-0-type-convert-behavior
double d = _curChks[i].atd(_chkRow);
String s = _parms._hexString ? Double.toHexString(d) : RyuDouble.doubleToString(d);
sb.append(s);
break;
case CAT:
final String escapedCat = _escapedCategoricalVecDomains[i][(int) _curChks[i].at8(_chkRow)];
sb.append(escapedCat);
break;
case INT:
sb.append(_curChks[i].at8(_chkRow));
break;
case STRING:
final String escapedString = escapeQuotesForCsv(_curChks[i].atStr(unescapedTempStr, _chkRow).toString());
sb.append('"').append(escapedString).append('"');
break;
case UUID:
sb.append(PrettyPrint.UUID(_curChks[i].at16l(_chkRow), _curChks[i].at16h(_chkRow)));
break;
default:
throw new IllegalStateException("Unknown encoder " + _encoders[i]);
}
}
}
sb.append('\n');
return StringUtils.bytesOf(sb);
}
/**
* Escapes double-quotes (ASCII 34) in a String.
*
* @param unescapedString An unescaped {@link String} to escape
* @return String with escaped double-quotes, if found.
*/
private String escapeQuotesForCsv(final String unescapedString) {
if (!_parms._escapeQuotes) return unescapedString;
return escapingPattern.matcher(unescapedString).replaceAll(escapeReplacement);
}
@Override
public int available() {
// Case 1: There is more data left to read from the current line.
if (_position != _line.length) {
return _line.length - _position;
}
// Case 2: There are no chunks to work with (e.g. the whole Frame was empty).
if (_curChks == null) {
return -1;
}
_chkRow++;
Chunk anyChunk = _curChks[0];
// Case 3: Out of data.
if (anyChunk._start + _chkRow >= anyChunk._vec.length()) {
return -1;
}
// Case 4: Out of data in the current chunks => fast-forward to the next set of non-empty chunks.
if (_chkRow == anyChunk.len()) {
_curChkIdx = anyChunk._vec.elem2ChunkIdx(anyChunk._start + _chkRow); // skips empty chunks
// Processed all requested chunks => end of stream.
if (_curChkIdx > _lastChkIdx) {
return -1;
}
// fetch the next non-empty chunks
Chunk[] newChks = new Chunk[_curChks.length];
for (int i = 0; i < _curChks.length; i++) {
newChks[i] = _curChks[i]._vec.chunkForChunkIdx(_curChkIdx);
// flush the remote chunk
Key oldKey = _curChks[i]._vec.chunkKey(_curChks[i]._cidx);
if (! oldKey.home()) {
H2O.raw_remove(oldKey);
}
}
_curChks = newChks;
_chkRow = 0;
}
// Case 5: Return data for the current row.
_line = getBytesForRow();
_position = 0;
return _line.length;
}
@Override public void close() throws IOException {
super.close();
_line = null;
}
@Override public int read() throws IOException {
return available() == 0 ? -1 : _line[_position++];
}
@Override public int read(byte[] b, int off, int len) throws IOException {
int n = available();
if(n > 0) {
n = Math.min(n, len);
System.arraycopy(_line, _position, b, off, n);
_position += n;
}
return n;
}
}
@Override public Class<KeyV3.FrameKeyV3> makeSchema() { return KeyV3.FrameKeyV3.class; }
/** Sort rows of a frame, using the given set of columns as sort keys. The sorting direction for each
 * sort column can be specified in an integer array. For example, to sort frame fr on columns 0, 1, 2
 * with column 0 ascending and columns 1 and 2 descending, the call to make is
 * fr.sort(new int[]{0,1,2}, new int[]{1, -1, -1}).
 *
 * @return Copy of frame, sorted */
public Frame sort( int[] cols ) {
return Merge.sort(this,cols);
}
public Frame sort(int[] cols, int[] ascending) {
return Merge.sort(this, cols, ascending);
}
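// Usage sketch matching the Javadoc above: sort by columns 0, 1, 2 with column 0
// ascending and columns 1 and 2 descending.
//   Frame sorted = fr.sort(new int[]{0, 1, 2}, new int[]{1, -1, -1});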
/**
* A structure for fast lookup in the set of this frame's vectors.
* The purpose of this class is to avoid repeated O(n) searches over {@link Frame}'s vectors.
*
* @return An instance of {@link FrameVecRegistry}
*/
public FrameVecRegistry frameVecRegistry() {
return new FrameVecRegistry();
}
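// Usage sketch (hypothetical column name): build the registry once, then do
// constant-time lookups instead of repeated linear find(name) scans.
//   FrameVecRegistry reg = fr.frameVecRegistry();
//   Vec age = reg.findByColName("age");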
/**
* Returns the original frame with specific column converted to categorical
*/
public Frame toCategoricalCol(int columnIdx){
write_lock();
replace(columnIdx, vec(columnIdx).toCategoricalVec()).remove();
// Update frame in DKV
update();
unlock();
return this;
}
/**
* Returns the original frame with specific column converted to categorical
*/
public Frame toCategoricalCol(String column){
return toCategoricalCol(find(column));
}
public class FrameVecRegistry {
private LinkedHashMap<String, Vec> vecMap;
private FrameVecRegistry() {
vecMap = new LinkedHashMap<>(_vecs.length);
for (int i = 0; i < _vecs.length; i++) {
vecMap.put(_names[i], _vecs[i]);
}
}
/**
* Finds a Vec by column name
*
* @param colName Column name to search for, case-sensitive
* @return An instance of {@link Vec}, if found. Otherwise null.
*/
public Vec findByColName(final String colName) {
return vecMap.get(colName);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/FrameCreator.java
|
package water.fvec;
import hex.CreateFrame;
import jsr166y.CountedCompleter;
import water.*;
import water.util.ArrayUtils;
import water.util.FrameUtils;
import java.util.Arrays;
import java.util.Random;
import java.util.UUID;
import static water.fvec.Vec.makeCon;
import water.util.RandomUtils;
/**
* Helper to make up a Frame from scratch, with random content
*/
public class FrameCreator extends H2O.H2OCountedCompleter {
transient Vec _v;
final private CreateFrame _createFrame;
private int[] _cat_cols;
private int[] _int_cols;
private int[] _real_cols;
private int[] _bin_cols;
private int[] _time_cols;
private int[] _string_cols;
private String[][] _domain;
private Frame _out;
public FrameCreator(CreateFrame createFrame) {
_createFrame = createFrame;
int[] idx = _createFrame.has_response ? ArrayUtils.seq(1, _createFrame.cols + 1) : ArrayUtils.seq(0, _createFrame.cols);
int[] shuffled_idx = new int[idx.length];
ArrayUtils.shuffleArray(idx, idx.length, shuffled_idx, _createFrame.seed_for_column_types, 0);
// Sometimes the client requests, say, a categorical fraction of 0.3. By the time this number arrives here,
// it may have degraded to something like 0.299999999997. If we simply multiplied by the number of columns
// (say 10000) and took the integer part, we'd get only 2999 columns -- not what the client expects. This is
// why we add 0.1 to each count before truncating to an integer.
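// Worked example of the rounding fix (numbers assumed): with cols = 10000 and a fraction that
// arrived as 0.299999999997, (int)(0.299999999997 * 10000) == 2999, whereas
// (int)(0.299999999997 * 10000 + 0.1) == 3000, matching the client's intent.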
int catcols = (int)(_createFrame.categorical_fraction * _createFrame.cols + 0.1);
int intcols = (int)(_createFrame.integer_fraction * _createFrame.cols + 0.1);
int bincols = (int)(_createFrame.binary_fraction * _createFrame.cols + 0.1);
int timecols = (int)(_createFrame.time_fraction * _createFrame.cols + 0.1);
int stringcols = (int)(_createFrame.string_fraction * _createFrame.cols + 0.1);
int realcols = _createFrame.cols - catcols - intcols - bincols - timecols - stringcols;
// At this point we might have accidentally allocated too many columns; in such a case, adjust the counts.
if (realcols < 0 && catcols > 0) { catcols--; realcols++; }
if (realcols < 0 && intcols > 0) { intcols--; realcols++; }
if (realcols < 0 && bincols > 0) { bincols--; realcols++; }
if (realcols < 0 && timecols > 0) { timecols--; realcols++; }
if (realcols < 0 && stringcols > 0) { stringcols--; realcols++; }
assert(catcols >= 0);
assert(intcols >= 0);
assert(bincols >= 0);
assert(realcols >= 0);
assert(timecols >= 0);
assert(stringcols >= 0);
_cat_cols = Arrays.copyOfRange(shuffled_idx, 0, catcols);
_int_cols = Arrays.copyOfRange(shuffled_idx, catcols, catcols+intcols);
_real_cols = Arrays.copyOfRange(shuffled_idx, catcols+intcols, catcols+intcols+realcols);
_bin_cols = Arrays.copyOfRange(shuffled_idx, catcols+intcols+realcols, catcols+intcols+realcols+bincols);
_time_cols = Arrays.copyOfRange(shuffled_idx, catcols+intcols+realcols+bincols, catcols+intcols+realcols+bincols+timecols);
_string_cols = Arrays.copyOfRange(shuffled_idx, catcols+intcols+realcols+bincols+timecols, catcols+intcols+realcols+bincols+timecols+stringcols);
// create domains for categorical variables
_domain = new String[_createFrame.cols + (_createFrame.has_response ? 1 : 0)][];
if(createFrame.randomize) {
if(_createFrame.has_response) {
assert (_createFrame.response_factors >= 1);
_domain[0] = _createFrame.response_factors == 1 ? null : new String[_createFrame.response_factors];
if (_domain[0] != null) {
for (int i = 0; i < _domain[0].length; ++i) {
_domain[0][i] = Integer.toString(i);
}
}
}
for (int c : _cat_cols) {
_domain[c] = new String[_createFrame.factors];
for (int i = 0; i < _createFrame.factors; ++i) {
_domain[c][i] = "c" + c + ".l" + i;
}
}
}
// All columns together fill one chunk
final int rows_per_chunk = FileVec.calcOptimalChunkSize(
(int)((float)(catcols+intcols)*_createFrame.rows*4 //4 bytes for categoricals and integers
+(float)bincols *_createFrame.rows*1*_createFrame.binary_ones_fraction //sparse uses a fraction of one byte (or even less)
+(float)(realcols+timecols+stringcols) *_createFrame.rows*8), //8 bytes for real and time (long) values
_createFrame.cols, _createFrame.cols*4, H2ORuntime.availableProcessors(), H2O.getCloudSize(), false, true);
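// Rough sketch of the byte estimate above (counts assumed): e.g. 5 integer columns and 3 real
// columns over 1,000 rows contribute 5*1000*4 + 3*1000*8 = 44,000 bytes to the heuristic.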
_v = makeCon(_createFrame.value, _createFrame.rows, (int)Math.ceil(Math.log1p(rows_per_chunk)),false);
}
public int nChunks() { return _v.nChunks(); }
@Override public void compute2() {
int totcols = _createFrame.cols + (_createFrame.has_response ? 1 : 0);
Vec[] vecs = new Vec[totcols];
if(_createFrame.randomize) {
byte[] types = new byte[vecs.length];
for (int i : _cat_cols) types[i] = Vec.T_CAT;
for (int i : _bin_cols) types[i] = Vec.T_NUM;
for (int i : _int_cols) types[i] = Vec.T_NUM;
for (int i : _real_cols) types[i] = Vec.T_NUM;
for (int i : _time_cols) types[i] = Vec.T_TIME;
for (int i : _string_cols) types[i] = Vec.T_STR;
if (_createFrame.has_response) {
types[0] = _createFrame.response_factors == 1 ? Vec.T_NUM : Vec.T_CAT;
}
vecs = _v.makeZeros(totcols, _domain, types);
} else {
for (int i = 0; i < vecs.length; ++i)
vecs[i] = _v.makeCon(_createFrame.value);
}
_v.remove();
_v=null;
String[] names = new String[vecs.length];
if(_createFrame.has_response) {
names[0] = "response";
for (int i = 1; i < vecs.length; i++) names[i] = "C" + i;
} else {
for (int i = 0; i < vecs.length; i++) names[i] = "C" + (i+1);
}
_out = new Frame(_createFrame._job._result, names, vecs);
assert _out.numRows() == _createFrame.rows;
assert _out.numCols() == totcols;
_out.delete_and_lock(_createFrame._job._key);
// fill with random values
new FrameRandomizer(_createFrame, _cat_cols, _int_cols, _real_cols, _bin_cols, _time_cols, _string_cols).doAll(_out);
// Overwrite a fraction of the values with NAs
FrameUtils.MissingInserter mi = new FrameUtils.MissingInserter(_createFrame._job._result, _createFrame.seed, _createFrame.missing_fraction);
mi.execImpl().get();
tryComplete();
}
@Override public void onCompletion(CountedCompleter caller){
_out.update(_createFrame._job._key);
_out.unlock(_createFrame._job._key);
}
private static class FrameRandomizer extends MRTask<FrameRandomizer> {
final private CreateFrame _createFrame;
final private int[] _cat_cols;
final private int[] _int_cols;
final private int[] _real_cols;
final private int[] _bin_cols;
final private int[] _time_cols;
final private int[] _string_cols;
public FrameRandomizer(CreateFrame createFrame, int[] cat_cols, int[] int_cols, int[] real_cols, int[] bin_cols, int[] time_cols, int[] string_cols){
_createFrame = createFrame;
_cat_cols = cat_cols;
_int_cols = int_cols;
_real_cols = real_cols;
_bin_cols = bin_cols;
_time_cols = time_cols;
_string_cols = string_cols;
}
// Row+col-dependent RNG for reproducibility across different numbers of VMs, chunks, etc.
void setSeed(Random rng, int col, long row) {
rng.setSeed(_createFrame.seed + _createFrame.cols * row + col);
rng.setSeed(rng.nextLong());
}
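// Seeding sketch (values assumed): with seed = 42, cols = 10, row = 5, col = 3, the per-cell seed
// is 42 + 10*5 + 3 = 95; re-seeding from rng.nextLong() then decorrelates neighboring cells whose
// raw seeds differ by only 1.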
@Override public void map (Chunk[]cs){
Job<Frame> job = _createFrame._job;
if (job.stop_requested()) return;
if (!_createFrame.randomize) return;
final Random rng = RandomUtils.getRNG(new Random().nextLong());
// response
if(_createFrame.has_response) {
for (int r = 0; r < cs[0]._len; r++) {
setSeed(rng, 0, cs[0]._start + r);
if (_createFrame.response_factors > 1)
cs[0].set(r, (int) (rng.nextDouble() * _createFrame.response_factors)); //classification
else if (_createFrame.positive_response)
cs[0].set(r, _createFrame.real_range * rng.nextDouble()); //regression with positive response
else
cs[0].set(r, _createFrame.real_range * (1 - 2 * rng.nextDouble())); //regression
}
}
job.update(1);
for (int c : _cat_cols) {
for (int r = 0; r < cs[c]._len; r++) {
setSeed(rng, c, cs[c]._start + r);
cs[c].set(r, (int)(rng.nextDouble() * _createFrame.factors));
}
}
job.update(1);
for (int c : _int_cols) {
for (int r = 0; r < cs[c]._len; r++) {
setSeed(rng, c, cs[c]._start + r);
cs[c].set(r, -_createFrame.integer_range + (long)(rng.nextDouble()*(2*_createFrame.integer_range+1)));
}
}
job.update(1);
for (int c : _real_cols) {
for (int r = 0; r < cs[c]._len; r++) {
setSeed(rng, c, cs[c]._start + r);
cs[c].set(r, _createFrame.real_range * (1 - 2 * rng.nextDouble()));
}
}
job.update(1);
for (int c : _bin_cols) {
for (int r = 0; r < cs[c]._len; r++) {
setSeed(rng, c, cs[c]._start + r);
cs[c].set(r, rng.nextFloat() > _createFrame.binary_ones_fraction ? 0 : 1);
}
}
job.update(1);
for (int c : _time_cols) {
for (int r = 0; r < cs[c]._len; r++) {
setSeed(rng, c, cs[c]._start + r);
cs[c].set(r, Math.abs(rng.nextLong() % (50L*365*24*3600*1000))); //make a random moment in time between 1970 and 2020
}
}
job.update(1);
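// Each string cell below becomes an 8-character random string of uppercase letters 'A'..'Y'
// (65 + nextInt(25) never produces 'Z').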
byte[] by = new byte[8];
for (int c : _string_cols) {
for (int r = 0; r < cs[c]._len; r++) {
setSeed(rng, c, cs[c]._start + r);
for (int i=0;i<by.length;++i)
by[i] = (byte)(65+rng.nextInt(25));
cs[c].set(r, new String(by));
}
}
job.update(1);
}
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/GcsFileVec.java
|
package water.fvec;
import water.DKV;
import water.Futures;
import water.Key;
import water.Value;
public class GcsFileVec extends FileVec {
private GcsFileVec(Key key, long len) {
super(key, len, Value.GCS);
}
public static Key make(String path, long size) {
Futures fs = new Futures();
Key<Frame> key = make(path, size, fs);
fs.blockForPending();
return key;
}
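// Hypothetical usage (bucket path and byte size assumed):
//   Key k = GcsFileVec.make("gs://my-bucket/data.csv", 1048576L); // registers frame + file vec in the DKV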
public static Key<Frame> make(String path, long size, Futures fs) {
Key<Frame> frameKey = Key.make(path);
Key<Vec> vecKey = Vec.newKey(frameKey);
new Frame(frameKey).delete_and_lock();
// Insert the top-level FileVec key into the store
Vec vec = new GcsFileVec(vecKey, size);
DKV.put(vecKey, vec, fs);
Frame frame = new Frame(frameKey, new String[]{path}, new Vec[]{vec});
frame.update();
frame.unlock();
return frameKey;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/HDFSFileVec.java
|
package water.fvec;
import water.*;
/**
* Vec representation of file stored on HDFS.
*/
public final class HDFSFileVec extends FileVec {
private HDFSFileVec(Key key, long len) {
super(key, len, Value.HDFS);
}
public static Key make(String path, long size) {
Futures fs = new Futures();
Key key = make(path, size, fs);
fs.blockForPending();
return key;
}
public static Key make(String path, long size, Futures fs) {
Key k = Key.make(path);
Key k2 = Vec.newKey(k);
new Frame(k).delete_and_lock();
// Insert the top-level FileVec key into the store
Vec v = new HDFSFileVec(k2,size);
DKV.put(k2, v, fs);
Frame fr = new Frame(k,new String[]{path},new Vec[]{v});
fr.update();
fr.unlock();
return k;
}
@Override
public byte[] getFirstBytes() {
return FileVecUtils.getFirstBytes(this);
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/HTTPFileVec.java
|
package water.fvec;
import water.*;
/**
* FileVec backed by an HTTP/HTTPS data source
*/
public class HTTPFileVec extends FileVec {
private HTTPFileVec(Key key, long len) {
super(key, len, Value.HTTP);
}
public static Key make(String path, long size) {
Futures fs = new Futures();
Key key = make(path, size, fs);
fs.blockForPending();
return key;
}
public static Key make(String path, long size, Futures fs) {
Key k = Key.make(path);
Key k2 = Vec.newKey(k);
new Frame(k).delete_and_lock();
// Insert the top-level FileVec key into the store
Vec v = new HTTPFileVec(k2, size);
DKV.put(k2, v, fs);
Frame fr = new Frame(k, new String[]{path}, new Vec[]{v});
fr.update();
fr.unlock();
return k;
}
}
|
0
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water
|
java-sources/ai/h2o/h2o-core/3.46.0.7/water/fvec/InteractionWrappedVec.java
|
package water.fvec;
import water.*;
import water.util.ArrayUtils;
import water.util.IcedHashMap;
import water.util.IcedLong;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
/**
*
* This class represents an interaction between two Vec instances.
*
* Another virtual Vec (akin to TransformWrappedVec) used to represent a
* never-materialized interaction between two columns.
*
 * There are 3 types of interactions to consider: Num-Num, Num-Enum, and Enum-Enum.
 * Creation of these Vec instances is cheap except for the Enum-Enum case, since the
 * co-occurrence of levels between any two categorical columns is not known a priori.
 * In that specific case, an MRTask is run to collect the combined domain.
*
*
* @author spencer
*/
public class InteractionWrappedVec extends WrappedVec {
private final Key<Vec> _masterVecKey1;
private final Key<Vec> _masterVecKey2;
private transient Vec _masterVec1;
private transient Vec _masterVec2;
private String[] _v1Domain;
private String[] _v2Domain;
public boolean _useAllFactorLevels;
public boolean _skipMissing;
public boolean _standardize;
private long[] _bins;
private String[] _missingDomains;
public transient GetMeanTask t;
private String[] _v1Enums; // only interact these enums from vec 1
private String[] _v2Enums; // only interact these enums from vec 2
public InteractionWrappedVec(Key<Vec> key, int rowLayout, String[] vec1DomainLimit, String[] vec2DomainLimit, boolean useAllFactorLevels, boolean skipMissing, boolean standardize, Key<Vec> masterVecKey1, Key<Vec> masterVecKey2) {
super(key, rowLayout, null);
_masterVecKey1=masterVecKey1;
_masterVecKey2=masterVecKey2;
_v1Enums=vec1DomainLimit;
_v2Enums=vec2DomainLimit;
_masterVec1=_masterVecKey1.get();
_masterVec2=_masterVecKey2.get();
_useAllFactorLevels=useAllFactorLevels;
_skipMissing=skipMissing;
setupDomain(_standardize=standardize); // performs MRTask if both vecs are categorical!!
DKV.put(this);
if (t != null) t.doAll(this);
}
public String[] v1Domain() { return _v1Enums==null?_v1Domain:_v1Enums; }
public String[] v2Domain() { return _v2Enums==null?_v2Domain:_v2Enums; }
@Override public String[] domain() { // always returns the "correct" domains, so accidental mixup of domain vs domains is ok
String[] res1 = v1Domain();
String[] res2 = v2Domain();
return res1 == null? res2 : res2 == null? res1 : super.domain();
}
public Vec v1() { return _masterVec1==null?(_masterVec1=_masterVecKey1.get()):_masterVec1; }
public Vec v2() { return _masterVec2==null?(_masterVec2=_masterVecKey2.get()):_masterVec2; }
/**
* Obtain the length of the expanded (i.e. one-hot expanded) interaction column.
*/
public int expandedLength() {
if (isNumericInteraction()) return 1; // 2 numeric columns -> 1 column
else if( isCategorical() ) return domain().length; // 2 cat -> domains (limited) length
else if( _v1Domain!=null ) return _v1Enums==null?_v1Domain.length - (_useAllFactorLevels?0:1):_v1Enums.length-(_useAllFactorLevels?0:1);
else return _v2Enums==null?_v2Domain.length - (_useAllFactorLevels?0:1):_v2Enums.length - (_useAllFactorLevels?0:1);
}
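// Worked examples (domain sizes assumed): num x num -> 1 column; cat x cat with a combined domain
// of 6 observed pairs -> 6; cat (5 levels) x num with _useAllFactorLevels == false -> 5 - 1 = 4.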
public double[] getMeans() {
if( null!=_v1Domain && null!=_v2Domain ) {
double[] res = new double[domain().length];
Arrays.fill(res,Double.NaN);
return res;
} else if( null==_v1Domain && null==_v2Domain ) return new double[]{super.mean()};
return new GetMeanTask(v1Domain()==null?v2Domain().length:v1Domain().length).doAll(this)._d;
}
public double getSub(int i) {
if (t == null) return mean();
return t._d[i];
}
public double getMul(int i) {
double sigma = (t == null)? sigma() : t._sigma[i];
return sigma == 0? 1.0 : 1.0/sigma;
}
public double getSigma(int i) {
return (t == null)? sigma() : t._sigma[i];
}
private static class GetMeanTask extends MRTask<GetMeanTask> {
private double[] _d; // means, NA skipped
private double[] _sigma; // sds, NA skipped
private long _rows;
private final int _len;
GetMeanTask(int len) { _len=len; }
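// Reading of the algorithm (annotation added, not original commentary): map() runs a Welford-style
// streaming mean/variance update per expanded domain level, reduce() merges partial results with
// the standard pairwise-combination formula, and postGlobal() turns the accumulated M2 sums into
// sample standard deviations.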
@Override public void map(Chunk c) {
_d = new double[_len];
_sigma = new double[_len];
InteractionWrappedChunk cc = (InteractionWrappedChunk)c;
Chunk lC = cc._c[0]; Chunk rC = cc._c[1]; // get the "left" chk and the "rite" chk
if( cc._c2IsCat ) { lC=rC; rC=cc._c[0]; } // left is always cat
long rows=0;
for(int rid=0;rid<c._len;++rid) {
if( lC.isNA(rid) || rC.isNA(rid) ) continue; // skipmissing
int idx = (int)lC.at8(rid);
rows++;
for(int i=0;i<_d.length;++i) {
double x = i==idx?rC.atd(rid):0;
double delta = x - _d[i];
_d[i] += delta / rows;
_sigma[i] += delta * (x - _d[i]);
}
}
_rows=rows;
}
@Override public void reduce(GetMeanTask t) {
if (_rows == 0) { _d = t._d; _sigma = t._sigma; }
else if(t._rows != 0){
for(int i=0;i<_d.length;++i) {
double delta = _d[i] - t._d[i];
_d[i] = (_d[i]* _rows + t._d[i] * t._rows) / (_rows + t._rows);
_sigma[i] += t._sigma[i] + delta * delta * _rows * t._rows / (_rows + t._rows);
}
}
_rows += t._rows;
}
@Override public void postGlobal() {
for(int i=0;i<_sigma.length;++i )
_sigma[i] = Math.sqrt(_sigma[i]/(_rows-1));
}
}
public boolean isNumericInteraction() {
return null==v1Domain() && null==v2Domain();
}
@Override public double mean() {
if (null==t && isNumericInteraction())
return super.mean();
return 0;
}
@Override public double sigma() {
if (null==t && isNumericInteraction())
return super.sigma();
return 1;
}
@Override public int mode() {
if( !isCategorical() ) {
if (v1Domain() != null) {
assert v2Domain() == null; // Only one of them can have a domain (it would be categorical otherwise)
return _masterVec1.mode();
}
if (v2Domain() != null) {
assert v1Domain() == null; // Only one of them can have a domain (it would be categorical otherwise)
return _masterVec2.mode();
}
// Numerical interactions don't have a mode
throw H2O.unimpl();
}
return ArrayUtils.maxIndex(_bins);
}
public long[] getBins() { return _bins; }
public String[] missingDomains() { return _missingDomains; }
private void setupDomain(boolean standardize) {
if( _masterVec1.isCategorical() || _masterVec2.isCategorical() ) {
_v1Domain = _masterVec1.domain();
_v2Domain = _masterVec2.domain();
if( _v1Domain!=null && _v2Domain!=null ) {
CombineDomainTask t =new CombineDomainTask(_v1Domain, _v2Domain,_v1Enums,_v2Enums, _useAllFactorLevels,_skipMissing).doAll(_masterVec1, _masterVec2);
setDomain(t._dom);
_bins=t._bins;
_type = Vec.T_CAT; // vec is T_NUM up to this point
_missingDomains=t._missingDom;
} else
t = standardize?new GetMeanTask(v1Domain()==null?v2Domain().length:v1Domain().length):null;
}
if( null==_v1Domain && null==_v2Domain ) _useAllFactorLevels=true; // for a purely numeric interaction, force this flag on to simplify downstream logic
}
private static class CombineDomainTask extends MRTask<CombineDomainTask> {
private String[] _dom; // out, sorted (uses Arrays.sort)
private long[] _bins; // out, sorted according to _dom
private String[] _missingDom; // out, the missing levels due to !_useAllLvls
private final String _left[]; // in
private final String _rite[]; // in
private final String _leftLimit[]; // in
private final String _riteLimit[]; // in
private final boolean _useAllLvls; // in
private final boolean _skipMissing; // in
private IcedHashMap<String, IcedLong> _perChkMap;
private IcedHashMap<String, String> _perChkMapMissing; // skipped cats
CombineDomainTask(String[] left, String[] rite, String[] leftLimit, String[] riteLimit, boolean useAllLvls, boolean skipMissing) {
_left = left;
_rite = rite;
_leftLimit = leftLimit;
_riteLimit = riteLimit;
_useAllLvls = useAllLvls;
_skipMissing = skipMissing;
}
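// Sketch of the output (levels assumed): left domain {"a","b"} and rite domain {"x","y"} produce
// observed co-occurrence keys such as "a_x" or "b_y"; _bins records each observed pair's count.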
@Override public void map(Chunk[] c) {
_perChkMap = new IcedHashMap<>();
if( !_useAllLvls ) _perChkMapMissing = new IcedHashMap<>();
Chunk left = c[0];
Chunk rite = c[1];
String k;
HashSet<String> A = _leftLimit == null ? null : new HashSet<String>();
HashSet<String> B = _riteLimit == null ? null : new HashSet<String>();
if (A != null) Collections.addAll(A, _leftLimit);
if (B != null) Collections.addAll(B, _riteLimit);
int lval,rval;
String l,r;
boolean leftIsNA, riteIsNA;
for (int i = 0; i < left._len; ++i)
if( (!((leftIsNA=left.isNA(i)) | (riteIsNA=rite.isNA(i)))) ) {
lval = (int)left.at8(i);
rval = (int)rite.at8(i);
if( !_useAllLvls && ( 0==lval || 0==rval )) { // when _useAllLvls==false, level 0 is the dropped base level; record and skip such pairs
_perChkMapMissing.putIfAbsent(_left[lval] + "_" + _rite[rval],"");
continue;
}
l = _left[lval];
r = _rite[rval];
if (A != null && !A.contains(l)) continue;
if (B != null && !B.contains(r)) continue;
if( null!=_perChkMap.putIfAbsent((k = l + "_" + r), new IcedLong(1)) )
_perChkMap.get(k)._val++;
} else if( !_skipMissing ) {
if( !(leftIsNA && riteIsNA) ) { // not both missing
if( leftIsNA ) {
r = _rite[rval=(int)rite.at8(i)];
if( !_useAllLvls && 0==rval ) {
_perChkMapMissing.putIfAbsent("NA_" + _rite[rval],"");
continue;
}
if( B!=null && !B.contains(r) ) continue;
if( null!=_perChkMap.putIfAbsent((k="NA_"+r), new IcedLong(1)) )
_perChkMap.get(k)._val++;
} else {
l = _left[lval=(int)left.at8(i)];
if( !_useAllLvls && 0==lval ) {
_perChkMapMissing.putIfAbsent(_left[lval] + "_NA","");
continue;
}
if( null!=A && !A.contains(l) ) continue;
if( null!=_perChkMap.putIfAbsent((k=l+"_NA"), new IcedLong(1)) )
_perChkMap.get(k)._val++;
}
}
}
}
@Override public void reduce(CombineDomainTask t) {
for (Map.Entry<String, IcedLong> e : t._perChkMap.entrySet()) {
IcedLong i = _perChkMap.get(e.getKey());
if (i != null) i._val += e.getValue()._val;
else _perChkMap.put(e.getKey(), e.getValue());
}
t._perChkMap = null;
if(_perChkMapMissing==null && t._perChkMapMissing!=null ) {
_perChkMapMissing=new IcedHashMap<>();
_perChkMapMissing.putAll(t._perChkMapMissing);
}
else if( _perChkMapMissing!=null && t._perChkMapMissing!=null ) {
for (String s: t._perChkMapMissing.keySet())
_perChkMapMissing.putIfAbsent(s,"");
}
t._perChkMapMissing=null;
}
@Override public void postGlobal() {
Arrays.sort(_dom = _perChkMap.keySet().toArray(new String[_perChkMap.size()]));
int idx = 0;
_bins = new long[_perChkMap.size()];
for(String s:_dom)
_bins[idx++] = _perChkMap.get(s)._val;
if( _missingDom!=null )
Arrays.sort(_missingDom = _perChkMapMissing.keySet().toArray(new String[_perChkMapMissing.size()]));
}
}
@Override public Chunk chunkForChunkIdx(int cidx) {
Chunk[] cs = new Chunk[2];
cs[0] = (_masterVec1!=null?_masterVec1: (_masterVec1=_masterVecKey1.get())).chunkForChunkIdx(cidx);
cs[1] = (_masterVec2!=null?_masterVec2: (_masterVec2=_masterVecKey2.get())).chunkForChunkIdx(cidx);
return new InteractionWrappedChunk(this, cs);
}
@Override public Vec doCopy() {
InteractionWrappedVec v = new InteractionWrappedVec(group().addVec(), _rowLayout,_v1Enums,_v2Enums, _useAllFactorLevels, _skipMissing, _standardize, _masterVecKey1, _masterVecKey2);
if( null!=domain() ) v.setDomain(domain());
if( null!=_v1Domain ) v._v1Domain=_v1Domain.clone();
if( null!=_v2Domain ) v._v2Domain=_v2Domain.clone();
return v;
}
@Override protected AutoBuffer writeAll_impl(AutoBuffer ab) {
ab.putAStr(_v1Domain);
ab.putAStr(_v2Domain);
ab.putZ(_useAllFactorLevels);
ab.putZ(_skipMissing);
ab.putZ(_standardize);
ab.putAStr(_missingDomains);
ab.putAStr(_v1Enums);
ab.putAStr(_v2Enums);
return super.writeAll_impl(ab);
}
@Override protected Keyed readAll_impl(AutoBuffer ab, Futures fs) {
_v1Domain=ab.getAStr();
_v2Domain=ab.getAStr();
_useAllFactorLevels=ab.getZ();
_skipMissing=ab.getZ();
_standardize=ab.getZ();
_missingDomains=ab.getAStr();
_v1Enums=ab.getAStr();
_v2Enums=ab.getAStr();
return super.readAll_impl(ab,fs);
}
public static class InteractionWrappedChunk extends Chunk {
public final transient Chunk[] _c;
public final boolean _c1IsCat; // left chunk is categorical
public final boolean _c2IsCat; // rite chunk is categorical
public final boolean _isCat; // this vec is categorical
InteractionWrappedChunk(InteractionWrappedVec transformWrappedVec, Chunk[] c) {
// set all the chunk fields
_c = c; set_len(_c[0]._len);
_start = _c[0]._start; _vec = transformWrappedVec; _cidx = _c[0]._cidx;
_c1IsCat=_c[0]._vec.isCategorical();
_c2IsCat=_c[1]._vec.isCategorical();
_isCat = _vec.isCategorical();
}
@Override public double atd_impl(int idx) {
if( _isCat ) {
if( isNA_impl(idx) ) return Double.NaN;
int val = Arrays.binarySearch(_vec.domain(), getKey(idx));
return (val < 0) ? -1 : val; // pairs outside the combined domain map to -1
}
return ( _c1IsCat?1: _c[0].atd(idx) ) * ( _c2IsCat?1: _c[1].atd(idx) );
}
@Override public long at8_impl(int idx) {
if (_isCat) {
long val = Arrays.binarySearch(_vec.domain(), getKey(idx)); // can give bad value like -3
return ((val < 0)?-1:val); // illegal domain should always return -1
} else
return ( _c1IsCat?1:_c[0].at8(idx) ) * ( _c2IsCat?1:_c[1].at8(idx) );
}
private String getKey(int idx) { return _c[0]._vec.domain()[(int)_c[0].at8(idx)] + "_" + _c[1]._vec.domain()[(int)_c[1].at8(idx)]; }
@Override public boolean isNA_impl(int idx) { return _c[0].isNA(idx) || _c[1].isNA(idx); } // NA if either master vec is missing at this row
// This chunk is a read-only view: the setters below are no-ops that report failure.
@Override public boolean set_impl(int idx, long l) { return false; }
@Override public boolean set_impl(int idx, double d) { return false; }
@Override public boolean set_impl(int idx, float f) { return false; }
@Override public boolean setNA_impl(int idx) { return false; }
@Override public ChunkVisitor processRows(ChunkVisitor nc, int from, int to){
for(int i = from; i < to; i++)
nc.addValue(atd(i));
return nc;
}
@Override public ChunkVisitor processRows(ChunkVisitor nc, int... rows){
for(int i:rows)
nc.addValue(atd(i));
return nc;
}
@Override protected final void initFromBytes () { throw water.H2O.fail(); }
}
}
|